| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
| 8 #include "src/code-stubs.h" | 8 #include "src/code-stubs.h" |
| 9 #include "src/hydrogen-osr.h" | 9 #include "src/hydrogen-osr.h" |
| 10 #include "src/ic/ic.h" | 10 #include "src/ic/ic.h" |
| (...skipping 1049 matching lines...) | |
| 1060 HMod* hmod = instr->hydrogen(); | 1060 HMod* hmod = instr->hydrogen(); |
| 1061 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1061 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1062 Label dividend_is_not_negative, done; | 1062 Label dividend_is_not_negative, done; |
| 1063 | 1063 |
| 1064 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1064 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1065 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); | 1065 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); |
| 1066 // Note: The code below even works when right contains kMinInt. | 1066 // Note: The code below even works when right contains kMinInt. |
| 1067 __ dsubu(dividend, zero_reg, dividend); | 1067 __ dsubu(dividend, zero_reg, dividend); |
| 1068 __ And(dividend, dividend, Operand(mask)); | 1068 __ And(dividend, dividend, Operand(mask)); |
| 1069 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1069 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1070 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1070 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1071 Operand(zero_reg)); | |
| 1072 } | 1071 } |
| 1073 __ Branch(USE_DELAY_SLOT, &done); | 1072 __ Branch(USE_DELAY_SLOT, &done); |
| 1074 __ dsubu(dividend, zero_reg, dividend); | 1073 __ dsubu(dividend, zero_reg, dividend); |
| 1075 } | 1074 } |
| 1076 | 1075 |
| 1077 __ bind(&dividend_is_not_negative); | 1076 __ bind(&dividend_is_not_negative); |
| 1078 __ And(dividend, dividend, Operand(mask)); | 1077 __ And(dividend, dividend, Operand(mask)); |
| 1079 __ bind(&done); | 1078 __ bind(&done); |
| 1080 } | 1079 } |
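The mask trick in DoModByPowerOf2I above is easiest to see outside of assembly. The sketch below is a hypothetical C++ stand-in for the generated MIPS64 sequence, assuming two's-complement int32 arithmetic and not exercising dividend == kMinInt, where plain C++ negation would be undefined; the helper name is invented.

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the mask-based modulus for a power-of-two divisor
// (hypothetical helper, not the V8 code itself): for |divisor| == 2^k the
// remainder has the magnitude of |n| & (2^k - 1) and the sign of n.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  // Same expression as the generated code: |divisor| - 1, written so it
  // cannot overflow even when divisor == kMinInt.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // Negate, mask, negate back. A zero result here is the -0 case the
    // generated code deoptimizes on when kBailoutOnMinusZero is set.
    return -((-dividend) & mask);
  }
  return dividend & mask;
}

int main() {
  for (int32_t n : {7, -7, 8, -8, 0}) {
    assert(ModByPowerOf2(n, 8) == n % 8);
    assert(ModByPowerOf2(n, -8) == n % -8);  // divisor sign is irrelevant
  }
  return 0;
}
```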
| 1081 | 1080 |
| 1082 | 1081 |
| 1083 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1082 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1084 Register dividend = ToRegister(instr->dividend()); | 1083 Register dividend = ToRegister(instr->dividend()); |
| 1085 int32_t divisor = instr->divisor(); | 1084 int32_t divisor = instr->divisor(); |
| 1086 Register result = ToRegister(instr->result()); | 1085 Register result = ToRegister(instr->result()); |
| 1087 DCHECK(!dividend.is(result)); | 1086 DCHECK(!dividend.is(result)); |
| 1088 | 1087 |
| 1089 if (divisor == 0) { | 1088 if (divisor == 0) { |
| 1090 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1089 DeoptimizeIf(al, instr, "division by zero"); |
| 1091 return; | 1090 return; |
| 1092 } | 1091 } |
| 1093 | 1092 |
| 1094 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1093 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1095 __ Dmul(result, result, Operand(Abs(divisor))); | 1094 __ Dmul(result, result, Operand(Abs(divisor))); |
| 1096 __ Dsubu(result, dividend, Operand(result)); | 1095 __ Dsubu(result, dividend, Operand(result)); |
| 1097 | 1096 |
| 1098 // Check for negative zero. | 1097 // Check for negative zero. |
| 1099 HMod* hmod = instr->hydrogen(); | 1098 HMod* hmod = instr->hydrogen(); |
| 1100 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1099 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1101 Label remainder_not_zero; | 1100 Label remainder_not_zero; |
| 1102 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); | 1101 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); |
| 1103 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend, | 1102 DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1104 Operand(zero_reg)); | |
| 1105 __ bind(&remainder_not_zero); | 1103 __ bind(&remainder_not_zero); |
| 1106 } | 1104 } |
| 1107 } | 1105 } |
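For DoModByConstI the essence is remainder = dividend - quotient * |divisor|, with the quotient coming from a truncating division. The sketch below uses plain `/` where the generated code uses the TruncatingDiv magic-number sequence; the helper name is invented.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>

// Sketch of mod-by-constant as truncating division + multiply + subtract.
int32_t ModByConst(int32_t dividend, int32_t divisor) {
  assert(divisor != 0);  // the generated code deoptimizes unconditionally here
  int32_t abs_divisor = std::abs(divisor);
  int32_t quotient = dividend / abs_divisor;               // TruncatingDiv
  int32_t remainder = dividend - quotient * abs_divisor;   // Dmul + Dsubu
  // remainder == 0 with dividend < 0 is the -0 case guarded by the
  // kBailoutOnMinusZero deopt above.
  return remainder;
}

int main() {
  assert(ModByConst(10, 3) == 1);
  assert(ModByConst(-10, 3) == -1);   // sign follows the dividend
  assert(ModByConst(-10, -3) == -1);  // the divisor's sign does not matter
  return 0;
}
```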
| 1108 | 1106 |
| 1109 | 1107 |
| 1110 void LCodeGen::DoModI(LModI* instr) { | 1108 void LCodeGen::DoModI(LModI* instr) { |
| 1111 HMod* hmod = instr->hydrogen(); | 1109 HMod* hmod = instr->hydrogen(); |
| 1112 const Register left_reg = ToRegister(instr->left()); | 1110 const Register left_reg = ToRegister(instr->left()); |
| 1113 const Register right_reg = ToRegister(instr->right()); | 1111 const Register right_reg = ToRegister(instr->right()); |
| 1114 const Register result_reg = ToRegister(instr->result()); | 1112 const Register result_reg = ToRegister(instr->result()); |
| 1115 | 1113 |
| 1116 // div runs in the background while we check for special cases. | 1114 // div runs in the background while we check for special cases. |
| 1117 __ Dmod(result_reg, left_reg, right_reg); | 1115 __ Dmod(result_reg, left_reg, right_reg); |
| 1118 | 1116 |
| 1119 Label done; | 1117 Label done; |
| 1120 // Check for x % 0, we have to deopt in this case because we can't return a | 1118 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1121 // NaN. | 1119 // NaN. |
| 1122 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1120 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1123 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg, | 1121 DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg)); |
| 1124 Operand(zero_reg)); | |
| 1125 } | 1122 } |
| 1126 | 1123 |
| 1127 // Check for kMinInt % -1, div will return kMinInt, which is not what we | 1124 // Check for kMinInt % -1, div will return kMinInt, which is not what we |
| 1128 // want. We have to deopt if we care about -0, because we can't return that. | 1125 // want. We have to deopt if we care about -0, because we can't return that. |
| 1129 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1126 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1130 Label no_overflow_possible; | 1127 Label no_overflow_possible; |
| 1131 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); | 1128 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); |
| 1132 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1129 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1133 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1)); | 1130 DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1)); |
| 1134 } else { | 1131 } else { |
| 1135 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); | 1132 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); |
| 1136 __ Branch(USE_DELAY_SLOT, &done); | 1133 __ Branch(USE_DELAY_SLOT, &done); |
| 1137 __ mov(result_reg, zero_reg); | 1134 __ mov(result_reg, zero_reg); |
| 1138 } | 1135 } |
| 1139 __ bind(&no_overflow_possible); | 1136 __ bind(&no_overflow_possible); |
| 1140 } | 1137 } |
| 1141 | 1138 |
| 1142 // If we care about -0, test if the dividend is <0 and the result is 0. | 1139 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1143 __ Branch(&done, ge, left_reg, Operand(zero_reg)); | 1140 __ Branch(&done, ge, left_reg, Operand(zero_reg)); |
| 1144 | 1141 |
| 1145 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1142 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1146 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg, | 1143 DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg)); |
| 1147 Operand(zero_reg)); | |
| 1148 } | 1144 } |
| 1149 __ bind(&done); | 1145 __ bind(&done); |
| 1150 } | 1146 } |
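The special cases in DoModI exist because JavaScript's `%` can produce values an int32 register cannot hold. A rough model of the checks, with an invented Int32Mod helper and std::optional standing in for a deoptimization:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Rough model of the guards in DoModI: every nullopt below corresponds to a
// DeoptimizeIf in the generated code, because the true JS result (NaN or -0)
// is not representable as an int32.
std::optional<int32_t> Int32Mod(int32_t left, int32_t right,
                                bool bailout_on_minus_zero) {
  if (right == 0) return std::nullopt;  // x % 0 is NaN in JS
  if (left == INT32_MIN && right == -1) {
    // The hardware remainder is 0, but the JS result is -0.
    if (bailout_on_minus_zero) return std::nullopt;
    return 0;
  }
  int32_t result = left % right;
  if (bailout_on_minus_zero && result == 0 && left < 0) {
    return std::nullopt;  // negative dividend, zero remainder -> -0 in JS
  }
  return result;
}

int main() {
  assert(!Int32Mod(5, 0, false).has_value());
  assert(Int32Mod(-5, 3, false) == -2);
  assert(!Int32Mod(-3, 3, true).has_value());  // -3 % 3 would be -0
  return 0;
}
```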
| 1151 | 1147 |
| 1152 | 1148 |
| 1153 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1149 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1154 Register dividend = ToRegister(instr->dividend()); | 1150 Register dividend = ToRegister(instr->dividend()); |
| 1155 int32_t divisor = instr->divisor(); | 1151 int32_t divisor = instr->divisor(); |
| 1156 Register result = ToRegister(instr->result()); | 1152 Register result = ToRegister(instr->result()); |
| 1157 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1153 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1158 DCHECK(!result.is(dividend)); | 1154 DCHECK(!result.is(dividend)); |
| 1159 | 1155 |
| 1160 // Check for (0 / -x) that will produce negative zero. | 1156 // Check for (0 / -x) that will produce negative zero. |
| 1161 HDiv* hdiv = instr->hydrogen(); | 1157 HDiv* hdiv = instr->hydrogen(); |
| 1162 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1158 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1163 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1159 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1164 Operand(zero_reg)); | |
| 1165 } | 1160 } |
| 1166 // Check for (kMinInt / -1). | 1161 // Check for (kMinInt / -1). |
| 1167 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1162 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1168 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt)); | 1163 DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt)); |
| 1169 } | 1164 } |
| 1170 // Deoptimize if remainder will not be 0. | 1165 // Deoptimize if remainder will not be 0. |
| 1171 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1166 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1172 divisor != 1 && divisor != -1) { | 1167 divisor != 1 && divisor != -1) { |
| 1173 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1168 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1174 __ And(at, dividend, Operand(mask)); | 1169 __ And(at, dividend, Operand(mask)); |
| 1175 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg)); | 1170 DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg)); |
| 1176 } | 1171 } |
| 1177 | 1172 |
| 1178 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1173 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1179 __ Dsubu(result, zero_reg, dividend); | 1174 __ Dsubu(result, zero_reg, dividend); |
| 1180 return; | 1175 return; |
| 1181 } | 1176 } |
| 1182 uint16_t shift = WhichPowerOf2Abs(divisor); | 1177 uint16_t shift = WhichPowerOf2Abs(divisor); |
| 1183 if (shift == 0) { | 1178 if (shift == 0) { |
| 1184 __ Move(result, dividend); | 1179 __ Move(result, dividend); |
| 1185 } else if (shift == 1) { | 1180 } else if (shift == 1) { |
| 1186 __ dsrl32(result, dividend, 31); | 1181 __ dsrl32(result, dividend, 31); |
| 1187 __ Daddu(result, dividend, Operand(result)); | 1182 __ Daddu(result, dividend, Operand(result)); |
| 1188 } else { | 1183 } else { |
| 1189 __ dsra32(result, dividend, 31); | 1184 __ dsra32(result, dividend, 31); |
| 1190 __ dsrl32(result, result, 32 - shift); | 1185 __ dsrl32(result, result, 32 - shift); |
| 1191 __ Daddu(result, dividend, Operand(result)); | 1186 __ Daddu(result, dividend, Operand(result)); |
| 1192 } | 1187 } |
| 1193 if (shift > 0) __ dsra(result, result, shift); | 1188 if (shift > 0) __ dsra(result, result, shift); |
| 1194 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1189 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1195 } | 1190 } |
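The dsrl32/dsra32 sequence in DoDivByPowerOf2I is the standard branch-free "add bias, then arithmetic shift" division. A sketch assuming two's-complement arithmetic right shifts, with an invented helper, and with shift restricted to 1..31 (the 0-shift and negate-only cases are handled separately above):

```cpp
#include <cassert>
#include <cstdint>

// Divide by +/-2^shift with truncation toward zero, branch-free: negative
// dividends get a bias of (2^shift - 1) so the arithmetic shift rounds
// toward zero instead of toward -infinity.
int32_t DivByPowerOf2(int32_t dividend, int shift, bool negate) {
  uint32_t sign = dividend < 0 ? 0xFFFFFFFFu : 0u;  // broadcast the sign bit
  uint32_t bias = sign >> (32 - shift);             // 0 or 2^shift - 1
  int32_t biased =
      static_cast<int32_t>(static_cast<uint32_t>(dividend) + bias);
  int32_t result = biased >> shift;                 // arithmetic shift (dsra)
  return negate ? -result : result;                 // negative divisor
}

int main() {
  assert(DivByPowerOf2(7, 2, false) == 1);    //  7 /  4
  assert(DivByPowerOf2(-7, 2, false) == -1);  // -7 /  4 truncates toward 0
  assert(DivByPowerOf2(-7, 2, true) == 1);    // -7 / -4
  assert(DivByPowerOf2(-8, 3, false) == -1);  // -8 /  8, exact
  return 0;
}
```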
| 1196 | 1191 |
| 1197 | 1192 |
| 1198 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1193 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1199 Register dividend = ToRegister(instr->dividend()); | 1194 Register dividend = ToRegister(instr->dividend()); |
| 1200 int32_t divisor = instr->divisor(); | 1195 int32_t divisor = instr->divisor(); |
| 1201 Register result = ToRegister(instr->result()); | 1196 Register result = ToRegister(instr->result()); |
| 1202 DCHECK(!dividend.is(result)); | 1197 DCHECK(!dividend.is(result)); |
| 1203 | 1198 |
| 1204 if (divisor == 0) { | 1199 if (divisor == 0) { |
| 1205 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1200 DeoptimizeIf(al, instr, "division by zero"); |
| 1206 return; | 1201 return; |
| 1207 } | 1202 } |
| 1208 | 1203 |
| 1209 // Check for (0 / -x) that will produce negative zero. | 1204 // Check for (0 / -x) that will produce negative zero. |
| 1210 HDiv* hdiv = instr->hydrogen(); | 1205 HDiv* hdiv = instr->hydrogen(); |
| 1211 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1206 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1212 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1207 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1213 Operand(zero_reg)); | |
| 1214 } | 1208 } |
| 1215 | 1209 |
| 1216 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1210 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1217 if (divisor < 0) __ Subu(result, zero_reg, result); | 1211 if (divisor < 0) __ Subu(result, zero_reg, result); |
| 1218 | 1212 |
| 1219 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1213 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1220 __ Dmul(scratch0(), result, Operand(divisor)); | 1214 __ Dmul(scratch0(), result, Operand(divisor)); |
| 1221 __ Dsubu(scratch0(), scratch0(), dividend); | 1215 __ Dsubu(scratch0(), scratch0(), dividend); |
| 1222 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(), | 1216 DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg)); |
| 1223 Operand(zero_reg)); | |
| 1224 } | 1217 } |
| 1225 } | 1218 } |
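The "lost precision" deopt in DoDivByConstI verifies that the truncating quotient is exact by multiplying it back and comparing against the dividend. A sketch with plain `/` in place of the TruncatingDiv magic-number sequence; the helper name is invented:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>

// Sketch of the exactness check: multiply the quotient back (Dmul) and
// subtract the dividend (Dsubu); a non-zero difference means precision was
// lost and the generated code deoptimizes.
std::optional<int32_t> ExactDivByConst(int32_t dividend, int32_t divisor) {
  assert(divisor != 0);
  int32_t quotient = dividend / std::abs(divisor);
  if (divisor < 0) quotient = -quotient;
  if (static_cast<int64_t>(quotient) * divisor != dividend) {
    return std::nullopt;  // remainder != 0 -> deoptimize
  }
  return quotient;
}

int main() {
  assert(ExactDivByConst(12, -3) == -4);
  assert(!ExactDivByConst(13, 3).has_value());  // 13 / 3 is not exact
  return 0;
}
```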
| 1226 | 1219 |
| 1227 | 1220 |
| 1228 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1221 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1229 void LCodeGen::DoDivI(LDivI* instr) { | 1222 void LCodeGen::DoDivI(LDivI* instr) { |
| 1230 HBinaryOperation* hdiv = instr->hydrogen(); | 1223 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1231 Register dividend = ToRegister(instr->dividend()); | 1224 Register dividend = ToRegister(instr->dividend()); |
| 1232 Register divisor = ToRegister(instr->divisor()); | 1225 Register divisor = ToRegister(instr->divisor()); |
| 1233 const Register result = ToRegister(instr->result()); | 1226 const Register result = ToRegister(instr->result()); |
| 1234 | 1227 |
| 1235 // On MIPS div is asynchronous - it will run in the background while we | 1228 // On MIPS div is asynchronous - it will run in the background while we |
| 1236 // check for special cases. | 1229 // check for special cases. |
| 1237 __ Ddiv(result, dividend, divisor); | 1230 __ Ddiv(result, dividend, divisor); |
| 1238 | 1231 |
| 1239 // Check for x / 0. | 1232 // Check for x / 0. |
| 1240 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1241 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1234 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1242 Operand(zero_reg)); | |
| 1243 } | 1235 } |
| 1244 | 1236 |
| 1245 // Check for (0 / -x) that will produce negative zero. | 1237 // Check for (0 / -x) that will produce negative zero. |
| 1246 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1247 Label left_not_zero; | 1239 Label left_not_zero; |
| 1248 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1240 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1249 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1241 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1250 Operand(zero_reg)); | |
| 1251 __ bind(&left_not_zero); | 1242 __ bind(&left_not_zero); |
| 1252 } | 1243 } |
| 1253 | 1244 |
| 1254 // Check for (kMinInt / -1). | 1245 // Check for (kMinInt / -1). |
| 1255 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1246 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1256 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1247 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1257 Label left_not_min_int; | 1248 Label left_not_min_int; |
| 1258 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1249 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1259 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1250 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1260 __ bind(&left_not_min_int); | 1251 __ bind(&left_not_min_int); |
| 1261 } | 1252 } |
| 1262 | 1253 |
| 1263 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1254 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1264 // Calculate remainder. | 1255 // Calculate remainder. |
| 1265 Register remainder = ToRegister(instr->temp()); | 1256 Register remainder = ToRegister(instr->temp()); |
| 1266 if (kArchVariant != kMips64r6) { | 1257 if (kArchVariant != kMips64r6) { |
| 1267 __ mfhi(remainder); | 1258 __ mfhi(remainder); |
| 1268 } else { | 1259 } else { |
| 1269 __ dmod(remainder, dividend, divisor); | 1260 __ dmod(remainder, dividend, divisor); |
| 1270 } | 1261 } |
| 1271 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder, | 1262 DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg)); |
| 1272 Operand(zero_reg)); | |
| 1273 } | 1263 } |
| 1274 } | 1264 } |
| 1275 | 1265 |
| 1276 | 1266 |
| 1277 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1267 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1278 DoubleRegister addend = ToDoubleRegister(instr->addend()); | 1268 DoubleRegister addend = ToDoubleRegister(instr->addend()); |
| 1279 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1269 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1280 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1270 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1281 | 1271 |
| 1282 // This is computed in-place. | 1272 // This is computed in-place. |
| (...skipping 24 matching lines...) | |
| 1307 return; | 1297 return; |
| 1308 } | 1298 } |
| 1309 | 1299 |
| 1310 // If the divisor is negative, we have to negate and handle edge cases. | 1300 // If the divisor is negative, we have to negate and handle edge cases. |
| 1311 // Dividend can be the same register as result so save the value of it | 1301 // Dividend can be the same register as result so save the value of it |
| 1312 // for checking overflow. | 1302 // for checking overflow. |
| 1313 __ Move(scratch, dividend); | 1303 __ Move(scratch, dividend); |
| 1314 | 1304 |
| 1315 __ Dsubu(result, zero_reg, dividend); | 1305 __ Dsubu(result, zero_reg, dividend); |
| 1316 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1306 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1317 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 1307 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1318 } | 1308 } |
| 1319 | 1309 |
| 1320 __ Xor(scratch, scratch, result); | 1310 __ Xor(scratch, scratch, result); |
| 1321 // Dividing by -1 is basically negation, unless we overflow. | 1311 // Dividing by -1 is basically negation, unless we overflow. |
| 1322 if (divisor == -1) { | 1312 if (divisor == -1) { |
| 1323 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1313 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1324 DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt)); | 1314 DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt)); |
| 1325 } | 1315 } |
| 1326 return; | 1316 return; |
| 1327 } | 1317 } |
| 1328 | 1318 |
| 1329 // If the negation could not overflow, simply shifting is OK. | 1319 // If the negation could not overflow, simply shifting is OK. |
| 1330 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1320 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1331 __ dsra(result, result, shift); | 1321 __ dsra(result, result, shift); |
| 1332 return; | 1322 return; |
| 1333 } | 1323 } |
| 1334 | 1324 |
| 1335 Label no_overflow, done; | 1325 Label no_overflow, done; |
| 1336 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); | 1326 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); |
| 1337 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); | 1327 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); |
| 1338 __ Branch(&done); | 1328 __ Branch(&done); |
| 1339 __ bind(&no_overflow); | 1329 __ bind(&no_overflow); |
| 1340 __ dsra(result, result, shift); | 1330 __ dsra(result, result, shift); |
| 1341 __ bind(&done); | 1331 __ bind(&done); |
| 1342 } | 1332 } |
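Both flooring-division paths (the power-of-two case above and DoFlooringDivI further down) rely on the same relationship between flooring and truncating division. A minimal sketch, assuming the divisor is non-zero and the kMinInt / -1 case has already been excluded, as the deopts in the real code ensure:

```cpp
#include <cassert>
#include <cstdint>

// Floored quotient = truncated quotient, minus one whenever the operands
// have opposite signs and the division is inexact.
int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;   // C++ '/' truncates toward zero
  int32_t remainder = dividend % divisor;
  if (remainder != 0 && ((dividend ^ divisor) < 0)) {
    quotient -= 1;                         // correct toward -infinity
  }
  return quotient;
}

int main() {
  assert(FlooringDiv(7, 2) == 3);
  assert(FlooringDiv(-7, 2) == -4);   // truncation alone would give -3
  assert(FlooringDiv(7, -2) == -4);
  return 0;
}
```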
| 1343 | 1333 |
| 1344 | 1334 |
| 1345 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1335 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1346 Register dividend = ToRegister(instr->dividend()); | 1336 Register dividend = ToRegister(instr->dividend()); |
| 1347 int32_t divisor = instr->divisor(); | 1337 int32_t divisor = instr->divisor(); |
| 1348 Register result = ToRegister(instr->result()); | 1338 Register result = ToRegister(instr->result()); |
| 1349 DCHECK(!dividend.is(result)); | 1339 DCHECK(!dividend.is(result)); |
| 1350 | 1340 |
| 1351 if (divisor == 0) { | 1341 if (divisor == 0) { |
| 1352 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1342 DeoptimizeIf(al, instr, "division by zero"); |
| 1353 return; | 1343 return; |
| 1354 } | 1344 } |
| 1355 | 1345 |
| 1356 // Check for (0 / -x) that will produce negative zero. | 1346 // Check for (0 / -x) that will produce negative zero. |
| 1357 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1347 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1358 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1348 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1359 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1349 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1360 Operand(zero_reg)); | |
| 1361 } | 1350 } |
| 1362 | 1351 |
| 1363 // Easy case: We need no dynamic check for the dividend and the flooring | 1352 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1364 // division is the same as the truncating division. | 1353 // division is the same as the truncating division. |
| 1365 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1354 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1366 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1355 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1367 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1356 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1368 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1357 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1369 return; | 1358 return; |
| 1370 } | 1359 } |
| (...skipping 23 matching lines...) | |
| 1394 Register dividend = ToRegister(instr->dividend()); | 1383 Register dividend = ToRegister(instr->dividend()); |
| 1395 Register divisor = ToRegister(instr->divisor()); | 1384 Register divisor = ToRegister(instr->divisor()); |
| 1396 const Register result = ToRegister(instr->result()); | 1385 const Register result = ToRegister(instr->result()); |
| 1397 | 1386 |
| 1398 // On MIPS div is asynchronous - it will run in the background while we | 1387 // On MIPS div is asynchronous - it will run in the background while we |
| 1399 // check for special cases. | 1388 // check for special cases. |
| 1400 __ Ddiv(result, dividend, divisor); | 1389 __ Ddiv(result, dividend, divisor); |
| 1401 | 1390 |
| 1402 // Check for x / 0. | 1391 // Check for x / 0. |
| 1403 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1392 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1404 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1393 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1405 Operand(zero_reg)); | |
| 1406 } | 1394 } |
| 1407 | 1395 |
| 1408 // Check for (0 / -x) that will produce negative zero. | 1396 // Check for (0 / -x) that will produce negative zero. |
| 1409 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1397 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1410 Label left_not_zero; | 1398 Label left_not_zero; |
| 1411 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1399 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1412 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1400 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1413 Operand(zero_reg)); | |
| 1414 __ bind(&left_not_zero); | 1401 __ bind(&left_not_zero); |
| 1415 } | 1402 } |
| 1416 | 1403 |
| 1417 // Check for (kMinInt / -1). | 1404 // Check for (kMinInt / -1). |
| 1418 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1405 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1419 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1406 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1420 Label left_not_min_int; | 1407 Label left_not_min_int; |
| 1421 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1408 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1422 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1409 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1423 __ bind(&left_not_min_int); | 1410 __ bind(&left_not_min_int); |
| 1424 } | 1411 } |
| 1425 | 1412 |
| 1426 // We performed a truncating division. Correct the result if necessary. | 1413 // We performed a truncating division. Correct the result if necessary. |
| 1427 Label done; | 1414 Label done; |
| 1428 Register remainder = scratch0(); | 1415 Register remainder = scratch0(); |
| 1429 if (kArchVariant != kMips64r6) { | 1416 if (kArchVariant != kMips64r6) { |
| 1430 __ mfhi(remainder); | 1417 __ mfhi(remainder); |
| 1431 } else { | 1418 } else { |
| 1432 __ dmod(remainder, dividend, divisor); | 1419 __ dmod(remainder, dividend, divisor); |
| (...skipping 16 matching lines...) | |
| 1449 bool bailout_on_minus_zero = | 1436 bool bailout_on_minus_zero = |
| 1450 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1437 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1451 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1438 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1452 | 1439 |
| 1453 if (right_op->IsConstantOperand()) { | 1440 if (right_op->IsConstantOperand()) { |
| 1454 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1441 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1455 | 1442 |
| 1456 if (bailout_on_minus_zero && (constant < 0)) { | 1443 if (bailout_on_minus_zero && (constant < 0)) { |
| 1457 // The case of a null constant will be handled separately. | 1444 // The case of a null constant will be handled separately. |
| 1458 // If constant is negative and left is null, the result should be -0. | 1445 // If constant is negative and left is null, the result should be -0. |
| 1459 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); | 1446 DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg)); |
| 1460 } | 1447 } |
| 1461 | 1448 |
| 1462 switch (constant) { | 1449 switch (constant) { |
| 1463 case -1: | 1450 case -1: |
| 1464 if (overflow) { | 1451 if (overflow) { |
| 1465 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); | 1452 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); |
| 1466 DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, scratch, | 1453 DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt)); |
| 1467 Operand(kMaxInt)); | |
| 1468 } else { | 1454 } else { |
| 1469 __ Dsubu(result, zero_reg, left); | 1455 __ Dsubu(result, zero_reg, left); |
| 1470 } | 1456 } |
| 1471 break; | 1457 break; |
| 1472 case 0: | 1458 case 0: |
| 1473 if (bailout_on_minus_zero) { | 1459 if (bailout_on_minus_zero) { |
| 1474 // If left is strictly negative and the constant is null, the | 1460 // If left is strictly negative and the constant is null, the |
| 1475 // result is -0. Deoptimize if required, otherwise return 0. | 1461 // result is -0. Deoptimize if required, otherwise return 0. |
| 1476 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, | 1462 DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg)); |
| 1477 Operand(zero_reg)); | |
| 1478 } | 1463 } |
| 1479 __ mov(result, zero_reg); | 1464 __ mov(result, zero_reg); |
| 1480 break; | 1465 break; |
| 1481 case 1: | 1466 case 1: |
| 1482 // Nothing to do. | 1467 // Nothing to do. |
| 1483 __ Move(result, left); | 1468 __ Move(result, left); |
| 1484 break; | 1469 break; |
| 1485 default: | 1470 default: |
| 1486 // Multiplying by powers of two and powers of two plus or minus | 1471 // Multiplying by powers of two and powers of two plus or minus |
| 1487 // one can be done faster with shifted operands. | 1472 // one can be done faster with shifted operands. |
| (...skipping 34 matching lines...) | |
| 1522 if (instr->hydrogen()->representation().IsSmi()) { | 1507 if (instr->hydrogen()->representation().IsSmi()) { |
| 1523 __ Dmulh(result, left, right); | 1508 __ Dmulh(result, left, right); |
| 1524 } else { | 1509 } else { |
| 1525 __ Dmul(result, left, right); | 1510 __ Dmul(result, left, right); |
| 1526 } | 1511 } |
| 1527 __ dsra32(scratch, result, 0); | 1512 __ dsra32(scratch, result, 0); |
| 1528 __ sra(at, result, 31); | 1513 __ sra(at, result, 31); |
| 1529 if (instr->hydrogen()->representation().IsSmi()) { | 1514 if (instr->hydrogen()->representation().IsSmi()) { |
| 1530 __ SmiTag(result); | 1515 __ SmiTag(result); |
| 1531 } | 1516 } |
| 1532 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | 1517 DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at)); |
| 1533 } else { | 1518 } else { |
| 1534 if (instr->hydrogen()->representation().IsSmi()) { | 1519 if (instr->hydrogen()->representation().IsSmi()) { |
| 1535 __ SmiUntag(result, left); | 1520 __ SmiUntag(result, left); |
| 1536 __ Dmul(result, result, right); | 1521 __ Dmul(result, result, right); |
| 1537 } else { | 1522 } else { |
| 1538 __ Dmul(result, left, right); | 1523 __ Dmul(result, left, right); |
| 1539 } | 1524 } |
| 1540 } | 1525 } |
| 1541 | 1526 |
| 1542 if (bailout_on_minus_zero) { | 1527 if (bailout_on_minus_zero) { |
| 1543 Label done; | 1528 Label done; |
| 1544 __ Xor(at, left, right); | 1529 __ Xor(at, left, right); |
| 1545 __ Branch(&done, ge, at, Operand(zero_reg)); | 1530 __ Branch(&done, ge, at, Operand(zero_reg)); |
| 1546 // Bail out if the result is minus zero. | 1531 // Bail out if the result is minus zero. |
| 1547 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | 1532 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1548 Operand(zero_reg)); | |
| 1549 __ bind(&done); | 1533 __ bind(&done); |
| 1550 } | 1534 } |
| 1551 } | 1535 } |
| 1552 } | 1536 } |
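The dsra32/sra pair after the multiply in DoMulI is a 32-bit overflow check on a 64-bit product: the upper word must equal the sign-extension of bit 31 of the lower word. A sketch with an invented helper, assuming arithmetic right shifts on signed values:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Multiply two int32 values and detect overflow the way DoMulI does: form
// the full 64-bit product, then require its top 32 bits to be nothing but
// the sign-extension of bit 31 of the low 32 bits.
std::optional<int32_t> MulWithOverflowCheck(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;
  int32_t low = static_cast<int32_t>(product);
  int32_t high = static_cast<int32_t>(product >> 32);  // dsra32(result, 0)
  if (high != (low >> 31)) {                            // sra(result, 31)
    return std::nullopt;                                // overflow deopt
  }
  return low;
}

int main() {
  assert(MulWithOverflowCheck(-3, 7) == -21);
  assert(!MulWithOverflowCheck(50000, 50000).has_value());   // > kMaxInt
  assert(!MulWithOverflowCheck(INT32_MIN, -1).has_value());  // 2^31 overflows
  return 0;
}
```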
| 1553 | 1537 |
| 1554 | 1538 |
| 1555 void LCodeGen::DoBitI(LBitI* instr) { | 1539 void LCodeGen::DoBitI(LBitI* instr) { |
| 1556 LOperand* left_op = instr->left(); | 1540 LOperand* left_op = instr->left(); |
| 1557 LOperand* right_op = instr->right(); | 1541 LOperand* right_op = instr->right(); |
| 1558 DCHECK(left_op->IsRegister()); | 1542 DCHECK(left_op->IsRegister()); |
| (...skipping 43 matching lines...) | |
| 1602 case Token::ROR: | 1586 case Token::ROR: |
| 1603 __ Ror(result, left, Operand(ToRegister(right_op))); | 1587 __ Ror(result, left, Operand(ToRegister(right_op))); |
| 1604 break; | 1588 break; |
| 1605 case Token::SAR: | 1589 case Token::SAR: |
| 1606 __ srav(result, left, ToRegister(right_op)); | 1590 __ srav(result, left, ToRegister(right_op)); |
| 1607 break; | 1591 break; |
| 1608 case Token::SHR: | 1592 case Token::SHR: |
| 1609 __ srlv(result, left, ToRegister(right_op)); | 1593 __ srlv(result, left, ToRegister(right_op)); |
| 1610 if (instr->can_deopt()) { | 1594 if (instr->can_deopt()) { |
| 1611 // TODO(yy): (-1) >>> 0. anything else? | 1595 // TODO(yy): (-1) >>> 0. anything else? |
| 1612 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result, | 1596 DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg)); |
| 1613 Operand(zero_reg)); | 1597 DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt)); |
| 1614 DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result, | |
| 1615 Operand(kMaxInt)); | |
| 1616 } | 1598 } |
| 1617 break; | 1599 break; |
| 1618 case Token::SHL: | 1600 case Token::SHL: |
| 1619 __ sllv(result, left, ToRegister(right_op)); | 1601 __ sllv(result, left, ToRegister(right_op)); |
| 1620 break; | 1602 break; |
| 1621 default: | 1603 default: |
| 1622 UNREACHABLE(); | 1604 UNREACHABLE(); |
| 1623 break; | 1605 break; |
| 1624 } | 1606 } |
| 1625 } else { | 1607 } else { |
| (...skipping 14 matching lines...) | |
| 1640 } else { | 1622 } else { |
| 1641 __ Move(result, left); | 1623 __ Move(result, left); |
| 1642 } | 1624 } |
| 1643 break; | 1625 break; |
| 1644 case Token::SHR: | 1626 case Token::SHR: |
| 1645 if (shift_count != 0) { | 1627 if (shift_count != 0) { |
| 1646 __ srl(result, left, shift_count); | 1628 __ srl(result, left, shift_count); |
| 1647 } else { | 1629 } else { |
| 1648 if (instr->can_deopt()) { | 1630 if (instr->can_deopt()) { |
| 1649 __ And(at, left, Operand(0x80000000)); | 1631 __ And(at, left, Operand(0x80000000)); |
| 1650 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at, | 1632 DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg)); |
| 1651 Operand(zero_reg)); | |
| 1652 } | 1633 } |
| 1653 __ Move(result, left); | 1634 __ Move(result, left); |
| 1654 } | 1635 } |
| 1655 break; | 1636 break; |
| 1656 case Token::SHL: | 1637 case Token::SHL: |
| 1657 if (shift_count != 0) { | 1638 if (shift_count != 0) { |
| 1658 if (instr->hydrogen_value()->representation().IsSmi()) { | 1639 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1659 __ dsll(result, left, shift_count); | 1640 __ dsll(result, left, shift_count); |
| 1660 } else { | 1641 } else { |
| 1661 __ sll(result, left, shift_count); | 1642 __ sll(result, left, shift_count); |
| (...skipping 35 matching lines...) | |
| 1697 overflow); // Reg at also used as scratch. | 1678 overflow); // Reg at also used as scratch. |
| 1698 } else { | 1679 } else { |
| 1699 DCHECK(right->IsRegister()); | 1680 DCHECK(right->IsRegister()); |
| 1700 // Due to overflow check macros not supporting constant operands, | 1681 // Due to overflow check macros not supporting constant operands, |
| 1701 // handling the IsConstantOperand case was moved to prev if clause. | 1682 // handling the IsConstantOperand case was moved to prev if clause. |
| 1702 __ SubuAndCheckForOverflow(ToRegister(result), | 1683 __ SubuAndCheckForOverflow(ToRegister(result), |
| 1703 ToRegister(left), | 1684 ToRegister(left), |
| 1704 ToRegister(right), | 1685 ToRegister(right), |
| 1705 overflow); // Reg at also used as scratch. | 1686 overflow); // Reg at also used as scratch. |
| 1706 } | 1687 } |
| 1707 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | 1688 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1708 Operand(zero_reg)); | |
| 1709 if (!instr->hydrogen()->representation().IsSmi()) { | 1689 if (!instr->hydrogen()->representation().IsSmi()) { |
| 1710 DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result), | 1690 DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt)); |
| 1711 Operand(kMaxInt)); | 1691 DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt)); |
| 1712 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result), | |
| 1713 Operand(kMinInt)); | |
| 1714 } | 1692 } |
| 1715 } | 1693 } |
| 1716 } | 1694 } |
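On MIPS64 this Smi/int32 arithmetic runs in 64-bit registers, which is why DoSubI (and DoAddI below) pair the overflow-check macro with explicit kMaxInt/kMinInt comparisons for the int32 case. A simplified model of the overall effect, not the exact register-level sequence:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

// Simplified model of the checked int32 subtraction: the 64-bit difference
// never wraps, but any value outside the int32 range corresponds to the
// overflow deopt in the code above.
std::optional<int32_t> Int32SubChecked(int32_t left, int32_t right) {
  int64_t result = static_cast<int64_t>(left) - right;
  if (result > INT32_MAX || result < INT32_MIN) {
    return std::nullopt;  // overflow -> deoptimize
  }
  return static_cast<int32_t>(result);
}

int main() {
  assert(Int32SubChecked(5, 7) == -2);
  assert(!Int32SubChecked(INT32_MIN, 1).has_value());  // would wrap in 32 bits
  return 0;
}
```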
| 1717 | 1695 |
| 1718 | 1696 |
| 1719 void LCodeGen::DoConstantI(LConstantI* instr) { | 1697 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1720 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1698 __ li(ToRegister(instr->result()), Operand(instr->value())); |
| 1721 } | 1699 } |
| 1722 | 1700 |
| 1723 | 1701 |
| (...skipping 34 matching lines...) | |
| 1758 Register result = ToRegister(instr->result()); | 1736 Register result = ToRegister(instr->result()); |
| 1759 Register scratch = ToRegister(instr->temp()); | 1737 Register scratch = ToRegister(instr->temp()); |
| 1760 Smi* index = instr->index(); | 1738 Smi* index = instr->index(); |
| 1761 Label runtime, done; | 1739 Label runtime, done; |
| 1762 DCHECK(object.is(a0)); | 1740 DCHECK(object.is(a0)); |
| 1763 DCHECK(result.is(v0)); | 1741 DCHECK(result.is(v0)); |
| 1764 DCHECK(!scratch.is(scratch0())); | 1742 DCHECK(!scratch.is(scratch0())); |
| 1765 DCHECK(!scratch.is(object)); | 1743 DCHECK(!scratch.is(object)); |
| 1766 | 1744 |
| 1767 __ SmiTst(object, at); | 1745 __ SmiTst(object, at); |
| 1768 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 1746 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 1769 __ GetObjectType(object, scratch, scratch); | 1747 __ GetObjectType(object, scratch, scratch); |
| 1770 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch, | 1748 DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE)); |
| 1771 Operand(JS_DATE_TYPE)); | |
| 1772 | 1749 |
| 1773 if (index->value() == 0) { | 1750 if (index->value() == 0) { |
| 1774 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1751 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 1775 } else { | 1752 } else { |
| 1776 if (index->value() < JSDate::kFirstUncachedField) { | 1753 if (index->value() < JSDate::kFirstUncachedField) { |
| 1777 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1754 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1778 __ li(scratch, Operand(stamp)); | 1755 __ li(scratch, Operand(stamp)); |
| 1779 __ ld(scratch, MemOperand(scratch)); | 1756 __ ld(scratch, MemOperand(scratch)); |
| 1780 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1757 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 1781 __ Branch(&runtime, ne, scratch, Operand(scratch0())); | 1758 __ Branch(&runtime, ne, scratch, Operand(scratch0())); |
| (...skipping 114 matching lines...) | |
| 1896 overflow); // Reg at also used as scratch. | 1873 overflow); // Reg at also used as scratch. |
| 1897 } else { | 1874 } else { |
| 1898 DCHECK(right->IsRegister()); | 1875 DCHECK(right->IsRegister()); |
| 1899 // Due to overflow check macros not supporting constant operands, | 1876 // Due to overflow check macros not supporting constant operands, |
| 1900 // handling the IsConstantOperand case was moved to prev if clause. | 1877 // handling the IsConstantOperand case was moved to prev if clause. |
| 1901 __ AdduAndCheckForOverflow(ToRegister(result), | 1878 __ AdduAndCheckForOverflow(ToRegister(result), |
| 1902 ToRegister(left), | 1879 ToRegister(left), |
| 1903 ToRegister(right), | 1880 ToRegister(right), |
| 1904 overflow); // Reg at also used as scratch. | 1881 overflow); // Reg at also used as scratch. |
| 1905 } | 1882 } |
| 1906 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | 1883 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1907 Operand(zero_reg)); | |
| 1908 // if not smi, it must int32. | 1884 // if not smi, it must int32. |
| 1909 if (!instr->hydrogen()->representation().IsSmi()) { | 1885 if (!instr->hydrogen()->representation().IsSmi()) { |
| 1910 DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, ToRegister(result), | 1886 DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt)); |
| 1911 Operand(kMaxInt)); | 1887 DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt)); |
| 1912 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, ToRegister(result), | |
| 1913 Operand(kMinInt)); | |
| 1914 } | 1888 } |
| 1915 } | 1889 } |
| 1916 } | 1890 } |
| 1917 | 1891 |
| 1918 | 1892 |
| 1919 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1920 LOperand* left = instr->left(); | 1894 LOperand* left = instr->left(); |
| 1921 LOperand* right = instr->right(); | 1895 LOperand* right = instr->right(); |
| 1922 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1896 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1923 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 1897 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| (...skipping 241 matching lines...) | |
| 2165 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | 2139 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); |
| 2166 } | 2140 } |
| 2167 | 2141 |
| 2168 if (expected.Contains(ToBooleanStub::SMI)) { | 2142 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2169 // Smis: 0 -> false, all other -> true. | 2143 // Smis: 0 -> false, all other -> true. |
| 2170 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); | 2144 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); |
| 2171 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2145 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2172 } else if (expected.NeedsMap()) { | 2146 } else if (expected.NeedsMap()) { |
| 2173 // If we need a map later and have a Smi -> deopt. | 2147 // If we need a map later and have a Smi -> deopt. |
| 2174 __ SmiTst(reg, at); | 2148 __ SmiTst(reg, at); |
| 2175 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 2149 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 2176 } | 2150 } |
| 2177 | 2151 |
| 2178 const Register map = scratch0(); | 2152 const Register map = scratch0(); |
| 2179 if (expected.NeedsMap()) { | 2153 if (expected.NeedsMap()) { |
| 2180 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2154 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2181 if (expected.CanBeUndetectable()) { | 2155 if (expected.CanBeUndetectable()) { |
| 2182 // Undetectable -> false. | 2156 // Undetectable -> false. |
| 2183 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 2157 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2184 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 2158 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 2185 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); | 2159 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); |
| (...skipping 35 matching lines...) | |
| 2221 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | 2195 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
| 2222 ne, dbl_scratch, kDoubleRegZero); | 2196 ne, dbl_scratch, kDoubleRegZero); |
| 2223 // Falls through if dbl_scratch == 0. | 2197 // Falls through if dbl_scratch == 0. |
| 2224 __ Branch(instr->FalseLabel(chunk_)); | 2198 __ Branch(instr->FalseLabel(chunk_)); |
| 2225 __ bind(&not_heap_number); | 2199 __ bind(&not_heap_number); |
| 2226 } | 2200 } |
| 2227 | 2201 |
| 2228 if (!expected.IsGeneric()) { | 2202 if (!expected.IsGeneric()) { |
| 2229 // We've seen something for the first time -> deopt. | 2203 // We've seen something for the first time -> deopt. |
| 2230 // This can only happen if we are not generic already. | 2204 // This can only happen if we are not generic already. |
| 2231 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg, | 2205 DeoptimizeIf(al, instr, "unexpected object", zero_reg, |
| 2232 Operand(zero_reg)); | 2206 Operand(zero_reg)); |
| 2233 } | 2207 } |
| 2234 } | 2208 } |
| 2235 } | 2209 } |
| 2236 } | 2210 } |
| 2237 | 2211 |
| 2238 | 2212 |
| 2239 void LCodeGen::EmitGoto(int block) { | 2213 void LCodeGen::EmitGoto(int block) { |
| 2240 if (!IsNextEmittedBlock(block)) { | 2214 if (!IsNextEmittedBlock(block)) { |
| 2241 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2215 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| (...skipping 629 matching lines...) | |
| 2871 } | 2845 } |
| 2872 } | 2846 } |
| 2873 | 2847 |
| 2874 | 2848 |
| 2875 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2849 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2876 Register result = ToRegister(instr->result()); | 2850 Register result = ToRegister(instr->result()); |
| 2877 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2851 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 2878 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); | 2852 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); |
| 2879 if (instr->hydrogen()->RequiresHoleCheck()) { | 2853 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2880 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2854 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2881 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2855 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2882 } | 2856 } |
| 2883 } | 2857 } |
| 2884 | 2858 |
| 2885 | 2859 |
| 2886 template <class T> | 2860 template <class T> |
| 2887 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2861 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 2888 DCHECK(FLAG_vector_ics); | 2862 DCHECK(FLAG_vector_ics); |
| 2889 Register vector_register = ToRegister(instr->temp_vector()); | 2863 Register vector_register = ToRegister(instr->temp_vector()); |
| 2890 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 2864 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
| 2891 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 2865 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
| (...skipping 34 matching lines...) | |
| 2926 | 2900 |
| 2927 // If the cell we are storing to contains the hole it could have | 2901 // If the cell we are storing to contains the hole it could have |
| 2928 // been deleted from the property dictionary. In that case, we need | 2902 // been deleted from the property dictionary. In that case, we need |
| 2929 // to update the property details in the property dictionary to mark | 2903 // to update the property details in the property dictionary to mark |
| 2930 // it as no longer deleted. | 2904 // it as no longer deleted. |
| 2931 if (instr->hydrogen()->RequiresHoleCheck()) { | 2905 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2932 // We use a temp to check the payload. | 2906 // We use a temp to check the payload. |
| 2933 Register payload = ToRegister(instr->temp()); | 2907 Register payload = ToRegister(instr->temp()); |
| 2934 __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 2908 __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2935 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2909 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2936 DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at)); | 2910 DeoptimizeIf(eq, instr, "hole", payload, Operand(at)); |
| 2937 } | 2911 } |
| 2938 | 2912 |
| 2939 // Store the value. | 2913 // Store the value. |
| 2940 __ sd(value, FieldMemOperand(cell, Cell::kValueOffset)); | 2914 __ sd(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2941 // Cells are always rescanned, so no write barrier here. | 2915 // Cells are always rescanned, so no write barrier here. |
| 2942 } | 2916 } |
| 2943 | 2917 |
| 2944 | 2918 |
| 2945 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2919 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2946 Register context = ToRegister(instr->context()); | 2920 Register context = ToRegister(instr->context()); |
| 2947 Register result = ToRegister(instr->result()); | 2921 Register result = ToRegister(instr->result()); |
| 2948 | 2922 |
| 2949 __ ld(result, ContextOperand(context, instr->slot_index())); | 2923 __ ld(result, ContextOperand(context, instr->slot_index())); |
| 2950 if (instr->hydrogen()->RequiresHoleCheck()) { | 2924 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2951 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2925 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2952 | 2926 |
| 2953 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2927 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2954 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2928 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2955 } else { | 2929 } else { |
| 2956 Label is_not_hole; | 2930 Label is_not_hole; |
| 2957 __ Branch(&is_not_hole, ne, result, Operand(at)); | 2931 __ Branch(&is_not_hole, ne, result, Operand(at)); |
| 2958 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2932 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2959 __ bind(&is_not_hole); | 2933 __ bind(&is_not_hole); |
| 2960 } | 2934 } |
| 2961 } | 2935 } |
| 2962 } | 2936 } |
| 2963 | 2937 |
| 2964 | 2938 |
| 2965 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2939 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2966 Register context = ToRegister(instr->context()); | 2940 Register context = ToRegister(instr->context()); |
| 2967 Register value = ToRegister(instr->value()); | 2941 Register value = ToRegister(instr->value()); |
| 2968 Register scratch = scratch0(); | 2942 Register scratch = scratch0(); |
| 2969 MemOperand target = ContextOperand(context, instr->slot_index()); | 2943 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 2970 | 2944 |
| 2971 Label skip_assignment; | 2945 Label skip_assignment; |
| 2972 | 2946 |
| 2973 if (instr->hydrogen()->RequiresHoleCheck()) { | 2947 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2974 __ ld(scratch, target); | 2948 __ ld(scratch, target); |
| 2975 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2949 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2976 | 2950 |
| 2977 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2951 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2978 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at)); | 2952 DeoptimizeIf(eq, instr, "hole", scratch, Operand(at)); |
| 2979 } else { | 2953 } else { |
| 2980 __ Branch(&skip_assignment, ne, scratch, Operand(at)); | 2954 __ Branch(&skip_assignment, ne, scratch, Operand(at)); |
| 2981 } | 2955 } |
| 2982 } | 2956 } |
| 2983 | 2957 |
| 2984 __ sd(value, target); | 2958 __ sd(value, target); |
| 2985 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2959 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2986 SmiCheck check_needed = | 2960 SmiCheck check_needed = |
| 2987 instr->hydrogen()->value()->type().IsHeapObject() | 2961 instr->hydrogen()->value()->type().IsHeapObject() |
| 2988 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2962 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 74 matching lines...) | |
| 3063 Register scratch = scratch0(); | 3037 Register scratch = scratch0(); |
| 3064 Register function = ToRegister(instr->function()); | 3038 Register function = ToRegister(instr->function()); |
| 3065 Register result = ToRegister(instr->result()); | 3039 Register result = ToRegister(instr->result()); |
| 3066 | 3040 |
| 3067 // Get the prototype or initial map from the function. | 3041 // Get the prototype or initial map from the function. |
| 3068 __ ld(result, | 3042 __ ld(result, |
| 3069 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3043 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3070 | 3044 |
| 3071 // Check that the function has a prototype or an initial map. | 3045 // Check that the function has a prototype or an initial map. |
| 3072 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 3046 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 3073 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 3047 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 3074 | 3048 |
| 3075 // If the function does not have an initial map, we're done. | 3049 // If the function does not have an initial map, we're done. |
| 3076 Label done; | 3050 Label done; |
| 3077 __ GetObjectType(result, scratch, scratch); | 3051 __ GetObjectType(result, scratch, scratch); |
| 3078 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | 3052 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
| 3079 | 3053 |
| 3080 // Get the prototype from the initial map. | 3054 // Get the prototype from the initial map. |
| 3081 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3055 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3082 | 3056 |
| 3083 // All done. | 3057 // All done. |
| (...skipping 125 matching lines...) | |
| 3209 __ lhu(result, mem_operand); | 3183 __ lhu(result, mem_operand); |
| 3210 break; | 3184 break; |
| 3211 case EXTERNAL_INT32_ELEMENTS: | 3185 case EXTERNAL_INT32_ELEMENTS: |
| 3212 case INT32_ELEMENTS: | 3186 case INT32_ELEMENTS: |
| 3213 __ lw(result, mem_operand); | 3187 __ lw(result, mem_operand); |
| 3214 break; | 3188 break; |
| 3215 case EXTERNAL_UINT32_ELEMENTS: | 3189 case EXTERNAL_UINT32_ELEMENTS: |
| 3216 case UINT32_ELEMENTS: | 3190 case UINT32_ELEMENTS: |
| 3217 __ lw(result, mem_operand); | 3191 __ lw(result, mem_operand); |
| 3218 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3192 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3219 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue, | 3193 DeoptimizeIf(Ugreater_equal, instr, "negative value", result, |
| 3220 result, Operand(0x80000000)); | 3194 Operand(0x80000000)); |
| 3221 } | 3195 } |
| 3222 break; | 3196 break; |
| 3223 case FLOAT32_ELEMENTS: | 3197 case FLOAT32_ELEMENTS: |
| 3224 case FLOAT64_ELEMENTS: | 3198 case FLOAT64_ELEMENTS: |
| 3225 case EXTERNAL_FLOAT32_ELEMENTS: | 3199 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3226 case EXTERNAL_FLOAT64_ELEMENTS: | 3200 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3227 case FAST_DOUBLE_ELEMENTS: | 3201 case FAST_DOUBLE_ELEMENTS: |
| 3228 case FAST_ELEMENTS: | 3202 case FAST_ELEMENTS: |
| 3229 case FAST_SMI_ELEMENTS: | 3203 case FAST_SMI_ELEMENTS: |
| 3230 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3204 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| (...skipping 39 matching lines...) | |
| 3270 } else { | 3244 } else { |
| 3271 __ dsra(at, key, -shift_size); | 3245 __ dsra(at, key, -shift_size); |
| 3272 } | 3246 } |
| 3273 __ Daddu(scratch, scratch, at); | 3247 __ Daddu(scratch, scratch, at); |
| 3274 } | 3248 } |
| 3275 | 3249 |
| 3276 __ ldc1(result, MemOperand(scratch)); | 3250 __ ldc1(result, MemOperand(scratch)); |
| 3277 | 3251 |
| 3278 if (instr->hydrogen()->RequiresHoleCheck()) { | 3252 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3279 __ lwu(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3253 __ lwu(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
| 3280 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(kHoleNanUpper32)); | 3254 DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32)); |
| 3281 } | 3255 } |
| 3282 } | 3256 } |
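The hole check in DoLoadKeyedFixedDoubleArray works because "the hole" in a fast double array is a NaN with a fixed bit pattern, so only its upper 32 bits need to be compared. The sketch below reuses the V8 constant names, but the numeric values here are illustrative assumptions, and it reads the bits from the loaded double rather than re-reading them from memory as the generated code does:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Illustrative stand-ins for V8's hole-NaN bit pattern; the real constants
// live in V8's globals and may differ from these values.
constexpr uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
constexpr uint32_t kHoleNanLower32 = 0x7FFFFFFF;

// Detect the hole by comparing the upper 32 bits of the double's bit pattern
// against kHoleNanUpper32, as the lwu + DeoptimizeIf pair above does.
bool IsTheHole(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}

int main() {
  uint64_t hole_bits =
      (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof(hole));
  assert(IsTheHole(hole));
  assert(!IsTheHole(1.5));
  assert(!IsTheHole(0.0));
  return 0;
}
```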
| 3283 | 3257 |
| 3284 | 3258 |
| 3285 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3259 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3286 HLoadKeyed* hinstr = instr->hydrogen(); | 3260 HLoadKeyed* hinstr = instr->hydrogen(); |
| 3287 Register elements = ToRegister(instr->elements()); | 3261 Register elements = ToRegister(instr->elements()); |
| 3288 Register result = ToRegister(instr->result()); | 3262 Register result = ToRegister(instr->result()); |
| 3289 Register scratch = scratch0(); | 3263 Register scratch = scratch0(); |
| 3290 Register store_base = scratch; | 3264 Register store_base = scratch; |
| (...skipping 33 matching lines...) |
| 3324 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 3298 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| 3325 offset += kPointerSize / 2; | 3299 offset += kPointerSize / 2; |
| 3326 } | 3300 } |
| 3327 | 3301 |
| 3328 __ Load(result, MemOperand(store_base, offset), representation); | 3302 __ Load(result, MemOperand(store_base, offset), representation); |
| 3329 | 3303 |
| 3330 // Check for the hole value. | 3304 // Check for the hole value. |
| 3331 if (hinstr->RequiresHoleCheck()) { | 3305 if (hinstr->RequiresHoleCheck()) { |
| 3332 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3306 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3333 __ SmiTst(result, scratch); | 3307 __ SmiTst(result, scratch); |
| 3334 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, | 3308 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 3335 Operand(zero_reg)); | |
| 3336 } else { | 3309 } else { |
| 3337 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3310 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3338 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch)); | 3311 DeoptimizeIf(eq, instr, "hole", result, Operand(scratch)); |
| 3339 } | 3312 } |
| 3340 } | 3313 } |
| 3341 } | 3314 } |
| 3342 | 3315 |
| 3343 | 3316 |
| 3344 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3317 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3345 if (instr->is_typed_elements()) { | 3318 if (instr->is_typed_elements()) { |
| 3346 DoLoadKeyedExternalArray(instr); | 3319 DoLoadKeyedExternalArray(instr); |
| 3347 } else if (instr->hydrogen()->representation().IsDouble()) { | 3320 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3348 DoLoadKeyedFixedDoubleArray(instr); | 3321 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 135 matching lines...) |
| 3484 } | 3457 } |
| 3485 | 3458 |
| 3486 // Normal function. Replace undefined or null with global receiver. | 3459 // Normal function. Replace undefined or null with global receiver. |
| 3487 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3460 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3488 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3461 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3489 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3462 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3490 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3463 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3491 | 3464 |
| 3492 // Deoptimize if the receiver is not a JS object. | 3465 // Deoptimize if the receiver is not a JS object. |
| 3493 __ SmiTst(receiver, scratch); | 3466 __ SmiTst(receiver, scratch); |
| 3494 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg)); | 3467 DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg)); |
| 3495 | 3468 |
| 3496 __ GetObjectType(receiver, scratch, scratch); | 3469 __ GetObjectType(receiver, scratch, scratch); |
| 3497 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch, | 3470 DeoptimizeIf(lt, instr, "not a JavaScript object", scratch, |
| 3498 Operand(FIRST_SPEC_OBJECT_TYPE)); | 3471 Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 3499 __ Branch(&result_in_receiver); | 3472 __ Branch(&result_in_receiver); |
| 3500 | 3473 |
| 3501 __ bind(&global_object); | 3474 __ bind(&global_object); |
| 3502 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3475 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3503 __ ld(result, | 3476 __ ld(result, |
| 3504 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3477 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 3505 __ ld(result, | 3478 __ ld(result, |
| 3506 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3479 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 3507 | 3480 |
| (...skipping 15 matching lines...) |
| 3523 Register length = ToRegister(instr->length()); | 3496 Register length = ToRegister(instr->length()); |
| 3524 Register elements = ToRegister(instr->elements()); | 3497 Register elements = ToRegister(instr->elements()); |
| 3525 Register scratch = scratch0(); | 3498 Register scratch = scratch0(); |
| 3526 DCHECK(receiver.is(a0)); // Used for parameter count. | 3499 DCHECK(receiver.is(a0)); // Used for parameter count. |
| 3527 DCHECK(function.is(a1)); // Required by InvokeFunction. | 3500 DCHECK(function.is(a1)); // Required by InvokeFunction. |
| 3528 DCHECK(ToRegister(instr->result()).is(v0)); | 3501 DCHECK(ToRegister(instr->result()).is(v0)); |
| 3529 | 3502 |
| 3530 // Copy the arguments to this function possibly from the | 3503 // Copy the arguments to this function possibly from the |
| 3531 // adaptor frame below it. | 3504 // adaptor frame below it. |
| 3532 const uint32_t kArgumentsLimit = 1 * KB; | 3505 const uint32_t kArgumentsLimit = 1 * KB; |
| 3533 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length, | 3506 DeoptimizeIf(hi, instr, "too many arguments", length, |
| 3534 Operand(kArgumentsLimit)); | 3507 Operand(kArgumentsLimit)); |
| 3535 | 3508 |
| 3536 // Push the receiver and use the register to keep the original | 3509 // Push the receiver and use the register to keep the original |
| 3537 // number of arguments. | 3510 // number of arguments. |
| 3538 __ push(receiver); | 3511 __ push(receiver); |
| 3539 __ Move(receiver, length); | 3512 __ Move(receiver, length); |
| 3540 // The arguments are at a one pointer size offset from elements. | 3513 // The arguments are at a one pointer size offset from elements. |
| 3541 __ Daddu(elements, elements, Operand(1 * kPointerSize)); | 3514 __ Daddu(elements, elements, Operand(1 * kPointerSize)); |
| 3542 | 3515 |
| 3543 // Loop through the arguments pushing them onto the execution | 3516 // Loop through the arguments pushing them onto the execution |
| (...skipping 105 matching lines...) |
| 3649 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3622 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3650 DCHECK(instr->context() != NULL); | 3623 DCHECK(instr->context() != NULL); |
| 3651 DCHECK(ToRegister(instr->context()).is(cp)); | 3624 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3652 Register input = ToRegister(instr->value()); | 3625 Register input = ToRegister(instr->value()); |
| 3653 Register result = ToRegister(instr->result()); | 3626 Register result = ToRegister(instr->result()); |
| 3654 Register scratch = scratch0(); | 3627 Register scratch = scratch0(); |
| 3655 | 3628 |
| 3656 // Deoptimize if not a heap number. | 3629 // Deoptimize if not a heap number. |
| 3657 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3630 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3658 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3631 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3659 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at)); | 3632 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 3660 | 3633 |
| 3661 Label done; | 3634 Label done; |
| 3662 Register exponent = scratch0(); | 3635 Register exponent = scratch0(); |
| 3663 scratch = no_reg; | 3636 scratch = no_reg; |
| 3664 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3637 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3665 // Check the sign of the argument. If the argument is positive, just | 3638 // Check the sign of the argument. If the argument is positive, just |
| 3666 // return it. | 3639 // return it. |
| 3667 __ Move(result, input); | 3640 __ Move(result, input); |
| 3668 __ And(at, exponent, Operand(HeapNumber::kSignMask)); | 3641 __ And(at, exponent, Operand(HeapNumber::kSignMask)); |
| 3669 __ Branch(&done, eq, at, Operand(zero_reg)); | 3642 __ Branch(&done, eq, at, Operand(zero_reg)); |
| (...skipping 46 matching lines...) |
| 3716 | 3689 |
| 3717 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3690 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3718 Register input = ToRegister(instr->value()); | 3691 Register input = ToRegister(instr->value()); |
| 3719 Register result = ToRegister(instr->result()); | 3692 Register result = ToRegister(instr->result()); |
| 3720 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 3693 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 3721 Label done; | 3694 Label done; |
| 3722 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 3695 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 3723 __ mov(result, input); | 3696 __ mov(result, input); |
| 3724 __ dsubu(result, zero_reg, input); | 3697 __ dsubu(result, zero_reg, input); |
| 3725 // Overflow if result is still negative, i.e. 0x80000000. | 3698 // Overflow if result is still negative, i.e. 0x80000000. |
| 3726 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | 3699 DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg)); |
| 3727 __ bind(&done); | 3700 __ bind(&done); |
| 3728 } | 3701 } |
| 3729 | 3702 |
| 3730 | 3703 |
| 3731 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3704 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3732 // Class for deferred case. | 3705 // Class for deferred case. |
| 3733 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3706 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3734 public: | 3707 public: |
| 3735 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3708 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3736 : LDeferredCode(codegen), instr_(instr) { } | 3709 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 34 matching lines...) |
| 3771 Register except_flag = ToRegister(instr->temp()); | 3744 Register except_flag = ToRegister(instr->temp()); |
| 3772 | 3745 |
| 3773 __ EmitFPUTruncate(kRoundToMinusInf, | 3746 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3774 result, | 3747 result, |
| 3775 input, | 3748 input, |
| 3776 scratch1, | 3749 scratch1, |
| 3777 double_scratch0(), | 3750 double_scratch0(), |
| 3778 except_flag); | 3751 except_flag); |
| 3779 | 3752 |
| 3780 // Deopt if the operation did not succeed. | 3753 // Deopt if the operation did not succeed. |
| 3781 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3754 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3782 Operand(zero_reg)); | 3755 Operand(zero_reg)); |
| 3783 | 3756 |
| 3784 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3757 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3785 // Test for -0. | 3758 // Test for -0. |
| 3786 Label done; | 3759 Label done; |
| 3787 __ Branch(&done, ne, result, Operand(zero_reg)); | 3760 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3788 __ mfhc1(scratch1, input); // Get exponent/sign bits. | 3761 __ mfhc1(scratch1, input); // Get exponent/sign bits. |
| 3789 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 3762 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 3790 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 3763 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 3791 Operand(zero_reg)); | |
| 3792 __ bind(&done); | 3764 __ bind(&done); |
| 3793 } | 3765 } |
| 3794 } | 3766 } |
| 3795 | 3767 |
| 3796 | 3768 |
| 3797 void LCodeGen::DoMathRound(LMathRound* instr) { | 3769 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3798 DoubleRegister input = ToDoubleRegister(instr->value()); | 3770 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3799 Register result = ToRegister(instr->result()); | 3771 Register result = ToRegister(instr->result()); |
| 3800 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3772 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3801 Register scratch = scratch0(); | 3773 Register scratch = scratch0(); |
| (...skipping 12 matching lines...) |
| 3814 __ mov(result, zero_reg); | 3786 __ mov(result, zero_reg); |
| 3815 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3787 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3816 __ Branch(&check_sign_on_zero); | 3788 __ Branch(&check_sign_on_zero); |
| 3817 } else { | 3789 } else { |
| 3818 __ Branch(&done); | 3790 __ Branch(&done); |
| 3819 } | 3791 } |
| 3820 __ bind(&skip1); | 3792 __ bind(&skip1); |
| 3821 | 3793 |
| 3822 // The following conversion will not work with numbers | 3794 // The following conversion will not work with numbers |
| 3823 // outside of ]-2^32, 2^32[. | 3795 // outside of ]-2^32, 2^32[. |
| 3824 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, | 3796 DeoptimizeIf(ge, instr, "overflow", scratch, |
| 3825 Operand(HeapNumber::kExponentBias + 32)); | 3797 Operand(HeapNumber::kExponentBias + 32)); |
| 3826 | 3798 |
| 3827 // Save the original sign for later comparison. | 3799 // Save the original sign for later comparison. |
| 3828 __ And(scratch, result, Operand(HeapNumber::kSignMask)); | 3800 __ And(scratch, result, Operand(HeapNumber::kSignMask)); |
| 3829 | 3801 |
| 3830 __ Move(double_scratch0(), 0.5); | 3802 __ Move(double_scratch0(), 0.5); |
| 3831 __ add_d(double_scratch0(), input, double_scratch0()); | 3803 __ add_d(double_scratch0(), input, double_scratch0()); |
| 3832 | 3804 |
| 3833 // Check sign of the result: if the sign changed, the input | 3805 // Check sign of the result: if the sign changed, the input |
| 3834 // value was in ]0.5, 0[ and the result should be -0. | 3806 // value was in ]0.5, 0[ and the result should be -0. |
| 3835 __ mfhc1(result, double_scratch0()); | 3807 __ mfhc1(result, double_scratch0()); |
| 3836 // mfhc1 sign-extends, clear the upper bits. | 3808 // mfhc1 sign-extends, clear the upper bits. |
| 3837 __ dsll32(result, result, 0); | 3809 __ dsll32(result, result, 0); |
| 3838 __ dsrl32(result, result, 0); | 3810 __ dsrl32(result, result, 0); |
| 3839 __ Xor(result, result, Operand(scratch)); | 3811 __ Xor(result, result, Operand(scratch)); |
| 3840 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3812 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3841 // ARM uses 'mi' here, which is 'lt' | 3813 // ARM uses 'mi' here, which is 'lt' |
| 3842 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 3814 DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg)); |
| 3843 } else { | 3815 } else { |
| 3844 Label skip2; | 3816 Label skip2; |
| 3845 // ARM uses 'mi' here, which is 'lt' | 3817 // ARM uses 'mi' here, which is 'lt' |
| 3846 // Negating it results in 'ge' | 3818 // Negating it results in 'ge' |
| 3847 __ Branch(&skip2, ge, result, Operand(zero_reg)); | 3819 __ Branch(&skip2, ge, result, Operand(zero_reg)); |
| 3848 __ mov(result, zero_reg); | 3820 __ mov(result, zero_reg); |
| 3849 __ Branch(&done); | 3821 __ Branch(&done); |
| 3850 __ bind(&skip2); | 3822 __ bind(&skip2); |
| 3851 } | 3823 } |
| 3852 | 3824 |
| 3853 Register except_flag = scratch; | 3825 Register except_flag = scratch; |
| 3854 __ EmitFPUTruncate(kRoundToMinusInf, | 3826 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3855 result, | 3827 result, |
| 3856 double_scratch0(), | 3828 double_scratch0(), |
| 3857 at, | 3829 at, |
| 3858 double_scratch1, | 3830 double_scratch1, |
| 3859 except_flag); | 3831 except_flag); |
| 3860 | 3832 |
| 3861 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3833 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3862 Operand(zero_reg)); | 3834 Operand(zero_reg)); |
| 3863 | 3835 |
| 3864 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3836 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3865 // Test for -0. | 3837 // Test for -0. |
| 3866 __ Branch(&done, ne, result, Operand(zero_reg)); | 3838 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3867 __ bind(&check_sign_on_zero); | 3839 __ bind(&check_sign_on_zero); |
| 3868 __ mfhc1(scratch, input); // Get exponent/sign bits. | 3840 __ mfhc1(scratch, input); // Get exponent/sign bits. |
| 3869 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 3841 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
| 3870 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch, | 3842 DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg)); |
| 3871 Operand(zero_reg)); | |
| 3872 } | 3843 } |
| 3873 __ bind(&done); | 3844 __ bind(&done); |
| 3874 } | 3845 } |
| 3875 | 3846 |
| 3876 | 3847 |
| 3877 void LCodeGen::DoMathFround(LMathFround* instr) { | 3848 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3878 DoubleRegister input = ToDoubleRegister(instr->value()); | 3849 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3879 DoubleRegister result = ToDoubleRegister(instr->result()); | 3850 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3880 __ cvt_s_d(result, input); | 3851 __ cvt_s_d(result, input); |
| 3881 __ cvt_d_s(result, result); | 3852 __ cvt_d_s(result, result); |
| (...skipping 45 matching lines...) |
| 3927 | 3898 |
| 3928 if (exponent_type.IsSmi()) { | 3899 if (exponent_type.IsSmi()) { |
| 3929 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3900 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3930 __ CallStub(&stub); | 3901 __ CallStub(&stub); |
| 3931 } else if (exponent_type.IsTagged()) { | 3902 } else if (exponent_type.IsTagged()) { |
| 3932 Label no_deopt; | 3903 Label no_deopt; |
| 3933 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3904 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3934 DCHECK(!a7.is(tagged_exponent)); | 3905 DCHECK(!a7.is(tagged_exponent)); |
| 3935 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3906 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3936 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3907 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3937 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at)); | 3908 DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at)); |
| 3938 __ bind(&no_deopt); | 3909 __ bind(&no_deopt); |
| 3939 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3910 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3940 __ CallStub(&stub); | 3911 __ CallStub(&stub); |
| 3941 } else if (exponent_type.IsInteger32()) { | 3912 } else if (exponent_type.IsInteger32()) { |
| 3942 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3913 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3943 __ CallStub(&stub); | 3914 __ CallStub(&stub); |
| 3944 } else { | 3915 } else { |
| 3945 DCHECK(exponent_type.IsDouble()); | 3916 DCHECK(exponent_type.IsDouble()); |
| 3946 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3917 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3947 __ CallStub(&stub); | 3918 __ CallStub(&stub); |
| (...skipping 380 matching lines...) |
| 4328 } else { | 4299 } else { |
| 4329 reg = ToRegister(instr->index()); | 4300 reg = ToRegister(instr->index()); |
| 4330 operand = ToOperand(instr->length()); | 4301 operand = ToOperand(instr->length()); |
| 4331 } | 4302 } |
| 4332 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4303 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4333 Label done; | 4304 Label done; |
| 4334 __ Branch(&done, NegateCondition(cc), reg, operand); | 4305 __ Branch(&done, NegateCondition(cc), reg, operand); |
| 4335 __ stop("eliminated bounds check failed"); | 4306 __ stop("eliminated bounds check failed"); |
| 4336 __ bind(&done); | 4307 __ bind(&done); |
| 4337 } else { | 4308 } else { |
| 4338 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand); | 4309 DeoptimizeIf(cc, instr, "out of bounds", reg, operand); |
| 4339 } | 4310 } |
| 4340 } | 4311 } |
| 4341 | 4312 |
| 4342 | 4313 |
| 4343 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4314 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4344 Register external_pointer = ToRegister(instr->elements()); | 4315 Register external_pointer = ToRegister(instr->elements()); |
| 4345 Register key = no_reg; | 4316 Register key = no_reg; |
| 4346 ElementsKind elements_kind = instr->elements_kind(); | 4317 ElementsKind elements_kind = instr->elements_kind(); |
| 4347 bool key_is_constant = instr->key()->IsConstantOperand(); | 4318 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4348 int constant_key = 0; | 4319 int constant_key = 0; |
| (...skipping 270 matching lines...) |
| 4619 __ bind(¬_applicable); | 4590 __ bind(¬_applicable); |
| 4620 } | 4591 } |
| 4621 | 4592 |
| 4622 | 4593 |
| 4623 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4594 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4624 Register object = ToRegister(instr->object()); | 4595 Register object = ToRegister(instr->object()); |
| 4625 Register temp = ToRegister(instr->temp()); | 4596 Register temp = ToRegister(instr->temp()); |
| 4626 Label no_memento_found; | 4597 Label no_memento_found; |
| 4627 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, | 4598 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, |
| 4628 ne, &no_memento_found); | 4599 ne, &no_memento_found); |
| 4629 DeoptimizeIf(al, instr, Deoptimizer::kMementoFound); | 4600 DeoptimizeIf(al, instr, "memento found"); |
| 4630 __ bind(&no_memento_found); | 4601 __ bind(&no_memento_found); |
| 4631 } | 4602 } |
| 4632 | 4603 |
| 4633 | 4604 |
| 4634 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4605 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4635 DCHECK(ToRegister(instr->context()).is(cp)); | 4606 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4636 DCHECK(ToRegister(instr->left()).is(a1)); | 4607 DCHECK(ToRegister(instr->left()).is(a1)); |
| 4637 DCHECK(ToRegister(instr->right()).is(a0)); | 4608 DCHECK(ToRegister(instr->right()).is(a0)); |
| 4638 StringAddStub stub(isolate(), | 4609 StringAddStub stub(isolate(), |
| 4639 instr->hydrogen()->flags(), | 4610 instr->hydrogen()->flags(), |
| (...skipping 282 matching lines...) |
| 4922 } | 4893 } |
| 4923 | 4894 |
| 4924 | 4895 |
| 4925 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4896 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4926 HChange* hchange = instr->hydrogen(); | 4897 HChange* hchange = instr->hydrogen(); |
| 4927 Register input = ToRegister(instr->value()); | 4898 Register input = ToRegister(instr->value()); |
| 4928 Register output = ToRegister(instr->result()); | 4899 Register output = ToRegister(instr->result()); |
| 4929 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4900 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4930 hchange->value()->CheckFlag(HValue::kUint32)) { | 4901 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4931 __ And(at, input, Operand(0x80000000)); | 4902 __ And(at, input, Operand(0x80000000)); |
| 4932 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4903 DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg)); |
| 4933 } | 4904 } |
| 4934 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4905 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4935 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4906 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4936 __ SmiTagCheckOverflow(output, input, at); | 4907 __ SmiTagCheckOverflow(output, input, at); |
| 4937 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4908 DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg)); |
| 4938 } else { | 4909 } else { |
| 4939 __ SmiTag(output, input); | 4910 __ SmiTag(output, input); |
| 4940 } | 4911 } |
| 4941 } | 4912 } |
| 4942 | 4913 |
| 4943 | 4914 |
| 4944 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4915 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4945 Register scratch = scratch0(); | 4916 Register scratch = scratch0(); |
| 4946 Register input = ToRegister(instr->value()); | 4917 Register input = ToRegister(instr->value()); |
| 4947 Register result = ToRegister(instr->result()); | 4918 Register result = ToRegister(instr->result()); |
| 4948 if (instr->needs_check()) { | 4919 if (instr->needs_check()) { |
| 4949 STATIC_ASSERT(kHeapObjectTag == 1); | 4920 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4950 // If the input is a HeapObject, value of scratch won't be zero. | 4921 // If the input is a HeapObject, value of scratch won't be zero. |
| 4951 __ And(scratch, input, Operand(kHeapObjectTag)); | 4922 __ And(scratch, input, Operand(kHeapObjectTag)); |
| 4952 __ SmiUntag(result, input); | 4923 __ SmiUntag(result, input); |
| 4953 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg)); | 4924 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 4954 } else { | 4925 } else { |
| 4955 __ SmiUntag(result, input); | 4926 __ SmiUntag(result, input); |
| 4956 } | 4927 } |
| 4957 } | 4928 } |
| 4958 | 4929 |
| 4959 | 4930 |
| 4960 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4931 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4961 DoubleRegister result_reg, | 4932 DoubleRegister result_reg, |
| 4962 NumberUntagDMode mode) { | 4933 NumberUntagDMode mode) { |
| 4963 bool can_convert_undefined_to_nan = | 4934 bool can_convert_undefined_to_nan = |
| 4964 instr->hydrogen()->can_convert_undefined_to_nan(); | 4935 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4965 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4936 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4966 | 4937 |
| 4967 Register scratch = scratch0(); | 4938 Register scratch = scratch0(); |
| 4968 Label convert, load_smi, done; | 4939 Label convert, load_smi, done; |
| 4969 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4940 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4970 // Smi check. | 4941 // Smi check. |
| 4971 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4942 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4972 // Heap number map check. | 4943 // Heap number map check. |
| 4973 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4944 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4974 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4945 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4975 if (can_convert_undefined_to_nan) { | 4946 if (can_convert_undefined_to_nan) { |
| 4976 __ Branch(&convert, ne, scratch, Operand(at)); | 4947 __ Branch(&convert, ne, scratch, Operand(at)); |
| 4977 } else { | 4948 } else { |
| 4978 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, | 4949 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 4979 Operand(at)); | |
| 4980 } | 4950 } |
| 4981 // Load heap number. | 4951 // Load heap number. |
| 4982 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4952 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4983 if (deoptimize_on_minus_zero) { | 4953 if (deoptimize_on_minus_zero) { |
| 4984 __ mfc1(at, result_reg); | 4954 __ mfc1(at, result_reg); |
| 4985 __ Branch(&done, ne, at, Operand(zero_reg)); | 4955 __ Branch(&done, ne, at, Operand(zero_reg)); |
| 4986 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. | 4956 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. |
| 4987 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch, | 4957 DeoptimizeIf(eq, instr, "minus zero", scratch, |
| 4988 Operand(HeapNumber::kSignMask)); | 4958 Operand(HeapNumber::kSignMask)); |
| 4989 } | 4959 } |
| 4990 __ Branch(&done); | 4960 __ Branch(&done); |
| 4991 if (can_convert_undefined_to_nan) { | 4961 if (can_convert_undefined_to_nan) { |
| 4992 __ bind(&convert); | 4962 __ bind(&convert); |
| 4993 // Convert undefined (and hole) to NaN. | 4963 // Convert undefined (and hole) to NaN. |
| 4994 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4964 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4995 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 4965 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 4996 Operand(at)); | 4966 Operand(at)); |
| 4997 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4967 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4998 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4968 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4999 __ Branch(&done); | 4969 __ Branch(&done); |
| 5000 } | 4970 } |
| 5001 } else { | 4971 } else { |
| 5002 __ SmiUntag(scratch, input_reg); | 4972 __ SmiUntag(scratch, input_reg); |
| 5003 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4973 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 5004 } | 4974 } |
| 5005 // Smi to double register conversion | 4975 // Smi to double register conversion |
| (...skipping 44 matching lines...) |
| 5050 __ mov(input_reg, zero_reg); // In delay slot. | 5020 __ mov(input_reg, zero_reg); // In delay slot. |
| 5051 | 5021 |
| 5052 __ bind(&check_bools); | 5022 __ bind(&check_bools); |
| 5053 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 5023 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 5054 __ Branch(&check_false, ne, scratch2, Operand(at)); | 5024 __ Branch(&check_false, ne, scratch2, Operand(at)); |
| 5055 __ Branch(USE_DELAY_SLOT, &done); | 5025 __ Branch(USE_DELAY_SLOT, &done); |
| 5056 __ li(input_reg, Operand(1)); // In delay slot. | 5026 __ li(input_reg, Operand(1)); // In delay slot. |
| 5057 | 5027 |
| 5058 __ bind(&check_false); | 5028 __ bind(&check_false); |
| 5059 __ LoadRoot(at, Heap::kFalseValueRootIndex); | 5029 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 5060 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse, | 5030 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2, |
| 5061 scratch2, Operand(at)); | 5031 Operand(at)); |
| 5062 __ Branch(USE_DELAY_SLOT, &done); | 5032 __ Branch(USE_DELAY_SLOT, &done); |
| 5063 __ mov(input_reg, zero_reg); // In delay slot. | 5033 __ mov(input_reg, zero_reg); // In delay slot. |
| 5064 } else { | 5034 } else { |
| 5065 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1, | 5035 DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at)); |
| 5066 Operand(at)); | |
| 5067 | 5036 |
| 5068 // Load the double value. | 5037 // Load the double value. |
| 5069 __ ldc1(double_scratch, | 5038 __ ldc1(double_scratch, |
| 5070 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5039 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5071 | 5040 |
| 5072 Register except_flag = scratch2; | 5041 Register except_flag = scratch2; |
| 5073 __ EmitFPUTruncate(kRoundToZero, | 5042 __ EmitFPUTruncate(kRoundToZero, |
| 5074 input_reg, | 5043 input_reg, |
| 5075 double_scratch, | 5044 double_scratch, |
| 5076 scratch1, | 5045 scratch1, |
| 5077 double_scratch2, | 5046 double_scratch2, |
| 5078 except_flag, | 5047 except_flag, |
| 5079 kCheckForInexactConversion); | 5048 kCheckForInexactConversion); |
| 5080 | 5049 |
| 5081 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5050 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5082 Operand(zero_reg)); | 5051 Operand(zero_reg)); |
| 5083 | 5052 |
| 5084 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5053 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5085 __ Branch(&done, ne, input_reg, Operand(zero_reg)); | 5054 __ Branch(&done, ne, input_reg, Operand(zero_reg)); |
| 5086 | 5055 |
| 5087 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. | 5056 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. |
| 5088 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5057 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5089 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5058 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5090 Operand(zero_reg)); | |
| 5091 } | 5059 } |
| 5092 } | 5060 } |
| 5093 __ bind(&done); | 5061 __ bind(&done); |
| 5094 } | 5062 } |
| 5095 | 5063 |
| 5096 | 5064 |
| 5097 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5065 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5098 class DeferredTaggedToI FINAL : public LDeferredCode { | 5066 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 5099 public: | 5067 public: |
| 5100 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5068 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 55 matching lines...) |
| 5156 | 5124 |
| 5157 __ EmitFPUTruncate(kRoundToMinusInf, | 5125 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5158 result_reg, | 5126 result_reg, |
| 5159 double_input, | 5127 double_input, |
| 5160 scratch1, | 5128 scratch1, |
| 5161 double_scratch0(), | 5129 double_scratch0(), |
| 5162 except_flag, | 5130 except_flag, |
| 5163 kCheckForInexactConversion); | 5131 kCheckForInexactConversion); |
| 5164 | 5132 |
| 5165 // Deopt if the operation did not succeed (except_flag != 0). | 5133 // Deopt if the operation did not succeed (except_flag != 0). |
| 5166 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5134 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5167 Operand(zero_reg)); | 5135 Operand(zero_reg)); |
| 5168 | 5136 |
| 5169 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5137 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5170 Label done; | 5138 Label done; |
| 5171 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5139 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5172 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 5140 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 5173 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5141 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5174 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5142 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5175 Operand(zero_reg)); | |
| 5176 __ bind(&done); | 5143 __ bind(&done); |
| 5177 } | 5144 } |
| 5178 } | 5145 } |
| 5179 } | 5146 } |
| 5180 | 5147 |
| 5181 | 5148 |
| 5182 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5149 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5183 Register result_reg = ToRegister(instr->result()); | 5150 Register result_reg = ToRegister(instr->result()); |
| 5184 Register scratch1 = LCodeGen::scratch0(); | 5151 Register scratch1 = LCodeGen::scratch0(); |
| 5185 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5152 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 5186 | 5153 |
| 5187 if (instr->truncating()) { | 5154 if (instr->truncating()) { |
| 5188 __ TruncateDoubleToI(result_reg, double_input); | 5155 __ TruncateDoubleToI(result_reg, double_input); |
| 5189 } else { | 5156 } else { |
| 5190 Register except_flag = LCodeGen::scratch1(); | 5157 Register except_flag = LCodeGen::scratch1(); |
| 5191 | 5158 |
| 5192 __ EmitFPUTruncate(kRoundToMinusInf, | 5159 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5193 result_reg, | 5160 result_reg, |
| 5194 double_input, | 5161 double_input, |
| 5195 scratch1, | 5162 scratch1, |
| 5196 double_scratch0(), | 5163 double_scratch0(), |
| 5197 except_flag, | 5164 except_flag, |
| 5198 kCheckForInexactConversion); | 5165 kCheckForInexactConversion); |
| 5199 | 5166 |
| 5200 // Deopt if the operation did not succeed (except_flag != 0). | 5167 // Deopt if the operation did not succeed (except_flag != 0). |
| 5201 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5168 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5202 Operand(zero_reg)); | 5169 Operand(zero_reg)); |
| 5203 | 5170 |
| 5204 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5171 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5205 Label done; | 5172 Label done; |
| 5206 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5173 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5207 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 5174 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 5208 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5175 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5209 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5176 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5210 Operand(zero_reg)); | |
| 5211 __ bind(&done); | 5177 __ bind(&done); |
| 5212 } | 5178 } |
| 5213 } | 5179 } |
| 5214 __ SmiTag(result_reg, result_reg); | 5180 __ SmiTag(result_reg, result_reg); |
| 5215 } | 5181 } |
| 5216 | 5182 |
| 5217 | 5183 |
| 5218 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5184 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5219 LOperand* input = instr->value(); | 5185 LOperand* input = instr->value(); |
| 5220 __ SmiTst(ToRegister(input), at); | 5186 __ SmiTst(ToRegister(input), at); |
| 5221 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg)); | 5187 DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg)); |
| 5222 } | 5188 } |
| 5223 | 5189 |
| 5224 | 5190 |
| 5225 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5191 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5226 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5192 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5227 LOperand* input = instr->value(); | 5193 LOperand* input = instr->value(); |
| 5228 __ SmiTst(ToRegister(input), at); | 5194 __ SmiTst(ToRegister(input), at); |
| 5229 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 5195 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5230 } | 5196 } |
| 5231 } | 5197 } |
| 5232 | 5198 |
| 5233 | 5199 |
| 5234 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5200 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5235 Register input = ToRegister(instr->value()); | 5201 Register input = ToRegister(instr->value()); |
| 5236 Register scratch = scratch0(); | 5202 Register scratch = scratch0(); |
| 5237 | 5203 |
| 5238 __ GetObjectType(input, scratch, scratch); | 5204 __ GetObjectType(input, scratch, scratch); |
| 5239 | 5205 |
| 5240 if (instr->hydrogen()->is_interval_check()) { | 5206 if (instr->hydrogen()->is_interval_check()) { |
| 5241 InstanceType first; | 5207 InstanceType first; |
| 5242 InstanceType last; | 5208 InstanceType last; |
| 5243 instr->hydrogen()->GetCheckInterval(&first, &last); | 5209 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5244 | 5210 |
| 5245 // If there is only one type in the interval check for equality. | 5211 // If there is only one type in the interval check for equality. |
| 5246 if (first == last) { | 5212 if (first == last) { |
| 5247 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5213 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first)); |
| 5248 Operand(first)); | |
| 5249 } else { | 5214 } else { |
| 5250 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch, | 5215 DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first)); |
| 5251 Operand(first)); | |
| 5252 // Omit check for the last type. | 5216 // Omit check for the last type. |
| 5253 if (last != LAST_TYPE) { | 5217 if (last != LAST_TYPE) { |
| 5254 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch, | 5218 DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last)); |
| 5255 Operand(last)); | |
| 5256 } | 5219 } |
| 5257 } | 5220 } |
| 5258 } else { | 5221 } else { |
| 5259 uint8_t mask; | 5222 uint8_t mask; |
| 5260 uint8_t tag; | 5223 uint8_t tag; |
| 5261 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5224 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5262 | 5225 |
| 5263 if (base::bits::IsPowerOfTwo32(mask)) { | 5226 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5264 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5227 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5265 __ And(at, scratch, mask); | 5228 __ And(at, scratch, mask); |
| 5266 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, | 5229 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at, |
| 5267 at, Operand(zero_reg)); | 5230 Operand(zero_reg)); |
| 5268 } else { | 5231 } else { |
| 5269 __ And(scratch, scratch, Operand(mask)); | 5232 __ And(scratch, scratch, Operand(mask)); |
| 5270 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5233 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag)); |
| 5271 Operand(tag)); | |
| 5272 } | 5234 } |
| 5273 } | 5235 } |
| 5274 } | 5236 } |
| 5275 | 5237 |
| 5276 | 5238 |
| 5277 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5239 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5278 Register reg = ToRegister(instr->value()); | 5240 Register reg = ToRegister(instr->value()); |
| 5279 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5241 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5280 AllowDeferredHandleDereference smi_check; | 5242 AllowDeferredHandleDereference smi_check; |
| 5281 if (isolate()->heap()->InNewSpace(*object)) { | 5243 if (isolate()->heap()->InNewSpace(*object)) { |
| 5282 Register reg = ToRegister(instr->value()); | 5244 Register reg = ToRegister(instr->value()); |
| 5283 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5245 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5284 __ li(at, Operand(Handle<Object>(cell))); | 5246 __ li(at, Operand(Handle<Object>(cell))); |
| 5285 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); | 5247 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); |
| 5286 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at)); | 5248 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at)); |
| 5287 } else { | 5249 } else { |
| 5288 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object)); | 5250 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object)); |
| 5289 } | 5251 } |
| 5290 } | 5252 } |
| 5291 | 5253 |
| 5292 | 5254 |
| 5293 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5255 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5294 { | 5256 { |
| 5295 PushSafepointRegistersScope scope(this); | 5257 PushSafepointRegistersScope scope(this); |
| 5296 __ push(object); | 5258 __ push(object); |
| 5297 __ mov(cp, zero_reg); | 5259 __ mov(cp, zero_reg); |
| 5298 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5260 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5299 RecordSafepointWithRegisters( | 5261 RecordSafepointWithRegisters( |
| 5300 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5262 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5301 __ StoreToSafepointRegisterSlot(v0, scratch0()); | 5263 __ StoreToSafepointRegisterSlot(v0, scratch0()); |
| 5302 } | 5264 } |
| 5303 __ SmiTst(scratch0(), at); | 5265 __ SmiTst(scratch0(), at); |
| 5304 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at, | 5266 DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg)); |
| 5305 Operand(zero_reg)); | |
| 5306 } | 5267 } |
| 5307 | 5268 |
| 5308 | 5269 |
| 5309 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5270 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5310 class DeferredCheckMaps FINAL : public LDeferredCode { | 5271 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5311 public: | 5272 public: |
| 5312 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5273 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5313 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5274 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5314 SetExit(check_maps()); | 5275 SetExit(check_maps()); |
| 5315 } | 5276 } |
| (...skipping 33 matching lines...) |
| 5349 Label success; | 5310 Label success; |
| 5350 for (int i = 0; i < maps->size() - 1; i++) { | 5311 for (int i = 0; i < maps->size() - 1; i++) { |
| 5351 Handle<Map> map = maps->at(i).handle(); | 5312 Handle<Map> map = maps->at(i).handle(); |
| 5352 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); | 5313 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); |
| 5353 } | 5314 } |
| 5354 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5315 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5355 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). | 5316 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). |
| 5356 if (instr->hydrogen()->HasMigrationTarget()) { | 5317 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5357 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); | 5318 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); |
| 5358 } else { | 5319 } else { |
| 5359 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map)); | 5320 DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map)); |
| 5360 } | 5321 } |
| 5361 | 5322 |
| 5362 __ bind(&success); | 5323 __ bind(&success); |
| 5363 } | 5324 } |
| 5364 | 5325 |
| 5365 | 5326 |
| 5366 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5327 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5367 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5328 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5368 Register result_reg = ToRegister(instr->result()); | 5329 Register result_reg = ToRegister(instr->result()); |
| 5369 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5330 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| (...skipping 17 matching lines...) |
| 5387 | 5348 |
| 5388 // Both smi and heap number cases are handled. | 5349 // Both smi and heap number cases are handled. |
| 5389 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | 5350 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); |
| 5390 | 5351 |
| 5391 // Check for heap number | 5352 // Check for heap number |
| 5392 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5353 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5393 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | 5354 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); |
| 5394 | 5355 |
| 5395 // Check for undefined. Undefined is converted to zero for clamping | 5356 // Check for undefined. Undefined is converted to zero for clamping |
| 5396 // conversions. | 5357 // conversions. |
| 5397 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 5358 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 5398 Operand(factory()->undefined_value())); | 5359 Operand(factory()->undefined_value())); |
| 5399 __ mov(result_reg, zero_reg); | 5360 __ mov(result_reg, zero_reg); |
| 5400 __ jmp(&done); | 5361 __ jmp(&done); |
| 5401 | 5362 |
| 5402 // Heap number | 5363 // Heap number |
| 5403 __ bind(&heap_number); | 5364 __ bind(&heap_number); |
| 5404 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | 5365 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, |
| 5405 HeapNumber::kValueOffset)); | 5366 HeapNumber::kValueOffset)); |
| 5406 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 5367 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
| 5407 __ jmp(&done); | 5368 __ jmp(&done); |
| (...skipping 495 matching lines...) |
| 5903 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5864 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5904 | 5865 |
| 5905 GenerateOsrPrologue(); | 5866 GenerateOsrPrologue(); |
| 5906 } | 5867 } |
| 5907 | 5868 |
| 5908 | 5869 |
| 5909 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5870 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5910 Register result = ToRegister(instr->result()); | 5871 Register result = ToRegister(instr->result()); |
| 5911 Register object = ToRegister(instr->object()); | 5872 Register object = ToRegister(instr->object()); |
| 5912 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 5873 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 5913 DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at)); | 5874 DeoptimizeIf(eq, instr, "undefined", object, Operand(at)); |
| 5914 | 5875 |
| 5915 Register null_value = a5; | 5876 Register null_value = a5; |
| 5916 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5877 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5917 DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value)); | 5878 DeoptimizeIf(eq, instr, "null", object, Operand(null_value)); |
| 5918 | 5879 |
| 5919 __ And(at, object, kSmiTagMask); | 5880 __ And(at, object, kSmiTagMask); |
| 5920 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 5881 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5921 | 5882 |
| 5922 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5883 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5923 __ GetObjectType(object, a1, a1); | 5884 __ GetObjectType(object, a1, a1); |
| 5924 DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1, | 5885 DeoptimizeIf(le, instr, "not a JavaScript object", a1, |
| 5925 Operand(LAST_JS_PROXY_TYPE)); | 5886 Operand(LAST_JS_PROXY_TYPE)); |
| 5926 | 5887 |
| 5927 Label use_cache, call_runtime; | 5888 Label use_cache, call_runtime; |
| 5928 DCHECK(object.is(a0)); | 5889 DCHECK(object.is(a0)); |
| 5929 __ CheckEnumCache(null_value, &call_runtime); | 5890 __ CheckEnumCache(null_value, &call_runtime); |
| 5930 | 5891 |
| 5931 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 5892 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5932 __ Branch(&use_cache); | 5893 __ Branch(&use_cache); |
| 5933 | 5894 |
| 5934 // Get the set of properties to enumerate. | 5895 // Get the set of properties to enumerate. |
| 5935 __ bind(&call_runtime); | 5896 __ bind(&call_runtime); |
| 5936 __ push(object); | 5897 __ push(object); |
| 5937 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5898 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5938 | 5899 |
| 5939 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); | 5900 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 5940 DCHECK(result.is(v0)); | 5901 DCHECK(result.is(v0)); |
| 5941 __ LoadRoot(at, Heap::kMetaMapRootIndex); | 5902 __ LoadRoot(at, Heap::kMetaMapRootIndex); |
| 5942 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at)); | 5903 DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at)); |
| 5943 __ bind(&use_cache); | 5904 __ bind(&use_cache); |
| 5944 } | 5905 } |
| 5945 | 5906 |
| 5946 | 5907 |
| 5947 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5908 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5948 Register map = ToRegister(instr->map()); | 5909 Register map = ToRegister(instr->map()); |
| 5949 Register result = ToRegister(instr->result()); | 5910 Register result = ToRegister(instr->result()); |
| 5950 Label load_cache, done; | 5911 Label load_cache, done; |
| 5951 __ EnumLength(result, map); | 5912 __ EnumLength(result, map); |
| 5952 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); | 5913 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); |
| 5953 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); | 5914 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5954 __ jmp(&done); | 5915 __ jmp(&done); |
| 5955 | 5916 |
| 5956 __ bind(&load_cache); | 5917 __ bind(&load_cache); |
| 5957 __ LoadInstanceDescriptors(map, result); | 5918 __ LoadInstanceDescriptors(map, result); |
| 5958 __ ld(result, | 5919 __ ld(result, |
| 5959 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5920 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5960 __ ld(result, | 5921 __ ld(result, |
| 5961 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5922 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5962 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg)); | 5923 DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg)); |
| 5963 | 5924 |
| 5964 __ bind(&done); | 5925 __ bind(&done); |
| 5965 } | 5926 } |
| 5966 | 5927 |
| 5967 | 5928 |
| 5968 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5929 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5969 Register object = ToRegister(instr->value()); | 5930 Register object = ToRegister(instr->value()); |
| 5970 Register map = ToRegister(instr->map()); | 5931 Register map = ToRegister(instr->map()); |
| 5971 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5932 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5972 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0())); | 5933 DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0())); |
| 5973 } | 5934 } |
| 5974 | 5935 |
| 5975 | 5936 |
| 5976 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5937 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5977 Register result, | 5938 Register result, |
| 5978 Register object, | 5939 Register object, |
| 5979 Register index) { | 5940 Register index) { |
| 5980 PushSafepointRegistersScope scope(this); | 5941 PushSafepointRegistersScope scope(this); |
| 5981 __ Push(object, index); | 5942 __ Push(object, index); |
| 5982 __ mov(cp, zero_reg); | 5943 __ mov(cp, zero_reg); |
| (...skipping 74 matching lines...) |
| 6057 __ li(at, scope_info); | 6018 __ li(at, scope_info); |
| 6058 __ Push(at, ToRegister(instr->function())); | 6019 __ Push(at, ToRegister(instr->function())); |
| 6059 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6020 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 6060 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6021 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 6061 } | 6022 } |
| 6062 | 6023 |
| 6063 | 6024 |
| 6064 #undef __ | 6025 #undef __ |
| 6065 | 6026 |
| 6066 } } // namespace v8::internal | 6027 } } // namespace v8::internal |