OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/hydrogen-osr.h" | 10 #include "src/hydrogen-osr.h" |
(...skipping 1047 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1058 Label dividend_is_not_negative, done; | 1058 Label dividend_is_not_negative, done; |
1059 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1059 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
1060 __ cmpwi(dividend, Operand::Zero()); | 1060 __ cmpwi(dividend, Operand::Zero()); |
1061 __ bge(÷nd_is_not_negative); | 1061 __ bge(÷nd_is_not_negative); |
1062 if (shift) { | 1062 if (shift) { |
1063 // Note that this is correct even for kMinInt operands. | 1063 // Note that this is correct even for kMinInt operands. |
1064 __ neg(dividend, dividend); | 1064 __ neg(dividend, dividend); |
1065 __ ExtractBitRange(dividend, dividend, shift - 1, 0); | 1065 __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
1066 __ neg(dividend, dividend, LeaveOE, SetRC); | 1066 __ neg(dividend, dividend, LeaveOE, SetRC); |
1067 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1067 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1068 DeoptimizeIf(eq, instr, "minus zero", cr0); | 1068 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); |
1069 } | 1069 } |
1070 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1070 } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1071 __ li(dividend, Operand::Zero()); | 1071 __ li(dividend, Operand::Zero()); |
1072 } else { | 1072 } else { |
1073 DeoptimizeIf(al, instr, "minus zero"); | 1073 DeoptimizeIf(al, instr, Deoptimizer::kMinusZero); |
1074 } | 1074 } |
1075 __ b(&done); | 1075 __ b(&done); |
1076 } | 1076 } |
1077 | 1077 |
1078 __ bind(÷nd_is_not_negative); | 1078 __ bind(÷nd_is_not_negative); |
1079 if (shift) { | 1079 if (shift) { |
1080 __ ExtractBitRange(dividend, dividend, shift - 1, 0); | 1080 __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
1081 } else { | 1081 } else { |
1082 __ li(dividend, Operand::Zero()); | 1082 __ li(dividend, Operand::Zero()); |
1083 } | 1083 } |
1084 __ bind(&done); | 1084 __ bind(&done); |
1085 } | 1085 } |
1086 | 1086 |
1087 | 1087 |
1088 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1088 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
1089 Register dividend = ToRegister(instr->dividend()); | 1089 Register dividend = ToRegister(instr->dividend()); |
1090 int32_t divisor = instr->divisor(); | 1090 int32_t divisor = instr->divisor(); |
1091 Register result = ToRegister(instr->result()); | 1091 Register result = ToRegister(instr->result()); |
1092 DCHECK(!dividend.is(result)); | 1092 DCHECK(!dividend.is(result)); |
1093 | 1093 |
1094 if (divisor == 0) { | 1094 if (divisor == 0) { |
1095 DeoptimizeIf(al, instr, "division by zero"); | 1095 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1096 return; | 1096 return; |
1097 } | 1097 } |
1098 | 1098 |
1099 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1099 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1100 __ mov(ip, Operand(Abs(divisor))); | 1100 __ mov(ip, Operand(Abs(divisor))); |
1101 __ mullw(result, result, ip); | 1101 __ mullw(result, result, ip); |
1102 __ sub(result, dividend, result, LeaveOE, SetRC); | 1102 __ sub(result, dividend, result, LeaveOE, SetRC); |
1103 | 1103 |
1104 // Check for negative zero. | 1104 // Check for negative zero. |
1105 HMod* hmod = instr->hydrogen(); | 1105 HMod* hmod = instr->hydrogen(); |
1106 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1106 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1107 Label remainder_not_zero; | 1107 Label remainder_not_zero; |
1108 __ bne(&remainder_not_zero, cr0); | 1108 __ bne(&remainder_not_zero, cr0); |
1109 __ cmpwi(dividend, Operand::Zero()); | 1109 __ cmpwi(dividend, Operand::Zero()); |
1110 DeoptimizeIf(lt, instr, "minus zero"); | 1110 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1111 __ bind(&remainder_not_zero); | 1111 __ bind(&remainder_not_zero); |
1112 } | 1112 } |
1113 } | 1113 } |
1114 | 1114 |
1115 | 1115 |
1116 void LCodeGen::DoModI(LModI* instr) { | 1116 void LCodeGen::DoModI(LModI* instr) { |
1117 HMod* hmod = instr->hydrogen(); | 1117 HMod* hmod = instr->hydrogen(); |
1118 Register left_reg = ToRegister(instr->left()); | 1118 Register left_reg = ToRegister(instr->left()); |
1119 Register right_reg = ToRegister(instr->right()); | 1119 Register right_reg = ToRegister(instr->right()); |
1120 Register result_reg = ToRegister(instr->result()); | 1120 Register result_reg = ToRegister(instr->result()); |
1121 Register scratch = scratch0(); | 1121 Register scratch = scratch0(); |
1122 Label done; | 1122 Label done; |
1123 | 1123 |
1124 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1124 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
1125 __ li(r0, Operand::Zero()); // clear xer | 1125 __ li(r0, Operand::Zero()); // clear xer |
1126 __ mtxer(r0); | 1126 __ mtxer(r0); |
1127 } | 1127 } |
1128 | 1128 |
1129 __ divw(scratch, left_reg, right_reg, SetOE, SetRC); | 1129 __ divw(scratch, left_reg, right_reg, SetOE, SetRC); |
1130 | 1130 |
1131 // Check for x % 0. | 1131 // Check for x % 0. |
1132 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1132 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
1133 __ cmpwi(right_reg, Operand::Zero()); | 1133 __ cmpwi(right_reg, Operand::Zero()); |
1134 DeoptimizeIf(eq, instr, "division by zero"); | 1134 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1135 } | 1135 } |
1136 | 1136 |
1137 // Check for kMinInt % -1, divw will return undefined, which is not what we | 1137 // Check for kMinInt % -1, divw will return undefined, which is not what we |
1138 // want. We have to deopt if we care about -0, because we can't return that. | 1138 // want. We have to deopt if we care about -0, because we can't return that. |
1139 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1139 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
1140 Label no_overflow_possible; | 1140 Label no_overflow_possible; |
1141 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1141 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1142 DeoptimizeIf(overflow, instr, "minus zero", cr0); | 1142 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0); |
1143 } else { | 1143 } else { |
1144 __ bnooverflow(&no_overflow_possible, cr0); | 1144 __ bnooverflow(&no_overflow_possible, cr0); |
1145 __ li(result_reg, Operand::Zero()); | 1145 __ li(result_reg, Operand::Zero()); |
1146 __ b(&done); | 1146 __ b(&done); |
1147 } | 1147 } |
1148 __ bind(&no_overflow_possible); | 1148 __ bind(&no_overflow_possible); |
1149 } | 1149 } |
1150 | 1150 |
1151 __ mullw(scratch, right_reg, scratch); | 1151 __ mullw(scratch, right_reg, scratch); |
1152 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); | 1152 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); |
1153 | 1153 |
1154 // If we care about -0, test if the dividend is <0 and the result is 0. | 1154 // If we care about -0, test if the dividend is <0 and the result is 0. |
1155 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1155 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1156 __ bne(&done, cr0); | 1156 __ bne(&done, cr0); |
1157 __ cmpwi(left_reg, Operand::Zero()); | 1157 __ cmpwi(left_reg, Operand::Zero()); |
1158 DeoptimizeIf(lt, instr, "minus zero"); | 1158 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1159 } | 1159 } |
1160 | 1160 |
1161 __ bind(&done); | 1161 __ bind(&done); |
1162 } | 1162 } |
1163 | 1163 |
1164 | 1164 |
1165 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1165 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
1166 Register dividend = ToRegister(instr->dividend()); | 1166 Register dividend = ToRegister(instr->dividend()); |
1167 int32_t divisor = instr->divisor(); | 1167 int32_t divisor = instr->divisor(); |
1168 Register result = ToRegister(instr->result()); | 1168 Register result = ToRegister(instr->result()); |
1169 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1169 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
1170 DCHECK(!result.is(dividend)); | 1170 DCHECK(!result.is(dividend)); |
1171 | 1171 |
1172 // Check for (0 / -x) that will produce negative zero. | 1172 // Check for (0 / -x) that will produce negative zero. |
1173 HDiv* hdiv = instr->hydrogen(); | 1173 HDiv* hdiv = instr->hydrogen(); |
1174 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1174 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1175 __ cmpwi(dividend, Operand::Zero()); | 1175 __ cmpwi(dividend, Operand::Zero()); |
1176 DeoptimizeIf(eq, instr, "minus zero"); | 1176 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1177 } | 1177 } |
1178 // Check for (kMinInt / -1). | 1178 // Check for (kMinInt / -1). |
1179 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1179 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
1180 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 1180 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
1181 __ cmpw(dividend, r0); | 1181 __ cmpw(dividend, r0); |
1182 DeoptimizeIf(eq, instr, "overflow"); | 1182 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
1183 } | 1183 } |
1184 | 1184 |
1185 int32_t shift = WhichPowerOf2Abs(divisor); | 1185 int32_t shift = WhichPowerOf2Abs(divisor); |
1186 | 1186 |
1187 // Deoptimize if remainder will not be 0. | 1187 // Deoptimize if remainder will not be 0. |
1188 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { | 1188 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { |
1189 __ TestBitRange(dividend, shift - 1, 0, r0); | 1189 __ TestBitRange(dividend, shift - 1, 0, r0); |
1190 DeoptimizeIf(ne, instr, "lost precision", cr0); | 1190 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0); |
1191 } | 1191 } |
1192 | 1192 |
1193 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1193 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
1194 __ neg(result, dividend); | 1194 __ neg(result, dividend); |
1195 return; | 1195 return; |
1196 } | 1196 } |
1197 if (shift == 0) { | 1197 if (shift == 0) { |
1198 __ mr(result, dividend); | 1198 __ mr(result, dividend); |
1199 } else { | 1199 } else { |
1200 if (shift == 1) { | 1200 if (shift == 1) { |
1201 __ srwi(result, dividend, Operand(31)); | 1201 __ srwi(result, dividend, Operand(31)); |
1202 } else { | 1202 } else { |
1203 __ srawi(result, dividend, 31); | 1203 __ srawi(result, dividend, 31); |
1204 __ srwi(result, result, Operand(32 - shift)); | 1204 __ srwi(result, result, Operand(32 - shift)); |
1205 } | 1205 } |
1206 __ add(result, dividend, result); | 1206 __ add(result, dividend, result); |
1207 __ srawi(result, result, shift); | 1207 __ srawi(result, result, shift); |
1208 } | 1208 } |
1209 if (divisor < 0) __ neg(result, result); | 1209 if (divisor < 0) __ neg(result, result); |
1210 } | 1210 } |
1211 | 1211 |
1212 | 1212 |
1213 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1213 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
1214 Register dividend = ToRegister(instr->dividend()); | 1214 Register dividend = ToRegister(instr->dividend()); |
1215 int32_t divisor = instr->divisor(); | 1215 int32_t divisor = instr->divisor(); |
1216 Register result = ToRegister(instr->result()); | 1216 Register result = ToRegister(instr->result()); |
1217 DCHECK(!dividend.is(result)); | 1217 DCHECK(!dividend.is(result)); |
1218 | 1218 |
1219 if (divisor == 0) { | 1219 if (divisor == 0) { |
1220 DeoptimizeIf(al, instr, "division by zero"); | 1220 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1221 return; | 1221 return; |
1222 } | 1222 } |
1223 | 1223 |
1224 // Check for (0 / -x) that will produce negative zero. | 1224 // Check for (0 / -x) that will produce negative zero. |
1225 HDiv* hdiv = instr->hydrogen(); | 1225 HDiv* hdiv = instr->hydrogen(); |
1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1227 __ cmpwi(dividend, Operand::Zero()); | 1227 __ cmpwi(dividend, Operand::Zero()); |
1228 DeoptimizeIf(eq, instr, "minus zero"); | 1228 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1229 } | 1229 } |
1230 | 1230 |
1231 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1231 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1232 if (divisor < 0) __ neg(result, result); | 1232 if (divisor < 0) __ neg(result, result); |
1233 | 1233 |
1234 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1234 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
1235 Register scratch = scratch0(); | 1235 Register scratch = scratch0(); |
1236 __ mov(ip, Operand(divisor)); | 1236 __ mov(ip, Operand(divisor)); |
1237 __ mullw(scratch, result, ip); | 1237 __ mullw(scratch, result, ip); |
1238 __ cmpw(scratch, dividend); | 1238 __ cmpw(scratch, dividend); |
1239 DeoptimizeIf(ne, instr, "lost precision"); | 1239 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
1240 } | 1240 } |
1241 } | 1241 } |
1242 | 1242 |
1243 | 1243 |
1244 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1244 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
1245 void LCodeGen::DoDivI(LDivI* instr) { | 1245 void LCodeGen::DoDivI(LDivI* instr) { |
1246 HBinaryOperation* hdiv = instr->hydrogen(); | 1246 HBinaryOperation* hdiv = instr->hydrogen(); |
1247 const Register dividend = ToRegister(instr->dividend()); | 1247 const Register dividend = ToRegister(instr->dividend()); |
1248 const Register divisor = ToRegister(instr->divisor()); | 1248 const Register divisor = ToRegister(instr->divisor()); |
1249 Register result = ToRegister(instr->result()); | 1249 Register result = ToRegister(instr->result()); |
1250 | 1250 |
1251 DCHECK(!dividend.is(result)); | 1251 DCHECK(!dividend.is(result)); |
1252 DCHECK(!divisor.is(result)); | 1252 DCHECK(!divisor.is(result)); |
1253 | 1253 |
1254 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1254 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1255 __ li(r0, Operand::Zero()); // clear xer | 1255 __ li(r0, Operand::Zero()); // clear xer |
1256 __ mtxer(r0); | 1256 __ mtxer(r0); |
1257 } | 1257 } |
1258 | 1258 |
1259 __ divw(result, dividend, divisor, SetOE, SetRC); | 1259 __ divw(result, dividend, divisor, SetOE, SetRC); |
1260 | 1260 |
1261 // Check for x / 0. | 1261 // Check for x / 0. |
1262 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1262 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1263 __ cmpwi(divisor, Operand::Zero()); | 1263 __ cmpwi(divisor, Operand::Zero()); |
1264 DeoptimizeIf(eq, instr, "division by zero"); | 1264 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1265 } | 1265 } |
1266 | 1266 |
1267 // Check for (0 / -x) that will produce negative zero. | 1267 // Check for (0 / -x) that will produce negative zero. |
1268 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1268 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1269 Label dividend_not_zero; | 1269 Label dividend_not_zero; |
1270 __ cmpwi(dividend, Operand::Zero()); | 1270 __ cmpwi(dividend, Operand::Zero()); |
1271 __ bne(÷nd_not_zero); | 1271 __ bne(÷nd_not_zero); |
1272 __ cmpwi(divisor, Operand::Zero()); | 1272 __ cmpwi(divisor, Operand::Zero()); |
1273 DeoptimizeIf(lt, instr, "minus zero"); | 1273 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1274 __ bind(÷nd_not_zero); | 1274 __ bind(÷nd_not_zero); |
1275 } | 1275 } |
1276 | 1276 |
1277 // Check for (kMinInt / -1). | 1277 // Check for (kMinInt / -1). |
1278 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1278 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1279 Label no_overflow_possible; | 1279 Label no_overflow_possible; |
1280 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1280 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
1281 DeoptimizeIf(overflow, instr, "overflow", cr0); | 1281 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); |
1282 } else { | 1282 } else { |
1283 // When truncating, we want kMinInt / -1 = kMinInt. | 1283 // When truncating, we want kMinInt / -1 = kMinInt. |
1284 __ bnooverflow(&no_overflow_possible, cr0); | 1284 __ bnooverflow(&no_overflow_possible, cr0); |
1285 __ mr(result, dividend); | 1285 __ mr(result, dividend); |
1286 } | 1286 } |
1287 __ bind(&no_overflow_possible); | 1287 __ bind(&no_overflow_possible); |
1288 } | 1288 } |
1289 | 1289 |
1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
1291 // Deoptimize if remainder is not 0. | 1291 // Deoptimize if remainder is not 0. |
1292 Register scratch = scratch0(); | 1292 Register scratch = scratch0(); |
1293 __ mullw(scratch, divisor, result); | 1293 __ mullw(scratch, divisor, result); |
1294 __ cmpw(dividend, scratch); | 1294 __ cmpw(dividend, scratch); |
1295 DeoptimizeIf(ne, instr, "lost precision"); | 1295 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
1296 } | 1296 } |
1297 } | 1297 } |
1298 | 1298 |
1299 | 1299 |
1300 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1300 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
1301 HBinaryOperation* hdiv = instr->hydrogen(); | 1301 HBinaryOperation* hdiv = instr->hydrogen(); |
1302 Register dividend = ToRegister(instr->dividend()); | 1302 Register dividend = ToRegister(instr->dividend()); |
1303 Register result = ToRegister(instr->result()); | 1303 Register result = ToRegister(instr->result()); |
1304 int32_t divisor = instr->divisor(); | 1304 int32_t divisor = instr->divisor(); |
1305 | 1305 |
1306 // If the divisor is positive, things are easy: There can be no deopts and we | 1306 // If the divisor is positive, things are easy: There can be no deopts and we |
1307 // can simply do an arithmetic right shift. | 1307 // can simply do an arithmetic right shift. |
1308 int32_t shift = WhichPowerOf2Abs(divisor); | 1308 int32_t shift = WhichPowerOf2Abs(divisor); |
1309 if (divisor > 0) { | 1309 if (divisor > 0) { |
1310 if (shift || !result.is(dividend)) { | 1310 if (shift || !result.is(dividend)) { |
1311 __ srawi(result, dividend, shift); | 1311 __ srawi(result, dividend, shift); |
1312 } | 1312 } |
1313 return; | 1313 return; |
1314 } | 1314 } |
1315 | 1315 |
1316 // If the divisor is negative, we have to negate and handle edge cases. | 1316 // If the divisor is negative, we have to negate and handle edge cases. |
1317 OEBit oe = LeaveOE; | 1317 OEBit oe = LeaveOE; |
1318 #if V8_TARGET_ARCH_PPC64 | 1318 #if V8_TARGET_ARCH_PPC64 |
1319 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1319 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1320 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 1320 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
1321 __ cmpw(dividend, r0); | 1321 __ cmpw(dividend, r0); |
1322 DeoptimizeIf(eq, instr, "overflow"); | 1322 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
1323 } | 1323 } |
1324 #else | 1324 #else |
1325 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1325 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1326 __ li(r0, Operand::Zero()); // clear xer | 1326 __ li(r0, Operand::Zero()); // clear xer |
1327 __ mtxer(r0); | 1327 __ mtxer(r0); |
1328 oe = SetOE; | 1328 oe = SetOE; |
1329 } | 1329 } |
1330 #endif | 1330 #endif |
1331 | 1331 |
1332 __ neg(result, dividend, oe, SetRC); | 1332 __ neg(result, dividend, oe, SetRC); |
1333 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1333 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1334 DeoptimizeIf(eq, instr, "minus zero", cr0); | 1334 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); |
1335 } | 1335 } |
1336 | 1336 |
1337 // If the negation could not overflow, simply shifting is OK. | 1337 // If the negation could not overflow, simply shifting is OK. |
1338 #if !V8_TARGET_ARCH_PPC64 | 1338 #if !V8_TARGET_ARCH_PPC64 |
1339 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1339 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1340 #endif | 1340 #endif |
1341 if (shift) { | 1341 if (shift) { |
1342 __ ShiftRightArithImm(result, result, shift); | 1342 __ ShiftRightArithImm(result, result, shift); |
1343 } | 1343 } |
1344 return; | 1344 return; |
1345 #if !V8_TARGET_ARCH_PPC64 | 1345 #if !V8_TARGET_ARCH_PPC64 |
1346 } | 1346 } |
1347 | 1347 |
1348 // Dividing by -1 is basically negation, unless we overflow. | 1348 // Dividing by -1 is basically negation, unless we overflow. |
1349 if (divisor == -1) { | 1349 if (divisor == -1) { |
1350 DeoptimizeIf(overflow, instr, "overflow", cr0); | 1350 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); |
1351 return; | 1351 return; |
1352 } | 1352 } |
1353 | 1353 |
1354 Label overflow, done; | 1354 Label overflow, done; |
1355 __ boverflow(&overflow, cr0); | 1355 __ boverflow(&overflow, cr0); |
1356 __ srawi(result, result, shift); | 1356 __ srawi(result, result, shift); |
1357 __ b(&done); | 1357 __ b(&done); |
1358 __ bind(&overflow); | 1358 __ bind(&overflow); |
1359 __ mov(result, Operand(kMinInt / divisor)); | 1359 __ mov(result, Operand(kMinInt / divisor)); |
1360 __ bind(&done); | 1360 __ bind(&done); |
1361 #endif | 1361 #endif |
1362 } | 1362 } |
1363 | 1363 |
1364 | 1364 |
1365 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1365 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
1366 Register dividend = ToRegister(instr->dividend()); | 1366 Register dividend = ToRegister(instr->dividend()); |
1367 int32_t divisor = instr->divisor(); | 1367 int32_t divisor = instr->divisor(); |
1368 Register result = ToRegister(instr->result()); | 1368 Register result = ToRegister(instr->result()); |
1369 DCHECK(!dividend.is(result)); | 1369 DCHECK(!dividend.is(result)); |
1370 | 1370 |
1371 if (divisor == 0) { | 1371 if (divisor == 0) { |
1372 DeoptimizeIf(al, instr, "division by zero"); | 1372 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1373 return; | 1373 return; |
1374 } | 1374 } |
1375 | 1375 |
1376 // Check for (0 / -x) that will produce negative zero. | 1376 // Check for (0 / -x) that will produce negative zero. |
1377 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1377 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
1378 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1378 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1379 __ cmpwi(dividend, Operand::Zero()); | 1379 __ cmpwi(dividend, Operand::Zero()); |
1380 DeoptimizeIf(eq, instr, "minus zero"); | 1380 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1381 } | 1381 } |
1382 | 1382 |
1383 // Easy case: We need no dynamic check for the dividend and the flooring | 1383 // Easy case: We need no dynamic check for the dividend and the flooring |
1384 // division is the same as the truncating division. | 1384 // division is the same as the truncating division. |
1385 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1385 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
1386 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1386 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
1387 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1387 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1388 if (divisor < 0) __ neg(result, result); | 1388 if (divisor < 0) __ neg(result, result); |
1389 return; | 1389 return; |
1390 } | 1390 } |
(...skipping 30 matching lines...) Expand all Loading... |
1421 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1421 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1422 __ li(r0, Operand::Zero()); // clear xer | 1422 __ li(r0, Operand::Zero()); // clear xer |
1423 __ mtxer(r0); | 1423 __ mtxer(r0); |
1424 } | 1424 } |
1425 | 1425 |
1426 __ divw(result, dividend, divisor, SetOE, SetRC); | 1426 __ divw(result, dividend, divisor, SetOE, SetRC); |
1427 | 1427 |
1428 // Check for x / 0. | 1428 // Check for x / 0. |
1429 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1429 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1430 __ cmpwi(divisor, Operand::Zero()); | 1430 __ cmpwi(divisor, Operand::Zero()); |
1431 DeoptimizeIf(eq, instr, "division by zero"); | 1431 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1432 } | 1432 } |
1433 | 1433 |
1434 // Check for (0 / -x) that will produce negative zero. | 1434 // Check for (0 / -x) that will produce negative zero. |
1435 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1435 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1436 Label dividend_not_zero; | 1436 Label dividend_not_zero; |
1437 __ cmpwi(dividend, Operand::Zero()); | 1437 __ cmpwi(dividend, Operand::Zero()); |
1438 __ bne(÷nd_not_zero); | 1438 __ bne(÷nd_not_zero); |
1439 __ cmpwi(divisor, Operand::Zero()); | 1439 __ cmpwi(divisor, Operand::Zero()); |
1440 DeoptimizeIf(lt, instr, "minus zero"); | 1440 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1441 __ bind(÷nd_not_zero); | 1441 __ bind(÷nd_not_zero); |
1442 } | 1442 } |
1443 | 1443 |
1444 // Check for (kMinInt / -1). | 1444 // Check for (kMinInt / -1). |
1445 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1445 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1446 Label no_overflow_possible; | 1446 Label no_overflow_possible; |
1447 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1447 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
1448 DeoptimizeIf(overflow, instr, "overflow", cr0); | 1448 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); |
1449 } else { | 1449 } else { |
1450 // When truncating, we want kMinInt / -1 = kMinInt. | 1450 // When truncating, we want kMinInt / -1 = kMinInt. |
1451 __ bnooverflow(&no_overflow_possible, cr0); | 1451 __ bnooverflow(&no_overflow_possible, cr0); |
1452 __ mr(result, dividend); | 1452 __ mr(result, dividend); |
1453 } | 1453 } |
1454 __ bind(&no_overflow_possible); | 1454 __ bind(&no_overflow_possible); |
1455 } | 1455 } |
1456 | 1456 |
1457 Label done; | 1457 Label done; |
1458 Register scratch = scratch0(); | 1458 Register scratch = scratch0(); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1508 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1508 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
1509 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1509 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1510 | 1510 |
1511 if (right_op->IsConstantOperand()) { | 1511 if (right_op->IsConstantOperand()) { |
1512 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1512 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
1513 | 1513 |
1514 if (bailout_on_minus_zero && (constant < 0)) { | 1514 if (bailout_on_minus_zero && (constant < 0)) { |
1515 // The case of a null constant will be handled separately. | 1515 // The case of a null constant will be handled separately. |
1516 // If constant is negative and left is null, the result should be -0. | 1516 // If constant is negative and left is null, the result should be -0. |
1517 __ cmpi(left, Operand::Zero()); | 1517 __ cmpi(left, Operand::Zero()); |
1518 DeoptimizeIf(eq, instr, "minus zero"); | 1518 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1519 } | 1519 } |
1520 | 1520 |
1521 switch (constant) { | 1521 switch (constant) { |
1522 case -1: | 1522 case -1: |
1523 if (can_overflow) { | 1523 if (can_overflow) { |
1524 #if V8_TARGET_ARCH_PPC64 | 1524 #if V8_TARGET_ARCH_PPC64 |
1525 if (instr->hydrogen()->representation().IsSmi()) { | 1525 if (instr->hydrogen()->representation().IsSmi()) { |
1526 #endif | 1526 #endif |
1527 __ li(r0, Operand::Zero()); // clear xer | 1527 __ li(r0, Operand::Zero()); // clear xer |
1528 __ mtxer(r0); | 1528 __ mtxer(r0); |
1529 __ neg(result, left, SetOE, SetRC); | 1529 __ neg(result, left, SetOE, SetRC); |
1530 DeoptimizeIf(overflow, instr, "overflow", cr0); | 1530 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); |
1531 #if V8_TARGET_ARCH_PPC64 | 1531 #if V8_TARGET_ARCH_PPC64 |
1532 } else { | 1532 } else { |
1533 __ neg(result, left); | 1533 __ neg(result, left); |
1534 __ TestIfInt32(result, scratch, r0); | 1534 __ TestIfInt32(result, scratch, r0); |
1535 DeoptimizeIf(ne, instr, "overflow"); | 1535 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
1536 } | 1536 } |
1537 #endif | 1537 #endif |
1538 } else { | 1538 } else { |
1539 __ neg(result, left); | 1539 __ neg(result, left); |
1540 } | 1540 } |
1541 break; | 1541 break; |
1542 case 0: | 1542 case 0: |
1543 if (bailout_on_minus_zero) { | 1543 if (bailout_on_minus_zero) { |
1544 // If left is strictly negative and the constant is null, the | 1544 // If left is strictly negative and the constant is null, the |
1545 // result is -0. Deoptimize if required, otherwise return 0. | 1545 // result is -0. Deoptimize if required, otherwise return 0. |
1546 #if V8_TARGET_ARCH_PPC64 | 1546 #if V8_TARGET_ARCH_PPC64 |
1547 if (instr->hydrogen()->representation().IsSmi()) { | 1547 if (instr->hydrogen()->representation().IsSmi()) { |
1548 #endif | 1548 #endif |
1549 __ cmpi(left, Operand::Zero()); | 1549 __ cmpi(left, Operand::Zero()); |
1550 #if V8_TARGET_ARCH_PPC64 | 1550 #if V8_TARGET_ARCH_PPC64 |
1551 } else { | 1551 } else { |
1552 __ cmpwi(left, Operand::Zero()); | 1552 __ cmpwi(left, Operand::Zero()); |
1553 } | 1553 } |
1554 #endif | 1554 #endif |
1555 DeoptimizeIf(lt, instr, "minus zero"); | 1555 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1556 } | 1556 } |
1557 __ li(result, Operand::Zero()); | 1557 __ li(result, Operand::Zero()); |
1558 break; | 1558 break; |
1559 case 1: | 1559 case 1: |
1560 __ Move(result, left); | 1560 __ Move(result, left); |
1561 break; | 1561 break; |
1562 default: | 1562 default: |
1563 // Multiplying by powers of two and powers of two plus or minus | 1563 // Multiplying by powers of two and powers of two plus or minus |
1564 // one can be done faster with shifted operands. | 1564 // one can be done faster with shifted operands. |
1565 // For other constants we emit standard code. | 1565 // For other constants we emit standard code. |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1598 #if V8_TARGET_ARCH_PPC64 | 1598 #if V8_TARGET_ARCH_PPC64 |
1599 // result = left * right. | 1599 // result = left * right. |
1600 if (instr->hydrogen()->representation().IsSmi()) { | 1600 if (instr->hydrogen()->representation().IsSmi()) { |
1601 __ SmiUntag(result, left); | 1601 __ SmiUntag(result, left); |
1602 __ SmiUntag(scratch, right); | 1602 __ SmiUntag(scratch, right); |
1603 __ Mul(result, result, scratch); | 1603 __ Mul(result, result, scratch); |
1604 } else { | 1604 } else { |
1605 __ Mul(result, left, right); | 1605 __ Mul(result, left, right); |
1606 } | 1606 } |
1607 __ TestIfInt32(result, scratch, r0); | 1607 __ TestIfInt32(result, scratch, r0); |
1608 DeoptimizeIf(ne, instr, "overflow"); | 1608 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
1609 if (instr->hydrogen()->representation().IsSmi()) { | 1609 if (instr->hydrogen()->representation().IsSmi()) { |
1610 __ SmiTag(result); | 1610 __ SmiTag(result); |
1611 } | 1611 } |
1612 #else | 1612 #else |
1613 // scratch:result = left * right. | 1613 // scratch:result = left * right. |
1614 if (instr->hydrogen()->representation().IsSmi()) { | 1614 if (instr->hydrogen()->representation().IsSmi()) { |
1615 __ SmiUntag(result, left); | 1615 __ SmiUntag(result, left); |
1616 __ mulhw(scratch, result, right); | 1616 __ mulhw(scratch, result, right); |
1617 __ mullw(result, result, right); | 1617 __ mullw(result, result, right); |
1618 } else { | 1618 } else { |
1619 __ mulhw(scratch, left, right); | 1619 __ mulhw(scratch, left, right); |
1620 __ mullw(result, left, right); | 1620 __ mullw(result, left, right); |
1621 } | 1621 } |
1622 __ TestIfInt32(scratch, result, r0); | 1622 __ TestIfInt32(scratch, result, r0); |
1623 DeoptimizeIf(ne, instr, "overflow"); | 1623 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
1624 #endif | 1624 #endif |
1625 } else { | 1625 } else { |
1626 if (instr->hydrogen()->representation().IsSmi()) { | 1626 if (instr->hydrogen()->representation().IsSmi()) { |
1627 __ SmiUntag(result, left); | 1627 __ SmiUntag(result, left); |
1628 __ Mul(result, result, right); | 1628 __ Mul(result, result, right); |
1629 } else { | 1629 } else { |
1630 __ Mul(result, left, right); | 1630 __ Mul(result, left, right); |
1631 } | 1631 } |
1632 } | 1632 } |
1633 | 1633 |
1634 if (bailout_on_minus_zero) { | 1634 if (bailout_on_minus_zero) { |
1635 Label done; | 1635 Label done; |
1636 #if V8_TARGET_ARCH_PPC64 | 1636 #if V8_TARGET_ARCH_PPC64 |
1637 if (instr->hydrogen()->representation().IsSmi()) { | 1637 if (instr->hydrogen()->representation().IsSmi()) { |
1638 #endif | 1638 #endif |
1639 __ xor_(r0, left, right, SetRC); | 1639 __ xor_(r0, left, right, SetRC); |
1640 __ bge(&done, cr0); | 1640 __ bge(&done, cr0); |
1641 #if V8_TARGET_ARCH_PPC64 | 1641 #if V8_TARGET_ARCH_PPC64 |
1642 } else { | 1642 } else { |
1643 __ xor_(r0, left, right); | 1643 __ xor_(r0, left, right); |
1644 __ cmpwi(r0, Operand::Zero()); | 1644 __ cmpwi(r0, Operand::Zero()); |
1645 __ bge(&done); | 1645 __ bge(&done); |
1646 } | 1646 } |
1647 #endif | 1647 #endif |
1648 // Bail out if the result is minus zero. | 1648 // Bail out if the result is minus zero. |
1649 __ cmpi(result, Operand::Zero()); | 1649 __ cmpi(result, Operand::Zero()); |
1650 DeoptimizeIf(eq, instr, "minus zero"); | 1650 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1651 __ bind(&done); | 1651 __ bind(&done); |
1652 } | 1652 } |
1653 } | 1653 } |
1654 } | 1654 } |
1655 | 1655 |
1656 | 1656 |
1657 void LCodeGen::DoBitI(LBitI* instr) { | 1657 void LCodeGen::DoBitI(LBitI* instr) { |
1658 LOperand* left_op = instr->left(); | 1658 LOperand* left_op = instr->left(); |
1659 LOperand* right_op = instr->right(); | 1659 LOperand* right_op = instr->right(); |
1660 DCHECK(left_op->IsRegister()); | 1660 DCHECK(left_op->IsRegister()); |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1726 break; | 1726 break; |
1727 case Token::SAR: | 1727 case Token::SAR: |
1728 __ sraw(result, left, scratch); | 1728 __ sraw(result, left, scratch); |
1729 break; | 1729 break; |
1730 case Token::SHR: | 1730 case Token::SHR: |
1731 if (instr->can_deopt()) { | 1731 if (instr->can_deopt()) { |
1732 __ srw(result, left, scratch, SetRC); | 1732 __ srw(result, left, scratch, SetRC); |
1733 #if V8_TARGET_ARCH_PPC64 | 1733 #if V8_TARGET_ARCH_PPC64 |
1734 __ extsw(result, result, SetRC); | 1734 __ extsw(result, result, SetRC); |
1735 #endif | 1735 #endif |
1736 DeoptimizeIf(lt, instr, "negative value", cr0); | 1736 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0); |
1737 } else { | 1737 } else { |
1738 __ srw(result, left, scratch); | 1738 __ srw(result, left, scratch); |
1739 } | 1739 } |
1740 break; | 1740 break; |
1741 case Token::SHL: | 1741 case Token::SHL: |
1742 __ slw(result, left, scratch); | 1742 __ slw(result, left, scratch); |
1743 #if V8_TARGET_ARCH_PPC64 | 1743 #if V8_TARGET_ARCH_PPC64 |
1744 __ extsw(result, result); | 1744 __ extsw(result, result); |
1745 #endif | 1745 #endif |
1746 break; | 1746 break; |
(...skipping 19 matching lines...) Expand all Loading... |
1766 } else { | 1766 } else { |
1767 __ Move(result, left); | 1767 __ Move(result, left); |
1768 } | 1768 } |
1769 break; | 1769 break; |
1770 case Token::SHR: | 1770 case Token::SHR: |
1771 if (shift_count != 0) { | 1771 if (shift_count != 0) { |
1772 __ srwi(result, left, Operand(shift_count)); | 1772 __ srwi(result, left, Operand(shift_count)); |
1773 } else { | 1773 } else { |
1774 if (instr->can_deopt()) { | 1774 if (instr->can_deopt()) { |
1775 __ cmpwi(left, Operand::Zero()); | 1775 __ cmpwi(left, Operand::Zero()); |
1776 DeoptimizeIf(lt, instr, "negative value"); | 1776 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue); |
1777 } | 1777 } |
1778 __ Move(result, left); | 1778 __ Move(result, left); |
1779 } | 1779 } |
1780 break; | 1780 break; |
1781 case Token::SHL: | 1781 case Token::SHL: |
1782 if (shift_count != 0) { | 1782 if (shift_count != 0) { |
1783 #if V8_TARGET_ARCH_PPC64 | 1783 #if V8_TARGET_ARCH_PPC64 |
1784 if (instr->hydrogen_value()->representation().IsSmi()) { | 1784 if (instr->hydrogen_value()->representation().IsSmi()) { |
1785 __ sldi(result, left, Operand(shift_count)); | 1785 __ sldi(result, left, Operand(shift_count)); |
1786 #else | 1786 #else |
1787 if (instr->hydrogen_value()->representation().IsSmi() && | 1787 if (instr->hydrogen_value()->representation().IsSmi() && |
1788 instr->can_deopt()) { | 1788 instr->can_deopt()) { |
1789 if (shift_count != 1) { | 1789 if (shift_count != 1) { |
1790 __ slwi(result, left, Operand(shift_count - 1)); | 1790 __ slwi(result, left, Operand(shift_count - 1)); |
1791 __ SmiTagCheckOverflow(result, result, scratch); | 1791 __ SmiTagCheckOverflow(result, result, scratch); |
1792 } else { | 1792 } else { |
1793 __ SmiTagCheckOverflow(result, left, scratch); | 1793 __ SmiTagCheckOverflow(result, left, scratch); |
1794 } | 1794 } |
1795 DeoptimizeIf(lt, instr, "overflow", cr0); | 1795 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); |
1796 #endif | 1796 #endif |
1797 } else { | 1797 } else { |
1798 __ slwi(result, left, Operand(shift_count)); | 1798 __ slwi(result, left, Operand(shift_count)); |
1799 #if V8_TARGET_ARCH_PPC64 | 1799 #if V8_TARGET_ARCH_PPC64 |
1800 __ extsw(result, result); | 1800 __ extsw(result, result); |
1801 #endif | 1801 #endif |
1802 } | 1802 } |
1803 } else { | 1803 } else { |
1804 __ Move(result, left); | 1804 __ Move(result, left); |
1805 } | 1805 } |
(...skipping 24 matching lines...) Expand all Loading... |
1830 } else { | 1830 } else { |
1831 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | 1831 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), |
1832 scratch0(), r0); | 1832 scratch0(), r0); |
1833 } | 1833 } |
1834 // Deoptimize on overflow | 1834 // Deoptimize on overflow |
1835 #if V8_TARGET_ARCH_PPC64 | 1835 #if V8_TARGET_ARCH_PPC64 |
1836 if (!instr->hydrogen()->representation().IsSmi()) { | 1836 if (!instr->hydrogen()->representation().IsSmi()) { |
1837 __ extsw(scratch0(), scratch0(), SetRC); | 1837 __ extsw(scratch0(), scratch0(), SetRC); |
1838 } | 1838 } |
1839 #endif | 1839 #endif |
1840 DeoptimizeIf(lt, instr, "overflow", cr0); | 1840 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); |
1841 } | 1841 } |
1842 | 1842 |
1843 #if V8_TARGET_ARCH_PPC64 | 1843 #if V8_TARGET_ARCH_PPC64 |
1844 if (!instr->hydrogen()->representation().IsSmi()) { | 1844 if (!instr->hydrogen()->representation().IsSmi()) { |
1845 __ extsw(result, result); | 1845 __ extsw(result, result); |
1846 } | 1846 } |
1847 #endif | 1847 #endif |
1848 } | 1848 } |
1849 | 1849 |
1850 | 1850 |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1910 Register result = ToRegister(instr->result()); | 1910 Register result = ToRegister(instr->result()); |
1911 Register scratch = ToRegister(instr->temp()); | 1911 Register scratch = ToRegister(instr->temp()); |
1912 Smi* index = instr->index(); | 1912 Smi* index = instr->index(); |
1913 Label runtime, done; | 1913 Label runtime, done; |
1914 DCHECK(object.is(result)); | 1914 DCHECK(object.is(result)); |
1915 DCHECK(object.is(r3)); | 1915 DCHECK(object.is(r3)); |
1916 DCHECK(!scratch.is(scratch0())); | 1916 DCHECK(!scratch.is(scratch0())); |
1917 DCHECK(!scratch.is(object)); | 1917 DCHECK(!scratch.is(object)); |
1918 | 1918 |
1919 __ TestIfSmi(object, r0); | 1919 __ TestIfSmi(object, r0); |
1920 DeoptimizeIf(eq, instr, "Smi", cr0); | 1920 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); |
1921 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1921 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
1922 DeoptimizeIf(ne, instr, "not a date object"); | 1922 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); |
1923 | 1923 |
1924 if (index->value() == 0) { | 1924 if (index->value() == 0) { |
1925 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1925 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset)); |
1926 } else { | 1926 } else { |
1927 if (index->value() < JSDate::kFirstUncachedField) { | 1927 if (index->value() < JSDate::kFirstUncachedField) { |
1928 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1928 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
1929 __ mov(scratch, Operand(stamp)); | 1929 __ mov(scratch, Operand(stamp)); |
1930 __ LoadP(scratch, MemOperand(scratch)); | 1930 __ LoadP(scratch, MemOperand(scratch)); |
1931 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1931 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
1932 __ cmp(scratch, scratch0()); | 1932 __ cmp(scratch, scratch0()); |
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2046 } else { | 2046 } else { |
2047 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | 2047 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), |
2048 scratch0(), r0); | 2048 scratch0(), r0); |
2049 } | 2049 } |
2050 // Deoptimize on overflow | 2050 // Deoptimize on overflow |
2051 #if V8_TARGET_ARCH_PPC64 | 2051 #if V8_TARGET_ARCH_PPC64 |
2052 if (isInteger) { | 2052 if (isInteger) { |
2053 __ extsw(scratch0(), scratch0(), SetRC); | 2053 __ extsw(scratch0(), scratch0(), SetRC); |
2054 } | 2054 } |
2055 #endif | 2055 #endif |
2056 DeoptimizeIf(lt, instr, "overflow", cr0); | 2056 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); |
2057 } | 2057 } |
2058 | 2058 |
2059 #if V8_TARGET_ARCH_PPC64 | 2059 #if V8_TARGET_ARCH_PPC64 |
2060 if (isInteger) { | 2060 if (isInteger) { |
2061 __ extsw(result, result); | 2061 __ extsw(result, result); |
2062 } | 2062 } |
2063 #endif | 2063 #endif |
2064 } | 2064 } |
2065 | 2065 |
2066 | 2066 |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2288 } | 2288 } |
2289 | 2289 |
2290 if (expected.Contains(ToBooleanStub::SMI)) { | 2290 if (expected.Contains(ToBooleanStub::SMI)) { |
2291 // Smis: 0 -> false, all other -> true. | 2291 // Smis: 0 -> false, all other -> true. |
2292 __ cmpi(reg, Operand::Zero()); | 2292 __ cmpi(reg, Operand::Zero()); |
2293 __ beq(instr->FalseLabel(chunk_)); | 2293 __ beq(instr->FalseLabel(chunk_)); |
2294 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2294 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
2295 } else if (expected.NeedsMap()) { | 2295 } else if (expected.NeedsMap()) { |
2296 // If we need a map later and have a Smi -> deopt. | 2296 // If we need a map later and have a Smi -> deopt. |
2297 __ TestIfSmi(reg, r0); | 2297 __ TestIfSmi(reg, r0); |
2298 DeoptimizeIf(eq, instr, "Smi", cr0); | 2298 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); |
2299 } | 2299 } |
2300 | 2300 |
2301 const Register map = scratch0(); | 2301 const Register map = scratch0(); |
2302 if (expected.NeedsMap()) { | 2302 if (expected.NeedsMap()) { |
2303 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2303 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
2304 | 2304 |
2305 if (expected.CanBeUndetectable()) { | 2305 if (expected.CanBeUndetectable()) { |
2306 // Undetectable -> false. | 2306 // Undetectable -> false. |
2307 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2307 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
2308 __ TestBit(ip, Map::kIsUndetectable, r0); | 2308 __ TestBit(ip, Map::kIsUndetectable, r0); |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2345 __ mfcr(r0); | 2345 __ mfcr(r0); |
2346 __ andi(r0, r0, Operand(crZOrNaNBits)); | 2346 __ andi(r0, r0, Operand(crZOrNaNBits)); |
2347 __ bne(instr->FalseLabel(chunk_), cr0); | 2347 __ bne(instr->FalseLabel(chunk_), cr0); |
2348 __ b(instr->TrueLabel(chunk_)); | 2348 __ b(instr->TrueLabel(chunk_)); |
2349 __ bind(&not_heap_number); | 2349 __ bind(&not_heap_number); |
2350 } | 2350 } |
2351 | 2351 |
2352 if (!expected.IsGeneric()) { | 2352 if (!expected.IsGeneric()) { |
2353 // We've seen something for the first time -> deopt. | 2353 // We've seen something for the first time -> deopt. |
2354 // This can only happen if we are not generic already. | 2354 // This can only happen if we are not generic already. |
2355 DeoptimizeIf(al, instr, "unexpected object"); | 2355 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); |
2356 } | 2356 } |
2357 } | 2357 } |
2358 } | 2358 } |
2359 } | 2359 } |
2360 | 2360 |
2361 | 2361 |
2362 void LCodeGen::EmitGoto(int block) { | 2362 void LCodeGen::EmitGoto(int block) { |
2363 if (!IsNextEmittedBlock(block)) { | 2363 if (!IsNextEmittedBlock(block)) { |
2364 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2364 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); |
2365 } | 2365 } |
(...skipping 638 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3004 } | 3004 } |
3005 | 3005 |
3006 | 3006 |
3007 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 3007 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
3008 Register result = ToRegister(instr->result()); | 3008 Register result = ToRegister(instr->result()); |
3009 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 3009 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
3010 __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset)); | 3010 __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset)); |
3011 if (instr->hydrogen()->RequiresHoleCheck()) { | 3011 if (instr->hydrogen()->RequiresHoleCheck()) { |
3012 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3012 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3013 __ cmp(result, ip); | 3013 __ cmp(result, ip); |
3014 DeoptimizeIf(eq, instr, "hole"); | 3014 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3015 } | 3015 } |
3016 } | 3016 } |
3017 | 3017 |
3018 | 3018 |
3019 template <class T> | 3019 template <class T> |
3020 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3020 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
3021 DCHECK(FLAG_vector_ics); | 3021 DCHECK(FLAG_vector_ics); |
3022 Register vector = ToRegister(instr->temp_vector()); | 3022 Register vector = ToRegister(instr->temp_vector()); |
3023 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); | 3023 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); |
3024 __ Move(vector, instr->hydrogen()->feedback_vector()); | 3024 __ Move(vector, instr->hydrogen()->feedback_vector()); |
(...skipping 29 matching lines...) Expand all Loading... |
3054 | 3054 |
3055 // If the cell we are storing to contains the hole it could have | 3055 // If the cell we are storing to contains the hole it could have |
3056 // been deleted from the property dictionary. In that case, we need | 3056 // been deleted from the property dictionary. In that case, we need |
3057 // to update the property details in the property dictionary to mark | 3057 // to update the property details in the property dictionary to mark |
3058 // it as no longer deleted. | 3058 // it as no longer deleted. |
3059 if (instr->hydrogen()->RequiresHoleCheck()) { | 3059 if (instr->hydrogen()->RequiresHoleCheck()) { |
3060 // We use a temp to check the payload (CompareRoot might clobber ip). | 3060 // We use a temp to check the payload (CompareRoot might clobber ip). |
3061 Register payload = ToRegister(instr->temp()); | 3061 Register payload = ToRegister(instr->temp()); |
3062 __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 3062 __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
3063 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 3063 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); |
3064 DeoptimizeIf(eq, instr, "hole"); | 3064 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3065 } | 3065 } |
3066 | 3066 |
3067 // Store the value. | 3067 // Store the value. |
3068 __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0); | 3068 __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0); |
3069 // Cells are always rescanned, so no write barrier here. | 3069 // Cells are always rescanned, so no write barrier here. |
3070 } | 3070 } |
3071 | 3071 |
3072 | 3072 |
3073 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3073 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
3074 Register context = ToRegister(instr->context()); | 3074 Register context = ToRegister(instr->context()); |
3075 Register result = ToRegister(instr->result()); | 3075 Register result = ToRegister(instr->result()); |
3076 __ LoadP(result, ContextOperand(context, instr->slot_index())); | 3076 __ LoadP(result, ContextOperand(context, instr->slot_index())); |
3077 if (instr->hydrogen()->RequiresHoleCheck()) { | 3077 if (instr->hydrogen()->RequiresHoleCheck()) { |
3078 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3078 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3079 __ cmp(result, ip); | 3079 __ cmp(result, ip); |
3080 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3080 if (instr->hydrogen()->DeoptimizesOnHole()) { |
3081 DeoptimizeIf(eq, instr, "hole"); | 3081 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3082 } else { | 3082 } else { |
3083 Label skip; | 3083 Label skip; |
3084 __ bne(&skip); | 3084 __ bne(&skip); |
3085 __ mov(result, Operand(factory()->undefined_value())); | 3085 __ mov(result, Operand(factory()->undefined_value())); |
3086 __ bind(&skip); | 3086 __ bind(&skip); |
3087 } | 3087 } |
3088 } | 3088 } |
3089 } | 3089 } |
3090 | 3090 |
3091 | 3091 |
3092 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 3092 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
3093 Register context = ToRegister(instr->context()); | 3093 Register context = ToRegister(instr->context()); |
3094 Register value = ToRegister(instr->value()); | 3094 Register value = ToRegister(instr->value()); |
3095 Register scratch = scratch0(); | 3095 Register scratch = scratch0(); |
3096 MemOperand target = ContextOperand(context, instr->slot_index()); | 3096 MemOperand target = ContextOperand(context, instr->slot_index()); |
3097 | 3097 |
3098 Label skip_assignment; | 3098 Label skip_assignment; |
3099 | 3099 |
3100 if (instr->hydrogen()->RequiresHoleCheck()) { | 3100 if (instr->hydrogen()->RequiresHoleCheck()) { |
3101 __ LoadP(scratch, target); | 3101 __ LoadP(scratch, target); |
3102 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3102 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3103 __ cmp(scratch, ip); | 3103 __ cmp(scratch, ip); |
3104 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3104 if (instr->hydrogen()->DeoptimizesOnHole()) { |
3105 DeoptimizeIf(eq, instr, "hole"); | 3105 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3106 } else { | 3106 } else { |
3107 __ bne(&skip_assignment); | 3107 __ bne(&skip_assignment); |
3108 } | 3108 } |
3109 } | 3109 } |
3110 | 3110 |
3111 __ StoreP(value, target, r0); | 3111 __ StoreP(value, target, r0); |
3112 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3112 if (instr->hydrogen()->NeedsWriteBarrier()) { |
3113 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() | 3113 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() |
3114 ? OMIT_SMI_CHECK | 3114 ? OMIT_SMI_CHECK |
3115 : INLINE_SMI_CHECK; | 3115 : INLINE_SMI_CHECK; |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3187 Register function = ToRegister(instr->function()); | 3187 Register function = ToRegister(instr->function()); |
3188 Register result = ToRegister(instr->result()); | 3188 Register result = ToRegister(instr->result()); |
3189 | 3189 |
3190 // Get the prototype or initial map from the function. | 3190 // Get the prototype or initial map from the function. |
3191 __ LoadP(result, | 3191 __ LoadP(result, |
3192 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3192 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
3193 | 3193 |
3194 // Check that the function has a prototype or an initial map. | 3194 // Check that the function has a prototype or an initial map. |
3195 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3195 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3196 __ cmp(result, ip); | 3196 __ cmp(result, ip); |
3197 DeoptimizeIf(eq, instr, "hole"); | 3197 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3198 | 3198 |
3199 // If the function does not have an initial map, we're done. | 3199 // If the function does not have an initial map, we're done. |
3200 Label done; | 3200 Label done; |
3201 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 3201 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
3202 __ bne(&done); | 3202 __ bne(&done); |
3203 | 3203 |
3204 // Get the prototype from the initial map. | 3204 // Get the prototype from the initial map. |
3205 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3205 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
3206 | 3206 |
3207 // All done. | 3207 // All done. |
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3347 case EXTERNAL_UINT32_ELEMENTS: | 3347 case EXTERNAL_UINT32_ELEMENTS: |
3348 case UINT32_ELEMENTS: | 3348 case UINT32_ELEMENTS: |
3349 if (key_is_constant) { | 3349 if (key_is_constant) { |
3350 __ LoadWord(result, mem_operand, r0); | 3350 __ LoadWord(result, mem_operand, r0); |
3351 } else { | 3351 } else { |
3352 __ lwzx(result, mem_operand); | 3352 __ lwzx(result, mem_operand); |
3353 } | 3353 } |
3354 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3354 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
3355 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | 3355 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
3356 __ cmplw(result, r0); | 3356 __ cmplw(result, r0); |
3357 DeoptimizeIf(ge, instr, "negative value"); | 3357 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue); |
3358 } | 3358 } |
3359 break; | 3359 break; |
3360 case FLOAT32_ELEMENTS: | 3360 case FLOAT32_ELEMENTS: |
3361 case FLOAT64_ELEMENTS: | 3361 case FLOAT64_ELEMENTS: |
3362 case EXTERNAL_FLOAT32_ELEMENTS: | 3362 case EXTERNAL_FLOAT32_ELEMENTS: |
3363 case EXTERNAL_FLOAT64_ELEMENTS: | 3363 case EXTERNAL_FLOAT64_ELEMENTS: |
3364 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3364 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3365 case FAST_HOLEY_ELEMENTS: | 3365 case FAST_HOLEY_ELEMENTS: |
3366 case FAST_HOLEY_SMI_ELEMENTS: | 3366 case FAST_HOLEY_SMI_ELEMENTS: |
3367 case FAST_DOUBLE_ELEMENTS: | 3367 case FAST_DOUBLE_ELEMENTS: |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3410 | 3410 |
3411 if (instr->hydrogen()->RequiresHoleCheck()) { | 3411 if (instr->hydrogen()->RequiresHoleCheck()) { |
3412 if (is_int16(base_offset + Register::kExponentOffset)) { | 3412 if (is_int16(base_offset + Register::kExponentOffset)) { |
3413 __ lwz(scratch, | 3413 __ lwz(scratch, |
3414 MemOperand(elements, base_offset + Register::kExponentOffset)); | 3414 MemOperand(elements, base_offset + Register::kExponentOffset)); |
3415 } else { | 3415 } else { |
3416 __ addi(scratch, elements, Operand(base_offset)); | 3416 __ addi(scratch, elements, Operand(base_offset)); |
3417 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); | 3417 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); |
3418 } | 3418 } |
3419 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); | 3419 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); |
3420 DeoptimizeIf(eq, instr, "hole"); | 3420 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3421 } | 3421 } |
3422 } | 3422 } |
3423 | 3423 |
3424 | 3424 |
3425 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3425 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3426 HLoadKeyed* hinstr = instr->hydrogen(); | 3426 HLoadKeyed* hinstr = instr->hydrogen(); |
3427 Register elements = ToRegister(instr->elements()); | 3427 Register elements = ToRegister(instr->elements()); |
3428 Register result = ToRegister(instr->result()); | 3428 Register result = ToRegister(instr->result()); |
3429 Register scratch = scratch0(); | 3429 Register scratch = scratch0(); |
3430 Register store_base = scratch; | 3430 Register store_base = scratch; |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3465 } | 3465 } |
3466 #endif | 3466 #endif |
3467 | 3467 |
3468 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, | 3468 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, |
3469 r0); | 3469 r0); |
3470 | 3470 |
3471 // Check for the hole value. | 3471 // Check for the hole value. |
3472 if (requires_hole_check) { | 3472 if (requires_hole_check) { |
3473 if (IsFastSmiElementsKind(hinstr->elements_kind())) { | 3473 if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
3474 __ TestIfSmi(result, r0); | 3474 __ TestIfSmi(result, r0); |
3475 DeoptimizeIf(ne, instr, "not a Smi", cr0); | 3475 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); |
3476 } else { | 3476 } else { |
3477 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3477 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
3478 __ cmp(result, scratch); | 3478 __ cmp(result, scratch); |
3479 DeoptimizeIf(eq, instr, "hole"); | 3479 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3480 } | 3480 } |
3481 } | 3481 } |
3482 } | 3482 } |
3483 | 3483 |
3484 | 3484 |
3485 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3485 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
3486 if (instr->is_typed_elements()) { | 3486 if (instr->is_typed_elements()) { |
3487 DoLoadKeyedExternalArray(instr); | 3487 DoLoadKeyedExternalArray(instr); |
3488 } else if (instr->hydrogen()->representation().IsDouble()) { | 3488 } else if (instr->hydrogen()->representation().IsDouble()) { |
3489 DoLoadKeyedFixedDoubleArray(instr); | 3489 DoLoadKeyedFixedDoubleArray(instr); |
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3628 // Normal function. Replace undefined or null with global receiver. | 3628 // Normal function. Replace undefined or null with global receiver. |
3629 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3629 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
3630 __ cmp(receiver, scratch); | 3630 __ cmp(receiver, scratch); |
3631 __ beq(&global_object); | 3631 __ beq(&global_object); |
3632 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3632 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
3633 __ cmp(receiver, scratch); | 3633 __ cmp(receiver, scratch); |
3634 __ beq(&global_object); | 3634 __ beq(&global_object); |
3635 | 3635 |
3636 // Deoptimize if the receiver is not a JS object. | 3636 // Deoptimize if the receiver is not a JS object. |
3637 __ TestIfSmi(receiver, r0); | 3637 __ TestIfSmi(receiver, r0); |
3638 DeoptimizeIf(eq, instr, "Smi"); | 3638 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
3639 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 3639 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); |
3640 DeoptimizeIf(lt, instr, "not a JavaScript object"); | 3640 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); |
3641 | 3641 |
3642 __ b(&result_in_receiver); | 3642 __ b(&result_in_receiver); |
3643 __ bind(&global_object); | 3643 __ bind(&global_object); |
3644 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3644 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
3645 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3645 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
3646 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3646 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
3647 if (result.is(receiver)) { | 3647 if (result.is(receiver)) { |
3648 __ bind(&result_in_receiver); | 3648 __ bind(&result_in_receiver); |
3649 } else { | 3649 } else { |
3650 Label result_ok; | 3650 Label result_ok; |
(...skipping 12 matching lines...) Expand all Loading... |
3663 Register elements = ToRegister(instr->elements()); | 3663 Register elements = ToRegister(instr->elements()); |
3664 Register scratch = scratch0(); | 3664 Register scratch = scratch0(); |
3665 DCHECK(receiver.is(r3)); // Used for parameter count. | 3665 DCHECK(receiver.is(r3)); // Used for parameter count. |
3666 DCHECK(function.is(r4)); // Required by InvokeFunction. | 3666 DCHECK(function.is(r4)); // Required by InvokeFunction. |
3667 DCHECK(ToRegister(instr->result()).is(r3)); | 3667 DCHECK(ToRegister(instr->result()).is(r3)); |
3668 | 3668 |
3669 // Copy the arguments to this function possibly from the | 3669 // Copy the arguments to this function possibly from the |
3670 // adaptor frame below it. | 3670 // adaptor frame below it. |
3671 const uint32_t kArgumentsLimit = 1 * KB; | 3671 const uint32_t kArgumentsLimit = 1 * KB; |
3672 __ cmpli(length, Operand(kArgumentsLimit)); | 3672 __ cmpli(length, Operand(kArgumentsLimit)); |
3673 DeoptimizeIf(gt, instr, "too many arguments"); | 3673 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments); |
3674 | 3674 |
3675 // Push the receiver and use the register to keep the original | 3675 // Push the receiver and use the register to keep the original |
3676 // number of arguments. | 3676 // number of arguments. |
3677 __ push(receiver); | 3677 __ push(receiver); |
3678 __ mr(receiver, length); | 3678 __ mr(receiver, length); |
3679 // The arguments are at a one pointer size offset from elements. | 3679 // The arguments are at a one pointer size offset from elements. |
3680 __ addi(elements, elements, Operand(1 * kPointerSize)); | 3680 __ addi(elements, elements, Operand(1 * kPointerSize)); |
3681 | 3681 |
3682 // Loop through the arguments pushing them onto the execution | 3682 // Loop through the arguments pushing them onto the execution |
3683 // stack. | 3683 // stack. |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3796 DCHECK(instr->context() != NULL); | 3796 DCHECK(instr->context() != NULL); |
3797 DCHECK(ToRegister(instr->context()).is(cp)); | 3797 DCHECK(ToRegister(instr->context()).is(cp)); |
3798 Register input = ToRegister(instr->value()); | 3798 Register input = ToRegister(instr->value()); |
3799 Register result = ToRegister(instr->result()); | 3799 Register result = ToRegister(instr->result()); |
3800 Register scratch = scratch0(); | 3800 Register scratch = scratch0(); |
3801 | 3801 |
3802 // Deoptimize if not a heap number. | 3802 // Deoptimize if not a heap number. |
3803 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3803 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
3804 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3804 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
3805 __ cmp(scratch, ip); | 3805 __ cmp(scratch, ip); |
3806 DeoptimizeIf(ne, instr, "not a heap number"); | 3806 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
3807 | 3807 |
3808 Label done; | 3808 Label done; |
3809 Register exponent = scratch0(); | 3809 Register exponent = scratch0(); |
3810 scratch = no_reg; | 3810 scratch = no_reg; |
3811 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3811 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
3812 // Check the sign of the argument. If the argument is positive, just | 3812 // Check the sign of the argument. If the argument is positive, just |
3813 // return it. | 3813 // return it. |
3814 __ cmpwi(exponent, Operand::Zero()); | 3814 __ cmpwi(exponent, Operand::Zero()); |
3815 // Move the input to the result if necessary. | 3815 // Move the input to the result if necessary. |
3816 __ Move(result, input); | 3816 __ Move(result, input); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3866 Register input = ToRegister(instr->value()); | 3866 Register input = ToRegister(instr->value()); |
3867 Register result = ToRegister(instr->result()); | 3867 Register result = ToRegister(instr->result()); |
3868 Label done; | 3868 Label done; |
3869 __ cmpi(input, Operand::Zero()); | 3869 __ cmpi(input, Operand::Zero()); |
3870 __ Move(result, input); | 3870 __ Move(result, input); |
3871 __ bge(&done); | 3871 __ bge(&done); |
3872 __ li(r0, Operand::Zero()); // clear xer | 3872 __ li(r0, Operand::Zero()); // clear xer |
3873 __ mtxer(r0); | 3873 __ mtxer(r0); |
3874 __ neg(result, result, SetOE, SetRC); | 3874 __ neg(result, result, SetOE, SetRC); |
3875 // Deoptimize on overflow. | 3875 // Deoptimize on overflow. |
3876 DeoptimizeIf(overflow, instr, "overflow", cr0); | 3876 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); |
3877 __ bind(&done); | 3877 __ bind(&done); |
3878 } | 3878 } |
3879 | 3879 |
3880 | 3880 |
#if V8_TARGET_ARCH_PPC64
// Emits code computing |value| for an int32 held (zero-/sign-extended) in a
// 64-bit register. Non-negative inputs are returned unchanged; kMinInt has
// no representable int32 absolute value, so that case deoptimizes.
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpwi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);  // Non-negative: result already holds the input.

  // Deoptimize on overflow.
  // r0 = kMinInt (0x80000000, sign-extended); equality means neg would
  // overflow.
  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
  __ cmpw(input, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ neg(result, result);
  __ bind(&done);
}
#endif
3899 | 3899 |
3900 | 3900 |
3901 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3901 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3902 // Class for deferred case. | 3902 // Class for deferred case. |
3903 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3903 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3942 | 3942 |
// Emits Math.floor for a double input producing an int32 result.
// Deoptimizes when the floored value does not fit in an int32 or the input
// is NaN, and (optionally) on a -0.0 result.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Register scratch = ip;
  Label done, exact;

  // Fast path: branches to &done/&exact on success; falls through only when
  // the input cannot be floored to an int32 (NaN or out of range), hence the
  // unconditional deopt below.
  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmpi(result, Operand::Zero());
    __ bne(&done);
    // Result is zero: the sign bit lives in the high word of the original
    // double; negative high word means the input was -0.0.
    __ cmpwi(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}
3964 | 3964 |
3965 | 3965 |
// Emits Math.round for a double input producing an int32 result
// (round half towards +infinity). Deoptimizes on NaN, on results that do
// not fit in an int32 and, when required, on a -0.0 result.
void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DoubleRegister input_plus_dot_five = double_scratch1;
  Register scratch1 = scratch0();
  Register scratch2 = ip;
  DoubleRegister dot_five = double_scratch0();
  Label convert, done;

  __ LoadDoubleLiteral(dot_five, 0.5, r0);
  __ fabs(double_scratch1, input);
  // Compare |input| with 0.5; "unordered" means the input is NaN.
  __ fcmpu(double_scratch1, dot_five);
  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ bgt(&convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Read the raw bits (or high word on 32-bit) to test the sign bit,
    // which distinguishes the negative half of the interval.
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch1, input);
#else
    __ MovDoubleHighToInt(scratch1, input);
#endif
    __ cmpi(scratch1, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  Label return_zero;
  __ fcmpu(input, dot_five);
  __ bne(&return_zero);
  __ li(result, Operand(1));  // +0.5.
  __ b(&done);
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ bind(&return_zero);
  __ li(result, Operand::Zero());
  __ b(&done);

  __ bind(&convert);
  // General case: floor(input + 0.5) implements round-half-up.
  __ fadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
                   double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}
4013 | 4013 |
4014 | 4014 |
4015 void LCodeGen::DoMathFround(LMathFround* instr) { | 4015 void LCodeGen::DoMathFround(LMathFround* instr) { |
4016 DoubleRegister input_reg = ToDoubleRegister(instr->value()); | 4016 DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
4017 DoubleRegister output_reg = ToDoubleRegister(instr->result()); | 4017 DoubleRegister output_reg = ToDoubleRegister(instr->result()); |
4018 __ frsp(output_reg, input_reg); | 4018 __ frsp(output_reg, input_reg); |
4019 } | 4019 } |
4020 | 4020 |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4066 | 4066 |
4067 if (exponent_type.IsSmi()) { | 4067 if (exponent_type.IsSmi()) { |
4068 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 4068 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
4069 __ CallStub(&stub); | 4069 __ CallStub(&stub); |
4070 } else if (exponent_type.IsTagged()) { | 4070 } else if (exponent_type.IsTagged()) { |
4071 Label no_deopt; | 4071 Label no_deopt; |
4072 __ JumpIfSmi(r5, &no_deopt); | 4072 __ JumpIfSmi(r5, &no_deopt); |
4073 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset)); | 4073 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset)); |
4074 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4074 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
4075 __ cmp(r10, ip); | 4075 __ cmp(r10, ip); |
4076 DeoptimizeIf(ne, instr, "not a heap number"); | 4076 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
4077 __ bind(&no_deopt); | 4077 __ bind(&no_deopt); |
4078 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 4078 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
4079 __ CallStub(&stub); | 4079 __ CallStub(&stub); |
4080 } else if (exponent_type.IsInteger32()) { | 4080 } else if (exponent_type.IsInteger32()) { |
4081 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 4081 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
4082 __ CallStub(&stub); | 4082 __ CallStub(&stub); |
4083 } else { | 4083 } else { |
4084 DCHECK(exponent_type.IsDouble()); | 4084 DCHECK(exponent_type.IsDouble()); |
4085 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 4085 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
4086 __ CallStub(&stub); | 4086 __ CallStub(&stub); |
(...skipping 358 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4445 } else { | 4445 } else { |
4446 __ cmplw(length, index); | 4446 __ cmplw(length, index); |
4447 } | 4447 } |
4448 } | 4448 } |
4449 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4449 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
4450 Label done; | 4450 Label done; |
4451 __ b(NegateCondition(cc), &done); | 4451 __ b(NegateCondition(cc), &done); |
4452 __ stop("eliminated bounds check failed"); | 4452 __ stop("eliminated bounds check failed"); |
4453 __ bind(&done); | 4453 __ bind(&done); |
4454 } else { | 4454 } else { |
4455 DeoptimizeIf(cc, instr, "out of bounds"); | 4455 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
4456 } | 4456 } |
4457 } | 4457 } |
4458 | 4458 |
4459 | 4459 |
4460 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4460 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
4461 Register external_pointer = ToRegister(instr->elements()); | 4461 Register external_pointer = ToRegister(instr->elements()); |
4462 Register key = no_reg; | 4462 Register key = no_reg; |
4463 ElementsKind elements_kind = instr->elements_kind(); | 4463 ElementsKind elements_kind = instr->elements_kind(); |
4464 bool key_is_constant = instr->key()->IsConstantOperand(); | 4464 bool key_is_constant = instr->key()->IsConstantOperand(); |
4465 int constant_key = 0; | 4465 int constant_key = 0; |
(...skipping 252 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4718 } | 4718 } |
4719 __ bind(¬_applicable); | 4719 __ bind(¬_applicable); |
4720 } | 4720 } |
4721 | 4721 |
4722 | 4722 |
4723 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4723 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
4724 Register object = ToRegister(instr->object()); | 4724 Register object = ToRegister(instr->object()); |
4725 Register temp = ToRegister(instr->temp()); | 4725 Register temp = ToRegister(instr->temp()); |
4726 Label no_memento_found; | 4726 Label no_memento_found; |
4727 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4727 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
4728 DeoptimizeIf(eq, instr, "memento found"); | 4728 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); |
4729 __ bind(&no_memento_found); | 4729 __ bind(&no_memento_found); |
4730 } | 4730 } |
4731 | 4731 |
4732 | 4732 |
4733 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4733 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4734 DCHECK(ToRegister(instr->context()).is(cp)); | 4734 DCHECK(ToRegister(instr->context()).is(cp)); |
4735 DCHECK(ToRegister(instr->left()).is(r4)); | 4735 DCHECK(ToRegister(instr->left()).is(r4)); |
4736 DCHECK(ToRegister(instr->right()).is(r3)); | 4736 DCHECK(ToRegister(instr->right()).is(r3)); |
4737 StringAddStub stub(isolate(), instr->hydrogen()->flags(), | 4737 StringAddStub stub(isolate(), instr->hydrogen()->flags(), |
4738 instr->hydrogen()->pretenure_flag()); | 4738 instr->hydrogen()->pretenure_flag()); |
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5033 } | 5033 } |
5034 | 5034 |
5035 | 5035 |
// Emits Smi tagging of an integer value, deoptimizing when the value
// cannot be represented as a Smi.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  // Uint32 inputs overflow when the value exceeds the Smi payload range.
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
#if !V8_TARGET_ARCH_PPC64
  // On 32-bit targets tagging a signed int32 can itself overflow, so use
  // the checking variant; 64-bit targets always have room for the tag.
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_PPC64
  }
#endif
}
5057 | 5057 |
5058 | 5058 |
5059 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 5059 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
5060 Register scratch = scratch0(); | 5060 Register scratch = scratch0(); |
5061 Register input = ToRegister(instr->value()); | 5061 Register input = ToRegister(instr->value()); |
5062 Register result = ToRegister(instr->result()); | 5062 Register result = ToRegister(instr->result()); |
5063 if (instr->needs_check()) { | 5063 if (instr->needs_check()) { |
5064 STATIC_ASSERT(kHeapObjectTag == 1); | 5064 STATIC_ASSERT(kHeapObjectTag == 1); |
5065 // If the input is a HeapObject, value of scratch won't be zero. | 5065 // If the input is a HeapObject, value of scratch won't be zero. |
5066 __ andi(scratch, input, Operand(kHeapObjectTag)); | 5066 __ andi(scratch, input, Operand(kHeapObjectTag)); |
5067 __ SmiUntag(result, input); | 5067 __ SmiUntag(result, input); |
5068 DeoptimizeIf(ne, instr, "not a Smi", cr0); | 5068 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); |
5069 } else { | 5069 } else { |
5070 __ SmiUntag(result, input); | 5070 __ SmiUntag(result, input); |
5071 } | 5071 } |
5072 } | 5072 } |
5073 | 5073 |
5074 | 5074 |
5075 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 5075 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
5076 DoubleRegister result_reg, | 5076 DoubleRegister result_reg, |
5077 NumberUntagDMode mode) { | 5077 NumberUntagDMode mode) { |
5078 bool can_convert_undefined_to_nan = | 5078 bool can_convert_undefined_to_nan = |
5079 instr->hydrogen()->can_convert_undefined_to_nan(); | 5079 instr->hydrogen()->can_convert_undefined_to_nan(); |
5080 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 5080 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
5081 | 5081 |
5082 Register scratch = scratch0(); | 5082 Register scratch = scratch0(); |
5083 DCHECK(!result_reg.is(double_scratch0())); | 5083 DCHECK(!result_reg.is(double_scratch0())); |
5084 | 5084 |
5085 Label convert, load_smi, done; | 5085 Label convert, load_smi, done; |
5086 | 5086 |
5087 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 5087 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
5088 // Smi check. | 5088 // Smi check. |
5089 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 5089 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
5090 | 5090 |
5091 // Heap number map check. | 5091 // Heap number map check. |
5092 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5092 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
5093 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 5093 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
5094 __ cmp(scratch, ip); | 5094 __ cmp(scratch, ip); |
5095 if (can_convert_undefined_to_nan) { | 5095 if (can_convert_undefined_to_nan) { |
5096 __ bne(&convert); | 5096 __ bne(&convert); |
5097 } else { | 5097 } else { |
5098 DeoptimizeIf(ne, instr, "not a heap number"); | 5098 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
5099 } | 5099 } |
5100 // load heap number | 5100 // load heap number |
5101 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5101 __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
5102 if (deoptimize_on_minus_zero) { | 5102 if (deoptimize_on_minus_zero) { |
5103 #if V8_TARGET_ARCH_PPC64 | 5103 #if V8_TARGET_ARCH_PPC64 |
5104 __ MovDoubleToInt64(scratch, result_reg); | 5104 __ MovDoubleToInt64(scratch, result_reg); |
5105 // rotate left by one for simple compare. | 5105 // rotate left by one for simple compare. |
5106 __ rldicl(scratch, scratch, 1, 0); | 5106 __ rldicl(scratch, scratch, 1, 0); |
5107 __ cmpi(scratch, Operand(1)); | 5107 __ cmpi(scratch, Operand(1)); |
5108 #else | 5108 #else |
5109 __ MovDoubleToInt64(scratch, ip, result_reg); | 5109 __ MovDoubleToInt64(scratch, ip, result_reg); |
5110 __ cmpi(ip, Operand::Zero()); | 5110 __ cmpi(ip, Operand::Zero()); |
5111 __ bne(&done); | 5111 __ bne(&done); |
5112 __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0); | 5112 __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0); |
5113 #endif | 5113 #endif |
5114 DeoptimizeIf(eq, instr, "minus zero"); | 5114 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
5115 } | 5115 } |
5116 __ b(&done); | 5116 __ b(&done); |
5117 if (can_convert_undefined_to_nan) { | 5117 if (can_convert_undefined_to_nan) { |
5118 __ bind(&convert); | 5118 __ bind(&convert); |
5119 // Convert undefined (and hole) to NaN. | 5119 // Convert undefined (and hole) to NaN. |
5120 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5120 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
5121 __ cmp(input_reg, ip); | 5121 __ cmp(input_reg, ip); |
5122 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 5122 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
5123 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 5123 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
5124 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 5124 __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
5125 __ b(&done); | 5125 __ b(&done); |
5126 } | 5126 } |
5127 } else { | 5127 } else { |
5128 __ SmiUntag(scratch, input_reg); | 5128 __ SmiUntag(scratch, input_reg); |
5129 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 5129 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
5130 } | 5130 } |
5131 // Smi to double register conversion | 5131 // Smi to double register conversion |
5132 __ bind(&load_smi); | 5132 __ bind(&load_smi); |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5174 __ bind(&check_bools); | 5174 __ bind(&check_bools); |
5175 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 5175 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
5176 __ cmp(input_reg, ip); | 5176 __ cmp(input_reg, ip); |
5177 __ bne(&check_false); | 5177 __ bne(&check_false); |
5178 __ li(input_reg, Operand(1)); | 5178 __ li(input_reg, Operand(1)); |
5179 __ b(&done); | 5179 __ b(&done); |
5180 | 5180 |
5181 __ bind(&check_false); | 5181 __ bind(&check_false); |
5182 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 5182 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
5183 __ cmp(input_reg, ip); | 5183 __ cmp(input_reg, ip); |
5184 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7); | 5184 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse, |
| 5185 cr7); |
5185 __ li(input_reg, Operand::Zero()); | 5186 __ li(input_reg, Operand::Zero()); |
5186 } else { | 5187 } else { |
5187 DeoptimizeIf(ne, instr, "not a heap number", cr7); | 5188 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7); |
5188 | 5189 |
5189 __ lfd(double_scratch2, | 5190 __ lfd(double_scratch2, |
5190 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5191 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
5191 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5192 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5192 // preserve heap number pointer in scratch2 for minus zero check below | 5193 // preserve heap number pointer in scratch2 for minus zero check below |
5193 __ mr(scratch2, input_reg); | 5194 __ mr(scratch2, input_reg); |
5194 } | 5195 } |
5195 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, | 5196 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, |
5196 double_scratch); | 5197 double_scratch); |
5197 DeoptimizeIf(ne, instr, "lost precision or NaN", cr7); | 5198 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7); |
5198 | 5199 |
5199 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5200 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5200 __ cmpi(input_reg, Operand::Zero()); | 5201 __ cmpi(input_reg, Operand::Zero()); |
5201 __ bne(&done); | 5202 __ bne(&done); |
5202 __ lwz(scratch1, | 5203 __ lwz(scratch1, |
5203 FieldMemOperand(scratch2, HeapNumber::kValueOffset + | 5204 FieldMemOperand(scratch2, HeapNumber::kValueOffset + |
5204 Register::kExponentOffset)); | 5205 Register::kExponentOffset)); |
5205 __ cmpwi(scratch1, Operand::Zero()); | 5206 __ cmpwi(scratch1, Operand::Zero()); |
5206 DeoptimizeIf(lt, instr, "minus zero", cr7); | 5207 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7); |
5207 } | 5208 } |
5208 } | 5209 } |
5209 __ bind(&done); | 5210 __ bind(&done); |
5210 } | 5211 } |
5211 | 5212 |
5212 | 5213 |
5213 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5214 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
5214 class DeferredTaggedToI FINAL : public LDeferredCode { | 5215 class DeferredTaggedToI FINAL : public LDeferredCode { |
5215 public: | 5216 public: |
5216 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5217 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5265 Register scratch1 = scratch0(); | 5266 Register scratch1 = scratch0(); |
5266 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5267 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
5267 DoubleRegister double_scratch = double_scratch0(); | 5268 DoubleRegister double_scratch = double_scratch0(); |
5268 | 5269 |
5269 if (instr->truncating()) { | 5270 if (instr->truncating()) { |
5270 __ TruncateDoubleToI(result_reg, double_input); | 5271 __ TruncateDoubleToI(result_reg, double_input); |
5271 } else { | 5272 } else { |
5272 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, | 5273 __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, |
5273 double_scratch); | 5274 double_scratch); |
5274 // Deoptimize if the input wasn't a int32 (inside a double). | 5275 // Deoptimize if the input wasn't a int32 (inside a double). |
5275 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5276 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
5276 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5277 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5277 Label done; | 5278 Label done; |
5278 __ cmpi(result_reg, Operand::Zero()); | 5279 __ cmpi(result_reg, Operand::Zero()); |
5279 __ bne(&done); | 5280 __ bne(&done); |
5280 #if V8_TARGET_ARCH_PPC64 | 5281 #if V8_TARGET_ARCH_PPC64 |
5281 __ MovDoubleToInt64(scratch1, double_input); | 5282 __ MovDoubleToInt64(scratch1, double_input); |
5282 #else | 5283 #else |
5283 __ MovDoubleHighToInt(scratch1, double_input); | 5284 __ MovDoubleHighToInt(scratch1, double_input); |
5284 #endif | 5285 #endif |
5285 __ cmpi(scratch1, Operand::Zero()); | 5286 __ cmpi(scratch1, Operand::Zero()); |
5286 DeoptimizeIf(lt, instr, "minus zero"); | 5287 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
5287 __ bind(&done); | 5288 __ bind(&done); |
5288 } | 5289 } |
5289 } | 5290 } |
5290 } | 5291 } |
5291 | 5292 |
5292 | 5293 |
// Converts a double to a Smi. Truncating conversions always succeed;
// otherwise the code deoptimizes when the double is not exactly an int32,
// when a -0.0 result must be rejected, or (32-bit only) when tagging
// overflows.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't a int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
      // Result is zero: check the sign bit of the original double to
      // distinguish -0.0 from +0.0.
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_PPC64
  // 64-bit Smis can hold any int32; tagging cannot overflow.
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}
5327 | 5328 |
5328 | 5329 |
5329 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5330 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
5330 LOperand* input = instr->value(); | 5331 LOperand* input = instr->value(); |
5331 __ TestIfSmi(ToRegister(input), r0); | 5332 __ TestIfSmi(ToRegister(input), r0); |
5332 DeoptimizeIf(ne, instr, "not a Smi", cr0); | 5333 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); |
5333 } | 5334 } |
5334 | 5335 |
5335 | 5336 |
5336 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5337 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
5337 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5338 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
5338 LOperand* input = instr->value(); | 5339 LOperand* input = instr->value(); |
5339 __ TestIfSmi(ToRegister(input), r0); | 5340 __ TestIfSmi(ToRegister(input), r0); |
5340 DeoptimizeIf(eq, instr, "Smi", cr0); | 5341 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); |
5341 } | 5342 } |
5342 } | 5343 } |
5343 | 5344 |
5344 | 5345 |
// Deoptimizes unless the object's instance type matches the check encoded
// in the hydrogen instruction: either an inclusive [first, last] interval
// or a (mask, tag) pattern on the instance-type byte.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  // Load the instance-type byte from the object's map.
  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // Unsigned compare against the interval's lower bound.
    __ cmpli(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpli(scratch, Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      // Single-bit mask: test the bit directly; the expected tag is either
      // zero (bit must be clear) or the mask itself (bit must be set).
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ andi(r0, scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   cr0);
    } else {
      // General case: mask, then compare the masked value with the tag.
      __ andi(scratch, scratch, Operand(mask));
      __ cmpi(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}
5386 | 5388 |
5387 | 5389 |
5388 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5390 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
5389 Register reg = ToRegister(instr->value()); | 5391 Register reg = ToRegister(instr->value()); |
5390 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5392 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
5391 AllowDeferredHandleDereference smi_check; | 5393 AllowDeferredHandleDereference smi_check; |
5392 if (isolate()->heap()->InNewSpace(*object)) { | 5394 if (isolate()->heap()->InNewSpace(*object)) { |
5393 Register reg = ToRegister(instr->value()); | 5395 Register reg = ToRegister(instr->value()); |
5394 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5396 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
5395 __ mov(ip, Operand(Handle<Object>(cell))); | 5397 __ mov(ip, Operand(Handle<Object>(cell))); |
5396 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5398 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
5397 __ cmp(reg, ip); | 5399 __ cmp(reg, ip); |
5398 } else { | 5400 } else { |
5399 __ Cmpi(reg, Operand(object), r0); | 5401 __ Cmpi(reg, Operand(object), r0); |
5400 } | 5402 } |
5401 DeoptimizeIf(ne, instr, "value mismatch"); | 5403 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); |
5402 } | 5404 } |
5403 | 5405 |
5404 | 5406 |
5405 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5407 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
5406 { | 5408 { |
5407 PushSafepointRegistersScope scope(this); | 5409 PushSafepointRegistersScope scope(this); |
5408 __ push(object); | 5410 __ push(object); |
5409 __ li(cp, Operand::Zero()); | 5411 __ li(cp, Operand::Zero()); |
5410 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5412 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
5411 RecordSafepointWithRegisters(instr->pointer_map(), 1, | 5413 RecordSafepointWithRegisters(instr->pointer_map(), 1, |
5412 Safepoint::kNoLazyDeopt); | 5414 Safepoint::kNoLazyDeopt); |
5413 __ StoreToSafepointRegisterSlot(r3, scratch0()); | 5415 __ StoreToSafepointRegisterSlot(r3, scratch0()); |
5414 } | 5416 } |
5415 __ TestIfSmi(scratch0(), r0); | 5417 __ TestIfSmi(scratch0(), r0); |
5416 DeoptimizeIf(eq, instr, "instance migration failed", cr0); | 5418 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0); |
5417 } | 5419 } |
5418 | 5420 |
5419 | 5421 |
5420 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5422 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
5421 class DeferredCheckMaps FINAL : public LDeferredCode { | 5423 class DeferredCheckMaps FINAL : public LDeferredCode { |
5422 public: | 5424 public: |
5423 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5425 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
5424 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5426 : LDeferredCode(codegen), instr_(instr), object_(object) { |
5425 SetExit(check_maps()); | 5427 SetExit(check_maps()); |
5426 } | 5428 } |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5464 Handle<Map> map = maps->at(i).handle(); | 5466 Handle<Map> map = maps->at(i).handle(); |
5465 __ CompareMap(map_reg, map, &success); | 5467 __ CompareMap(map_reg, map, &success); |
5466 __ beq(&success); | 5468 __ beq(&success); |
5467 } | 5469 } |
5468 | 5470 |
5469 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5471 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5470 __ CompareMap(map_reg, map, &success); | 5472 __ CompareMap(map_reg, map, &success); |
5471 if (instr->hydrogen()->HasMigrationTarget()) { | 5473 if (instr->hydrogen()->HasMigrationTarget()) { |
5472 __ bne(deferred->entry()); | 5474 __ bne(deferred->entry()); |
5473 } else { | 5475 } else { |
5474 DeoptimizeIf(ne, instr, "wrong map"); | 5476 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
5475 } | 5477 } |
5476 | 5478 |
5477 __ bind(&success); | 5479 __ bind(&success); |
5478 } | 5480 } |
5479 | 5481 |
5480 | 5482 |
5481 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5483 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5482 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5484 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
5483 Register result_reg = ToRegister(instr->result()); | 5485 Register result_reg = ToRegister(instr->result()); |
5484 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5486 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
(...skipping 18 matching lines...) Expand all Loading... |
5503 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5505 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
5504 | 5506 |
5505 // Check for heap number | 5507 // Check for heap number |
5506 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5508 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
5507 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); | 5509 __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); |
5508 __ beq(&heap_number); | 5510 __ beq(&heap_number); |
5509 | 5511 |
5510 // Check for undefined. Undefined is converted to zero for clamping | 5512 // Check for undefined. Undefined is converted to zero for clamping |
5511 // conversions. | 5513 // conversions. |
5512 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); | 5514 __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); |
5513 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 5515 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
5514 __ li(result_reg, Operand::Zero()); | 5516 __ li(result_reg, Operand::Zero()); |
5515 __ b(&done); | 5517 __ b(&done); |
5516 | 5518 |
5517 // Heap number | 5519 // Heap number |
5518 __ bind(&heap_number); | 5520 __ bind(&heap_number); |
5519 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5521 __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
5520 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5522 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
5521 __ b(&done); | 5523 __ b(&done); |
5522 | 5524 |
5523 // smi | 5525 // smi |
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5975 DCHECK(!environment->HasBeenRegistered()); | 5977 DCHECK(!environment->HasBeenRegistered()); |
5976 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5978 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
5977 | 5979 |
5978 GenerateOsrPrologue(); | 5980 GenerateOsrPrologue(); |
5979 } | 5981 } |
5980 | 5982 |
5981 | 5983 |
5982 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5984 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
5983 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5985 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
5984 __ cmp(r3, ip); | 5986 __ cmp(r3, ip); |
5985 DeoptimizeIf(eq, instr, "undefined"); | 5987 DeoptimizeIf(eq, instr, Deoptimizer::kUndefined); |
5986 | 5988 |
5987 Register null_value = r8; | 5989 Register null_value = r8; |
5988 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5990 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
5989 __ cmp(r3, null_value); | 5991 __ cmp(r3, null_value); |
5990 DeoptimizeIf(eq, instr, "null"); | 5992 DeoptimizeIf(eq, instr, Deoptimizer::kNull); |
5991 | 5993 |
5992 __ TestIfSmi(r3, r0); | 5994 __ TestIfSmi(r3, r0); |
5993 DeoptimizeIf(eq, instr, "Smi", cr0); | 5995 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); |
5994 | 5996 |
5995 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5997 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
5996 __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE); | 5998 __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE); |
5997 DeoptimizeIf(le, instr, "wrong instance type"); | 5999 DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType); |
5998 | 6000 |
5999 Label use_cache, call_runtime; | 6001 Label use_cache, call_runtime; |
6000 __ CheckEnumCache(null_value, &call_runtime); | 6002 __ CheckEnumCache(null_value, &call_runtime); |
6001 | 6003 |
6002 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); | 6004 __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); |
6003 __ b(&use_cache); | 6005 __ b(&use_cache); |
6004 | 6006 |
6005 // Get the set of properties to enumerate. | 6007 // Get the set of properties to enumerate. |
6006 __ bind(&call_runtime); | 6008 __ bind(&call_runtime); |
6007 __ push(r3); | 6009 __ push(r3); |
6008 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 6010 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
6009 | 6011 |
6010 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | 6012 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); |
6011 __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 6013 __ LoadRoot(ip, Heap::kMetaMapRootIndex); |
6012 __ cmp(r4, ip); | 6014 __ cmp(r4, ip); |
6013 DeoptimizeIf(ne, instr, "wrong map"); | 6015 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
6014 __ bind(&use_cache); | 6016 __ bind(&use_cache); |
6015 } | 6017 } |
6016 | 6018 |
6017 | 6019 |
6018 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 6020 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
6019 Register map = ToRegister(instr->map()); | 6021 Register map = ToRegister(instr->map()); |
6020 Register result = ToRegister(instr->result()); | 6022 Register result = ToRegister(instr->result()); |
6021 Label load_cache, done; | 6023 Label load_cache, done; |
6022 __ EnumLength(result, map); | 6024 __ EnumLength(result, map); |
6023 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); | 6025 __ CmpSmiLiteral(result, Smi::FromInt(0), r0); |
6024 __ bne(&load_cache); | 6026 __ bne(&load_cache); |
6025 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 6027 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
6026 __ b(&done); | 6028 __ b(&done); |
6027 | 6029 |
6028 __ bind(&load_cache); | 6030 __ bind(&load_cache); |
6029 __ LoadInstanceDescriptors(map, result); | 6031 __ LoadInstanceDescriptors(map, result); |
6030 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 6032 __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
6031 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 6033 __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
6032 __ cmpi(result, Operand::Zero()); | 6034 __ cmpi(result, Operand::Zero()); |
6033 DeoptimizeIf(eq, instr, "no cache"); | 6035 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); |
6034 | 6036 |
6035 __ bind(&done); | 6037 __ bind(&done); |
6036 } | 6038 } |
6037 | 6039 |
6038 | 6040 |
6039 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 6041 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
6040 Register object = ToRegister(instr->value()); | 6042 Register object = ToRegister(instr->value()); |
6041 Register map = ToRegister(instr->map()); | 6043 Register map = ToRegister(instr->map()); |
6042 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 6044 __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
6043 __ cmp(map, scratch0()); | 6045 __ cmp(map, scratch0()); |
6044 DeoptimizeIf(ne, instr, "wrong map"); | 6046 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
6045 } | 6047 } |
6046 | 6048 |
6047 | 6049 |
6048 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 6050 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
6049 Register result, Register object, | 6051 Register result, Register object, |
6050 Register index) { | 6052 Register index) { |
6051 PushSafepointRegistersScope scope(this); | 6053 PushSafepointRegistersScope scope(this); |
6052 __ Push(object, index); | 6054 __ Push(object, index); |
6053 __ li(cp, Operand::Zero()); | 6055 __ li(cp, Operand::Zero()); |
6054 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); | 6056 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6127 __ Push(scope_info); | 6129 __ Push(scope_info); |
6128 __ push(ToRegister(instr->function())); | 6130 __ push(ToRegister(instr->function())); |
6129 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6131 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
6130 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6132 RecordSafepoint(Safepoint::kNoLazyDeopt); |
6131 } | 6133 } |
6132 | 6134 |
6133 | 6135 |
6134 #undef __ | 6136 #undef __ |
6135 } | 6137 } |
6136 } // namespace v8::internal | 6138 } // namespace v8::internal |
OLD | NEW |