OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 183 matching lines...)
194 LoadRoot(kScratchRegister, index); | 194 LoadRoot(kScratchRegister, index); |
195 cmpq(with, kScratchRegister); | 195 cmpq(with, kScratchRegister); |
196 } | 196 } |
197 | 197 |
198 | 198 |
199 void MacroAssembler::RecordWriteHelper(Register object, | 199 void MacroAssembler::RecordWriteHelper(Register object, |
200 Register addr, | 200 Register addr, |
201 Register scratch) { | 201 Register scratch) { |
202 if (emit_debug_code()) { | 202 if (emit_debug_code()) { |
203 // Check that the object is not in new space. | 203 // Check that the object is not in new space. |
204 NearLabel not_in_new_space; | 204 Label not_in_new_space; |
205 InNewSpace(object, scratch, not_equal, &not_in_new_space); | 205 InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear); |
206 Abort("new-space object passed to RecordWriteHelper"); | 206 Abort("new-space object passed to RecordWriteHelper"); |
207 bind(&not_in_new_space); | 207 bind(&not_in_new_space); |
208 } | 208 } |
209 | 209 |
210 // Compute the page start address from the heap object pointer, and reuse | 210 // Compute the page start address from the heap object pointer, and reuse |
211 // the 'object' register for it. | 211 // the 'object' register for it. |
212 and_(object, Immediate(~Page::kPageAlignmentMask)); | 212 and_(object, Immediate(~Page::kPageAlignmentMask)); |
213 | 213 |
214 // Compute the number of the region covering addr. See Page::GetRegionNumberForAddress | 214 // Compute the number of the region covering addr. See Page::GetRegionNumberForAddress |
215 // method for more details. | 215 // method for more details. |
216 shrl(addr, Immediate(Page::kRegionSizeLog2)); | 216 shrl(addr, Immediate(Page::kRegionSizeLog2)); |
217 andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2)); | 217 andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2)); |
218 | 218 |
219 // Set dirty mark for region. | 219 // Set dirty mark for region. |
220 bts(Operand(object, Page::kDirtyFlagOffset), addr); | 220 bts(Operand(object, Page::kDirtyFlagOffset), addr); |
221 } | 221 } |
222 | 222 |
223 | 223 |
| 224 void MacroAssembler::InNewSpace(Register object, |
| 225 Register scratch, |
| 226 Condition cc, |
| 227 Label* branch, |
| 228 Label::Distance near_jump) { |
| 229 if (Serializer::enabled()) { |
| 230 // Can't do arithmetic on external references if it might get serialized. |
| 231 // The mask isn't really an address. We load it as an external reference in |
| 232 // case the size of the new space is different between the snapshot maker |
| 233 // and the running system. |
| 234 if (scratch.is(object)) { |
| 235 movq(kScratchRegister, ExternalReference::new_space_mask(isolate())); |
| 236 and_(scratch, kScratchRegister); |
| 237 } else { |
| 238 movq(scratch, ExternalReference::new_space_mask(isolate())); |
| 239 and_(scratch, object); |
| 240 } |
| 241 movq(kScratchRegister, ExternalReference::new_space_start(isolate())); |
| 242 cmpq(scratch, kScratchRegister); |
| 243 j(cc, branch, near_jump); |
| 244 } else { |
| 245 ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); |
| 246 intptr_t new_space_start = |
| 247 reinterpret_cast<intptr_t>(HEAP->NewSpaceStart()); |
| 248 movq(kScratchRegister, -new_space_start, RelocInfo::NONE); |
| 249 if (scratch.is(object)) { |
| 250 addq(scratch, kScratchRegister); |
| 251 } else { |
| 252 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); |
| 253 } |
| 254 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); |
| 255 j(cc, branch, near_jump); |
| 256 } |
| 257 } |
| 258 |
| 259 |
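The new-space test above works because the new space is reserved as a single power-of-two-sized, size-aligned block: masking an address with the space mask and comparing against the space start (or, in the non-serializer branch, adding the negated start and masking) identifies exactly the addresses inside the space. A minimal standalone sketch of that arithmetic, with illustrative size and base values rather than V8's actual ones:

    #include <cstdint>
    #include <cassert>

    // Illustrative values: a 4 MB new space reserved at a 4 MB-aligned base.
    const uintptr_t kNewSpaceSize  = 4u * 1024 * 1024;      // power of two
    const uintptr_t kNewSpaceStart = 0x40000000;            // size-aligned
    const uintptr_t kNewSpaceMask  = ~(kNewSpaceSize - 1);  // keeps the high bits

    bool InNewSpace(uintptr_t addr) {
      // Equivalent forms of the check emitted above:
      //   (addr & mask) == start          -- serializer-safe variant
      //   ((addr - start) & mask) == 0    -- variant using the negated start
      return (addr & kNewSpaceMask) == kNewSpaceStart;
    }

    int main() {
      assert(InNewSpace(kNewSpaceStart + 128));
      assert(!InNewSpace(kNewSpaceStart + kNewSpaceSize + 128));
      return 0;
    }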
224 void MacroAssembler::RecordWrite(Register object, | 260 void MacroAssembler::RecordWrite(Register object, |
225 int offset, | 261 int offset, |
226 Register value, | 262 Register value, |
227 Register index) { | 263 Register index) { |
228 // The compiled code assumes that record write doesn't change the | 264 // The compiled code assumes that record write doesn't change the |
229 // context register, so we check that none of the clobbered | 265 // context register, so we check that none of the clobbered |
230 // registers are rsi. | 266 // registers are rsi. |
231 ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi)); | 267 ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi)); |
232 | 268 |
233 // First, check if a write barrier is even needed. The tests below | 269 // First, check if a write barrier is even needed. The tests below |
(...skipping 46 matching lines...)
280 } | 316 } |
281 | 317 |
282 | 318 |
283 void MacroAssembler::RecordWriteNonSmi(Register object, | 319 void MacroAssembler::RecordWriteNonSmi(Register object, |
284 int offset, | 320 int offset, |
285 Register scratch, | 321 Register scratch, |
286 Register index) { | 322 Register index) { |
287 Label done; | 323 Label done; |
288 | 324 |
289 if (emit_debug_code()) { | 325 if (emit_debug_code()) { |
290 NearLabel okay; | 326 Label okay; |
291 JumpIfNotSmi(object, &okay); | 327 JumpIfNotSmi(object, &okay, Label::kNear); |
292 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 328 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
293 bind(&okay); | 329 bind(&okay); |
294 | 330 |
295 if (offset == 0) { | 331 if (offset == 0) { |
296 // index must be int32. | 332 // index must be int32. |
297 Register tmp = index.is(rax) ? rbx : rax; | 333 Register tmp = index.is(rax) ? rbx : rax; |
298 push(tmp); | 334 push(tmp); |
299 movl(tmp, index); | 335 movl(tmp, index); |
300 cmpq(tmp, index); | 336 cmpq(tmp, index); |
301 Check(equal, "Index register for RecordWrite must be untagged int32."); | 337 Check(equal, "Index register for RecordWrite must be untagged int32."); |
(...skipping 744 matching lines...)
1046 int power) { | 1082 int power) { |
1047 ASSERT((0 <= power) && (power < 32)); | 1083 ASSERT((0 <= power) && (power < 32)); |
1048 if (dst.is(src)) { | 1084 if (dst.is(src)) { |
1049 shr(dst, Immediate(power + kSmiShift)); | 1085 shr(dst, Immediate(power + kSmiShift)); |
1050 } else { | 1086 } else { |
1051 UNIMPLEMENTED(); // Not used. | 1087 UNIMPLEMENTED(); // Not used. |
1052 } | 1088 } |
1053 } | 1089 } |
1054 | 1090 |
1055 | 1091 |
| 1092 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, |
| 1093 Label* on_not_smis, |
| 1094 Label::Distance near_jump) { |
| 1095 if (dst.is(src1) || dst.is(src2)) { |
| 1096 ASSERT(!src1.is(kScratchRegister)); |
| 1097 ASSERT(!src2.is(kScratchRegister)); |
| 1098 movq(kScratchRegister, src1); |
| 1099 or_(kScratchRegister, src2); |
| 1100 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump); |
| 1101 movq(dst, kScratchRegister); |
| 1102 } else { |
| 1103 movq(dst, src1); |
| 1104 or_(dst, src2); |
| 1105 JumpIfNotSmi(dst, on_not_smis, near_jump); |
| 1106 } |
| 1107 } |
| 1108 |
| 1109 |
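SmiOrIfSmis and the CheckSmi/JumpIf* helpers that follow all rest on the x64 smi encoding: kSmiTag is 0, the tag lives in the low bit (kSmiTagMask is 1), and the 32-bit payload sits in the upper half of the word (kSmiShift is 32), so OR-ing two smis keeps the tag bit clear while any heap object pointer (tagged with 1) sets it. A self-contained model of that encoding, reusing the constant names purely for illustration:

    #include <cstdint>
    #include <cassert>

    const int kSmiTag = 0;
    const int kSmiTagMask = 1;
    const int kSmiShift = 32;

    // On x64 the 32-bit value occupies the upper word; the low 32 bits stay zero.
    uint64_t SmiFromInt(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
    }
    bool IsSmi(uint64_t word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      uint64_t a = SmiFromInt(7), b = SmiFromInt(-3);
      assert(IsSmi(a | b));                   // OR of two smis is still smi-tagged
      uint64_t heap_object = 0x100000000001;  // illustrative pointer, tag bit set
      assert(!IsSmi(a | heap_object));        // so one combined test catches either
      return 0;
    }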
1056 Condition MacroAssembler::CheckSmi(Register src) { | 1110 Condition MacroAssembler::CheckSmi(Register src) { |
1057 ASSERT_EQ(0, kSmiTag); | 1111 ASSERT_EQ(0, kSmiTag); |
1058 testb(src, Immediate(kSmiTagMask)); | 1112 testb(src, Immediate(kSmiTagMask)); |
1059 return zero; | 1113 return zero; |
1060 } | 1114 } |
1061 | 1115 |
1062 | 1116 |
1063 Condition MacroAssembler::CheckSmi(const Operand& src) { | 1117 Condition MacroAssembler::CheckSmi(const Operand& src) { |
1064 ASSERT_EQ(0, kSmiTag); | 1118 ASSERT_EQ(0, kSmiTag); |
1065 testb(src, Immediate(kSmiTagMask)); | 1119 testb(src, Immediate(kSmiTagMask)); |
(...skipping 90 matching lines...)
1156 if (!(src.AddressUsesRegister(dst))) { | 1210 if (!(src.AddressUsesRegister(dst))) { |
1157 movl(dst, Immediate(kSmiTagMask)); | 1211 movl(dst, Immediate(kSmiTagMask)); |
1158 andl(dst, src); | 1212 andl(dst, src); |
1159 } else { | 1213 } else { |
1160 movl(dst, src); | 1214 movl(dst, src); |
1161 andl(dst, Immediate(kSmiTagMask)); | 1215 andl(dst, Immediate(kSmiTagMask)); |
1162 } | 1216 } |
1163 } | 1217 } |
1164 | 1218 |
1165 | 1219 |
| 1220 void MacroAssembler::JumpIfNotValidSmiValue(Register src, |
| 1221 Label* on_invalid, |
| 1222 Label::Distance near_jump) { |
| 1223 Condition is_valid = CheckInteger32ValidSmiValue(src); |
| 1224 j(NegateCondition(is_valid), on_invalid, near_jump); |
| 1225 } |
| 1226 |
| 1227 |
| 1228 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, |
| 1229 Label* on_invalid, |
| 1230 Label::Distance near_jump) { |
| 1231 Condition is_valid = CheckUInteger32ValidSmiValue(src); |
| 1232 j(NegateCondition(is_valid), on_invalid, near_jump); |
| 1233 } |
| 1234 |
| 1235 |
| 1236 void MacroAssembler::JumpIfSmi(Register src, |
| 1237 Label* on_smi, |
| 1238 Label::Distance near_jump) { |
| 1239 Condition smi = CheckSmi(src); |
| 1240 j(smi, on_smi, near_jump); |
| 1241 } |
| 1242 |
| 1243 |
| 1244 void MacroAssembler::JumpIfNotSmi(Register src, |
| 1245 Label* on_not_smi, |
| 1246 Label::Distance near_jump) { |
| 1247 Condition smi = CheckSmi(src); |
| 1248 j(NegateCondition(smi), on_not_smi, near_jump); |
| 1249 } |
| 1250 |
| 1251 |
| 1252 void MacroAssembler::JumpUnlessNonNegativeSmi( |
| 1253 Register src, Label* on_not_smi_or_negative, |
| 1254 Label::Distance near_jump) { |
| 1255 Condition non_negative_smi = CheckNonNegativeSmi(src); |
| 1256 j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump); |
| 1257 } |
| 1258 |
| 1259 |
| 1260 void MacroAssembler::JumpIfSmiEqualsConstant(Register src, |
| 1261 Smi* constant, |
| 1262 Label* on_equals, |
| 1263 Label::Distance near_jump) { |
| 1264 SmiCompare(src, constant); |
| 1265 j(equal, on_equals, near_jump); |
| 1266 } |
| 1267 |
| 1268 |
| 1269 void MacroAssembler::JumpIfNotBothSmi(Register src1, |
| 1270 Register src2, |
| 1271 Label* on_not_both_smi, |
| 1272 Label::Distance near_jump) { |
| 1273 Condition both_smi = CheckBothSmi(src1, src2); |
| 1274 j(NegateCondition(both_smi), on_not_both_smi, near_jump); |
| 1275 } |
| 1276 |
| 1277 |
| 1278 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1, |
| 1279 Register src2, |
| 1280 Label* on_not_both_smi, |
| 1281 Label::Distance near_jump) { |
| 1282 Condition both_smi = CheckBothNonNegativeSmi(src1, src2); |
| 1283 j(NegateCondition(both_smi), on_not_both_smi, near_jump); |
| 1284 } |
| 1285 |
| 1286 |
| 1287 void MacroAssembler::SmiTryAddConstant(Register dst, |
| 1288 Register src, |
| 1289 Smi* constant, |
| 1290 Label* on_not_smi_result, |
| 1291 Label::Distance near_jump) { |
| 1292 // Does not assume that src is a smi. |
| 1293 ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask)); |
| 1294 ASSERT_EQ(0, kSmiTag); |
| 1295 ASSERT(!dst.is(kScratchRegister)); |
| 1296 ASSERT(!src.is(kScratchRegister)); |
| 1297 |
| 1298 JumpIfNotSmi(src, on_not_smi_result, near_jump); |
| 1299 Register tmp = (dst.is(src) ? kScratchRegister : dst); |
| 1300 LoadSmiConstant(tmp, constant); |
| 1301 addq(tmp, src); |
| 1302 j(overflow, on_not_smi_result, near_jump); |
| 1303 if (dst.is(src)) { |
| 1304 movq(dst, tmp); |
| 1305 } |
| 1306 } |
| 1307 |
| 1308 |
1166 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { | 1309 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
1167 if (constant->value() == 0) { | 1310 if (constant->value() == 0) { |
1168 if (!dst.is(src)) { | 1311 if (!dst.is(src)) { |
1169 movq(dst, src); | 1312 movq(dst, src); |
1170 } | 1313 } |
1171 return; | 1314 return; |
1172 } else if (dst.is(src)) { | 1315 } else if (dst.is(src)) { |
1173 ASSERT(!dst.is(kScratchRegister)); | 1316 ASSERT(!dst.is(kScratchRegister)); |
1174 switch (constant->value()) { | 1317 switch (constant->value()) { |
1175 case 1: | 1318 case 1: |
(...skipping 36 matching lines...)
1212 } | 1355 } |
1213 | 1356 |
1214 | 1357 |
1215 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | 1358 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
1216 if (constant->value() != 0) { | 1359 if (constant->value() != 0) { |
1217 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); | 1360 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); |
1218 } | 1361 } |
1219 } | 1362 } |
1220 | 1363 |
1221 | 1364 |
| 1365 void MacroAssembler::SmiAddConstant(Register dst, |
| 1366 Register src, |
| 1367 Smi* constant, |
| 1368 Label* on_not_smi_result, |
| 1369 Label::Distance near_jump) { |
| 1370 if (constant->value() == 0) { |
| 1371 if (!dst.is(src)) { |
| 1372 movq(dst, src); |
| 1373 } |
| 1374 } else if (dst.is(src)) { |
| 1375 ASSERT(!dst.is(kScratchRegister)); |
| 1376 |
| 1377 LoadSmiConstant(kScratchRegister, constant); |
| 1378 addq(kScratchRegister, src); |
| 1379 j(overflow, on_not_smi_result, near_jump); |
| 1380 movq(dst, kScratchRegister); |
| 1381 } else { |
| 1382 LoadSmiConstant(dst, constant); |
| 1383 addq(dst, src); |
| 1384 j(overflow, on_not_smi_result, near_jump); |
| 1385 } |
| 1386 } |
| 1387 |
| 1388 |
1222 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { | 1389 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
1223 if (constant->value() == 0) { | 1390 if (constant->value() == 0) { |
1224 if (!dst.is(src)) { | 1391 if (!dst.is(src)) { |
1225 movq(dst, src); | 1392 movq(dst, src); |
1226 } | 1393 } |
1227 } else if (dst.is(src)) { | 1394 } else if (dst.is(src)) { |
1228 ASSERT(!dst.is(kScratchRegister)); | 1395 ASSERT(!dst.is(kScratchRegister)); |
1229 Register constant_reg = GetSmiConstant(constant); | 1396 Register constant_reg = GetSmiConstant(constant); |
1230 subq(dst, constant_reg); | 1397 subq(dst, constant_reg); |
1231 } else { | 1398 } else { |
1232 if (constant->value() == Smi::kMinValue) { | 1399 if (constant->value() == Smi::kMinValue) { |
1233 LoadSmiConstant(dst, constant); | 1400 LoadSmiConstant(dst, constant); |
1234 // Adding and subtracting the min-value gives the same result, it only | 1401 // Adding and subtracting the min-value gives the same result, it only |
1235 // differs on the overflow bit, which we don't check here. | 1402 // differs on the overflow bit, which we don't check here. |
1236 addq(dst, src); | 1403 addq(dst, src); |
1237 } else { | 1404 } else { |
1238 // Subtract by adding the negation. | 1405 // Subtract by adding the negation. |
1239 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); | 1406 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); |
1240 addq(dst, src); | 1407 addq(dst, src); |
1241 } | 1408 } |
1242 } | 1409 } |
1243 } | 1410 } |
1244 | 1411 |
1245 | 1412 |
| 1413 void MacroAssembler::SmiSubConstant(Register dst, |
| 1414 Register src, |
| 1415 Smi* constant, |
| 1416 Label* on_not_smi_result, |
| 1417 Label::Distance near_jump) { |
| 1418 if (constant->value() == 0) { |
| 1419 if (!dst.is(src)) { |
| 1420 movq(dst, src); |
| 1421 } |
| 1422 } else if (dst.is(src)) { |
| 1423 ASSERT(!dst.is(kScratchRegister)); |
| 1424 if (constant->value() == Smi::kMinValue) { |
| 1425 // Subtracting min-value from any non-negative value will overflow. |
| 1426 // We test the non-negativeness before doing the subtraction. |
| 1427 testq(src, src); |
| 1428 j(not_sign, on_not_smi_result, near_jump); |
| 1429 LoadSmiConstant(kScratchRegister, constant); |
| 1430 subq(dst, kScratchRegister); |
| 1431 } else { |
| 1432 // Subtract by adding the negation. |
| 1433 LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value())); |
| 1434 addq(kScratchRegister, dst); |
| 1435 j(overflow, on_not_smi_result, near_jump); |
| 1436 movq(dst, kScratchRegister); |
| 1437 } |
| 1438 } else { |
| 1439 if (constant->value() == Smi::kMinValue) { |
| 1440 // Subtracting min-value from any non-negative value will overflow. |
| 1441 // We test the non-negativeness before doing the subtraction. |
| 1442 testq(src, src); |
| 1443 j(not_sign, on_not_smi_result, near_jump); |
| 1444 LoadSmiConstant(dst, constant); |
| 1445 // Adding and subtracting the min-value gives the same result, it only |
| 1446 // differs on the overflow bit, which we don't check here. |
| 1447 addq(dst, src); |
| 1448 } else { |
| 1449 // Subtract by adding the negation. |
| 1450 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); |
| 1451 addq(dst, src); |
| 1452 j(overflow, on_not_smi_result, near_jump); |
| 1453 } |
| 1454 } |
| 1455 } |
| 1456 |
| 1457 |
| 1458 void MacroAssembler::SmiNeg(Register dst, |
| 1459 Register src, |
| 1460 Label* on_smi_result, |
| 1461 Label::Distance near_jump) { |
| 1462 if (dst.is(src)) { |
| 1463 ASSERT(!dst.is(kScratchRegister)); |
| 1464 movq(kScratchRegister, src); |
| 1465 neg(dst); // Low 32 bits are retained as zero by negation. |
| 1466 // Test if result is zero or Smi::kMinValue. |
| 1467 cmpq(dst, kScratchRegister); |
| 1468 j(not_equal, on_smi_result, near_jump); |
| 1469 movq(src, kScratchRegister); |
| 1470 } else { |
| 1471 movq(dst, src); |
| 1472 neg(dst); |
| 1473 cmpq(dst, src); |
| 1474 // If the result is zero or Smi::kMinValue, negation failed to create a smi. |
| 1475 j(not_equal, on_smi_result, near_jump); |
| 1476 } |
| 1477 } |
| 1478 |
| 1479 |
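The single cmpq in SmiNeg suffices because, in two's complement, neg leaves a register unchanged only for 0 and for the most negative value; among smi inputs those are exactly smi 0 (whose JavaScript negation is -0, not a smi) and Smi::kMinValue (whose negation is out of range). A small model of that property, using the same illustrative encoding as above:

    #include <cstdint>
    #include <cassert>

    const int kSmiShift = 32;
    uint64_t SmiFromInt(int32_t v) {
      return static_cast<uint64_t>(static_cast<uint32_t>(v)) << kSmiShift;
    }
    // Model the machine-level two's-complement neg on a 64-bit register.
    uint64_t Neg(uint64_t w) { return ~w + 1; }

    int main() {
      // Unchanged only for the two inputs whose negation is not a valid smi result.
      assert(Neg(SmiFromInt(0)) == SmiFromInt(0));
      assert(Neg(SmiFromInt(INT32_MIN)) == SmiFromInt(INT32_MIN));  // wraps onto itself
      assert(Neg(SmiFromInt(7)) == SmiFromInt(-7));                 // ordinary case changes
      return 0;
    }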
| 1480 void MacroAssembler::SmiAdd(Register dst, |
| 1481 Register src1, |
| 1482 Register src2, |
| 1483 Label* on_not_smi_result, |
| 1484 Label::Distance near_jump) { |
| 1485 ASSERT_NOT_NULL(on_not_smi_result); |
| 1486 ASSERT(!dst.is(src2)); |
| 1487 if (dst.is(src1)) { |
| 1488 movq(kScratchRegister, src1); |
| 1489 addq(kScratchRegister, src2); |
| 1490 j(overflow, on_not_smi_result, near_jump); |
| 1491 movq(dst, kScratchRegister); |
| 1492 } else { |
| 1493 movq(dst, src1); |
| 1494 addq(dst, src2); |
| 1495 j(overflow, on_not_smi_result, near_jump); |
| 1496 } |
| 1497 } |
| 1498 |
| 1499 |
| 1500 void MacroAssembler::SmiAdd(Register dst, |
| 1501 Register src1, |
| 1502 const Operand& src2, |
| 1503 Label* on_not_smi_result, |
| 1504 Label::Distance near_jump) { |
| 1505 ASSERT_NOT_NULL(on_not_smi_result); |
| 1506 if (dst.is(src1)) { |
| 1507 movq(kScratchRegister, src1); |
| 1508 addq(kScratchRegister, src2); |
| 1509 j(overflow, on_not_smi_result, near_jump); |
| 1510 movq(dst, kScratchRegister); |
| 1511 } else { |
| 1512 ASSERT(!src2.AddressUsesRegister(dst)); |
| 1513 movq(dst, src1); |
| 1514 addq(dst, src2); |
| 1515 j(overflow, on_not_smi_result, near_jump); |
| 1516 } |
| 1517 } |
| 1518 |
| 1519 |
1246 void MacroAssembler::SmiAdd(Register dst, | 1520 void MacroAssembler::SmiAdd(Register dst, |
1247 Register src1, | 1521 Register src1, |
1248 Register src2) { | 1522 Register src2) { |
1249 // No overflow checking. Use only when it's known that | 1523 // No overflow checking. Use only when it's known that |
1250 // overflowing is impossible. | 1524 // overflowing is impossible. |
1251 if (!dst.is(src1)) { | 1525 if (!dst.is(src1)) { |
1252 if (emit_debug_code()) { | 1526 if (emit_debug_code()) { |
1253 movq(kScratchRegister, src1); | 1527 movq(kScratchRegister, src1); |
1254 addq(kScratchRegister, src2); | 1528 addq(kScratchRegister, src2); |
1255 Check(no_overflow, "Smi addition overflow"); | 1529 Check(no_overflow, "Smi addition overflow"); |
1256 } | 1530 } |
1257 lea(dst, Operand(src1, src2, times_1, 0)); | 1531 lea(dst, Operand(src1, src2, times_1, 0)); |
1258 } else { | 1532 } else { |
1259 addq(dst, src2); | 1533 addq(dst, src2); |
1260 Assert(no_overflow, "Smi addition overflow"); | 1534 Assert(no_overflow, "Smi addition overflow"); |
1261 } | 1535 } |
1262 } | 1536 } |
1263 | 1537 |
1264 | 1538 |
| 1539 void MacroAssembler::SmiSub(Register dst, |
| 1540 Register src1, |
| 1541 Register src2, |
| 1542 Label* on_not_smi_result, |
| 1543 Label::Distance near_jump) { |
| 1544 ASSERT_NOT_NULL(on_not_smi_result); |
| 1545 ASSERT(!dst.is(src2)); |
| 1546 if (dst.is(src1)) { |
| 1547 cmpq(dst, src2); |
| 1548 j(overflow, on_not_smi_result, near_jump); |
| 1549 subq(dst, src2); |
| 1550 } else { |
| 1551 movq(dst, src1); |
| 1552 subq(dst, src2); |
| 1553 j(overflow, on_not_smi_result, near_jump); |
| 1554 } |
| 1555 } |
| 1556 |
| 1557 |
1265 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { | 1558 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
1266 // No overflow checking. Use only when it's known that | 1559 // No overflow checking. Use only when it's known that |
1267 // overflowing is impossible (e.g., subtracting two positive smis). | 1560 // overflowing is impossible (e.g., subtracting two positive smis). |
1268 ASSERT(!dst.is(src2)); | 1561 ASSERT(!dst.is(src2)); |
1269 if (!dst.is(src1)) { | 1562 if (!dst.is(src1)) { |
1270 movq(dst, src1); | 1563 movq(dst, src1); |
1271 } | 1564 } |
1272 subq(dst, src2); | 1565 subq(dst, src2); |
1273 Assert(no_overflow, "Smi subtraction overflow"); | 1566 Assert(no_overflow, "Smi subtraction overflow"); |
1274 } | 1567 } |
1275 | 1568 |
1276 | 1569 |
1277 void MacroAssembler::SmiSub(Register dst, | 1570 void MacroAssembler::SmiSub(Register dst, |
1278 Register src1, | 1571 Register src1, |
| 1572 const Operand& src2, |
| 1573 Label* on_not_smi_result, |
| 1574 Label::Distance near_jump) { |
| 1575 ASSERT_NOT_NULL(on_not_smi_result); |
| 1576 if (dst.is(src1)) { |
| 1577 movq(kScratchRegister, src2); |
| 1578 cmpq(src1, kScratchRegister); |
| 1579 j(overflow, on_not_smi_result, near_jump); |
| 1580 subq(src1, kScratchRegister); |
| 1581 } else { |
| 1582 movq(dst, src1); |
| 1583 subq(dst, src2); |
| 1584 j(overflow, on_not_smi_result, near_jump); |
| 1585 } |
| 1586 } |
| 1587 |
| 1588 |
| 1589 void MacroAssembler::SmiSub(Register dst, |
| 1590 Register src1, |
1279 const Operand& src2) { | 1591 const Operand& src2) { |
1280 // No overflow checking. Use only when it's known that | 1592 // No overflow checking. Use only when it's known that |
1281 // overflowing is impossible (e.g., subtracting two positive smis). | 1593 // overflowing is impossible (e.g., subtracting two positive smis). |
1282 if (!dst.is(src1)) { | 1594 if (!dst.is(src1)) { |
1283 movq(dst, src1); | 1595 movq(dst, src1); |
1284 } | 1596 } |
1285 subq(dst, src2); | 1597 subq(dst, src2); |
1286 Assert(no_overflow, "Smi subtraction overflow"); | 1598 Assert(no_overflow, "Smi subtraction overflow"); |
1287 } | 1599 } |
1288 | 1600 |
1289 | 1601 |
| 1602 void MacroAssembler::SmiMul(Register dst, |
| 1603 Register src1, |
| 1604 Register src2, |
| 1605 Label* on_not_smi_result, |
| 1606 Label::Distance near_jump) { |
| 1607 ASSERT(!dst.is(src2)); |
| 1608 ASSERT(!dst.is(kScratchRegister)); |
| 1609 ASSERT(!src1.is(kScratchRegister)); |
| 1610 ASSERT(!src2.is(kScratchRegister)); |
| 1611 |
| 1612 if (dst.is(src1)) { |
| 1613 Label failure, zero_correct_result; |
| 1614 movq(kScratchRegister, src1); // Create backup for later testing. |
| 1615 SmiToInteger64(dst, src1); |
| 1616 imul(dst, src2); |
| 1617 j(overflow, &failure, Label::kNear); |
| 1618 |
| 1619 // Check for negative zero result. If product is zero, and one |
| 1620 // argument is negative, go to slow case. |
| 1621 Label correct_result; |
| 1622 testq(dst, dst); |
| 1623 j(not_zero, &correct_result, Label::kNear); |
| 1624 |
| 1625 movq(dst, kScratchRegister); |
| 1626 xor_(dst, src2); |
| 1627 // Result was positive zero. |
| 1628 j(positive, &zero_correct_result, Label::kNear); |
| 1629 |
| 1630 bind(&failure); // Reused failure exit, restores src1. |
| 1631 movq(src1, kScratchRegister); |
| 1632 jmp(on_not_smi_result, near_jump); |
| 1633 |
| 1634 bind(&zero_correct_result); |
| 1635 Set(dst, 0); |
| 1636 |
| 1637 bind(&correct_result); |
| 1638 } else { |
| 1639 SmiToInteger64(dst, src1); |
| 1640 imul(dst, src2); |
| 1641 j(overflow, on_not_smi_result, near_jump); |
| 1642 // Check for negative zero result. If product is zero, and one |
| 1643 // argument is negative, go to slow case. |
| 1644 Label correct_result; |
| 1645 testq(dst, dst); |
| 1646 j(not_zero, &correct_result, Label::kNear); |
| 1647 // One of src1 and src2 is zero; check whether the other one is |
| 1648 // negative. |
| 1649 movq(kScratchRegister, src1); |
| 1650 xor_(kScratchRegister, src2); |
| 1651 j(negative, on_not_smi_result, near_jump); |
| 1652 bind(&correct_result); |
| 1653 } |
| 1654 } |
| 1655 |
| 1656 |
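The zero-product handling in SmiMul follows JavaScript semantics: when the integer product is zero but the factors have different signs, the exact result is -0, which no smi can represent, so the code XORs the operands and branches on the sign instead of inspecting each factor. A hedged C++ model of that decision (plain integers, not the generated register code):

    #include <cstdint>
    #include <cassert>

    // A zero product needs the slow path exactly when the factors' signs differ,
    // because 0 * negative (or negative * 0) must yield -0 in JavaScript.
    bool ZeroProductNeedsSlowCase(int32_t a, int32_t b) {
      return (static_cast<int64_t>(a) * b == 0) && ((a ^ b) < 0);
    }

    int main() {
      assert(ZeroProductNeedsSlowCase(0, -5));    // 0 * -5 == -0: bail out
      assert(!ZeroProductNeedsSlowCase(0, 5));    // +0 is a valid smi
      assert(!ZeroProductNeedsSlowCase(3, -5));   // non-zero products are caught earlier
      return 0;
    }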
| 1657 void MacroAssembler::SmiDiv(Register dst, |
| 1658 Register src1, |
| 1659 Register src2, |
| 1660 Label* on_not_smi_result, |
| 1661 Label::Distance near_jump) { |
| 1662 ASSERT(!src1.is(kScratchRegister)); |
| 1663 ASSERT(!src2.is(kScratchRegister)); |
| 1664 ASSERT(!dst.is(kScratchRegister)); |
| 1665 ASSERT(!src2.is(rax)); |
| 1666 ASSERT(!src2.is(rdx)); |
| 1667 ASSERT(!src1.is(rdx)); |
| 1668 |
| 1669 // Check for 0 divisor (result is +/-Infinity). |
| 1670 testq(src2, src2); |
| 1671 j(zero, on_not_smi_result, near_jump); |
| 1672 |
| 1673 if (src1.is(rax)) { |
| 1674 movq(kScratchRegister, src1); |
| 1675 } |
| 1676 SmiToInteger32(rax, src1); |
| 1677 // We need to rule out dividing Smi::kMinValue by -1, since that would |
| 1678 // overflow in idiv and raise an exception. |
| 1679 // We combine this with negative zero test (negative zero only happens |
| 1680 // when dividing zero by a negative number). |
| 1681 |
| 1682 // We overshoot a little and go to slow case if we divide min-value |
| 1683 // by any negative value, not just -1. |
| 1684 Label safe_div; |
| 1685 testl(rax, Immediate(0x7fffffff)); |
| 1686 j(not_zero, &safe_div, Label::kNear); |
| 1687 testq(src2, src2); |
| 1688 if (src1.is(rax)) { |
| 1689 j(positive, &safe_div, Label::kNear); |
| 1690 movq(src1, kScratchRegister); |
| 1691 jmp(on_not_smi_result, near_jump); |
| 1692 } else { |
| 1693 j(negative, on_not_smi_result, near_jump); |
| 1694 } |
| 1695 bind(&safe_div); |
| 1696 |
| 1697 SmiToInteger32(src2, src2); |
| 1698 // Sign extend src1 into edx:eax. |
| 1699 cdq(); |
| 1700 idivl(src2); |
| 1701 Integer32ToSmi(src2, src2); |
| 1702 // Check that the remainder is zero. |
| 1703 testl(rdx, rdx); |
| 1704 if (src1.is(rax)) { |
| 1705 Label smi_result; |
| 1706 j(zero, &smi_result, Label::kNear); |
| 1707 movq(src1, kScratchRegister); |
| 1708 jmp(on_not_smi_result, near_jump); |
| 1709 bind(&smi_result); |
| 1710 } else { |
| 1711 j(not_zero, on_not_smi_result, near_jump); |
| 1712 } |
| 1713 if (!dst.is(src1) && src1.is(rax)) { |
| 1714 movq(src1, kScratchRegister); |
| 1715 } |
| 1716 Integer32ToSmi(dst, rax); |
| 1717 } |
| 1718 |
| 1719 |
| 1720 void MacroAssembler::SmiMod(Register dst, |
| 1721 Register src1, |
| 1722 Register src2, |
| 1723 Label* on_not_smi_result, |
| 1724 Label::Distance near_jump) { |
| 1725 ASSERT(!dst.is(kScratchRegister)); |
| 1726 ASSERT(!src1.is(kScratchRegister)); |
| 1727 ASSERT(!src2.is(kScratchRegister)); |
| 1728 ASSERT(!src2.is(rax)); |
| 1729 ASSERT(!src2.is(rdx)); |
| 1730 ASSERT(!src1.is(rdx)); |
| 1731 ASSERT(!src1.is(src2)); |
| 1732 |
| 1733 testq(src2, src2); |
| 1734 j(zero, on_not_smi_result, near_jump); |
| 1735 |
| 1736 if (src1.is(rax)) { |
| 1737 movq(kScratchRegister, src1); |
| 1738 } |
| 1739 SmiToInteger32(rax, src1); |
| 1740 SmiToInteger32(src2, src2); |
| 1741 |
| 1742 // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow). |
| 1743 Label safe_div; |
| 1744 cmpl(rax, Immediate(Smi::kMinValue)); |
| 1745 j(not_equal, &safe_div, Label::kNear); |
| 1746 cmpl(src2, Immediate(-1)); |
| 1747 j(not_equal, &safe_div, Label::kNear); |
| 1748 // Retag inputs and go slow case. |
| 1749 Integer32ToSmi(src2, src2); |
| 1750 if (src1.is(rax)) { |
| 1751 movq(src1, kScratchRegister); |
| 1752 } |
| 1753 jmp(on_not_smi_result, near_jump); |
| 1754 bind(&safe_div); |
| 1755 |
| 1756 // Sign extend eax into edx:eax. |
| 1757 cdq(); |
| 1758 idivl(src2); |
| 1759 // Restore smi tags on inputs. |
| 1760 Integer32ToSmi(src2, src2); |
| 1761 if (src1.is(rax)) { |
| 1762 movq(src1, kScratchRegister); |
| 1763 } |
| 1764 // Check for a negative zero result. If the result is zero, and the |
| 1765 // dividend is negative, go slow to return a floating point negative zero. |
| 1766 Label smi_result; |
| 1767 testl(rdx, rdx); |
| 1768 j(not_zero, &smi_result, Label::kNear); |
| 1769 testq(src1, src1); |
| 1770 j(negative, on_not_smi_result, near_jump); |
| 1771 bind(&smi_result); |
| 1772 Integer32ToSmi(dst, rdx); |
| 1773 } |
| 1774 |
| 1775 |
1290 void MacroAssembler::SmiNot(Register dst, Register src) { | 1776 void MacroAssembler::SmiNot(Register dst, Register src) { |
1291 ASSERT(!dst.is(kScratchRegister)); | 1777 ASSERT(!dst.is(kScratchRegister)); |
1292 ASSERT(!src.is(kScratchRegister)); | 1778 ASSERT(!src.is(kScratchRegister)); |
1293 // Set tag and padding bits before negating, so that they are zero afterwards. | 1779 // Set tag and padding bits before negating, so that they are zero afterwards. |
1294 movl(kScratchRegister, Immediate(~0)); | 1780 movl(kScratchRegister, Immediate(~0)); |
1295 if (dst.is(src)) { | 1781 if (dst.is(src)) { |
1296 xor_(dst, kScratchRegister); | 1782 xor_(dst, kScratchRegister); |
1297 } else { | 1783 } else { |
1298 lea(dst, Operand(src, kScratchRegister, times_1, 0)); | 1784 lea(dst, Operand(src, kScratchRegister, times_1, 0)); |
1299 } | 1785 } |
(...skipping 86 matching lines...)
1386 int shift_value) { | 1872 int shift_value) { |
1387 if (!dst.is(src)) { | 1873 if (!dst.is(src)) { |
1388 movq(dst, src); | 1874 movq(dst, src); |
1389 } | 1875 } |
1390 if (shift_value > 0) { | 1876 if (shift_value > 0) { |
1391 shl(dst, Immediate(shift_value)); | 1877 shl(dst, Immediate(shift_value)); |
1392 } | 1878 } |
1393 } | 1879 } |
1394 | 1880 |
1395 | 1881 |
| 1882 void MacroAssembler::SmiShiftLogicalRightConstant( |
| 1883 Register dst, Register src, int shift_value, |
| 1884 Label* on_not_smi_result, Label::Distance near_jump) { |
| 1885 // Logical right shift interprets its result as an *unsigned* number. |
| 1886 if (dst.is(src)) { |
| 1887 UNIMPLEMENTED(); // Not used. |
| 1888 } else { |
| 1889 movq(dst, src); |
| 1890 if (shift_value == 0) { |
| 1891 testq(dst, dst); |
| 1892 j(negative, on_not_smi_result, near_jump); |
| 1893 } |
| 1894 shr(dst, Immediate(shift_value + kSmiShift)); |
| 1895 shl(dst, Immediate(kSmiShift)); |
| 1896 } |
| 1897 } |
| 1898 |
| 1899 |
1396 void MacroAssembler::SmiShiftLeft(Register dst, | 1900 void MacroAssembler::SmiShiftLeft(Register dst, |
1397 Register src1, | 1901 Register src1, |
1398 Register src2) { | 1902 Register src2) { |
1399 ASSERT(!dst.is(rcx)); | 1903 ASSERT(!dst.is(rcx)); |
1400 // Untag shift amount. | 1904 // Untag shift amount. |
1401 if (!dst.is(src1)) { | 1905 if (!dst.is(src1)) { |
1402 movq(dst, src1); | 1906 movq(dst, src1); |
1403 } | 1907 } |
1404 SmiToInteger32(rcx, src2); | 1908 SmiToInteger32(rcx, src2); |
1405 // Shift amount specified by lower 5 bits, not six as the shl opcode. | 1909 // Shift amount specified by lower 5 bits, not six as the shl opcode. |
1406 and_(rcx, Immediate(0x1f)); | 1910 and_(rcx, Immediate(0x1f)); |
1407 shl_cl(dst); | 1911 shl_cl(dst); |
1408 } | 1912 } |
1409 | 1913 |
1410 | 1914 |
| 1915 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
| 1916 Register src1, |
| 1917 Register src2, |
| 1918 Label* on_not_smi_result, |
| 1919 Label::Distance near_jump) { |
| 1920 ASSERT(!dst.is(kScratchRegister)); |
| 1921 ASSERT(!src1.is(kScratchRegister)); |
| 1922 ASSERT(!src2.is(kScratchRegister)); |
| 1923 ASSERT(!dst.is(rcx)); |
| 1924 // dst and src1 can be the same, because the one case that bails out |
| 1925 // is a shift by 0, which leaves dst, and therefore src1, unchanged. |
| 1926 if (src1.is(rcx) || src2.is(rcx)) { |
| 1927 movq(kScratchRegister, rcx); |
| 1928 } |
| 1929 if (!dst.is(src1)) { |
| 1930 movq(dst, src1); |
| 1931 } |
| 1932 SmiToInteger32(rcx, src2); |
| 1933 orl(rcx, Immediate(kSmiShift)); |
| 1934 shr_cl(dst); // Shift amount is (rcx & 0x1f) + 32. |
| 1935 shl(dst, Immediate(kSmiShift)); |
| 1936 testq(dst, dst); |
| 1937 if (src1.is(rcx) || src2.is(rcx)) { |
| 1938 Label positive_result; |
| 1939 j(positive, &positive_result, Label::kNear); |
| 1940 if (src1.is(rcx)) { |
| 1941 movq(src1, kScratchRegister); |
| 1942 } else { |
| 1943 movq(src2, kScratchRegister); |
| 1944 } |
| 1945 jmp(on_not_smi_result, near_jump); |
| 1946 bind(&positive_result); |
| 1947 } else { |
| 1948 // src2 was zero and src1 negative. |
| 1949 j(negative, on_not_smi_result, near_jump); |
| 1950 } |
| 1951 } |
| 1952 |
| 1953 |
1411 void MacroAssembler::SmiShiftArithmeticRight(Register dst, | 1954 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
1412 Register src1, | 1955 Register src1, |
1413 Register src2) { | 1956 Register src2) { |
1414 ASSERT(!dst.is(kScratchRegister)); | 1957 ASSERT(!dst.is(kScratchRegister)); |
1415 ASSERT(!src1.is(kScratchRegister)); | 1958 ASSERT(!src1.is(kScratchRegister)); |
1416 ASSERT(!src2.is(kScratchRegister)); | 1959 ASSERT(!src2.is(kScratchRegister)); |
1417 ASSERT(!dst.is(rcx)); | 1960 ASSERT(!dst.is(rcx)); |
1418 if (src1.is(rcx)) { | 1961 if (src1.is(rcx)) { |
1419 movq(kScratchRegister, src1); | 1962 movq(kScratchRegister, src1); |
1420 } else if (src2.is(rcx)) { | 1963 } else if (src2.is(rcx)) { |
1421 movq(kScratchRegister, src2); | 1964 movq(kScratchRegister, src2); |
1422 } | 1965 } |
1423 if (!dst.is(src1)) { | 1966 if (!dst.is(src1)) { |
1424 movq(dst, src1); | 1967 movq(dst, src1); |
1425 } | 1968 } |
1426 SmiToInteger32(rcx, src2); | 1969 SmiToInteger32(rcx, src2); |
1427 orl(rcx, Immediate(kSmiShift)); | 1970 orl(rcx, Immediate(kSmiShift)); |
1428 sar_cl(dst); // Shift 32 + original rcx & 0x1f. | 1971 sar_cl(dst); // Shift 32 + original rcx & 0x1f. |
1429 shl(dst, Immediate(kSmiShift)); | 1972 shl(dst, Immediate(kSmiShift)); |
1430 if (src1.is(rcx)) { | 1973 if (src1.is(rcx)) { |
1431 movq(src1, kScratchRegister); | 1974 movq(src1, kScratchRegister); |
1432 } else if (src2.is(rcx)) { | 1975 } else if (src2.is(rcx)) { |
1433 movq(src2, kScratchRegister); | 1976 movq(src2, kScratchRegister); |
1434 } | 1977 } |
1435 } | 1978 } |
1436 | 1979 |
1437 | 1980 |
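Both variable-amount right shifts above fold the untagging into the shift count: the untagged amount (0..31) is OR'ed with kSmiShift, which for values below 32 is the same as adding 32, so a single shr_cl/sar_cl discards the tag word and performs the requested shift, and the following shl re-tags the 32-bit result. A short model of the logical variant, with the same illustrative smi encoding as above:

    #include <cstdint>
    #include <cassert>

    const int kSmiShift = 32;
    uint64_t SmiFromInt(uint32_t v) { return static_cast<uint64_t>(v) << kSmiShift; }

    // Combined untag-and-shift: one hardware shift by (amount | 32) replaces
    // an explicit untag followed by a shift by `amount`.
    uint64_t SmiShiftLogicalRight(uint64_t smi, uint32_t amount) {
      uint32_t count = amount | kSmiShift;   // == amount + 32 for amount < 32
      return (smi >> count) << kSmiShift;    // shift, then re-tag the result
    }

    int main() {
      assert(SmiShiftLogicalRight(SmiFromInt(40), 3) == SmiFromInt(5));
      assert(SmiShiftLogicalRight(SmiFromInt(1), 1) == SmiFromInt(0));
      return 0;
    }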
| 1981 void MacroAssembler::SelectNonSmi(Register dst, |
| 1982 Register src1, |
| 1983 Register src2, |
| 1984 Label* on_not_smis, |
| 1985 Label::Distance near_jump) { |
| 1986 ASSERT(!dst.is(kScratchRegister)); |
| 1987 ASSERT(!src1.is(kScratchRegister)); |
| 1988 ASSERT(!src2.is(kScratchRegister)); |
| 1989 ASSERT(!dst.is(src1)); |
| 1990 ASSERT(!dst.is(src2)); |
| 1991 // Both operands must not be smis. |
| 1992 #ifdef DEBUG |
| 1993 if (allow_stub_calls()) { // Check contains a stub call. |
| 1994 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); |
| 1995 Check(not_both_smis, "Both registers were smis in SelectNonSmi."); |
| 1996 } |
| 1997 #endif |
| 1998 ASSERT_EQ(0, kSmiTag); |
| 1999 ASSERT_EQ(0, Smi::FromInt(0)); |
| 2000 movl(kScratchRegister, Immediate(kSmiTagMask)); |
| 2001 and_(kScratchRegister, src1); |
| 2002 testl(kScratchRegister, src2); |
| 2003 // If non-zero then both are smis. |
| 2004 j(not_zero, on_not_smis, near_jump); |
| 2005 |
| 2006 // Exactly one operand is a smi. |
| 2007 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); |
| 2008 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. |
| 2009 subq(kScratchRegister, Immediate(1)); |
| 2010 // If src1 is a smi, the scratch register is all 1s, else it is all 0s. |
| 2011 movq(dst, src1); |
| 2012 xor_(dst, src2); |
| 2013 and_(dst, kScratchRegister); |
| 2014 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. |
| 2015 xor_(dst, src1); |
| 2016 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. |
| 2017 } |
| 2018 |
| 2019 |
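SelectNonSmi picks the non-smi operand without branching: because smis carry tag 0 in the low bit, (src1 & 1) - 1 is all ones when src1 is a smi and all zeros otherwise, so masking src1 ^ src2 with it and XOR-ing src1 back in yields src2 in the first case and src1 in the second. A standalone model of the trick, with illustrative tagged values:

    #include <cstdint>
    #include <cassert>

    // Branchless select mirroring the register sequence in SelectNonSmi:
    // smis carry tag bit 0, heap object pointers carry tag bit 1.
    uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;         // all 1s if src1 is a smi, else all 0s
      return ((src1 ^ src2) & mask) ^ src1;   // src2 if src1 is a smi, else src1
    }

    int main() {
      uint64_t smi = 0x0000000700000000;      // tagged smi 7 (low bit clear)
      uint64_t heap_object = 0x000012345601;  // illustrative pointer (low bit set)
      assert(SelectNonSmi(smi, heap_object) == heap_object);
      assert(SelectNonSmi(heap_object, smi) == heap_object);
      return 0;
    }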
1438 SmiIndex MacroAssembler::SmiToIndex(Register dst, | 2020 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
1439 Register src, | 2021 Register src, |
1440 int shift) { | 2022 int shift) { |
1441 ASSERT(is_uint6(shift)); | 2023 ASSERT(is_uint6(shift)); |
1442 // There is a possible optimization if shift is in the range 60-63, but that | 2024 // There is a possible optimization if shift is in the range 60-63, but that |
1443 // will (and must) never happen. | 2025 // will (and must) never happen. |
1444 if (!dst.is(src)) { | 2026 if (!dst.is(src)) { |
1445 movq(dst, src); | 2027 movq(dst, src); |
1446 } | 2028 } |
1447 if (shift < kSmiShift) { | 2029 if (shift < kSmiShift) { |
(...skipping 21 matching lines...)
1469 return SmiIndex(dst, times_1); | 2051 return SmiIndex(dst, times_1); |
1470 } | 2052 } |
1471 | 2053 |
1472 | 2054 |
1473 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { | 2055 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
1474 ASSERT_EQ(0, kSmiShift % kBitsPerByte); | 2056 ASSERT_EQ(0, kSmiShift % kBitsPerByte); |
1475 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 2057 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
1476 } | 2058 } |
1477 | 2059 |
1478 | 2060 |
| 2061 void MacroAssembler::JumpIfNotString(Register object, |
| 2062 Register object_map, |
| 2063 Label* not_string, |
| 2064 Label::Distance near_jump) { |
| 2065 Condition is_smi = CheckSmi(object); |
| 2066 j(is_smi, not_string, near_jump); |
| 2067 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map); |
| 2068 j(above_equal, not_string, near_jump); |
| 2069 } |
| 2070 |
| 2071 |
| 2072 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings( |
| 2073 Register first_object, |
| 2074 Register second_object, |
| 2075 Register scratch1, |
| 2076 Register scratch2, |
| 2077 Label* on_fail, |
| 2078 Label::Distance near_jump) { |
| 2079 // Check that both objects are not smis. |
| 2080 Condition either_smi = CheckEitherSmi(first_object, second_object); |
| 2081 j(either_smi, on_fail, near_jump); |
| 2082 |
| 2083 // Load instance type for both strings. |
| 2084 movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset)); |
| 2085 movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset)); |
| 2086 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); |
| 2087 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); |
| 2088 |
| 2089 // Check that both are flat ascii strings. |
| 2090 ASSERT(kNotStringTag != 0); |
| 2091 const int kFlatAsciiStringMask = |
| 2092 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
| 2093 const int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 2094 |
| 2095 andl(scratch1, Immediate(kFlatAsciiStringMask)); |
| 2096 andl(scratch2, Immediate(kFlatAsciiStringMask)); |
| 2097 // Interleave the bits to check both scratch1 and scratch2 in one test. |
| 2098 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); |
| 2099 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); |
| 2100 cmpl(scratch1, |
| 2101 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); |
| 2102 j(not_equal, on_fail, near_jump); |
| 2103 } |
| 2104 |
| 2105 |
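The two-string check above (and JumpIfBothInstanceTypesAreNotSequentialAscii below) compresses both masked instance-type bytes into one comparison: since kFlatAsciiStringMask shares no bits with itself shifted left by three, the lea computes scratch1 + scratch2 * 8 with the two masked values in disjoint bit ranges, and a single cmpl against tag + (tag << 3) tests both strings. A small model with stand-in mask and tag values (not necessarily V8's exact string-type constants):

    #include <cstdint>
    #include <cassert>

    // Stand-in values; the only requirement exercised here is the ASSERT'ed
    // precondition that the mask and its <<3 copy do not overlap.
    const uint32_t kMask = 0x87;  // not-string | representation | encoding bits
    const uint32_t kTag  = 0x04;  // the flat ASCII pattern under kMask

    bool BothFlatAscii(uint32_t type1, uint32_t type2) {
      uint32_t combined = (type1 & kMask) + (type2 & kMask) * 8;  // the lea above
      return combined == kTag + (kTag << 3);                      // one compare for both
    }

    int main() {
      assert((kMask & (kMask << 3)) == 0);  // disjointness precondition
      assert(BothFlatAscii(0x04, 0x04));    // both sequential ASCII
      assert(!BothFlatAscii(0x04, 0x05));   // second string fails the check
      return 0;
    }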
| 2106 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( |
| 2107 Register instance_type, |
| 2108 Register scratch, |
| 2109 Label* failure, |
| 2110 Label::Distance near_jump) { |
| 2111 if (!scratch.is(instance_type)) { |
| 2112 movl(scratch, instance_type); |
| 2113 } |
| 2114 |
| 2115 const int kFlatAsciiStringMask = |
| 2116 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
| 2117 |
| 2118 andl(scratch, Immediate(kFlatAsciiStringMask)); |
| 2119 cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); |
| 2120 j(not_equal, failure, near_jump); |
| 2121 } |
| 2122 |
| 2123 |
| 2124 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 2125 Register first_object_instance_type, |
| 2126 Register second_object_instance_type, |
| 2127 Register scratch1, |
| 2128 Register scratch2, |
| 2129 Label* on_fail, |
| 2130 Label::Distance near_jump) { |
| 2131 // Load instance type for both strings. |
| 2132 movq(scratch1, first_object_instance_type); |
| 2133 movq(scratch2, second_object_instance_type); |
| 2134 |
| 2135 // Check that both are flat ascii strings. |
| 2136 ASSERT(kNotStringTag != 0); |
| 2137 const int kFlatAsciiStringMask = |
| 2138 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
| 2139 const int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
| 2140 |
| 2141 andl(scratch1, Immediate(kFlatAsciiStringMask)); |
| 2142 andl(scratch2, Immediate(kFlatAsciiStringMask)); |
| 2143 // Interleave the bits to check both scratch1 and scratch2 in one test. |
| 2144 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); |
| 2145 lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); |
| 2146 cmpl(scratch1, |
| 2147 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); |
| 2148 j(not_equal, on_fail, near_jump); |
| 2149 } |
| 2150 |
| 2151 |
1479 | 2152 |
1480 void MacroAssembler::Move(Register dst, Register src) { | 2153 void MacroAssembler::Move(Register dst, Register src) { |
1481 if (!dst.is(src)) { | 2154 if (!dst.is(src)) { |
1482 movq(dst, src); | 2155 movq(dst, src); |
1483 } | 2156 } |
1484 } | 2157 } |
1485 | 2158 |
1486 | 2159 |
1487 void MacroAssembler::Move(Register dst, Handle<Object> source) { | 2160 void MacroAssembler::Move(Register dst, Handle<Object> source) { |
1488 ASSERT(!source->IsFailure()); | 2161 ASSERT(!source->IsFailure()); |
(...skipping 559 matching lines...)
2048 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 2721 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
2049 } | 2722 } |
2050 #endif // ENABLE_DEBUGGER_SUPPORT | 2723 #endif // ENABLE_DEBUGGER_SUPPORT |
2051 | 2724 |
2052 | 2725 |
2053 void MacroAssembler::InvokeCode(Register code, | 2726 void MacroAssembler::InvokeCode(Register code, |
2054 const ParameterCount& expected, | 2727 const ParameterCount& expected, |
2055 const ParameterCount& actual, | 2728 const ParameterCount& actual, |
2056 InvokeFlag flag, | 2729 InvokeFlag flag, |
2057 const CallWrapper& call_wrapper) { | 2730 const CallWrapper& call_wrapper) { |
2058 NearLabel done; | 2731 Label done; |
2059 InvokePrologue(expected, | 2732 InvokePrologue(expected, |
2060 actual, | 2733 actual, |
2061 Handle<Code>::null(), | 2734 Handle<Code>::null(), |
2062 code, | 2735 code, |
2063 &done, | 2736 &done, |
2064 flag, | 2737 flag, |
2065 call_wrapper); | 2738 call_wrapper, |
| 2739 Label::kNear); |
2066 if (flag == CALL_FUNCTION) { | 2740 if (flag == CALL_FUNCTION) { |
2067 call_wrapper.BeforeCall(CallSize(code)); | 2741 call_wrapper.BeforeCall(CallSize(code)); |
2068 call(code); | 2742 call(code); |
2069 call_wrapper.AfterCall(); | 2743 call_wrapper.AfterCall(); |
2070 } else { | 2744 } else { |
2071 ASSERT(flag == JUMP_FUNCTION); | 2745 ASSERT(flag == JUMP_FUNCTION); |
2072 jmp(code); | 2746 jmp(code); |
2073 } | 2747 } |
2074 bind(&done); | 2748 bind(&done); |
2075 } | 2749 } |
2076 | 2750 |
2077 | 2751 |
2078 void MacroAssembler::InvokeCode(Handle<Code> code, | 2752 void MacroAssembler::InvokeCode(Handle<Code> code, |
2079 const ParameterCount& expected, | 2753 const ParameterCount& expected, |
2080 const ParameterCount& actual, | 2754 const ParameterCount& actual, |
2081 RelocInfo::Mode rmode, | 2755 RelocInfo::Mode rmode, |
2082 InvokeFlag flag, | 2756 InvokeFlag flag, |
2083 const CallWrapper& call_wrapper) { | 2757 const CallWrapper& call_wrapper) { |
2084 NearLabel done; | 2758 Label done; |
2085 Register dummy = rax; | 2759 Register dummy = rax; |
2086 InvokePrologue(expected, | 2760 InvokePrologue(expected, |
2087 actual, | 2761 actual, |
2088 code, | 2762 code, |
2089 dummy, | 2763 dummy, |
2090 &done, | 2764 &done, |
2091 flag, | 2765 flag, |
2092 call_wrapper); | 2766 call_wrapper, |
| 2767 Label::kNear); |
2093 if (flag == CALL_FUNCTION) { | 2768 if (flag == CALL_FUNCTION) { |
2094 call_wrapper.BeforeCall(CallSize(code)); | 2769 call_wrapper.BeforeCall(CallSize(code)); |
2095 Call(code, rmode); | 2770 Call(code, rmode); |
2096 call_wrapper.AfterCall(); | 2771 call_wrapper.AfterCall(); |
2097 } else { | 2772 } else { |
2098 ASSERT(flag == JUMP_FUNCTION); | 2773 ASSERT(flag == JUMP_FUNCTION); |
2099 Jump(code, rmode); | 2774 Jump(code, rmode); |
2100 } | 2775 } |
2101 bind(&done); | 2776 bind(&done); |
2102 } | 2777 } |
(...skipping 39 matching lines...)
2142 InvokeCode(code, | 2817 InvokeCode(code, |
2143 expected, | 2818 expected, |
2144 actual, | 2819 actual, |
2145 RelocInfo::CODE_TARGET, | 2820 RelocInfo::CODE_TARGET, |
2146 flag, | 2821 flag, |
2147 call_wrapper); | 2822 call_wrapper); |
2148 } | 2823 } |
2149 } | 2824 } |
2150 | 2825 |
2151 | 2826 |
| 2827 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 2828 const ParameterCount& actual, |
| 2829 Handle<Code> code_constant, |
| 2830 Register code_register, |
| 2831 Label* done, |
| 2832 InvokeFlag flag, |
| 2833 const CallWrapper& call_wrapper, |
| 2834 Label::Distance near_jump) { |
| 2835 bool definitely_matches = false; |
| 2836 Label invoke; |
| 2837 if (expected.is_immediate()) { |
| 2838 ASSERT(actual.is_immediate()); |
| 2839 if (expected.immediate() == actual.immediate()) { |
| 2840 definitely_matches = true; |
| 2841 } else { |
| 2842 Set(rax, actual.immediate()); |
| 2843 if (expected.immediate() == |
| 2844 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { |
| 2845 // Don't worry about adapting arguments for built-ins that |
| 2846 // don't want that done. Skip adaption code by making it look |
| 2847 // like we have a match between expected and actual number of |
| 2848 // arguments. |
| 2849 definitely_matches = true; |
| 2850 } else { |
| 2851 Set(rbx, expected.immediate()); |
| 2852 } |
| 2853 } |
| 2854 } else { |
| 2855 if (actual.is_immediate()) { |
| 2856 // Expected is in register, actual is immediate. This is the |
| 2857 // case when we invoke function values without going through the |
| 2858 // IC mechanism. |
| 2859 cmpq(expected.reg(), Immediate(actual.immediate())); |
| 2860 j(equal, &invoke, Label::kNear); |
| 2861 ASSERT(expected.reg().is(rbx)); |
| 2862 Set(rax, actual.immediate()); |
| 2863 } else if (!expected.reg().is(actual.reg())) { |
| 2864 // Both expected and actual are in (different) registers. This |
| 2865 // is the case when we invoke functions using call and apply. |
| 2866 cmpq(expected.reg(), actual.reg()); |
| 2867 j(equal, &invoke, Label::kNear); |
| 2868 ASSERT(actual.reg().is(rax)); |
| 2869 ASSERT(expected.reg().is(rbx)); |
| 2870 } |
| 2871 } |
| 2872 |
| 2873 if (!definitely_matches) { |
| 2874 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 2875 if (!code_constant.is_null()) { |
| 2876 movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); |
| 2877 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
| 2878 } else if (!code_register.is(rdx)) { |
| 2879 movq(rdx, code_register); |
| 2880 } |
| 2881 |
| 2882 if (flag == CALL_FUNCTION) { |
| 2883 call_wrapper.BeforeCall(CallSize(adaptor)); |
| 2884 Call(adaptor, RelocInfo::CODE_TARGET); |
| 2885 call_wrapper.AfterCall(); |
| 2886 jmp(done, near_jump); |
| 2887 } else { |
| 2888 Jump(adaptor, RelocInfo::CODE_TARGET); |
| 2889 } |
| 2890 bind(&invoke); |
| 2891 } |
| 2892 } |
| 2893 |
| 2894 |
2152 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 2895 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
2153 push(rbp); | 2896 push(rbp); |
2154 movq(rbp, rsp); | 2897 movq(rbp, rsp); |
2155 push(rsi); // Context. | 2898 push(rsi); // Context. |
2156 Push(Smi::FromInt(type)); | 2899 Push(Smi::FromInt(type)); |
2157 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); | 2900 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); |
2158 push(kScratchRegister); | 2901 push(kScratchRegister); |
2159 if (emit_debug_code()) { | 2902 if (emit_debug_code()) { |
2160 movq(kScratchRegister, | 2903 movq(kScratchRegister, |
2161 isolate()->factory()->undefined_value(), | 2904 isolate()->factory()->undefined_value(), |
(...skipping 748 matching lines...)
2910 CPU::FlushICache(address_, size_); | 3653 CPU::FlushICache(address_, size_); |
2911 | 3654 |
2912 // Check that the code was patched as expected. | 3655 // Check that the code was patched as expected. |
2913 ASSERT(masm_.pc_ == address_ + size_); | 3656 ASSERT(masm_.pc_ == address_ + size_); |
2914 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3657 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2915 } | 3658 } |
2916 | 3659 |
2917 } } // namespace v8::internal | 3660 } } // namespace v8::internal |
2918 | 3661 |
2919 #endif // V8_TARGET_ARCH_X64 | 3662 #endif // V8_TARGET_ARCH_X64 |