Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 267049: Removed 31-bit smis. (Closed)
Patch Set: Created 11 years, 2 months ago
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 410 matching lines...)
     movl(dst, Immediate(x));
   } else {
     movq(kScratchRegister, x, RelocInfo::NONE);
     movq(dst, kScratchRegister);
   }
 }

 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.

-#ifdef V8_LONG_SMI
-
 static int kSmiShift = kSmiTagSize + kSmiShiftSize;

 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
     movl(dst, src);
   }
   shl(dst, Immediate(kSmiShift));
 }

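Note: in the V8_LONG_SMI scheme kept by this patch, the 32-bit payload lives in the upper half of a 64-bit word (kSmiShift = kSmiTagSize + kSmiShiftSize = 1 + 31 = 32 in this configuration), so every int32 value can be tagged without an overflow check. A minimal C++ model of that encoding, with illustrative names that are not part of V8:

    #include <cassert>
    #include <cstdint>

    const int kModelSmiShift = 32;  // models kSmiTagSize + kSmiShiftSize

    int64_t TagLongSmi(int32_t value) {
      // Models shl(dst, Immediate(kSmiShift)): payload moves to bits 32..63.
      return static_cast<int64_t>(static_cast<uint64_t>(value) << kModelSmiShift);
    }

    int32_t UntagLongSmi(int64_t smi) {
      return static_cast<int32_t>(smi >> kModelSmiShift);  // arithmetic shift back
    }

    int main() {
      assert(UntagLongSmi(TagLongSmi(-123)) == -123);
      assert(UntagLongSmi(TagLongSmi(0x7fffffff)) == 0x7fffffff);  // never overflows
    }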
(...skipping 760 matching lines...)
   }
   neg(dst);
   if (shift < kSmiShift) {
     sar(dst, Immediate(kSmiShift - shift));
   } else {
     shl(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }

-#else // ! V8_LONG_SMI
-// 31 bit smi operations
-
-// Extracts the low 32 bits of a Smi pointer, where the tagged smi value
-// is stored.
-static int32_t SmiValue(Smi* smi) {
-  return static_cast<int32_t>(reinterpret_cast<intptr_t>(smi));
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    cmpl(src, Immediate(0xC0000000u));
-    Check(positive, "Smi conversion overflow");
-  }
-#endif
-  if (dst.is(src)) {
-    addl(dst, src);
-  } else {
-    lea(dst, Operand(src, src, times_1, 0));
-  }
-}
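Note: the 31-bit scheme deleted here keeps the payload in the upper 31 bits of a 32-bit word, i.e. tagged = value * 2, so only values in [-2^30, 2^30 - 1] fit; hence the debug-mode range check above, and the use of addl rather than lea in the overflow-checking variant below (lea does not set the overflow flag). A rough C++ model, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // Models the 31-bit encoding: tagged = value * 2, tag bit 0 in the low bit.
    bool TagShortSmi(int32_t value, int32_t* tagged) {
      if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // overflow
      *tagged = value * 2;  // what addl(dst, src) / lea(dst, [src + src]) compute
      return true;
    }

    int32_t UntagShortSmi(int32_t tagged) {
      return tagged >> 1;  // models sarl(dst, Immediate(kSmiTagSize))
    }

    int main() {
      int32_t t;
      assert(TagShortSmi(5, &t) && t == 10 && UntagShortSmi(t) == 5);
      assert(!TagShortSmi(1 << 30, &t));  // 2^30 is one past the largest smi
    }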
-
-
-void MacroAssembler::Integer32ToSmi(Register dst,
-                                    Register src,
-                                    Label* on_overflow) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  addl(dst, src);
-  j(overflow, on_overflow);
-}
-
-
-void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
-                                                Register src,
-                                                int constant) {
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    movl(kScratchRegister, src);
-    addl(kScratchRegister, Immediate(constant));
-    Check(no_overflow, "Add-and-smi-convert overflow");
-    Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
-    Check(valid, "Add-and-smi-convert overflow");
-  }
-#endif
-  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  sarl(dst, Immediate(kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-  movsxlq(dst, src);
-  sar(dst, Immediate(kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
-  testl(src, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Register src) {
-  cmpl(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
-  ASSERT(!dst.is(kScratchRegister));
-  if (src->value() == 0) {
-    testl(dst, dst);
-  } else {
-    cmpl(dst, Immediate(SmiValue(src)));
-  }
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
-  cmpl(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
-  if (src->value() == 0) {
-    movl(kScratchRegister, dst);
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    cmpl(dst, Immediate(SmiValue(src)));
-  }
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
-                                                           Register src,
-                                                           int power) {
-  ASSERT(power >= 0);
-  ASSERT(power < 64);
-  if (power == 0) {
-    SmiToInteger64(dst, src);
-    return;
-  }
-  movsxlq(dst, src);
-  shl(dst, Immediate(power - 1));
-}
-
-Condition MacroAssembler::CheckSmi(Register src) {
-  testb(src, Immediate(kSmiTagMask));
-  return zero;
-}
-
-Condition MacroAssembler::CheckPositiveSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
-  return zero;
-}
-
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
-  if (first.is(second)) {
-    return CheckSmi(first);
-  }
-  movl(kScratchRegister, first);
-  orl(kScratchRegister, second);
-  return CheckSmi(kScratchRegister);
-}
-
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(src, Immediate(0x80000000u));
-  return equal;
-}
-
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
-  // A 32-bit integer value can be converted to a smi if it is in the
-  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
-  // representation have bits 30 and 31 be equal.
-  cmpl(src, Immediate(0xC0000000u));
-  return positive;
-}
-
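Note: the 0xC0000000 comparison is a compact form of that range check. cmpl computes src - 0xC0000000, which modulo 2^32 equals src + 0x40000000, and the sign bit of that sum is clear exactly when bits 30 and 31 of src agree. A self-checking C++ model:

    #include <cassert>
    #include <cstdint>

    // Models CheckInteger32ValidSmiValue: v fits in a 31-bit smi iff
    // v + 2^30 stays out of the sign bit (i.e. bits 30 and 31 of v agree).
    bool IsValidSmiValue(int32_t v) {
      return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
    }

    int main() {
      assert(IsValidSmiValue(0));
      assert(IsValidSmiValue((1 << 30) - 1));    // largest smi,  2^30 - 1
      assert(IsValidSmiValue(-(1 << 30)));       // smallest smi, -2^30
      assert(!IsValidSmiValue(1 << 30));         // one too large
      assert(!IsValidSmiValue(-(1 << 30) - 1));  // one too small
    }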
-
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            Label* on_smi_result) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  negl(dst);
-  testl(dst, Immediate(0x7fffffff));
-  // If the result is zero or 0x80000000, negation failed to create a smi.
-  j(not_equal, on_smi_result);
-}
-
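Note: negation fails to produce a smi in exactly two cases: input 0 (JavaScript needs -0, which only a heap number can represent) and the minimum smi -2^30, whose negation 2^30 is out of range. In tagged form those two failures are the values 0 and 0x80000000, which is what the testl against 0x7fffffff picks out. A small model:

    #include <cassert>
    #include <cstdint>

    // Models the SmiNeg fast-path test: after negl, the tagged result is a
    // usable smi unless it is 0 or 0x80000000 (== INT32_MIN).
    bool NegatedTaggedIsSmi(int32_t tagged_after_neg) {
      return (tagged_after_neg & 0x7fffffff) != 0;  // testl + j(not_equal)
    }

    int main() {
      assert(NegatedTaggedIsSmi(-10));         // -(smi 5): tagged -10, fine
      assert(!NegatedTaggedIsSmi(0));          // -(smi 0): must become -0.0
      assert(!NegatedTaggedIsSmi(INT32_MIN));  // -(min smi): 2^30 overflows
    }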
-
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  addl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    subl(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  }
-}
-
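Note: when dst aliases src1, the addl has already clobbered the only copy of src1, so the overflow path subtracts src2 back; two's-complement wraparound makes that restoration exact even though the intermediate sum overflowed. A minimal demonstration (unsigned arithmetic models the wraparound):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Optimistic add: the sum wraps on overflow, but subtracting src2
      // again always recovers src1's exact bit pattern, so the slow path
      // sees intact inputs.
      uint32_t src1 = 0x7fffffffu;  // large tagged value
      uint32_t src2 = 0x00000002u;
      uint32_t dst = src1 + src2;   // overflows as a signed add
      dst -= src2;                  // the recovery subl
      assert(dst == src1);
    }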
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  subl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    addl(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-
-  if (dst.is(src1)) {
-    // Copy src1 before overwriting.
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(dst, src1);
-
-  imull(dst, src2);
-  j(overflow, on_not_smi_result);
-
-  // Check for negative zero result. If product is zero, and one
-  // argument is negative, go to slow case. The frame is unchanged
-  // in this block, so local control flow can use a Label rather
-  // than a JumpTarget.
-  Label non_zero_result;
-  testl(dst, dst);
-  j(not_zero, &non_zero_result);
-
-  // Test whether either operand is negative (the other must be zero).
-  if (!dst.is(src1)) {
-    movl(kScratchRegister, src1);
-  }
-  orl(kScratchRegister, src2);
-  j(negative, on_not_smi_result);
-
-  bind(&non_zero_result);
-}
-
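Note: the zero-product check exists because JavaScript distinguishes 0 from -0: -4 * 0 must evaluate to -0, which has no smi representation, so a zero product with a negative operand falls back to the slow (heap-number) path. A model of the test:

    #include <cassert>

    // Models SmiMul's negative-zero test: a zero product is only smi 0 when
    // neither operand was negative; orl + j(negative) checks the sign bits.
    bool ZeroProductNeedsSlowPath(int a, int b) {
      return a * b == 0 && (a | b) < 0;
    }

    int main() {
      assert(ZeroProductNeedsSlowPath(-4, 0));   // -4 * 0 is -0 in JS
      assert(ZeroProductNeedsSlowPath(0, -7));
      assert(!ZeroProductNeedsSlowPath(0, 5));   // 0 * 5 is plain 0
      assert(!ZeroProductNeedsSlowPath(3, 2));
    }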
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       Label* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-
-  Register tmp = (src.is(dst) ? kScratchRegister : dst);
-  movl(tmp, src);
-  addl(tmp, Immediate(SmiValue(constant)));
-  if (tmp.is(kScratchRegister)) {
-    j(overflow, on_not_smi_result);
-    testl(tmp, Immediate(kSmiTagMask));
-    j(not_zero, on_not_smi_result);
-    movl(dst, tmp);
-  } else {
-    movl(kScratchRegister, Immediate(kSmiTagMask));
-    cmovl(overflow, dst, kScratchRegister);
-    testl(dst, kScratchRegister);
-    j(not_zero, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (dst.is(src)) {
-    addl(dst, Immediate(smi_value));
-  } else {
-    lea(dst, Operand(src, smi_value));
-  }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (!dst.is(src)) {
-    movl(dst, src);
-    addl(dst, Immediate(smi_value));
-    j(overflow, on_not_smi_result);
-  } else {
-    addl(dst, Immediate(smi_value));
-    Label result_ok;
-    j(no_overflow, &result_ok);
-    subl(dst, Immediate(smi_value));
-    jmp(on_not_smi_result);
-    bind(&result_ok);
-  }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  subl(dst, Immediate(SmiValue(constant)));
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (dst.is(src)) {
-    // Optimistic subtract - may change the value of the dst register
-    // if it has garbage bits in the higher half, but will not change
-    // the value as a tagged smi.
-    subl(dst, Immediate(smi_value));
-    Label add_success;
-    j(no_overflow, &add_success);
-    addl(dst, Immediate(smi_value));
-    jmp(on_not_smi_result);
-    bind(&add_success);
-  } else {
-    movl(dst, src);
-    subl(dst, Immediate(smi_value));
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  Label positive_divisor;
-  testl(src2, src2);
-  j(zero, on_not_smi_result);
-  j(positive, &positive_divisor);
-  // Check for negative zero result. If the dividend is zero, and the
-  // divisor is negative, return a floating point negative zero.
-  testl(src1, src1);
-  j(zero, on_not_smi_result);
-  bind(&positive_divisor);
-
-  // Sign extend src1 into edx:eax.
-  if (!src1.is(rax)) {
-    movl(rax, src1);
-  }
-  cdq();
-
-  idivl(src2);
-  // Check for the corner case of dividing the most negative smi by
-  // -1. We cannot use the overflow flag, since it is not set by the
-  // idiv instruction.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(rax, Immediate(0x40000000));
-  j(equal, on_not_smi_result);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  j(not_zero, on_not_smi_result);
-  // Tag the result and store it in the destination register.
-  Integer32ToSmi(dst, rax);
-}
-
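Note: idivl sets no overflow flag, so the one quotient that cannot be re-tagged, (-2^30) / -1 = 2^30, is caught by comparing the raw quotient against 0x40000000; a nonzero remainder likewise means the result is not a smi. Worked out in C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      // The only smi division whose quotient escapes the smi range:
      // dividing the minimum smi by -1.
      int32_t min_smi_value = -(1 << 30);
      int32_t quotient = min_smi_value / -1;
      assert(quotient == 0x40000000);    // 2^30, one past the largest smi
      assert(quotient > (1 << 30) - 1);  // so the cmpl/j(equal) bails out
    }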
-
-void MacroAssembler::SmiMod(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  testl(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    // Must remember the value to see if a zero result should
-    // be a negative zero.
-    movl(kScratchRegister, rax);
-  } else {
-    movl(rax, src1);
-  }
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Check for a negative zero result. If the result is zero, and the
-  // dividend is negative, return a floating point negative zero.
-  Label non_zero_result;
-  testl(rdx, rdx);
-  j(not_zero, &non_zero_result);
-  if (src1.is(rax)) {
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    testl(src1, src1);
-  }
-  j(negative, on_not_smi_result);
-  bind(&non_zero_result);
-  if (!dst.is(rdx)) {
-    movl(dst, rdx);
-  }
-}
-
-
-void MacroAssembler::SmiNot(Register dst, Register src) {
-  if (dst.is(src)) {
-    not_(dst);
-    // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
-    xor_(src, Immediate(kSmiTagMask));
-  } else {
-    ASSERT_EQ(0, kSmiTag);
-    lea(dst, Operand(src, kSmiTagMask));
-    not_(dst);
-  }
-}
-
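Note: bitwise NOT flips the tag bit along with the payload; xoring kSmiTagMask back in (or pre-biasing with lea src + 1 before the not, as in the non-aliased path) clears it again, and the arithmetic works out to the correct tagged complement: ~(2n) = -2n - 1, and clearing the low bit gives -2n - 2 = 2 * ~n. A small model:

    #include <cassert>
    #include <cstdint>

    int32_t Tag(int32_t v) { return v * 2; }

    int main() {
      // Models SmiNot: ~tagged flips the tag bit; xor with the tag mask (1)
      // restores tag 0, leaving exactly Tag(~value).
      int32_t tagged = Tag(5);          // 10
      int32_t result = ~tagged ^ 1;     // not_ + xor_(kSmiTagMask)
      assert(result == Tag(~5));        // Tag(-6) == -12
      assert(~(tagged + 1) == result);  // the lea(src + 1) + not_ variant
    }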
-
-void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  and_(dst, src2);
-}
-
-
-void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  and_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  or_(dst, src2);
-}
-
-
-void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  or_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  xor_(dst, src2);
-}
-
-
-void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  xor_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
-                                                     Register src,
-                                                     int shift_value) {
-  if (shift_value > 0) {
-    if (dst.is(src)) {
-      sarl(dst, Immediate(shift_value));
-      and_(dst, Immediate(~kSmiTagMask));
-    } else {
-      UNIMPLEMENTED();  // Not used.
-    }
-  }
-}
-
-
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
-                                                  Register src,
-                                                  int shift_value,
-                                                  Label* on_not_smi_result) {
-  // Logical right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movl(dst, src);
-    // Untag the smi.
-    sarl(dst, Immediate(kSmiTagSize));
-    if (shift_value < 2) {
-      // A negative smi shifted right by two or more lands in the positive
-      // smi range, but shifted by only zero or one it never does.
-      j(negative, on_not_smi_result);
-    }
-    if (shift_value > 0) {
-      // Do the right shift on the integer value.
-      shrl(dst, Immediate(shift_value));
-    }
-    // Re-tag the result.
-    addl(dst, dst);
-  }
-}
-
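Note on the shift_value < 2 test: after untagging, values lie in [-2^30, 2^30 - 1]; a logical shift right by two or more of any 32-bit pattern lands in [0, 2^30 - 1], the positive smi range, while shifting a negative value by zero or one always produces something too large. For instance:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t untagged = static_cast<uint32_t>(-5);  // 0xFFFFFFFB
      assert((untagged >> 2) <= 0x3FFFFFFFu);  // shr by 2 fits: 0x3FFFFFFE
      assert((untagged >> 1) > 0x3FFFFFFFu);   // shr by 1 too big: 0x7FFFFFFD
    }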
-
-void MacroAssembler::SmiShiftLeftConstant(Register dst,
-                                          Register src,
-                                          int shift_value,
-                                          Label* on_not_smi_result) {
-  if (dst.is(src)) {
-    if (shift_value > 0) {
-      movq(kScratchRegister, src);
-      // Treat scratch as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(kScratchRegister, Immediate(shift_value - 1));
-      }
-      JumpIfNotValidSmiValue(kScratchRegister, on_not_smi_result);
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
-      Integer32ToSmi(dst, kScratchRegister);
-    }
-  } else {
-    movl(dst, src);
-    if (shift_value > 0) {
-      // Treat dst as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(dst, Immediate(shift_value - 1));
-      }
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
-      Integer32ToSmi(dst, dst, on_not_smi_result);
-    }
-  }
-}
-
-
-void MacroAssembler::SmiShiftLeft(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smi_result) {
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  // Untag both operands.
-  if (dst.is(src1) || src1.is(rcx)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-  shll(dst);
-  // Check that the *signed* result fits in a smi.
-  Condition is_valid = CheckInteger32ValidSmiValue(dst);
-  j(is_valid, &result_ok);
-  // Restore the relevant bits of the source registers
-  // and call the slow version.
-  if (dst.is(src1) || src1.is(rcx)) {
-    movq(src1, kScratchRegister);
-  }
-  if (src2.is(rcx)) {
-    Integer32ToSmi(rcx, rcx);
-  }
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  // Untag both operands.
-  if (src1.is(rcx)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-
-  shrl(dst);
-  // Check that the *unsigned* result fits in a smi.
-  // I.e., that it is a valid positive smi value. The positive smi
-  // values are 0..0x3fffffff, i.e., neither of the top-most two
-  // bits can be set.
-  //
-  // These two cases can only happen with shifts by 0 or 1 when
-  // handed a valid smi. If the answer cannot be represented by a
-  // smi, restore the left and right arguments, and jump to slow
-  // case. The low bit of the left argument may be lost, but only
-  // in a case where it is dropped anyway.
-  testl(dst, Immediate(0xc0000000));
-  j(zero, &result_ok);
-  if (dst.is(src1)) {
-    shll(dst);
-    Integer32ToSmi(dst, dst);
-  } else if (src1.is(rcx)) {
-    movq(rcx, kScratchRegister);
-  } else if (src2.is(rcx)) {
-    Integer32ToSmi(src2, src2);
-  }
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  // Smi-tag the result in answer.
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRight(Register dst,
-                                             Register src1,
-                                             Register src2) {
-  ASSERT(!dst.is(rcx));
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-  // Shift as integer.
-  sarl(dst);
-  // Retag result.
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smis) {
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movq(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  j(not_zero, on_not_smis);
-  // One operand is a smi.
-
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
-}
-
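Note: the tail of SelectNonSmi is a branchless select. After the tag test, scratch is (src1 & 1) - 1: an all-ones mask when src1 is a smi (tag 0) and zero otherwise, so the xor-and-xor sequence yields src2 in the first case and src1 in the second. A C++ model (assumes exactly one argument is a smi):

    #include <cassert>
    #include <cstdint>

    uint64_t SelectNonSmiModel(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;        // src1 smi: all ones; else zero
      return ((src1 ^ src2) & mask) ^ src1;  // smi: src2; non-smi: src1
    }

    int main() {
      uint64_t smi = 42u << 1;         // tag bit 0
      uint64_t heap_object = 0x1001u;  // tag bit 1, stand-in for a pointer
      assert(SelectNonSmiModel(smi, heap_object) == heap_object);
      assert(SelectNonSmiModel(heap_object, smi) == heap_object);
    }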
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
-  ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    return SmiIndex(dst, times_1);
-  }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    // We expect that all smis are actually zero-padded. If this holds after
-    // checking, this line can be omitted.
-    movl(dst, src);  // Ensure that the smi is zero-padded.
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift - kSmiTagSize.
-  movl(dst, src);  // Ensure that the smi is zero-padded.
-  shl(dst, Immediate(shift - kSmiTagSize));
-  return SmiIndex(dst, times_1);
-}
-
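Note: because a 31-bit smi is already value * 2, an index that should be value << shift can reuse the tagged word with the hardware scale factor 2^(shift - 1); only shift > 4 needs an explicit shl, since x64 addressing scales stop at 8. The identity being relied on:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t value = 7;
      int32_t tagged = value * 2;  // kSmiTagSize == 1
      int shift = 3;               // index scaled by 8
      // SmiToIndex returns (tagged, ScaleFactor 2^(shift - 1)) instead of
      // untagging and scaling by 2^shift; the two address the same element.
      assert((value << shift) == (tagged << (shift - 1)));
    }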

-SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
-                                            Register src,
-                                            int shift) {
-  // Register src holds a positive smi.
-  ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    neg(dst);
-    return SmiIndex(dst, times_1);
-  }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    movl(dst, src);
-    neg(dst);
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift - kSmiTagSize.
-  movl(dst, src);
-  neg(dst);
-  shl(dst, Immediate(shift - kSmiTagSize));
-  return SmiIndex(dst, times_1);
-}
-
-#endif // V8_LONG_SMI
-
-
 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
   ASSERT_EQ(0, kSmiTag);
   Condition smi = CheckSmi(src);
   j(smi, on_smi);
 }


 void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
   Condition smi = CheckSmi(src);
   j(NegateCondition(smi), on_not_smi);
(...skipping 1041 matching lines...)
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }

 } }  // namespace v8::internal