Chromium Code Reviews

Unified Diff: src/ia32/full-codegen-ia32.cc

Issue 6592064: Make the materialization of smi constants consistent between the two compiler... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 9 months ago
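For context: on ia32, V8 stores small integers ("smis") as tagged machine words with kSmiTagSize == 1, i.e. the integer shifted left by one so that every smi has a zero low bit. The patch below deletes full codegen's special-cased paths for binary operations with a constant smi operand, so smi constants are materialized the same way in both compilers, through the generic inline smi code and TypeRecordingBinaryOpStub. A minimal model of the tagging scheme, with illustrative helper names rather than V8's actual API:

  // ia32 smi tagging modeled in portable C++. A smi is a 31-bit integer
  // shifted left by kSmiTagSize == 1, so every smi has a zero low bit.
  #include <cstdint>

  const int kSmiTagSize = 1;
  const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0x1

  inline int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }  // Valid if value fits in 31 bits.
  inline int32_t SmiUntag(int32_t word) { return word >> kSmiTagSize; }
  inline bool IsSmi(int32_t word) { return (word & kSmiTagMask) == 0; }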
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 304 matching lines...)
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
     ASSERT(Assembler::kJSReturnSequenceLength <=
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 #endif
   }
 }


-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
-    Token::Value op, Expression* left, Expression* right) {
-  ASSERT(ShouldInlineSmiCase(op));
-  if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
-    // We never generate inlined constant smi operations for these.
-    return kNoConstants;
-  } else if (right->IsSmiLiteral()) {
-    return kRightConstant;
-  } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
-    // Don't inline shifts with constant left hand side.
-    return kLeftConstant;
-  } else {
-    return kNoConstants;
-  }
-}
-
-
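The deleted helper above classified which operand, if any, could be inlined as a constant smi: multiplicative operators never qualified, a smi literal on the right always did, and a smi literal on the left did only for non-shift operators. Some illustrative classifications (hypothetical examples, using the enum values from the removed code):

  //   x + 1   -> kRightConstant  (right operand is a smi literal)
  //   1 + x   -> kLeftConstant   (left literal is fine for non-shifts)
  //   1 << x  -> kNoConstants    (shift with a constant left-hand side)
  //   x * 2   -> kNoConstants    (MUL/DIV/MOD never inlined a constant)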
 void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
 }


 void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
   MemOperand slot_operand = codegen()->EmitSlotSearch(slot, result_register());
   __ mov(result_register(), slot_operand);
 }


(...skipping 1213 matching lines...)
       }
     }

     // For property compound assignments we need another deoptimization
     // point after the property load.
     if (property != NULL) {
       PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
     }

     Token::Value op = expr->binary_op();
-    ConstantOperand constant = ShouldInlineSmiCase(op)
-        ? GetConstantOperand(op, expr->target(), expr->value())
-        : kNoConstants;
-    ASSERT(constant == kRightConstant || constant == kNoConstants);
-    if (constant == kNoConstants) {
-      __ push(eax);  // Left operand goes on the stack.
-      VisitForAccumulatorValue(expr->value());
-    }
+    __ push(eax);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());

     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
         ? OVERWRITE_RIGHT
         : NO_OVERWRITE;
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
       EmitInlineSmiBinaryOp(expr,
                             op,
                             mode,
                             expr->target(),
-                            expr->value(),
-                            constant);
+                            expr->value());
     } else {
       EmitBinaryOp(op, mode);
     }

     // Deoptimization point in case the binary operation may have side effects.
     PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }

(...skipping 27 matching lines...)
 }


 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }


-void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
-                                           OverwriteMode mode,
-                                           bool left_is_constant_smi,
-                                           Smi* value) {
-  NearLabel call_stub, done;
-  // Optimistically add smi value with unknown object. If result overflows or is
-  // not a smi then we had either a smi overflow or added a smi with a tagged
-  // pointer.
-  __ add(Operand(eax), Immediate(value));
-  __ j(overflow, &call_stub);
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &done);
-
-  // Undo the optimistic add operation and call the shared stub.
-  __ bind(&call_stub);
-  __ sub(Operand(eax), Immediate(value));
-  TypeRecordingBinaryOpStub stub(Token::ADD, mode);
-  if (left_is_constant_smi) {
-    __ mov(edx, Immediate(value));
-  } else {
-    __ mov(edx, eax);
-    __ mov(eax, Immediate(value));
-  }
-  EmitCallIC(stub.GetCode(), &patch_site);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
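The removed EmitConstantSmiAdd leaned on a property of the tagging: adding two tagged words adds the untagged values (2a + 2b == 2(a + b)), so the constant can be added before knowing whether eax holds a smi; overflow or a set tag bit in the result reveals the failure, the add is undone, and the stub takes over. A minimal C++ model of that fast path (a sketch reusing the hypothetical SmiTag/IsSmi helpers above and the GCC/Clang overflow builtin, not V8 code):

  #include <cstdint>

  // Returns true and writes the tagged sum on the fast path; false means
  // "undo and call the stub", mirroring the deleted assembly.
  bool OptimisticSmiAdd(int32_t lhs_word, int32_t tagged_constant,
                        int32_t* result) {
    int32_t sum;
    if (__builtin_add_overflow(lhs_word, tagged_constant, &sum)) {
      return false;  // Signed overflow: the values were too big for a smi.
    }
    if ((sum & 1) != 0) {
      return false;  // lhs was a tagged pointer, not a smi; sum is garbage.
    }
    *result = sum;  // Already a valid tagged smi: 2a + 2b == 2(a + b).
    return true;
  }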
-void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
-                                           OverwriteMode mode,
-                                           bool left_is_constant_smi,
-                                           Smi* value) {
-  NearLabel call_stub, done;
-  // Optimistically subtract smi value with unknown object. If result overflows
-  // or is not a smi then we had either a smi overflow or added a smi with a
-  // tagged pointer.
-  if (left_is_constant_smi) {
-    __ mov(ecx, eax);
-    __ mov(eax, Immediate(value));
-    __ sub(Operand(eax), ecx);
-  } else {
-    __ sub(Operand(eax), Immediate(value));
-  }
-  __ j(overflow, &call_stub);
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &done);
-
-  __ bind(&call_stub);
-  if (left_is_constant_smi) {
-    __ mov(edx, Immediate(value));
-    __ mov(eax, ecx);
-  } else {
-    __ add(Operand(eax), Immediate(value));  // Undo the subtraction.
-    __ mov(edx, eax);
-    __ mov(eax, Immediate(value));
-  }
-  TypeRecordingBinaryOpStub stub(Token::SUB, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
-                                               Token::Value op,
-                                               OverwriteMode mode,
-                                               Smi* value) {
-  NearLabel call_stub, smi_case, done;
-  int shift_value = value->value() & 0x1f;
-
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
-
-  // Call stub.
-  __ bind(&call_stub);
-  __ mov(edx, eax);
-  __ mov(eax, Immediate(value));
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
-
-  // Smi case.
-  __ bind(&smi_case);
-  switch (op) {
-    case Token::SHL:
-      if (shift_value != 0) {
-        __ mov(edx, eax);
-        if (shift_value > 1) {
-          __ shl(edx, shift_value - 1);
-        }
-        // Convert int result to smi, checking that it is in int range.
-        STATIC_ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
-        __ add(edx, Operand(edx));
-        __ j(overflow, &call_stub);
-        __ mov(eax, edx);  // Put result back into eax.
-      }
-      break;
-    case Token::SAR:
-      if (shift_value != 0) {
-        __ sar(eax, shift_value);
-        __ and_(eax, ~kSmiTagMask);
-      }
-      break;
-    case Token::SHR:
-      // SHR must return a positive value. When shifting by 0 or 1 we need to
-      // check that smi tagging the result will not create a negative value.
-      if (shift_value < 2) {
-        __ mov(edx, eax);
-        __ SmiUntag(edx);
-        __ shr(edx, shift_value);
-        __ test(edx, Immediate(0xc0000000));
-        __ j(not_zero, &call_stub);
-        __ SmiTag(edx);
-        __ mov(eax, edx);  // Put result back into eax.
-      } else {
-        __ SmiUntag(eax);
-        __ shr(eax, shift_value);
-        __ SmiTag(eax);
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
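Each case of the removed shift path exploits the tag differently: SHL shifts by shift_value - 1 and lets the final doubling both re-tag the result and drive the overflow check; SAR can shift the tagged word directly and then clear the tag bits; SHR untags first and, for shifts of 0 or 1, tests the top two bits (0xc0000000) because the unsigned result must still fit in a non-negative 31-bit smi. A sketch of the SHR check in C++ (hypothetical helper, untagged math, not V8 code):

  #include <cstdint>

  // Models the deleted SHR fast path. A result with either of the top two
  // bits set cannot be re-tagged as a non-negative smi, so it falls back to
  // the stub; for shift_value >= 2 those bits are guaranteed clear and the
  // original code skipped the check.
  bool ConstantSmiShr(int32_t lhs, int shift_value, int32_t* result) {
    uint32_t shifted = static_cast<uint32_t>(lhs) >> (shift_value & 0x1f);
    if ((shifted & 0xc0000000u) != 0) {
      return false;  // Does not fit in a non-negative smi.
    }
    *result = static_cast<int32_t>(shifted << 1);  // Re-tag (SmiTag).
    return true;
  }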
-void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
-                                             Token::Value op,
-                                             OverwriteMode mode,
-                                             Smi* value) {
-  NearLabel smi_case, done;
-
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
-
-  // The order of the arguments does not matter for bit-ops with a
-  // constant operand.
-  __ mov(edx, Immediate(value));
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
-
-  // Smi case.
-  __ bind(&smi_case);
-  switch (op) {
-    case Token::BIT_OR:
-      __ or_(Operand(eax), Immediate(value));
-      break;
-    case Token::BIT_XOR:
-      __ xor_(Operand(eax), Immediate(value));
-      break;
-    case Token::BIT_AND:
-      __ and_(Operand(eax), Immediate(value));
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
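The removed bit-op path needs no untagging at all: with a zero tag bit, AND, OR, and XOR distribute over the tag, i.e. SmiTag(a) op SmiTag(b) == SmiTag(a op b), and no overflow is possible. A small C++ check of that identity (illustrative, not V8 code):

  #include <cassert>
  #include <cstdint>

  void TaggedBitOpsCommuteWithTagging(int32_t a, int32_t b) {
    int32_t ta = a << 1;  // SmiTag(a), assuming a fits in 31 bits.
    int32_t tb = b << 1;  // SmiTag(b)
    assert((ta & tb) == ((a & b) << 1));
    assert((ta | tb) == ((a | b) << 1));
    assert((ta ^ tb) == ((a ^ b) << 1));
  }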
-void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
-                                                Token::Value op,
-                                                OverwriteMode mode,
-                                                bool left_is_constant_smi,
-                                                Smi* value) {
-  switch (op) {
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      EmitConstantSmiBitOp(expr, op, mode, value);
-      break;
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      ASSERT(!left_is_constant_smi);
-      EmitConstantSmiShiftOp(expr, op, mode, value);
-      break;
-    case Token::ADD:
-      EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
-      break;
-    case Token::SUB:
-      EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left,
-                                              Expression* right,
-                                              ConstantOperand constant) {
-  if (constant == kRightConstant) {
-    Smi* value = Smi::cast(*right->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, mode, false, value);
-    return;
-  } else if (constant == kLeftConstant) {
-    Smi* value = Smi::cast(*left->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, mode, true, value);
-    return;
-  }
-
+                                              Expression* right) {
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
   NearLabel done, smi_case, stub_call;
   __ pop(edx);
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
   JumpPatchSite patch_site(masm_);
   patch_site.EmitJumpIfSmi(eax, &smi_case);

   __ bind(&stub_call);
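The surviving generic path above checks both operands at once: OR-ing them into a scratch register and testing the tag bit catches any non-smi, since a smi's low bit is zero and OR preserves a set bit from either side. In C++ terms (a sketch with a hypothetical helper name):

  #include <cstdint>

  // (a | b) has a zero low bit iff both a and b have zero low bits,
  // i.e. iff both words are tagged smis.
  inline bool BothSmis(int32_t a, int32_t b) {
    return ((a | b) & 1) == 0;
  }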
(...skipping 2586 matching lines...)
   // And return.
   __ ret(0);
 }


 #undef __

 } }  // namespace v8::internal


 #endif  // V8_TARGET_ARCH_IA32
