Chromium Code Reviews

Unified Diff: src/arm/full-codegen-arm.cc

Issue 6529022: ARM: Add inlined smi binary operations in full code generator (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 323 matching lines...)
     ASSERT(Assembler::kJSReturnSequenceInstructions <=
            masm_->InstructionsGeneratedSince(&check_exit_codesize));
 #endif
   }
 }


 FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
     Token::Value op, Expression* left, Expression* right) {
   ASSERT(ShouldInlineSmiCase(op));
-  return kNoConstants;
+  if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
+    // We never generate inlined constant smi operations for these.
+    return kNoConstants;
+  } else if (right->IsSmiLiteral()) {
+    return kRightConstant;
+  } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+    // Don't inline shifts with constant left hand side.
+    return kLeftConstant;
+  } else {
+    return kNoConstants;
+  }
 }
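
For reference, the classification that the new GetConstantOperand performs can be sketched in standalone C++. The enum, Expr struct, and Classify/IsShiftOp names below are simplified stand-ins for illustration only, not V8's types:

    #include <cstdio>

    enum ConstantOperand { kNoConstants, kLeftConstant, kRightConstant };
    enum Op { ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SHL, SAR, SHR };

    struct Expr { bool is_smi_literal; };

    static bool IsShiftOp(Op op) { return op == SHL || op == SAR || op == SHR; }

    // Mirrors the decision logic above: MUL/DIV/MOD never inline a constant,
    // a smi literal on the right always wins, and a smi literal on the left
    // is used only for non-shift operators.
    ConstantOperand Classify(Op op, const Expr& left, const Expr& right) {
      if (op == DIV || op == MOD || op == MUL) return kNoConstants;
      if (right.is_smi_literal) return kRightConstant;
      if (left.is_smi_literal && !IsShiftOp(op)) return kLeftConstant;
      return kNoConstants;
    }

    int main() {
      Expr smi = { true }, var = { false };
      std::printf("%d\n", Classify(ADD, var, smi));  // x + 1  -> kRightConstant
      std::printf("%d\n", Classify(ADD, smi, var));  // 1 + x  -> kLeftConstant
      std::printf("%d\n", Classify(SHL, smi, var));  // 1 << x -> kNoConstants
      std::printf("%d\n", Classify(MUL, var, smi));  // x * 2  -> kNoConstants
      return 0;
    }
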


 void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
 }


 void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
   codegen()->Move(result_register(), slot);
 }
(...skipping 1236 matching lines...)


 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 }


+void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
+                                           OverwriteMode mode,
+                                           bool left_is_constant_smi,
+                                           Smi* value) {
+  Label call_stub, done;
+  // Optimistically add the smi value to the unknown object. If the result
+  // overflows or is not a smi, then we had either a smi overflow or added a
+  // smi to a tagged pointer.
+  __ mov(r1, Operand(value));
+  __ add(r2, r0, r1, SetCC);
+  __ b(vs, &call_stub);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfNotSmi(r2, &call_stub);
+  __ mov(r0, r2);
+  __ b(&done);
+
+  // Call the shared stub.
+  __ bind(&call_stub);
+  if (!left_is_constant_smi) {
+    __ Swap(r0, r1, r2);
+  }
+  TypeRecordingBinaryOpStub stub(Token::ADD, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
+  __ bind(&done);
+  context()->Plug(r0);
+}
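
The fast path above relies on the 32-bit smi encoding: a smi is its value shifted left by one (tag bit zero), while heap pointers have the low bit set, so a tag check on the optimistic sum catches the pointer case and the overflow flag catches smi overflow. A minimal standalone sketch of the same idea, using a GCC/Clang-style __builtin_add_overflow in place of ARM's add-with-SetCC plus the vs branch (OptimisticSmiAdd is an illustrative name, not V8 API):

    #include <cstdint>
    #include <cstdio>

    const int32_t kSmiTagMask = 1;

    // Returns true and writes the tagged sum when the fast path succeeds,
    // false when the shared stub would have to be called.
    bool OptimisticSmiAdd(int32_t unknown_tagged, int32_t constant_value,
                          int32_t* result) {
      int32_t tagged_constant = constant_value * 2;  // Smi::FromInt(c)
      int32_t sum;
      // Mirrors "add r2, r0, r1, SetCC" followed by "b vs, &call_stub".
      if (__builtin_add_overflow(unknown_tagged, tagged_constant, &sum)) {
        return false;                 // smi overflow -> stub
      }
      if ((sum & kSmiTagMask) != 0) {
        return false;                 // low bit set: r0 held a pointer -> stub
      }
      *result = sum;                  // correctly tagged smi result
      return true;
    }

    int main() {
      int32_t r;
      std::printf("%d\n", OptimisticSmiAdd(5 * 2, 3, &r) ? r / 2 : -1);  // 8
      std::printf("%d\n", OptimisticSmiAdd(0x7ffffffe, 1, &r));          // 0 (overflow)
      return 0;
    }
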
+
+
+void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
+                                           OverwriteMode mode,
+                                           bool left_is_constant_smi,
+                                           Smi* value) {
+  Label call_stub, done;
+  // Optimistically subtract using the smi value and the unknown object. If the
+  // result overflows or is not a smi, then we had either a smi overflow or a
+  // subtraction between a smi and a tagged pointer.
+  __ mov(r1, Operand(value));
+  if (left_is_constant_smi) {
+    __ sub(r2, r1, r0, SetCC);
+  } else {
+    __ sub(r2, r0, r1, SetCC);
+  }
+  __ b(vs, &call_stub);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfNotSmi(r2, &call_stub);
+  __ mov(r0, r2);
+  __ b(&done);
+
+  // Call the shared stub.
+  __ bind(&call_stub);
+  if (!left_is_constant_smi) {
+    __ Swap(r0, r1, r2);
+  }
+  TypeRecordingBinaryOpStub stub(Token::SUB, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
+  __ bind(&done);
+  context()->Plug(r0);
+}
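
The subtract variant is the same idea, except the operation is not commutative, so operand order matters, and the Swap before the stub call keeps the stub's left-in-r1 / right-in-r0 convention (the same one EmitBinaryOp relies on). A hedged sketch of the arithmetic only (OptimisticSmiSub is an illustrative name; it assumes the same smi encoding and compiler builtins as the add sketch above):

    #include <cstdint>
    #include <cstdio>

    bool OptimisticSmiSub(int32_t unknown_tagged, int32_t constant_value,
                          bool left_is_constant, int32_t* result) {
      int32_t tagged_constant = constant_value * 2;  // Smi::FromInt(c)
      int32_t diff;
      bool overflow = left_is_constant
          ? __builtin_sub_overflow(tagged_constant, unknown_tagged, &diff)   // c - x
          : __builtin_sub_overflow(unknown_tagged, tagged_constant, &diff);  // x - c
      if (overflow || (diff & 1) != 0) return false;  // overflow or non-smi -> stub
      *result = diff;
      return true;
    }

    int main() {
      int32_t r;
      std::printf("%d\n", OptimisticSmiSub(7 * 2, 2, false, &r) ? r / 2 : 0);  // 7 - 2 ==  5
      std::printf("%d\n", OptimisticSmiSub(7 * 2, 2, true, &r) ? r / 2 : 0);   // 2 - 7 == -5
      return 0;
    }
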
+
+
+void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
+                                               Token::Value op,
+                                               OverwriteMode mode,
+                                               Smi* value) {
+  Label call_stub, smi_case, done;
+  int shift_value = value->value() & 0x1f;
+
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(r0, &smi_case);
+
+  // Call stub.
+  __ bind(&call_stub);
+  __ mov(r1, r0);
+  __ mov(r0, Operand(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+  __ b(&done);
+
+  // Smi case.
+  __ bind(&smi_case);
+  switch (op) {
+    case Token::SHL:
+      if (shift_value != 0) {
+        __ mov(r1, r0);
+        if (shift_value > 1) {
+          __ mov(r1, Operand(r1, LSL, shift_value - 1));
+        }
+        // Convert the int result to a smi, checking that it is in smi range.
+        __ SmiTag(r1, SetCC);
+        __ b(vs, &call_stub);
+        __ mov(r0, r1);  // Put result back into r0.
+      }
+      break;
+    case Token::SAR:
+      if (shift_value != 0) {
+        __ mov(r0, Operand(r0, ASR, shift_value));
+        __ bic(r0, r0, Operand(kSmiTagMask));
+      }
+      break;
+    case Token::SHR:
+      // SHR must return a positive value. When shifting by 0 or 1 we need to
+      // check that smi tagging the result will not create a negative value.
+      if (shift_value < 2) {
+        __ mov(r2, Operand(shift_value));
+        __ SmiUntag(r1, r0);
+        if (shift_value != 0) {
+          __ mov(r1, Operand(r1, LSR, shift_value));
+        }
+        __ tst(r1, Operand(0xc0000000));
+        __ b(ne, &call_stub);
+        __ SmiTag(r0, r1);  // result in r0.
+      } else {
+        __ SmiUntag(r0);
+        __ mov(r0, Operand(r0, LSR, shift_value));
+        __ SmiTag(r0);
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(r0);
+}
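
Two facts make the constant-shift fast paths work: arithmetically shifting the tagged value and clearing the tag bit is equivalent to untagging, shifting, and retagging (the SAR case), and an unsigned SHR result only fits in a smi when its top two bits are clear, which is what the tst 0xc0000000 guards for shifts of 0 or 1. A standalone sketch, assuming arithmetic right shift of negative ints as on the targeted compilers (function names are illustrative only):

    #include <cstdint>
    #include <cstdio>

    // SAR with a constant shift in [1, 31], applied directly to the tagged value.
    int32_t ConstantSar(int32_t tagged, int shift) {
      return (tagged >> shift) & ~1;  // clear the tag bit, as the bic above does
    }

    // SHR: untag, shift logically, and retag only if the result is below 2^30.
    // The range check is unconditional here; the generated code needs it only
    // for shifts of 0 or 1.
    bool ConstantShr(int32_t tagged, int shift, int32_t* tagged_result) {
      uint32_t untagged = static_cast<uint32_t>(tagged >> 1);  // SmiUntag
      uint32_t shifted = untagged >> shift;
      if (shifted & 0xc0000000u) return false;                 // needs the stub
      *tagged_result = static_cast<int32_t>(shifted << 1);     // SmiTag
      return true;
    }

    int main() {
      std::printf("%d\n", ConstantSar(-9 * 2, 2) / 2);  // -9 >> 2 == -3
      int32_t r;
      std::printf("%d\n", ConstantShr(-1 * 2, 1, &r));  // 0: (-1 >>> 1) is not a smi
      std::printf("%d\n", ConstantShr(20 * 2, 2, &r) ? r / 2 : -1);  // 20 >>> 2 == 5
      return 0;
    }
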
+
+
+void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
+                                             Token::Value op,
+                                             OverwriteMode mode,
+                                             Smi* value) {
+  Label smi_case, done;
+
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(r0, &smi_case);
+
+  // The order of the arguments does not matter for bit-ops with a
+  // constant operand.
+  __ mov(r1, Operand(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+  __ jmp(&done);
+
+  // Smi case.
+  __ bind(&smi_case);
+  __ mov(r1, Operand(value));
+  switch (op) {
+    case Token::BIT_OR:
+      __ orr(r0, r0, Operand(r1));
+      break;
+    case Token::BIT_XOR:
+      __ eor(r0, r0, Operand(r1));
+      break;
+    case Token::BIT_AND:
+      __ and_(r0, r0, Operand(r1));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(r0);
+}
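
Bitwise AND/OR/XOR commute with smi tagging because the tag bit is zero, which is also why the argument order does not matter here. A small self-checking sketch (SmiTag/SmiUntag below are plain helpers mirroring the one-bit tag, not the macro-assembler routines):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int32_t SmiTag(int32_t v) { return v * 2; }     // value << 1, tag bit 0
    int32_t SmiUntag(int32_t t) { return t >> 1; }  // arithmetic shift back

    int main() {
      for (int32_t a = -4; a <= 4; ++a) {
        for (int32_t b = -4; b <= 4; ++b) {
          assert(SmiUntag(SmiTag(a) | SmiTag(b)) == (a | b));
          assert(SmiUntag(SmiTag(a) & SmiTag(b)) == (a & b));
          assert(SmiUntag(SmiTag(a) ^ SmiTag(b)) == (a ^ b));
        }
      }
      std::printf("bitwise ops on tagged smis match the untagged results\n");
      return 0;
    }
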
+
+
+void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
+                                                Token::Value op,
+                                                OverwriteMode mode,
+                                                bool left_is_constant_smi,
+                                                Smi* value) {
+  switch (op) {
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      EmitConstantSmiBitOp(expr, op, mode, value);
+      break;
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      ASSERT(!left_is_constant_smi);
+      EmitConstantSmiShiftOp(expr, op, mode, value);
+      break;
+    case Token::ADD:
+      EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
+      break;
+    case Token::SUB:
+      EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
 void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
-                                              Expression* left,
-                                              Expression* right,
+                                              Expression* left_expr,
+                                              Expression* right_expr,
                                               ConstantOperand constant) {
-  ASSERT(constant == kNoConstants);  // Only handled case.
-  EmitBinaryOp(op, mode);
-}
+  if (constant == kRightConstant) {
+    Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
+    EmitConstantSmiBinaryOp(expr, op, mode, false, value);
+    return;
+  } else if (constant == kLeftConstant) {
+    Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
+    EmitConstantSmiBinaryOp(expr, op, mode, true, value);
+    return;
+  }
+
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = r2;
+  Register scratch2 = r3;
+
+  // Get the arguments.
+  Register left = r1;
+  Register right = r0;
+  __ pop(left);
+
+  // Perform combined smi check on both operands.
+  __ orr(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+  __ jmp(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub, see
+  // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+  switch (op) {
+    case Token::SAR:
+      __ b(&stub_call);
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ mov(right, Operand(left, ASR, scratch1));
+      __ bic(right, right, Operand(kSmiTagMask));
+      break;
+    case Token::SHL: {
+      __ b(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+      __ b(mi, &stub_call);
+      __ SmiTag(right, scratch1);
+      break;
+    }
+    case Token::SHR: {
+      __ b(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+      __ tst(scratch1, Operand(0xc0000000));
+      __ b(ne, &stub_call);
+      __ SmiTag(right, scratch1);
+      break;
+    }
+    case Token::ADD:
+      __ add(scratch1, left, Operand(right), SetCC);
+      __ b(vs, &stub_call);
+      __ mov(right, scratch1);
+      break;
+    case Token::SUB:
+      __ sub(scratch1, left, Operand(right), SetCC);
+      __ b(vs, &stub_call);
+      __ mov(right, scratch1);
+      break;
+    case Token::MUL: {
+      __ SmiUntag(ip, right);
+      __ smull(scratch1, scratch2, left, ip);
+      __ mov(ip, Operand(scratch1, ASR, 31));
+      __ cmp(ip, Operand(scratch2));
+      __ b(ne, &stub_call);
+      __ tst(scratch1, Operand(scratch1));
+      __ mov(right, Operand(scratch1), LeaveCC, ne);
+      __ b(ne, &done);
+      __ add(scratch2, right, Operand(left), SetCC);
+      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ b(mi, &stub_call);
+      break;
+    }
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(r0);
+}
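
Two details of the general smi case are worth spelling out: the orr-based check tests the tag bits of both operands at once (a smi has tag bit 0, so the OR has a clear tag bit only if both inputs do), and the MUL path uses a 32x32->64-bit multiply of the tagged left value and the untagged right value, accepting the low word only if the high word is its sign extension, with a zero product handled separately because -0 is not a smi. A standalone sketch of both checks (BothSmis/SmiMul are illustrative names, not V8 API):

    #include <cstdint>
    #include <cstdio>

    bool BothSmis(int32_t a, int32_t b) { return ((a | b) & 1) == 0; }

    bool SmiMul(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
      // smull: (2a) * b == 2ab, i.e. the tagged product, computed in 64 bits.
      int64_t product =
          static_cast<int64_t>(left_tagged) * (right_tagged >> 1);
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      if (hi != (lo >> 31)) return false;          // does not fit in 32 bits
      if (lo != 0) { *result = lo; return true; }  // nonzero tagged product
      // Zero product: the result is -0 (not a smi) exactly when one operand is
      // negative, which the generated code detects via the sign of left + right.
      if (static_cast<int64_t>(left_tagged) + right_tagged < 0) return false;
      *result = 0;
      return true;
    }

    int main() {
      int32_t r;
      std::printf("%d\n", BothSmis(3 * 2, 4 * 2));  // 1: both tagged smis
      std::printf("%d\n", BothSmis(3 * 2, 9));      // 0: odd value is a pointer
      std::printf("%d\n", SmiMul(7 * 2, -3 * 2, &r) ? r / 2 : 99);  // -21
      std::printf("%d\n", SmiMul(0, -3 * 2, &r));   // 0: result would be -0
      return 0;
    }
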


 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
   TypeRecordingBinaryOpStub stub(op, mode);
   EmitCallIC(stub.GetCode(), NULL);
   context()->Plug(r0);
 }


 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
(...skipping 1658 matching lines...)
         case KEYED_PROPERTY:
           __ str(r0, MemOperand(sp, 2 * kPointerSize));
           break;
       }
     }
   }


   // Inline smi case if we are in a loop.
   Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
   int count_value = expr->op() == Token::INC ? 1 : -1;
   if (ShouldInlineSmiCase(expr->op())) {
     __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
     __ b(vs, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ JumpIfSmi(r0, &done);
+    patch_site.EmitJumpIfSmi(r0, &done);
+
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
   }
   __ mov(r1, Operand(Smi::FromInt(count_value)));

   // Record position before stub call.
   SetSourcePosition(expr->position());

-  GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);

   // Store the value returned in r0.
   switch (assign_type) {
     case VARIABLE:
       if (expr->is_postfix()) {
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN);
           PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
(...skipping 394 matching lines...)
   __ mov(r1, Operand(r1, ASR, 1));  // Un-smi-tag value.
   __ add(pc, r1, Operand(masm_->CodeObject()));
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM