Chromium Code Reviews

Side by Side Diff: src/arm/full-codegen-arm.cc

Issue 6529022: ARM: Add inlined smi binary operations in full code generator (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments.
Jump to:
View unified diff | | Annotate | Revision Log
« no previous file with comments | « no previous file | src/ia32/full-codegen-ia32.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 323 matching lines...)
334 ASSERT(Assembler::kJSReturnSequenceInstructions <= 334 ASSERT(Assembler::kJSReturnSequenceInstructions <=
335 masm_->InstructionsGeneratedSince(&check_exit_codesize)); 335 masm_->InstructionsGeneratedSince(&check_exit_codesize));
336 #endif 336 #endif
337 } 337 }
338 } 338 }
339 339
340 340
341 FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( 341 FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
342 Token::Value op, Expression* left, Expression* right) { 342 Token::Value op, Expression* left, Expression* right) {
343 ASSERT(ShouldInlineSmiCase(op)); 343 ASSERT(ShouldInlineSmiCase(op));
344 return kNoConstants; 344 if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
345 // We never generate inlined constant smi operations for these.
346 return kNoConstants;
347 } else if (right->IsSmiLiteral()) {
348 return kRightConstant;
349 } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
350 // Don't inline shifts with constant left hand side.
351 return kLeftConstant;
352 } else {
353 return kNoConstants;
354 }
345 } 355 }
346 356
347 357
// Effect context: the slot's value is not needed, so nothing is emitted.
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
350 360
351 361
// Accumulator context: load the slot's value into the result register.
void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
  codegen()->Move(result_register(), slot);
}
(...skipping 1236 matching lines...)
1591 1601
1592 1602
// Emit a keyed property load through the keyed load inline cache.
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
  SetSourcePosition(prop->position());
  // Call keyed load IC. It has arguments key and receiver in r0 and r1.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
1599 1609
1600 1610
// Emit an inlined addition where one operand is the constant smi |value| and
// the other (non-constant) operand is in r0. Falls back to the shared
// TypeRecordingBinaryOpStub on overflow or when r0 does not hold a smi.
// The result is left in r0 and plugged into the current expression context.
void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
                                           OverwriteMode mode,
                                           bool left_is_constant_smi,
                                           Smi* value) {
  Label call_stub, done;
  // Optimistically add smi value with unknown object. If result overflows or is
  // not a smi then we had either a smi overflow or added a smi with a tagged
  // pointer.
  __ mov(r1, Operand(value));
  __ add(r2, r0, r1, SetCC);
  __ b(vs, &call_stub);  // Signed overflow: the inlined result is unusable.
  JumpPatchSite patch_site(masm_);
  patch_site.EmitJumpIfNotSmi(r2, &call_stub);
  __ mov(r0, r2);  // The smi result becomes the value of the expression.
  __ b(&done);

  // Call the shared stub.
  __ bind(&call_stub);
  if (!left_is_constant_smi) {
    // The stub takes the left operand in r1 and the right operand in r0
    // (see EmitInlineSmiBinaryOp). When the constant is the right operand
    // the registers currently hold the opposite assignment, so swap them
    // (r2 is used as scratch).
    __ Swap(r0, r1, r2);
  }
  TypeRecordingBinaryOpStub stub(Token::ADD, mode);
  EmitCallIC(stub.GetCode(), &patch_site);

  __ bind(&done);
  context()->Plug(r0);
}
1638
1639
// Emit an inlined subtraction where one operand is the constant smi |value|
// and the other (non-constant) operand is in r0. |left_is_constant_smi|
// selects between constant-minus-r0 and r0-minus-constant. Falls back to
// the shared stub on overflow or when r0 does not hold a smi.
void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
                                           OverwriteMode mode,
                                           bool left_is_constant_smi,
                                           Smi* value) {
  Label call_stub, done;
  // Optimistically subtract smi value and unknown object. If result overflows
  // or is not a smi then we had either a smi overflow or subtraction between a
  // smi and a tagged pointer.
  __ mov(r1, Operand(value));
  if (left_is_constant_smi) {
    __ sub(r2, r1, r0, SetCC);  // constant - r0
  } else {
    __ sub(r2, r0, r1, SetCC);  // r0 - constant
  }
  __ b(vs, &call_stub);  // Signed overflow: the inlined result is unusable.
  JumpPatchSite patch_site(masm_);
  patch_site.EmitJumpIfNotSmi(r2, &call_stub);
  __ mov(r0, r2);
  __ b(&done);

  // Call the shared stub.
  __ bind(&call_stub);
  if (!left_is_constant_smi) {
    // The stub takes the left operand in r1 and the right operand in r0
    // (see EmitInlineSmiBinaryOp); swap so the non-constant left operand
    // ends up in r1 (r2 is used as scratch).
    __ Swap(r0, r1, r2);
  }
  TypeRecordingBinaryOpStub stub(Token::SUB, mode);
  EmitCallIC(stub.GetCode(), &patch_site);

  __ bind(&done);
  context()->Plug(r0);
}
1671
1672
// Emit an inlined shift (SHL, SAR or SHR) of the smi in r0 by the constant
// smi |value|. Only the low five bits of the shift count are used (matching
// the masking done here with 0x1f). Falls back to the shared stub when r0
// is not a smi or when the result does not fit in a smi.
void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Smi* value) {
  Label call_stub, smi_case, done;
  int shift_value = value->value() & 0x1f;  // Shift counts are taken mod 32.

  JumpPatchSite patch_site(masm_);
  patch_site.EmitJumpIfSmi(r0, &smi_case);

  // Call stub.
  __ bind(&call_stub);
  __ mov(r1, r0);              // Left operand: the value being shifted.
  __ mov(r0, Operand(value));  // Right operand: the constant shift count.
  TypeRecordingBinaryOpStub stub(op, mode);
  EmitCallIC(stub.GetCode(), &patch_site);
  __ b(&done);

  // Smi case.
  __ bind(&smi_case);
  switch (op) {
    case Token::SHL:
      if (shift_value != 0) {
        __ mov(r1, r0);
        if (shift_value > 1) {
          // Shift the tagged value by shift_value - 1; together with the
          // extra left shift performed by SmiTag below this gives a total
          // shift of shift_value on the untagged value.
          __ mov(r1, Operand(r1, LSL, shift_value - 1));
        }
        // Convert int result to smi, checking that it is in int range.
        __ SmiTag(r1, SetCC);
        __ b(vs, &call_stub);  // Result does not fit in a smi.
        __ mov(r0, r1);  // Put result back into r0.
      }
      break;
    case Token::SAR:
      if (shift_value != 0) {
        // Arithmetic shift of the tagged value, then clear the tag bit
        // position, which may have received a bit from the payload.
        __ mov(r0, Operand(r0, ASR, shift_value));
        __ bic(r0, r0, Operand(kSmiTagMask));
      }
      break;
    case Token::SHR:
      // SHR produces an unsigned result which may not fit in a smi when
      // shifting by fewer than two bits; check before tagging.
      if (shift_value < 2) {
        // NOTE(review): r2 appears unused on this path -- possibly a
        // leftover; confirm whether it is needed (e.g. by patching code).
        __ mov(r2, Operand(shift_value));
        __ SmiUntag(r1, r0);
        if (shift_value != 0) {
          __ mov(r1, Operand(r1, LSR, shift_value));
        }
        // If either of the two most significant bits is set, the unsigned
        // result does not fit in the 31-bit smi payload (tagging would
        // overflow or flip the sign), so use the stub instead.
        __ tst(r1, Operand(0xc0000000));
        __ b(ne, &call_stub);
        __ SmiTag(r0, r1);  // result in r0.
      } else {
        // A logical shift by two or more clears the top bits, so the
        // result always fits in a smi.
        __ SmiUntag(r0);
        __ mov(r0, Operand(r0, LSR, shift_value));
        __ SmiTag(r0);
      }
      break;
    default:
      UNREACHABLE();
  }

  __ bind(&done);
  context()->Plug(r0);
}
1735
1736
// Emit an inlined bitwise operation (OR, XOR or AND) of the smi in r0 with
// the constant smi |value|. Since the smi tag is zero, bitwise operations
// can be performed directly on the tagged representations.
void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
                                             Token::Value op,
                                             OverwriteMode mode,
                                             Smi* value) {
  Label smi_case, done;

  JumpPatchSite patch_site(masm_);
  patch_site.EmitJumpIfSmi(r0, &smi_case);

  // The order of the arguments does not matter for bit-ops with a
  // constant operand.
  __ mov(r1, Operand(value));
  TypeRecordingBinaryOpStub stub(op, mode);
  EmitCallIC(stub.GetCode(), &patch_site);
  __ jmp(&done);

  // Smi case.
  __ bind(&smi_case);
  __ mov(r1, Operand(value));
  switch (op) {
    case Token::BIT_OR:
      __ orr(r0, r0, Operand(r1));
      break;
    case Token::BIT_XOR:
      __ eor(r0, r0, Operand(r1));
      break;
    case Token::BIT_AND:
      __ and_(r0, r0, Operand(r1));
      break;
    default:
      UNREACHABLE();
  }

  __ bind(&done);
  context()->Plug(r0);
}
1773
1774
1775 void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
1776 Token::Value op,
1777 OverwriteMode mode,
1778 bool left_is_constant_smi,
1779 Smi* value) {
1780 switch (op) {
1781 case Token::BIT_OR:
1782 case Token::BIT_XOR:
1783 case Token::BIT_AND:
1784 EmitConstantSmiBitOp(expr, op, mode, value);
1785 break;
1786 case Token::SHL:
1787 case Token::SAR:
1788 case Token::SHR:
1789 ASSERT(!left_is_constant_smi);
1790 EmitConstantSmiShiftOp(expr, op, mode, value);
1791 break;
1792 case Token::ADD:
1793 EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
1794 break;
1795 case Token::SUB:
1796 EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
1797 break;
1798 default:
1799 UNREACHABLE();
1800 }
1801 }
1802
1803
// Emit a binary operation with an inlined smi-smi fast path and a fallback
// to the shared TypeRecordingBinaryOpStub. Constant-operand cases are
// delegated to EmitConstantSmiBinaryOp. On entry the right operand is in r0
// and the left operand is on top of the stack; the result is left in r0.
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                              Token::Value op,
                                              OverwriteMode mode,
                                              Expression* left_expr,
                                              Expression* right_expr,
                                              ConstantOperand constant) {
  if (constant == kRightConstant) {
    Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
    EmitConstantSmiBinaryOp(expr, op, mode, false, value);
    return;
  } else if (constant == kLeftConstant) {
    Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
    EmitConstantSmiBinaryOp(expr, op, mode, true, value);
    return;
  }

  Label done, smi_case, stub_call;

  Register scratch1 = r2;
  Register scratch2 = r3;

  // Get the arguments.
  Register left = r1;
  Register right = r0;
  __ pop(left);

  // Perform combined smi check on both operands. ORing the values together
  // leaves the tag bit set iff either operand is not a smi (kSmiTag == 0).
  __ orr(scratch1, left, Operand(right));
  STATIC_ASSERT(kSmiTag == 0);
  JumpPatchSite patch_site(masm_);
  patch_site.EmitJumpIfSmi(scratch1, &smi_case);

  __ bind(&stub_call);
  TypeRecordingBinaryOpStub stub(op, mode);
  EmitCallIC(stub.GetCode(), &patch_site);
  __ jmp(&done);

  __ bind(&smi_case);
  // Smi case. This code works the same way as the smi-smi case in the type
  // recording binary operation stub, see
  // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
  switch (op) {
    case Token::SAR:
      // NOTE(review): this unconditional branch sends every SAR through the
      // stub, making the inlined code below unreachable. It looks like a
      // leftover disable -- confirm whether it is intentional.
      __ b(&stub_call);
      __ GetLeastBitsFromSmi(scratch1, right, 5);
      __ mov(right, Operand(left, ASR, scratch1));
      // Clear the tag bit position, which may have received a payload bit.
      __ bic(right, right, Operand(kSmiTagMask));
      break;
    case Token::SHL: {
      // NOTE(review): unconditional branch to the stub -- the inlined SHL
      // code below is unreachable; confirm whether this is intentional.
      __ b(&stub_call);
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
      // Check that the result fits in a smi: adding 0x40000000 makes the
      // sign bit reflect whether the value is outside the smi range.
      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
      __ b(mi, &stub_call);
      __ SmiTag(right, scratch1);
      break;
    }
    case Token::SHR: {
      // NOTE(review): unconditional branch to the stub -- the inlined SHR
      // code below is unreachable; confirm whether this is intentional.
      __ b(&stub_call);
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
      // The unsigned result only fits in a smi if the top two bits are
      // clear; otherwise tagging would overflow or flip the sign.
      __ tst(scratch1, Operand(0xc0000000));
      __ b(ne, &stub_call);
      __ SmiTag(right, scratch1);
      break;
    }
    case Token::ADD:
      __ add(scratch1, left, Operand(right), SetCC);
      __ b(vs, &stub_call);  // Overflow: fall back to the stub.
      __ mov(right, scratch1);
      break;
    case Token::SUB:
      __ sub(scratch1, left, Operand(right), SetCC);
      __ b(vs, &stub_call);  // Overflow: fall back to the stub.
      __ mov(right, scratch1);
      break;
    case Token::MUL: {
      // Untag one operand so the product of the tagged and untagged values
      // is the tagged product. smull gives the full 64-bit result.
      __ SmiUntag(ip, right);
      __ smull(scratch1, scratch2, left, ip);
      // The result fits in 32 bits iff the high word equals the sign
      // extension of the low word.
      __ mov(ip, Operand(scratch1, ASR, 31));
      __ cmp(ip, Operand(scratch2));
      __ b(ne, &stub_call);
      // Non-zero result: done. A zero result needs a sign check, since a
      // product of -0 cannot be represented as a smi.
      __ tst(scratch1, Operand(scratch1));
      __ mov(right, Operand(scratch1), LeaveCC, ne);
      __ b(ne, &done);
      // Product is zero: if the sum of the operands is negative, the true
      // result is -0, which must be handled by the stub.
      __ add(scratch2, right, Operand(left), SetCC);
      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
      __ b(mi, &stub_call);
      break;
    }
    case Token::BIT_OR:
      __ orr(right, left, Operand(right));
      break;
    case Token::BIT_AND:
      __ and_(right, left, Operand(right));
      break;
    case Token::BIT_XOR:
      __ eor(right, left, Operand(right));
      break;
    default:
      UNREACHABLE();
  }

  __ bind(&done);
  context()->Plug(r0);
}
1912
1913
// Emit a call to the shared binary operation stub for |op|, with no inlined
// fast path. The right operand is in r0 and the left operand is on top of
// the stack; the result is left in r0.
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                     OverwriteMode mode) {
  __ pop(r1);  // Left operand.
  TypeRecordingBinaryOpStub stub(op, mode);
  EmitCallIC(stub.GetCode(), NULL);  // NULL: no patch site to record.
  context()->Plug(r0);
}
1619 1921
1620 1922
1621 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { 1923 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
(...skipping 1658 matching lines...)
3280 case KEYED_PROPERTY: 3582 case KEYED_PROPERTY:
3281 __ str(r0, MemOperand(sp, 2 * kPointerSize)); 3583 __ str(r0, MemOperand(sp, 2 * kPointerSize));
3282 break; 3584 break;
3283 } 3585 }
3284 } 3586 }
3285 } 3587 }
3286 3588
3287 3589
3288 // Inline smi case if we are in a loop. 3590 // Inline smi case if we are in a loop.
3289 Label stub_call, done; 3591 Label stub_call, done;
3592 JumpPatchSite patch_site(masm_);
3593
3290 int count_value = expr->op() == Token::INC ? 1 : -1; 3594 int count_value = expr->op() == Token::INC ? 1 : -1;
3291 if (ShouldInlineSmiCase(expr->op())) { 3595 if (ShouldInlineSmiCase(expr->op())) {
3292 __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); 3596 __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
3293 __ b(vs, &stub_call); 3597 __ b(vs, &stub_call);
3294 // We could eliminate this smi check if we split the code at 3598 // We could eliminate this smi check if we split the code at
3295 // the first smi check before calling ToNumber. 3599 // the first smi check before calling ToNumber.
3296 __ JumpIfSmi(r0, &done); 3600 patch_site.EmitJumpIfSmi(r0, &done);
3601
3297 __ bind(&stub_call); 3602 __ bind(&stub_call);
3298 // Call stub. Undo operation first. 3603 // Call stub. Undo operation first.
3299 __ sub(r0, r0, Operand(Smi::FromInt(count_value))); 3604 __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
3300 } 3605 }
3301 __ mov(r1, Operand(Smi::FromInt(count_value))); 3606 __ mov(r1, Operand(Smi::FromInt(count_value)));
3302 3607
3303 // Record position before stub call. 3608 // Record position before stub call.
3304 SetSourcePosition(expr->position()); 3609 SetSourcePosition(expr->position());
3305 3610
3306 GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0); 3611 TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
3307 __ CallStub(&stub); 3612 EmitCallIC(stub.GetCode(), &patch_site);
3308 __ bind(&done); 3613 __ bind(&done);
3309 3614
3310 // Store the value returned in r0. 3615 // Store the value returned in r0.
3311 switch (assign_type) { 3616 switch (assign_type) {
3312 case VARIABLE: 3617 case VARIABLE:
3313 if (expr->is_postfix()) { 3618 if (expr->is_postfix()) {
3314 { EffectContext context(this); 3619 { EffectContext context(this);
3315 EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), 3620 EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
3316 Token::ASSIGN); 3621 Token::ASSIGN);
3317 PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); 3622 PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
(...skipping 394 matching lines...)
3712 __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value. 4017 __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
3713 __ add(pc, r1, Operand(masm_->CodeObject())); 4018 __ add(pc, r1, Operand(masm_->CodeObject()));
3714 } 4019 }
3715 4020
3716 4021
3717 #undef __ 4022 #undef __
3718 4023
3719 } } // namespace v8::internal 4024 } } // namespace v8::internal
3720 4025
3721 #endif // V8_TARGET_ARCH_ARM 4026 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « no previous file | src/ia32/full-codegen-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine