Chromium Code Reviews

Side by Side Diff: src/arm/stub-cache-arm.cc

Issue 4735003: Implement Math.floor stub on ARM. Uses VFP when available.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 1 month ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 1658 matching lines...)
1669 // Return the generated code.
1670 return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
1671 }
1672
1673
1674 MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
1675 JSObject* holder,
1676 JSGlobalPropertyCell* cell,
1677 JSFunction* function,
1678 String* name) {
1679 // TODO(872): implement this. 1679 // ----------- S t a t e -------------
1680 return Heap::undefined_value(); 1680 // -- r2 : function name
1681 // -- lr : return address
1682 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1683 // -- ...
1684 // -- sp[argc * 4] : receiver
1685 // -----------------------------------
1686
1687 const int argc = arguments().immediate();
1688
1689 // If the object is not a JSObject or we got an unexpected number of
1690 // arguments, bail out to the regular call.
1691 if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
1692
1693 Label miss, slow;
1694 GenerateNameCheck(name, &miss);
1695
1696 if (cell == NULL) {
1697 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
1698
1699 STATIC_ASSERT(kSmiTag == 0);
1700 __ BranchOnNotSmi(r1, &miss);
m.m.capewell 2010/11/16 15:05:17 Fixed this - should be BranchOnSmi().
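A minimal sketch of the corrected check, assuming the fix described in the comment above (BranchOnSmi is the ARM MacroAssembler helper that branches when the register holds a smi):

    // Bail out to the miss handler if the receiver is a smi: a smi can
    // never be a JSObject, and CheckPrototypes below expects a heap
    // object in r1.
    __ BranchOnSmi(r1, &miss);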
1701
1702 CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
1703 &miss);
1704 } else {
1705 ASSERT(cell->value() == function);
1706 GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
1707 GenerateLoadFunctionFromCell(cell, function, &miss);
1708 }
1709
1710 // Load the (only) argument into r0.
1711 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
1712
1713 // If the argument is a smi, just return.
1714 STATIC_ASSERT(kSmiTag == 0);
1715 __ tst(r0, Operand(kSmiTagMask));
1716 __ Drop(argc + 1, eq);
1717 __ Ret(eq);
1718
1719 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
1720
1721 if (CpuFeatures::IsSupported(VFP3)) {
1722 CpuFeatures::Scope scope_vfp3(VFP3);
1723
1724 Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
1725
1726 // If VFP3 is enabled, we use the FPU rounding mode RM (round towards
1727 // minus infinity).
1728
1729 // Load the HeapNumber value.
1730 // We need access to the value in the core registers, so we load it
1731 // with ldrd and move it to the FPU. This also saves a sub instruction
1732 // for adjusting the HeapNumber value address, since vldr expects an
1733 // offset that is a multiple of 4.
1734 __ Ldrd(r4, r5, MemOperand(r0, HeapNumber::kValueOffset - kHeapObjectTag));
Erik Corry 2010/11/10 13:57:19 Use FieldMemOperand for this.
m.m.capewell 2010/11/16 15:05:17 Done.
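For reference, the FieldMemOperand form would presumably read as below; FieldMemOperand folds the -kHeapObjectTag adjustment into the operand, so it is behaviourally identical to the line above:

    // Load both words of the HeapNumber value; FieldMemOperand accounts
    // for the heap object tag.
    __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));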
1735 __ vmov(d1, r4, r5);
1736
1737 // Back up FPSCR.
1738 __ vmrs(r3);
1739 // Set a custom FPSCR:
1740 // - Set the rounding mode to "round towards minus infinity"
1741 // (i.e. bits [23:22] = 0b10).
1742 // - Clear the VFP cumulative exception flags (bits [3:0]).
1743 // - Make sure the flush-to-zero mode control bit is unset (bit 24).
1744 __ bic(r9, r3, Operand((1 << 22) | 0xf | (1 << 24)));
Erik Corry 2010/11/10 13:57:19 This should be with named constants. For example
m.m.capewell 2010/11/16 15:05:17 Done.
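Purely as an illustration (the constant names below are assumptions, not necessarily the ones that landed), the FPSCR setup could be written with named constants along these lines; the same exception mask would also replace the raw 0xf in the vmrs/tst check further down:

    // Hypothetical named constants for the FPSCR fields touched here.
    static const uint32_t kVFPExceptionMask = 0xf;          // cumulative exception flags, bits [3:0]
    static const uint32_t kVFPRoundingModeMask = 3 << 22;   // rounding mode field, bits [23:22]
    static const uint32_t kVFPFlushToZeroMask = 1 << 24;    // flush-to-zero control, bit 24
    static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;  // RM encoding, 0b10

    // Clear the exception flags, rounding mode field and FZ bit, then
    // select round-towards-minus-infinity.
    __ bic(r9, r3, Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
    __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
    __ vmsr(r9);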
1745 __ orr(r9, r9, Operand(1 << 23));
1746 __ vmsr(r9);
1747
1748 // Convert the argument to an integer.
1749 __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
1750
1751 // Use vcvt latency to start checking for special cases.
1752 // Get the argument exponent and clear the sign bit.
1753 __ bic(r6, r5, Operand(HeapNumber::kSignMask));
1754 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
1755
1756 // Retrieve FPSCR and check for VFP exceptions.
1757 __ vmrs(r9);
1758 __ tst(r9, Operand(0xf));
Erik Corry 2010/11/10 13:57:19 Named constant here too.
m.m.capewell 2010/11/16 15:05:17 Done.
1759 __ b(&no_vfp_exception, eq);
1760
1761 // Check for NaN, Infinity, and -Infinity.
1762 // They are invariant through a Math.floor call, so just
1763 // return the original argument.
1764 __ sub(r7, r6, Operand(HeapNumber::kExponentMask
1765 >> HeapNumber::kMantissaBitsInTopWord), SetCC);
1766 __ b(&restore_fpscr_and_return, eq);
1767 // We had an overflow or underflow in the conversion. Check if we
1768 // have a big exponent.
1769 __ cmp(r7, Operand(HeapNumber::kMantissaBits));
1770 // If greater or equal, the argument is already round and in r0.
1771 __ b(&restore_fpscr_and_return, ge);
1772 __ b(&slow);
1773
1774 __ bind(&no_vfp_exception);
1775 // Move the result back to general purpose register r0.
1776 __ vmov(r0, s0);
1777 // Check if the result fits into a smi.
1778 __ add(r1, r0, Operand(0x40000000), SetCC);
1779 __ b(&wont_fit_smi, mi);
1780 // Tag the result.
1781 STATIC_ASSERT(kSmiTag == 0);
1782 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1783
1784 // Check for -0.
1785 __ cmp(r0, Operand(0));
1786 __ b(&restore_fpscr_and_return, ne);
1787 // r5 still holds the high word of the HeapNumber value (sign and exponent).
1788 __ tst(r5, Operand(HeapNumber::kSignMask));
1789 // If the HeapNumber is negative the result is -0, so reload its address
1790 // into r0 and return. Otherwise r0 already holds smi 0, so just return.
1791 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
1792
1793 __ bind(&restore_fpscr_and_return);
1794 // Restore FPSCR and return.
1795 __ vmsr(r3);
1796 __ Drop(argc + 1);
1797 __ Ret();
1798
1799 __ bind(&wont_fit_smi);
1800 __ bind(&slow);
1801 // Restore FPSCR and fall through to the slow case.
1802 __ vmsr(r3);
1803 } else {
1804 Label return_;
1805
Erik Corry 2010/11/10 13:57:19 This non-VFP code makes testing difficult and it o
m.m.capewell 2010/11/16 15:05:17 Done.
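The reviewer's comment above is truncated in this view; one plausible way to address it, sketched here purely as an assumption, is to drop the integer-only fallback entirely and refuse to compile the custom stub when VFP3 is unavailable, e.g. with an early bailout near the top of the function:

    // Without VFP3 there is no cheap way to round towards minus infinity,
    // so fall back to the generic call code instead of a custom stub.
    if (!CpuFeatures::IsSupported(VFP3)) return Heap::undefined_value();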
1806 __ Ldrd(r4, r5, MemOperand(r0, HeapNumber::kValueOffset - kHeapObjectTag));
1807 // Get the argument exponent and clear the sign bit.
1808 __ mov(r6, Operand(r5, LSR, HeapNumber::kMantissaBitsInTopWord));
1809 __ bic(r6, r6, Operand(1 << (HeapNumber::kNonMantissaBitsInTopWord - 1)));
1810 // Check for NaN, Infinity, and -Infinity.
1811 // They are invariant through a Math.floor call, so just
1812 // return the original argument.
1813 __ sub(r7, r6, Operand((1 << HeapNumber::kExponentBits) - 1), SetCC);
1814 __ b(&return_, eq);
1815 // Check for a high exponent, which implies an already round value.
1816 __ cmp(r7, Operand(HeapNumber::kMantissaBits));
1817 __ b(&slow, lt);
1818
1819 // Return.
1820 __ bind(&return_);
1821 __ Drop(argc + 1);
1822 __ Ret();
1823
1824 __ bind(&slow);
1825 }
1826
1827 // Tail call the full function. We do not have to patch the receiver
1828 // because the function makes no use of it.
1829 __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
1830
1831 __ bind(&miss);
1832 // r2: function name.
1833 MaybeObject* obj = GenerateMissBranch();
1834 if (obj->IsFailure()) return obj;
1835
1836 // Return the generated code.
1837 return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
1838 }
1839
1840
1841 MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
1842 JSObject* holder,
1843 JSGlobalPropertyCell* cell,
1844 JSFunction* function,
1845 String* name) {
1846 // ----------- S t a t e -------------
1847 // -- r2 : function name
(...skipping 1054 matching lines...)
2902 // Return the generated code.
2903 return GetCode();
2904 }
2905
2906
2907 #undef __
2908
2909 } } // namespace v8::internal
2910
2911 #endif // V8_TARGET_ARCH_ARM
