Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6826032: Remove code from the deprecated GenericBinaryOpStub. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 8 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 1762 matching lines...)
1773 // If length is not zero, "tos_" contains a non-zero value ==> true.
1774 __ Ret();
1775
1776 // Return 0 in "tos_" for false.
1777 __ bind(&false_result);
1778 __ mov(tos_, Operand(0, RelocInfo::NONE));
1779 __ Ret();
1780 }
1781
1782
1783 const char* GenericBinaryOpStub::GetName() {
1784 if (name_ != NULL) return name_;
1785 const int len = 100;
1786 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
1787 if (name_ == NULL) return "OOM";
1788 const char* op_name = Token::Name(op_);
1789 const char* overwrite_name;
1790 switch (mode_) {
1791 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1792 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1793 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1794 default: overwrite_name = "UnknownOverwrite"; break;
1795 }
1796
1797 OS::SNPrintF(Vector<char>(name_, len),
1798 "GenericBinaryOpStub_%s_%s%s_%s",
1799 op_name,
1800 overwrite_name,
1801 specialized_on_rhs_ ? "_ConstantRhs" : "",
1802 BinaryOpIC::GetName(runtime_operands_type_));
1803 return name_;
1804 }
1805
1806
1807 // We fall into this code if the operands were Smis, but the result was
1808 // not (e.g. overflow). We branch into this code (to the not_smi label) if
1809 // the operands were not both Smi. The operands are in r0 and r1. In order
1810 // to call the C-implemented binary fp operation routines we need to end up
1811 // with the double precision floating point operands in r0 and r1 (for the
1812 // value in r1) and r2 and r3 (for the value in r0).
1813 void GenericBinaryOpStub::HandleBinaryOpSlowCases(
1814 MacroAssembler* masm,
1815 Label* not_smi,
1816 Register lhs,
1817 Register rhs,
1818 const Builtins::JavaScript& builtin) {
1819 Label slow, slow_reverse, do_the_call;
1820 bool use_fp_registers =
1821 CpuFeatures::IsSupported(VFP3) &&
1822 Token::MOD != op_;
1823
1824 ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
1825 Register heap_number_map = r6;
1826
1827 if (ShouldGenerateSmiCode()) {
1828 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1829
1830 // Smi-smi case (overflow).
1831 // Since both are Smis there is no heap number to overwrite, so allocate.
1832 // The new heap number is in r5. r3 and r7 are scratch.
1833 __ AllocateHeapNumber(
1834 r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
1835
1836 // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
1837 // using registers d7 and d6 for the double values.
1838 if (CpuFeatures::IsSupported(VFP3)) {
1839 CpuFeatures::Scope scope(VFP3);
1840 __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
1841 __ vmov(s15, r7);
1842 __ vcvt_f64_s32(d7, s15);
1843 __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
1844 __ vmov(s13, r7);
1845 __ vcvt_f64_s32(d6, s13);
1846 if (!use_fp_registers) {
1847 __ vmov(r2, r3, d7);
1848 __ vmov(r0, r1, d6);
1849 }
1850 } else {
1851 // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
1852 __ mov(r7, Operand(rhs));
1853 ConvertToDoubleStub stub1(r3, r2, r7, r9);
1854 __ push(lr);
1855 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
1856 // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
1857 __ mov(r7, Operand(lhs));
1858 ConvertToDoubleStub stub2(r1, r0, r7, r9);
1859 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
1860 __ pop(lr);
1861 }
1862 __ jmp(&do_the_call); // Tail call. No return.
1863 }
1864
1865 // We branch here if at least one of r0 and r1 is not a Smi.
1866 __ bind(not_smi);
1867 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1868
1869 // After this point we have the left hand side in r1 and the right hand side
1870 // in r0.
1871 if (lhs.is(r0)) {
1872 __ Swap(r0, r1, ip);
1873 }
1874
1875 // The type transition also calculates the answer.
1876 bool generate_code_to_calculate_answer = true;
1877
1878 if (ShouldGenerateFPCode()) {
1879 // DIV has neither SmiSmi fast code nor specialized slow code.
1880 // So don't try to patch a DIV Stub.
1881 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
1882 switch (op_) {
1883 case Token::ADD:
1884 case Token::SUB:
1885 case Token::MUL:
1886 GenerateTypeTransition(masm); // Tail call.
1887 generate_code_to_calculate_answer = false;
1888 break;
1889
1890 case Token::DIV:
1891 // DIV has neither SmiSmi fast code nor specialized slow code.
1892 // So don't try to patch a DIV Stub.
1893 break;
1894
1895 default:
1896 break;
1897 }
1898 }
1899
1900 if (generate_code_to_calculate_answer) {
1901 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
1902 if (mode_ == NO_OVERWRITE) {
1903 // In the case where there is no chance of an overwritable float we may
1904 // as well do the allocation immediately while r0 and r1 are untouched.
1905 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
1906 }
1907
1908 // Move r0 to a double in r2-r3.
1909 __ tst(r0, Operand(kSmiTagMask));
1910 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
1911 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
1912 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1913 __ cmp(r4, heap_number_map);
1914 __ b(ne, &slow);
1915 if (mode_ == OVERWRITE_RIGHT) {
1916 __ mov(r5, Operand(r0)); // Overwrite this heap number.
1917 }
1918 if (use_fp_registers) {
1919 CpuFeatures::Scope scope(VFP3);
1920 // Load the double from tagged HeapNumber r0 to d7.
1921 __ sub(r7, r0, Operand(kHeapObjectTag));
1922 __ vldr(d7, r7, HeapNumber::kValueOffset);
1923 } else {
1924 // Calling convention says that second double is in r2 and r3.
1925 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
1926 }
1927 __ jmp(&finished_loading_r0);
1928 __ bind(&r0_is_smi);
1929 if (mode_ == OVERWRITE_RIGHT) {
1930 // We can't overwrite a Smi so get address of new heap number into r5.
1931 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
1932 }
1933
1934 if (CpuFeatures::IsSupported(VFP3)) {
1935 CpuFeatures::Scope scope(VFP3);
1936 // Convert smi in r0 to double in d7.
1937 __ mov(r7, Operand(r0, ASR, kSmiTagSize));
1938 __ vmov(s15, r7);
1939 __ vcvt_f64_s32(d7, s15);
1940 if (!use_fp_registers) {
1941 __ vmov(r2, r3, d7);
1942 }
1943 } else {
1944 // Write Smi from r0 to r3 and r2 in double format.
1945 __ mov(r7, Operand(r0));
1946 ConvertToDoubleStub stub3(r3, r2, r7, r4);
1947 __ push(lr);
1948 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
1949 __ pop(lr);
1950 }
1951
1952 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
1953 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
1954 Label r1_is_not_smi;
1955 if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
1956 HasSmiSmiFastPath()) {
1957 __ tst(r1, Operand(kSmiTagMask));
1958 __ b(ne, &r1_is_not_smi);
1959 GenerateTypeTransition(masm); // Tail call.
1960 }
1961
1962 __ bind(&finished_loading_r0);
1963
1964 // Move r1 to a double in r0-r1.
1965 __ tst(r1, Operand(kSmiTagMask));
1966 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
1967 __ bind(&r1_is_not_smi);
1968 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
1969 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1970 __ cmp(r4, heap_number_map);
1971 __ b(ne, &slow);
1972 if (mode_ == OVERWRITE_LEFT) {
1973 __ mov(r5, Operand(r1)); // Overwrite this heap number.
1974 }
1975 if (use_fp_registers) {
1976 CpuFeatures::Scope scope(VFP3);
1977 // Load the double from tagged HeapNumber r1 to d6.
1978 __ sub(r7, r1, Operand(kHeapObjectTag));
1979 __ vldr(d6, r7, HeapNumber::kValueOffset);
1980 } else {
1981 // Calling convention says that first double is in r0 and r1.
1982 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
1983 }
1984 __ jmp(&finished_loading_r1);
1985 __ bind(&r1_is_smi);
1986 if (mode_ == OVERWRITE_LEFT) {
1987 // We can't overwrite a Smi so get address of new heap number into r5.
1988 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
1989 }
1990
1991 if (CpuFeatures::IsSupported(VFP3)) {
1992 CpuFeatures::Scope scope(VFP3);
1993 // Convert smi in r1 to double in d6.
1994 __ mov(r7, Operand(r1, ASR, kSmiTagSize));
1995 __ vmov(s13, r7);
1996 __ vcvt_f64_s32(d6, s13);
1997 if (!use_fp_registers) {
1998 __ vmov(r0, r1, d6);
1999 }
2000 } else {
2001 // Write Smi from r1 to r1 and r0 in double format.
2002 __ mov(r7, Operand(r1));
2003 ConvertToDoubleStub stub4(r1, r0, r7, r9);
2004 __ push(lr);
2005 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
2006 __ pop(lr);
2007 }
2008
2009 __ bind(&finished_loading_r1);
2010 }
2011
2012 if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
2013 __ bind(&do_the_call);
2014 // If we are inlining the operation using VFP3 instructions for
2015 // add, subtract, multiply, or divide, the arguments are in d6 and d7.
2016 if (use_fp_registers) {
2017 CpuFeatures::Scope scope(VFP3);
2018 // ARMv7 VFP3 instructions to implement
2019 // double precision add, subtract, multiply, and divide.
2020
2021 if (Token::MUL == op_) {
2022 __ vmul(d5, d6, d7);
2023 } else if (Token::DIV == op_) {
2024 __ vdiv(d5, d6, d7);
2025 } else if (Token::ADD == op_) {
2026 __ vadd(d5, d6, d7);
2027 } else if (Token::SUB == op_) {
2028 __ vsub(d5, d6, d7);
2029 } else {
2030 UNREACHABLE();
2031 }
2032 __ sub(r0, r5, Operand(kHeapObjectTag));
2033 __ vstr(d5, r0, HeapNumber::kValueOffset);
2034 __ add(r0, r0, Operand(kHeapObjectTag));
2035 __ Ret();
2036 } else {
2037 // If we did not inline the operation, then the arguments are in:
2038 // r0: Left value (least significant part of mantissa).
2039 // r1: Left value (sign, exponent, top of mantissa).
2040 // r2: Right value (least significant part of mantissa).
2041 // r3: Right value (sign, exponent, top of mantissa).
2042 // r5: Address of heap number for result.
2043
2044 __ push(lr); // For later.
2045 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
2046 // Call C routine that may not cause GC or other trouble. r5 is callee
2047 // save.
2048 __ CallCFunction(
2049 ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
2050 // Store answer in the overwritable heap number.
2051 #if !defined(USE_ARM_EABI)
2052 // Double returned in fp coprocessor register 0 and 1, encoded as
2053 // register cr8. Offsets must be divisible by 4 for coprocessor so we
2054 // need to subtract the tag from r5.
2055 __ sub(r4, r5, Operand(kHeapObjectTag));
2056 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
2057 #else
2058 // Double returned in registers 0 and 1.
2059 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
2060 #endif
2061 __ mov(r0, Operand(r5));
2062 // And we are done.
2063 __ pop(pc);
2064 }
2065 }
2066 }
2067
2068 if (!generate_code_to_calculate_answer &&
2069 !slow_reverse.is_linked() &&
2070 !slow.is_linked()) {
2071 return;
2072 }
2073
2074 if (lhs.is(r0)) {
2075 __ b(&slow);
2076 __ bind(&slow_reverse);
2077 __ Swap(r0, r1, ip);
2078 }
2079
2080 heap_number_map = no_reg; // Don't use this any more from here on.
2081
2082 // We jump to here if something goes wrong (one param is not a number of any
2083 // sort or new-space allocation fails).
2084 __ bind(&slow);
2085
2086 // Push arguments to the stack
2087 __ Push(r1, r0);
2088
2089 if (Token::ADD == op_) {
2090 // Test for string arguments before calling runtime.
2091 // r1 : first argument
2092 // r0 : second argument
2093 // sp[0] : second argument
2094 // sp[4] : first argument
2095
2096 Label not_strings, not_string1, string1, string1_smi2;
2097 __ tst(r1, Operand(kSmiTagMask));
2098 __ b(eq, &not_string1);
2099 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
2100 __ b(ge, &not_string1);
2101
2102 // First argument is a string, test the second.
2103 __ tst(r0, Operand(kSmiTagMask));
2104 __ b(eq, &string1_smi2);
2105 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
2106 __ b(ge, &string1);
2107
2108 // First and second argument are strings.
2109 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2110 __ TailCallStub(&string_add_stub);
2111
2112 __ bind(&string1_smi2);
2113 // First argument is a string, second is a smi. Try to lookup the number
2114 // string for the smi in the number string cache.
2115 NumberToStringStub::GenerateLookupNumberStringCache(
2116 masm, r0, r2, r4, r5, r6, true, &string1);
2117
2118 // Replace second argument on stack and tailcall string add stub to make
2119 // the result.
2120 __ str(r2, MemOperand(sp, 0));
2121 __ TailCallStub(&string_add_stub);
2122
2123 // Only first argument is a string.
2124 __ bind(&string1);
2125 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
2126
2127 // First argument was not a string, test second.
2128 __ bind(&not_string1);
2129 __ tst(r0, Operand(kSmiTagMask));
2130 __ b(eq, &not_strings);
2131 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
2132 __ b(ge, &not_strings);
2133
2134 // Only second argument is a string.
2135 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
2136
2137 __ bind(&not_strings);
2138 }
2139
2140 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
2141 }
2142
2143
2144 // For bitwise ops where the inputs are not both Smis, we try here to determine
2145 // whether both inputs are either Smis or at least heap numbers that can be
2146 // represented by a 32 bit signed value. We truncate towards zero as required
2147 // by the ES spec. If this is the case we do the bitwise op and see if the
2148 // result is a Smi. If so, great, otherwise we try to find a heap number to
2149 // write the answer into (either by allocating or by overwriting).
2150 // On entry the operands are in lhs and rhs. On exit the answer is in r0.
2151 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
2152 Register lhs,
2153 Register rhs) {
2154 Label slow, result_not_a_smi;
2155 Label rhs_is_smi, lhs_is_smi;
2156 Label done_checking_rhs, done_checking_lhs;
2157
2158 Register heap_number_map = r6;
2159 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2160
2161 __ tst(lhs, Operand(kSmiTagMask));
2162 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
2163 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
2164 __ cmp(r4, heap_number_map);
2165 __ b(ne, &slow);
2166 __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
2167 __ jmp(&done_checking_lhs);
2168 __ bind(&lhs_is_smi);
2169 __ mov(r3, Operand(lhs, ASR, 1));
2170 __ bind(&done_checking_lhs);
2171
2172 __ tst(rhs, Operand(kSmiTagMask));
2173 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
2174 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
2175 __ cmp(r4, heap_number_map);
2176 __ b(ne, &slow);
2177 __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
2178 __ jmp(&done_checking_rhs);
2179 __ bind(&rhs_is_smi);
2180 __ mov(r2, Operand(rhs, ASR, 1));
2181 __ bind(&done_checking_rhs);
2182
2183 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
2184
2185 // r0 and r1: Original operands (Smi or heap numbers).
2186 // r2 and r3: Signed int32 operands.
2187 switch (op_) {
2188 case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
2189 case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
2190 case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
2191 case Token::SAR:
2192 // Use only the 5 least significant bits of the shift count.
2193 __ and_(r2, r2, Operand(0x1f));
2194 __ mov(r2, Operand(r3, ASR, r2));
2195 break;
2196 case Token::SHR:
2197 // Use only the 5 least significant bits of the shift count.
2198 __ and_(r2, r2, Operand(0x1f));
2199 __ mov(r2, Operand(r3, LSR, r2), SetCC);
2200 // SHR is special because it is required to produce a positive answer.
2201 // The code below for writing into heap numbers isn't capable of writing
2202 // the register as an unsigned int so we go to slow case if we hit this
2203 // case.
2204 if (CpuFeatures::IsSupported(VFP3)) {
2205 __ b(mi, &result_not_a_smi);
2206 } else {
2207 __ b(mi, &slow);
2208 }
2209 break;
2210 case Token::SHL:
2211 // Use only the 5 least significant bits of the shift count.
2212 __ and_(r2, r2, Operand(0x1f));
2213 __ mov(r2, Operand(r3, LSL, r2));
2214 break;
2215 default: UNREACHABLE();
2216 }
2217 // Check that the *signed* result fits in a Smi.
2218 __ add(r3, r2, Operand(0x40000000), SetCC);
2219 __ b(mi, &result_not_a_smi);
2220 __ mov(r0, Operand(r2, LSL, kSmiTagSize));
2221 __ Ret();
2222
2223 Label have_to_allocate, got_a_heap_number;
2224 __ bind(&result_not_a_smi);
2225 switch (mode_) {
2226 case OVERWRITE_RIGHT: {
2227 __ tst(rhs, Operand(kSmiTagMask));
2228 __ b(eq, &have_to_allocate);
2229 __ mov(r5, Operand(rhs));
2230 break;
2231 }
2232 case OVERWRITE_LEFT: {
2233 __ tst(lhs, Operand(kSmiTagMask));
2234 __ b(eq, &have_to_allocate);
2235 __ mov(r5, Operand(lhs));
2236 break;
2237 }
2238 case NO_OVERWRITE: {
2239 // Get a new heap number in r5. r4 and r7 are scratch.
2240 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
2241 }
2242 default: break;
2243 }
2244 __ bind(&got_a_heap_number);
2245 // r2: Answer as signed int32.
2246 // r5: Heap number to write answer into.
2247
2248 // Nothing can go wrong now, so move the heap number to r0, which is the
2249 // result.
2250 __ mov(r0, Operand(r5));
2251
2252 if (CpuFeatures::IsSupported(VFP3)) {
2253 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
2254 CpuFeatures::Scope scope(VFP3);
2255 __ vmov(s0, r2);
2256 if (op_ == Token::SHR) {
2257 __ vcvt_f64_u32(d0, s0);
2258 } else {
2259 __ vcvt_f64_s32(d0, s0);
2260 }
2261 __ sub(r3, r0, Operand(kHeapObjectTag));
2262 __ vstr(d0, r3, HeapNumber::kValueOffset);
2263 __ Ret();
2264 } else {
2265 // Tail call that writes the int32 in r2 to the heap number in r0, using
2266 // r3 as scratch. r0 is preserved and returned.
2267 WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2268 __ TailCallStub(&stub);
2269 }
2270
2271 if (mode_ != NO_OVERWRITE) {
2272 __ bind(&have_to_allocate);
2273 // Get a new heap number in r5. r4 and r7 are scratch.
2274 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
2275 __ jmp(&got_a_heap_number);
2276 }
2277
2278 // If all else failed then we go to the runtime system.
2279 __ bind(&slow);
2280 __ Push(lhs, rhs); // Restore stack.
2281 switch (op_) {
2282 case Token::BIT_OR:
2283 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
2284 break;
2285 case Token::BIT_AND:
2286 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
2287 break;
2288 case Token::BIT_XOR:
2289 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
2290 break;
2291 case Token::SAR:
2292 __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
2293 break;
2294 case Token::SHR:
2295 __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
2296 break;
2297 case Token::SHL:
2298 __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
2299 break;
2300 default:
2301 UNREACHABLE();
2302 }
2303 }
2304
2305
2306
2307
2308 // This function multiplies by a small known int. It takes the known int in a
2309 // register for the cases where it doesn't have a good shift-and-add trick, and
2310 // may deliver a result that still needs shifting.
2311 static void MultiplyByKnownIntInStub(
2312 MacroAssembler* masm,
2313 Register result,
2314 Register source,
2315 Register known_int_register, // Smi tagged.
2316 int known_int,
2317 int* required_shift) { // Including Smi tag shift
2318 switch (known_int) {
2319 case 3:
2320 __ add(result, source, Operand(source, LSL, 1));
2321 *required_shift = 1;
2322 break;
2323 case 5:
2324 __ add(result, source, Operand(source, LSL, 2));
2325 *required_shift = 1;
2326 break;
2327 case 6:
2328 __ add(result, source, Operand(source, LSL, 1));
2329 *required_shift = 2;
2330 break;
2331 case 7:
2332 __ rsb(result, source, Operand(source, LSL, 3));
2333 *required_shift = 1;
2334 break;
2335 case 9:
2336 __ add(result, source, Operand(source, LSL, 3));
2337 *required_shift = 1;
2338 break;
2339 case 10:
2340 __ add(result, source, Operand(source, LSL, 2));
2341 *required_shift = 2;
2342 break;
2343 default:
2344 ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
2345 __ mul(result, source, known_int_register);
2346 *required_shift = 0;
2347 }
2348 }
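
As a rough host-side illustration of the shift-and-add decompositions above (a sketch assuming kSmiTagSize == 1, so the returned shift already folds in the Smi tag; the helper name is hypothetical and not part of the patch):

static unsigned MultiplyByKnownIntModel(unsigned source, int known_int) {
  unsigned result;
  int required_shift;  // Includes the Smi tag shift of 1.
  switch (known_int) {
    case 3:  result = source + (source << 1); required_shift = 1; break;  // 3x.
    case 10: result = source + (source << 2); required_shift = 2; break;  // 5x.
    default: return source * static_cast<unsigned>(known_int) * 2;  // Smi-tagged product.
  }
  return result << required_shift;  // e.g. 10: (5 * source) << 2 == 2 * (10 * source).
}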
2349
2350
2351 // This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
2352 // trick. See http://en.wikipedia.org/wiki/Divisibility_rule
2353 // Takes the sum of the digits in base (mask + 1) repeatedly until we have a
2354 // number from 0 to mask. On exit the 'eq' condition flags are set if the
2355 // answer is exactly the mask.
2356 void IntegerModStub::DigitSum(MacroAssembler* masm,
2357 Register lhs,
2358 int mask,
2359 int shift,
2360 Label* entry) {
2361 ASSERT(mask > 0);
2362 ASSERT(mask <= 0xff); // Ensures the mask fits in an instruction immediate, so ip is not needed.
2363 Label loop;
2364 __ bind(&loop);
2365 __ and_(ip, lhs, Operand(mask));
2366 __ add(lhs, ip, Operand(lhs, LSR, shift));
2367 __ bind(entry);
2368 __ cmp(lhs, Operand(mask));
2369 __ b(gt, &loop);
2370 }
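
For intuition, a minimal host-side sketch of the same loop for mask == 3 and shift == 2, i.e. reduction modulo 3 (illustrative only; the function name is made up). Base-4 digit sums preserve the value modulo 3 because 4 == 1 (mod 3).

static int SumOfDigitsMod3(unsigned n) {
  while (n > 3) {
    n = (n & 3) + (n >> 2);  // Same step as the and_/add pair above.
  }
  return (n == 3) ? 0 : static_cast<int>(n);  // Callers subtract 3 on 'eq'.
}

For example 25 == 0b11001 folds to 1 + 6 == 7, then 3 + 1 == 4, then 0 + 1 == 1, which is 25 % 3.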
2371
2372
2373 void IntegerModStub::DigitSum(MacroAssembler* masm,
2374 Register lhs,
2375 Register scratch,
2376 int mask,
2377 int shift1,
2378 int shift2,
2379 Label* entry) {
2380 ASSERT(mask > 0);
2381 ASSERT(mask <= 0xff); // Ensures the mask fits in an instruction immediate, so ip is not needed.
2382 Label loop;
2383 __ bind(&loop);
2384 __ bic(scratch, lhs, Operand(mask));
2385 __ and_(ip, lhs, Operand(mask));
2386 __ add(lhs, ip, Operand(lhs, LSR, shift1));
2387 __ add(lhs, lhs, Operand(scratch, LSR, shift2));
2388 __ bind(entry);
2389 __ cmp(lhs, Operand(mask));
2390 __ b(gt, &loop);
2391 }
2392
2393
2394 // Splits the number into two halves (bottom half has shift bits). The top
2395 // half is subtracted from the bottom half. If the result is negative then
2396 // rhs is added.
2397 void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
2398 Register lhs,
2399 int shift,
2400 int rhs) {
2401 int mask = (1 << shift) - 1;
2402 __ and_(ip, lhs, Operand(mask));
2403 __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
2404 __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
2405 }
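
A small host-side sketch of the shift == 2, rhs == 5 instance used after the mod-15 digit sum below (hypothetical helper, illustrative only): since 4 == -1 (mod 5), x == (x & 3) - (x >> 2) (mod 5) for x in [0, 15], and the conditional add mirrors the LeaveCC/mi add above.

static int ModGetInRangeBySubtractionModel5(int x) {  // x in [0, 15].
  int r = (x & 3) - (x >> 2);
  return (r < 0) ? r + 5 : r;
}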
2406
2407
2408 void IntegerModStub::ModReduce(MacroAssembler* masm,
2409 Register lhs,
2410 int max,
2411 int denominator) {
2412 int limit = denominator;
2413 while (limit * 2 <= max) limit *= 2;
2414 while (limit >= denominator) {
2415 __ cmp(lhs, Operand(limit));
2416 __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
2417 limit >>= 1;
2418 }
2419 }
2420
2421
2422 void IntegerModStub::ModAnswer(MacroAssembler* masm,
2423 Register result,
2424 Register shift_distance,
2425 Register mask_bits,
2426 Register sum_of_digits) {
2427 __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
2428 __ Ret();
2429 }
2430
2431
2432 // See comment for class.
2433 void IntegerModStub::Generate(MacroAssembler* masm) {
2434 __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
2435 __ bic(odd_number_, odd_number_, Operand(1));
2436 __ mov(odd_number_, Operand(odd_number_, LSL, 1));
2437 // We now have (odd_number_ - 1) * 2 in the register.
2438 // Build a switch out of branches instead of data because it avoids
2439 // having to teach the assembler about intra-code-object pointers
2440 // that are not in relative branch instructions.
2441 Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
2442 Label mod21, mod23, mod25;
2443 { Assembler::BlockConstPoolScope block_const_pool(masm);
2444 __ add(pc, pc, Operand(odd_number_));
2445 // When you read pc it is always 8 ahead, but when you write it you always
2446 // write the actual value. So we put in two nops to take up the slack.
2447 __ nop();
2448 __ nop();
2449 __ b(&mod3);
2450 __ b(&mod5);
2451 __ b(&mod7);
2452 __ b(&mod9);
2453 __ b(&mod11);
2454 __ b(&mod13);
2455 __ b(&mod15);
2456 __ b(&mod17);
2457 __ b(&mod19);
2458 __ b(&mod21);
2459 __ b(&mod23);
2460 __ b(&mod25);
2461 }
2462
2463 // For each denominator we find a multiple that is almost only ones
2464 // when expressed in binary. Then we do the sum-of-digits trick for
2465 // that number. If the multiple is not 1 then we have to do a little
2466 // more work afterwards to get the answer into the range from 0 to
2467 // denominator-1.
2468 DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
2469 __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
2470 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2471
2472 DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
2473 ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
2474 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2475
2476 DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
2477 __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
2478 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2479
2480 DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
2481 ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
2482 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2483
2484 DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
2485 ModReduce(masm, lhs_, 0x3f, 11);
2486 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2487
2488 DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
2489 ModReduce(masm, lhs_, 0xff, 13);
2490 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2491
2492 DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
2493 __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
2494 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2495
2496 DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
2497 ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
2498 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2499
2500 DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
2501 ModReduce(masm, lhs_, 0xff, 19);
2502 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2503
2504 DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
2505 ModReduce(masm, lhs_, 0x3f, 21);
2506 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2507
2508 DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
2509 ModReduce(masm, lhs_, 0xff, 23);
2510 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2511
2512 DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
2513 ModReduce(masm, lhs_, 0x7f, 25);
2514 ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2515 }
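
As a worked example of the scheme above, the mod-11 path relies on 5 * 11 == 55 == 0b110111: folding the bits above the low six as q + 8q preserves the value modulo 55 because 64 == 9 (mod 55), and the final ModReduce step brings the result into the range 0 to 10. A host-side sketch under the assumption of an untagged, non-negative input (the function name is made up):

static unsigned Mod11Model(unsigned x) {
  while (x > 0x3f) {
    unsigned high = x & ~0x3fu;                // Bits above the low six (bic).
    x = (x & 0x3f) + (x >> 6) + (high >> 3);   // r + q + 8q == r + 9q (mod 55).
  }
  for (unsigned limit = 44; limit >= 11; limit >>= 1) {  // ModReduce(0x3f, 11).
    if (x >= limit) x -= limit;
  }
  return x;  // x % 11.
}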
2516
2517
2518 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
2519 // lhs_ : x
2520 // rhs_ : y
2521 // r0 : result
2522
2523 Register result = r0;
2524 Register lhs = lhs_;
2525 Register rhs = rhs_;
2526
2527 // This code can't cope with other register allocations yet.
2528 ASSERT(result.is(r0) &&
2529 ((lhs.is(r0) && rhs.is(r1)) ||
2530 (lhs.is(r1) && rhs.is(r0))));
2531
2532 Register smi_test_reg = r7;
2533 Register scratch = r9;
2534
2535 // All ops need to know whether we are dealing with two Smis. Set up
2536 // smi_test_reg to tell us that.
2537 if (ShouldGenerateSmiCode()) {
2538 __ orr(smi_test_reg, lhs, Operand(rhs));
2539 }
2540
2541 switch (op_) {
2542 case Token::ADD: {
2543 Label not_smi;
2544 // Fast path.
2545 if (ShouldGenerateSmiCode()) {
2546 STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
2547 __ tst(smi_test_reg, Operand(kSmiTagMask));
2548 __ b(ne, &not_smi);
2549 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
2550 // Return if no overflow.
2551 __ Ret(vc);
2552 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
2553 }
2554 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
2555 break;
2556 }
2557
2558 case Token::SUB: {
2559 Label not_smi;
2560 // Fast path.
2561 if (ShouldGenerateSmiCode()) {
2562 STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
2563 __ tst(smi_test_reg, Operand(kSmiTagMask));
2564 __ b(ne, &not_smi);
2565 if (lhs.is(r1)) {
2566 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
2567 // Return if no overflow.
2568 __ Ret(vc);
2569 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
2570 } else {
2571 __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
2572 // Return if no overflow.
2573 __ Ret(vc);
2574 __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
2575 }
2576 }
2577 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
2578 break;
2579 }
2580
2581 case Token::MUL: {
2582 Label not_smi, slow;
2583 if (ShouldGenerateSmiCode()) {
2584 STATIC_ASSERT(kSmiTag == 0); // adjust code below
2585 __ tst(smi_test_reg, Operand(kSmiTagMask));
2586 Register scratch2 = smi_test_reg;
2587 smi_test_reg = no_reg;
2588 __ b(ne, &not_smi);
2589 // Remove tag from one operand (but keep sign), so that result is Smi.
2590 __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
2591 // Do multiplication
2592 // scratch = lower 32 bits of ip * lhs.
2593 __ smull(scratch, scratch2, lhs, ip);
2594 // Go slow on overflows (overflow bit is not set).
2595 __ mov(ip, Operand(scratch, ASR, 31));
2596 // No overflow if higher 33 bits are identical.
2597 __ cmp(ip, Operand(scratch2));
2598 __ b(ne, &slow);
2599 // Go slow on zero result to handle -0.
2600 __ tst(scratch, Operand(scratch));
2601 __ mov(result, Operand(scratch), LeaveCC, ne);
2602 __ Ret(ne);
2603 // We need -0 if we were multiplying a negative number with 0 to get 0.
2604 // We know one of them was zero.
2605 __ add(scratch2, rhs, Operand(lhs), SetCC);
2606 __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
2607 __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
2608 // Slow case. We fall through here if we multiplied a negative number
2609 // with 0, because that would mean we should produce -0.
2610 __ bind(&slow);
2611 }
2612 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
2613 break;
2614 }
2615
2616 case Token::DIV:
2617 case Token::MOD: {
2618 Label not_smi;
2619 if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
2620 Label lhs_is_unsuitable;
2621 __ JumpIfNotSmi(lhs, &not_smi);
2622 if (IsPowerOf2(constant_rhs_)) {
2623 if (op_ == Token::MOD) {
2624 __ and_(rhs,
2625 lhs,
2626 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
2627 SetCC);
2628 // We now have the answer, but if the input was negative we also
2629 // have the sign bit. Our work is done if the result is
2630 // positive or zero:
2631 if (!rhs.is(r0)) {
2632 __ mov(r0, rhs, LeaveCC, pl);
2633 }
2634 __ Ret(pl);
2635 // A mod of a negative left hand side must return a negative number.
2636 // Unfortunately if the answer is 0 then we must return -0. And we
2637 // already optimistically trashed rhs so we may need to restore it.
2638 __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
2639 // Next two instructions are conditional on the answer being -0.
2640 __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
2641 __ b(eq, &lhs_is_unsuitable);
2642 // We need to subtract the divisor. E.g. -3 % 4 == -3.
2643 __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
2644 } else {
2645 ASSERT(op_ == Token::DIV);
2646 __ tst(lhs,
2647 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
2648 __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
2649 int shift = 0;
2650 int d = constant_rhs_;
2651 while ((d & 1) == 0) {
2652 d >>= 1;
2653 shift++;
2654 }
2655 __ mov(r0, Operand(lhs, LSR, shift));
2656 __ bic(r0, r0, Operand(kSmiTagMask));
2657 }
2658 } else {
2659 // Not a power of 2.
2660 __ tst(lhs, Operand(0x80000000u));
2661 __ b(ne, &lhs_is_unsuitable);
2662 // Find a fixed point reciprocal of the divisor so we can divide by
2663 // multiplying.
2664 double divisor = 1.0 / constant_rhs_;
2665 int shift = 32;
2666 double scale = 4294967296.0; // 1 << 32.
2667 uint32_t mul;
2668 // Maximise the precision of the fixed point reciprocal.
2669 while (true) {
2670 mul = static_cast<uint32_t>(scale * divisor);
2671 if (mul >= 0x7fffffff) break;
2672 scale *= 2.0;
2673 shift++;
2674 }
2675 mul++;
2676 Register scratch2 = smi_test_reg;
2677 smi_test_reg = no_reg;
2678 __ mov(scratch2, Operand(mul));
2679 __ umull(scratch, scratch2, scratch2, lhs);
2680 __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
2681 // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
2682 // rhs is still the known rhs. rhs is Smi tagged.
2683 // lhs is still the unknown lhs. lhs is Smi tagged.
2684 int required_scratch_shift = 0; // Including the Smi tag shift of 1.
2685 // scratch = scratch2 * rhs.
2686 MultiplyByKnownIntInStub(masm,
2687 scratch,
2688 scratch2,
2689 rhs,
2690 constant_rhs_,
2691 &required_scratch_shift);
2692 // scratch << required_scratch_shift is now the Smi tagged rhs *
2693 // (lhs / rhs) where / indicates integer division.
2694 if (op_ == Token::DIV) {
2695 __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
2696 __ b(ne, &lhs_is_unsuitable); // There was a remainder.
2697 __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
2698 } else {
2699 ASSERT(op_ == Token::MOD);
2700 __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
2701 }
2702 }
2703 __ Ret();
2704 __ bind(&lhs_is_unsuitable);
2705 } else if (op_ == Token::MOD &&
2706 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
2707 runtime_operands_type_ != BinaryOpIC::STRINGS) {
2708 // Do generate a bit of smi code for modulus even though the default for
2709 // modulus is not to do so: the ARM processor has no coprocessor support
2710 // for modulus, so checking for smis makes sense. We can handle
2711 // 1 to 25 times any power of 2. This covers over half the numbers from
2712 // 1 to 100, including all of the first 25. (Actually the constants < 10
2713 // are handled above by reciprocal multiplication. We only get here for
2714 // those cases if the right hand side is not a constant, or for cases
2715 // like 192, which is 3*2^6 and ends up in the 3 case in the integer mod
2716 // stub.)
2717 Label slow;
2718 Label not_power_of_2;
2719 ASSERT(!ShouldGenerateSmiCode());
2720 STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
2721 // Check for two positive smis.
2722 __ orr(smi_test_reg, lhs, Operand(rhs));
2723 __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
2724 __ b(ne, &slow);
2725 // Check that rhs is a power of two and not zero.
2726 Register mask_bits = r3;
2727 __ sub(scratch, rhs, Operand(1), SetCC);
2728 __ b(mi, &slow);
2729 __ and_(mask_bits, rhs, Operand(scratch), SetCC);
2730 __ b(ne, &not_power_of_2);
2731 // Calculate power of two modulus.
2732 __ and_(result, lhs, Operand(scratch));
2733 __ Ret();
2734
2735 __ bind(&not_power_of_2);
2736 __ eor(scratch, scratch, Operand(mask_bits));
2737 // At least two bits are set in the modulus. The high one(s) are in
2738 // mask_bits and the low one is scratch + 1.
2739 __ and_(mask_bits, scratch, Operand(lhs));
2740 Register shift_distance = scratch;
2741 scratch = no_reg;
2742
2743 // The rhs consists of a power of 2 multiplied by some odd number.
2744 // The power-of-2 part we handle by putting the corresponding bits
2745 // from the lhs in the mask_bits register, and the power in the
2746 // shift_distance register. Shift distance is never 0 due to Smi
2747 // tagging.
2748 __ CountLeadingZeros(r4, shift_distance, shift_distance);
2749 __ rsb(shift_distance, r4, Operand(32));
2750
2751 // Now we need to find out what the odd number is. The last bit is
2752 // always 1.
2753 Register odd_number = r4;
2754 __ mov(odd_number, Operand(rhs, LSR, shift_distance));
2755 __ cmp(odd_number, Operand(25));
2756 __ b(gt, &slow);
2757
2758 IntegerModStub stub(
2759 result, shift_distance, odd_number, mask_bits, lhs, r5);
2760 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
2761
2762 __ bind(&slow);
2763 }
2764 HandleBinaryOpSlowCases(
2765 masm,
2766 &not_smi,
2767 lhs,
2768 rhs,
2769 op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
2770 break;
2771 }
2772
2773 case Token::BIT_OR:
2774 case Token::BIT_AND:
2775 case Token::BIT_XOR:
2776 case Token::SAR:
2777 case Token::SHR:
2778 case Token::SHL: {
2779 Label slow;
2780 STATIC_ASSERT(kSmiTag == 0); // adjust code below
2781 __ tst(smi_test_reg, Operand(kSmiTagMask));
2782 __ b(ne, &slow);
2783 Register scratch2 = smi_test_reg;
2784 smi_test_reg = no_reg;
2785 switch (op_) {
2786 case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
2787 case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
2788 case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
2789 case Token::SAR:
2790 // Remove tags from right operand.
2791 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2792 __ mov(result, Operand(lhs, ASR, scratch2));
2793 // Smi tag result.
2794 __ bic(result, result, Operand(kSmiTagMask));
2795 break;
2796 case Token::SHR:
2797 // Remove tags from operands. We can't do this on a 31 bit number
2798 // because then the 0s get shifted into bit 30 instead of bit 31.
2799 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
2800 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2801 __ mov(scratch, Operand(scratch, LSR, scratch2));
2802 // Unsigned shift is not allowed to produce a negative number, so
2803 // check the sign bit and the sign bit after Smi tagging.
2804 __ tst(scratch, Operand(0xc0000000));
2805 __ b(ne, &slow);
2806 // Smi tag result.
2807 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
2808 break;
2809 case Token::SHL:
2810 // Remove tags from operands.
2811 __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
2812 __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2813 __ mov(scratch, Operand(scratch, LSL, scratch2));
2814 // Check that the signed result fits in a Smi.
2815 __ add(scratch2, scratch, Operand(0x40000000), SetCC);
2816 __ b(mi, &slow);
2817 __ mov(result, Operand(scratch, LSL, kSmiTagSize));
2818 break;
2819 default: UNREACHABLE();
2820 }
2821 __ Ret();
2822 __ bind(&slow);
2823 HandleNonSmiBitwiseOp(masm, lhs, rhs);
2824 break;
2825 }
2826
2827 default: UNREACHABLE();
2828 }
2829 // This code should be unreachable.
2830 __ stop("Unreachable");
2831
2832 // Generate an unreachable reference to the DEFAULT stub so that it can be
2833 // found at the end of this stub when clearing ICs at GC.
2834 // TODO(kaznacheev): Check performance impact and get rid of this.
2835 if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
2836 GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
2837 __ CallStub(&uninit);
2838 }
2839 }
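
The DIV/MOD fast path above divides by a known small odd constant via reciprocal multiplication: mul / 2^shift approximates 1 / constant_rhs_ from above, so the high word of mul * lhs, shifted right by the remaining amount, is the integer quotient. A host-side sketch using the same constant-selection loop (illustrative only; it takes an untagged non-negative value and assumes d >= 3 and not a power of two, whereas the stub works on the Smi-tagged lhs and therefore shifts by shift - 31 after the umull):

static unsigned DivideByKnownOddConstant(unsigned x, unsigned d) {
  double divisor = 1.0 / d;
  int shift = 32;
  double scale = 4294967296.0;  // 1 << 32.
  unsigned mul;
  while (true) {  // Maximise the precision of the fixed point reciprocal.
    mul = static_cast<unsigned>(scale * divisor);
    if (mul >= 0x7fffffffu) break;
    scale *= 2.0;
    shift++;
  }
  mul++;
  unsigned long long product = static_cast<unsigned long long>(mul) * x;
  return static_cast<unsigned>(product >> 32) >> (shift - 32);  // x / d.
}

For example, for d == 7 this selects shift == 34 and mul == 2454267027, and DivideByKnownOddConstant(100, 7) returns 14.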
2840
2841
2842 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2843 Label get_result;
2844
2845 __ Push(r1, r0);
2846
2847 __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2848 __ mov(r1, Operand(Smi::FromInt(op_)));
2849 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
2850 __ Push(r2, r1, r0);
2851
2852 __ TailCallExternalReference(
2853 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
2854 5,
2855 1);
2856 }
2857
2858
2859 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
2860 GenericBinaryOpStub stub(key, type_info);
2861 return stub.GetCode();
2862 }
2863
2864
2865 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
2866 TRBinaryOpIC::TypeInfo type_info,
2867 TRBinaryOpIC::TypeInfo result_type_info) {
2868 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
2869 return stub.GetCode();
2870 }
2871
2872
2873 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2874 Label get_result;
(...skipping 4057 matching lines...)
6932 __ str(pc, MemOperand(sp, 0));
6933 __ Jump(target); // Call the C++ function.
6934 }
6935
6936
6937 #undef __
6938
6939 } } // namespace v8::internal
6940
6941 #endif // V8_TARGET_ARCH_ARM