Chromium Code Reviews

Side by Side Diff: src/mips/stub-cache-mips.cc

Issue 141733002: Remove the special MathFloor / MathAbs call handlers. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 11 months ago
1 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 2 // Redistribution and use in source and binary forms, with or without
3 3 // modification, are permitted provided that the following conditions are
4 4 // met:
5 5 //
6 6 // * Redistributions of source code must retain the above copyright
7 7 // notice, this list of conditions and the following disclaimer.
8 8 // * Redistributions in binary form must reproduce the above
9 9 // copyright notice, this list of conditions and the following
10 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1905 matching lines...)
1916 1916 // it.
1917 1917 GenerateJumpFunctionIgnoreReceiver(function);
1918 1918
1919 1919 HandlerFrontendFooter(&miss);
1920 1920
1921 1921 // Return the generated code.
1922 1922 return GetCode(type, name);
1923 1923 }
1924 1924
1925 1925
1926 Handle<Code> CallStubCompiler::CompileMathFloorCall(
1927 Handle<Object> object,
1928 Handle<JSObject> holder,
1929 Handle<Cell> cell,
1930 Handle<JSFunction> function,
1931 Handle<String> name,
1932 Code::StubType type) {
1933 const int argc = arguments().immediate();
1934 // If the object is not a JSObject or we got an unexpected number of
1935 // arguments, bail out to the regular call.
1936 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
1937
1938 Label miss, slow;
1939 HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
1940 if (!cell.is_null()) {
1941 ASSERT(cell->value() == *function);
1942 GenerateLoadFunctionFromCell(cell, function, &miss);
1943 }
1944
1945 // Load the (only) argument into v0.
1946 __ lw(v0, MemOperand(sp, 0 * kPointerSize));
1947
1948 // If the argument is a smi, just return.
1949 STATIC_ASSERT(kSmiTag == 0);
1950 __ SmiTst(v0, t0);
1951 __ DropAndRet(argc + 1, eq, t0, Operand(zero_reg));
1952
1953 __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
1954
1955 Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
1956
1957 // If fpu is enabled, we use the floor instruction.
1958
1959 // Load the HeapNumber value.
1960 __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
1961
1962 // Backup FCSR.
1963 __ cfc1(a3, FCSR);
1964 // Clearing FCSR clears the exception mask with no side-effects.
1965 __ ctc1(zero_reg, FCSR);
1966 // Convert the argument to an integer.
1967 __ floor_w_d(f0, f0);
1968
1969 // Start checking for special cases.
1970 // Get the argument exponent and clear the sign bit.
1971 __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
1972 __ And(t2, t1, Operand(~HeapNumber::kSignMask));
1973 __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
1974
1975 // Retrieve FCSR and check for fpu errors.
1976 __ cfc1(t5, FCSR);
1977 __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
1978 __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
1979
1980 // Check for NaN, Infinity, and -Infinity.
1981 // They are invariant through a Math.Floor call, so just
1982 // return the original argument.
1983 __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
1984 >> HeapNumber::kMantissaBitsInTopWord));
1985 __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
1986 // We had an overflow or underflow in the conversion. Check if we
1987 // have a big exponent.
1988 // If greater or equal, the argument is already round and in v0.
1989 __ Branch(&restore_fcsr_and_return, ge, t3,
1990 Operand(HeapNumber::kMantissaBits));
1991 __ Branch(&wont_fit_smi);
1992
1993 __ bind(&no_fpu_error);
1994 // Move the result back to v0.
1995 __ mfc1(v0, f0);
1996 // Check if the result fits into a smi.
1997 __ Addu(a1, v0, Operand(0x40000000));
1998 __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
1999 // Tag the result.
2000 STATIC_ASSERT(kSmiTag == 0);
2001 __ sll(v0, v0, kSmiTagSize);
2002
2003 // Check for -0.
2004 __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
2005 // t1 already holds the HeapNumber exponent.
2006 __ And(t0, t1, Operand(HeapNumber::kSignMask));
2007 // If our HeapNumber is negative it was -0, so load its address and return.
2008 // Else v0 is loaded with 0, so we can also just return.
2009 __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
2010 __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2011
2012 __ bind(&restore_fcsr_and_return);
2013 // Restore FCSR and return.
2014 __ ctc1(a3, FCSR);
2015
2016 __ DropAndRet(argc + 1);
2017
2018 __ bind(&wont_fit_smi);
2019 // Restore FCSR and fall to slow case.
2020 __ ctc1(a3, FCSR);
2021
2022 __ bind(&slow);
2023 // We do not have to patch the receiver because the function makes no use of
2024 // it.
2025 GenerateJumpFunctionIgnoreReceiver(function);
2026
2027 HandlerFrontendFooter(&miss);
2028
2029 // Return the generated code.
2030 return GetCode(type, name);
2031 }
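
A note on the fast path deleted above: it returns a smi argument unchanged, floors a heap-number argument with floor_w_d while checking the FCSR exception flags, hands NaN, +/-Infinity and already-integral large values back unchanged, tags the result as a smi when it fits in 31 bits (the Addu of 0x40000000 is that range check), and returns the original heap number for -0, which has no smi encoding. The following is a minimal sketch of that decision logic in plain C++; ClassifyMathFloor and FloorOutcome are illustrative names, not V8 code.

#include <cmath>
#include <cstdint>

enum class FloorOutcome {
  kSmi,             // floor(value) fits in a 31-bit smi; *smi holds it.
  kReturnArgument,  // NaN, +/-Infinity, -0, or already-integral large values.
  kSlow             // result is out of smi range: fall back to the real call.
};

FloorOutcome ClassifyMathFloor(double value, int32_t* smi) {
  // NaN and +/-Infinity are unchanged by floor: return the argument as-is.
  if (std::isnan(value) || std::isinf(value)) return FloorOutcome::kReturnArgument;
  // A double whose magnitude is at least 2^52 is already integral; this is
  // roughly what the stub's "big exponent" check intends, returning the
  // argument unchanged.
  if (std::fabs(value) >= 4503599627370496.0 /* 2^52 */) return FloorOutcome::kReturnArgument;
  double floored = std::floor(value);
  // On 32-bit MIPS a smi payload is a 31-bit signed integer (one tag bit).
  if (floored < -1073741824.0 || floored > 1073741823.0) return FloorOutcome::kSlow;
  // -0 has no smi encoding, so the stub handed back the original -0.0 instead.
  if (floored == 0.0 && std::signbit(value)) return FloorOutcome::kReturnArgument;
  *smi = static_cast<int32_t>(floored);
  return FloorOutcome::kSmi;
}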
2032
2033
2034 Handle<Code> CallStubCompiler::CompileMathAbsCall(
2035 Handle<Object> object,
2036 Handle<JSObject> holder,
2037 Handle<Cell> cell,
2038 Handle<JSFunction> function,
2039 Handle<String> name,
2040 Code::StubType type) {
2041 const int argc = arguments().immediate();
2042 // If the object is not a JSObject or we got an unexpected number of
2043 // arguments, bail out to the regular call.
2044 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2045
2046 Label miss;
2047
2048 HandlerFrontendHeader(object, holder, name, RECEIVER_MAP_CHECK, &miss);
2049 if (!cell.is_null()) {
2050 ASSERT(cell->value() == *function);
2051 GenerateLoadFunctionFromCell(cell, function, &miss);
2052 }
2053
2054 // Load the (only) argument into v0.
2055 __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2056
2057 // Check if the argument is a smi.
2058 Label not_smi;
2059 STATIC_ASSERT(kSmiTag == 0);
2060 __ JumpIfNotSmi(v0, &not_smi);
2061
2062 // Do bitwise not or do nothing depending on the sign of the
2063 // argument.
2064 __ sra(t0, v0, kBitsPerInt - 1);
2065 __ Xor(a1, v0, t0);
2066
2067 // Add 1 or do nothing depending on the sign of the argument.
2068 __ Subu(v0, a1, t0);
2069
2070 // If the result is still negative, go to the slow case.
2071 // This only happens for the most negative smi.
2072 Label slow;
2073 __ Branch(&slow, lt, v0, Operand(zero_reg));
2074
2075 // Smi case done.
2076 __ DropAndRet(argc + 1);
2077
2078 // Check if the argument is a heap number and load its exponent and
2079 // sign.
2080 __ bind(&not_smi);
2081 __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2082 __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2083
2084 // Check the sign of the argument. If the argument is positive,
2085 // just return it.
2086 Label negative_sign;
2087 __ And(t0, a1, Operand(HeapNumber::kSignMask));
2088 __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
2089 __ DropAndRet(argc + 1);
2090
2091 // If the argument is negative, clear the sign, and return a new
2092 // number.
2093 __ bind(&negative_sign);
2094 __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
2095 __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2096 __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
2097 __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
2098 __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2099 __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2100 __ DropAndRet(argc + 1);
2101
2102 __ bind(&slow);
2103 // We do not have to patch the receiver because the function makes no use of
2104 // it.
2105 GenerateJumpFunctionIgnoreReceiver(function);
2106
2107 HandlerFrontendFooter(&miss);
2108
2109 // Return the generated code.
2110 return GetCode(type, name);
2111 }
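
For reference, the smi fast path in the deleted CompileMathAbsCall computes the absolute value without a branch: sra builds an all-ones mask for a negative input, Xor flips the bits, and Subu subtracts the mask, which together negate the value only when it is negative. A minimal sketch of the same arithmetic in plain C++ follows; SmiAbs is an illustrative name, not a V8 helper.

#include <cstdint>

// Branchless absolute value, as in the removed stub's smi fast path.
// Returns false for the most negative input, whose absolute value does not
// exist in 32 bits; the stub branched to the slow case there so the result
// could be boxed as a heap number instead.
bool SmiAbs(int32_t x, int32_t* result) {
  int32_t mask = x >> 31;  // 0 if x >= 0, -1 (all ones) if x < 0, like sra.
  // Do the xor/subtract in uint32_t so the most-negative case wraps the way
  // the MIPS registers do instead of being signed overflow in C++.
  int32_t abs_value = static_cast<int32_t>(
      (static_cast<uint32_t>(x) ^ static_cast<uint32_t>(mask)) -
      static_cast<uint32_t>(mask));
  if (abs_value < 0) return false;  // Only the most negative value stays negative.
  *result = abs_value;
  return true;
}

Because kSmiTag is 0, the stub applies the trick to the tagged smi directly; the only input that still comes out negative is the most negative smi, which is sent to the slow case so its absolute value can be boxed, and the separate heap-number path simply clears the sign bit of the exponent word in a freshly allocated HeapNumber.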
2112
2113
2114 1926 Handle<Code> CallStubCompiler::CompileFastApiCall(
2115 1927 const CallOptimization& optimization,
2116 1928 Handle<Object> object,
2117 1929 Handle<JSObject> holder,
2118 1930 Handle<Cell> cell,
2119 1931 Handle<JSFunction> function,
2120 1932 Handle<String> name) {
2121 1933
2122 1934 Counters* counters = isolate()->counters();
2123 1935
(...skipping 599 matching lines...)
2723 2535 // -----------------------------------
2724 2536 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
2725 2537 }
2726 2538
2727 2539
2728 2540 #undef __
2729 2541
2730 2542 } } // namespace v8::internal
2731 2543
2732 2544 #endif // V8_TARGET_ARCH_MIPS