OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1986 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1997 #else | 1997 #else |
1998 const Register exponent = rdi; | 1998 const Register exponent = rdi; |
1999 #endif | 1999 #endif |
2000 const Register base = rax; | 2000 const Register base = rax; |
2001 const Register scratch = rcx; | 2001 const Register scratch = rcx; |
2002 const XMMRegister double_result = xmm3; | 2002 const XMMRegister double_result = xmm3; |
2003 const XMMRegister double_base = xmm2; | 2003 const XMMRegister double_base = xmm2; |
2004 const XMMRegister double_exponent = xmm1; | 2004 const XMMRegister double_exponent = xmm1; |
2005 const XMMRegister double_scratch = xmm4; | 2005 const XMMRegister double_scratch = xmm4; |
2006 | 2006 |
2007 Label double_int_runtime, generic_runtime, done; | 2007 Label call_runtime, done, exponent_not_smi, int_exponent; |
2008 Label exponent_not_smi, int_exponent; | |
2009 | 2008 |
2010 // Save 1 in double_result - we need this several times later on. | 2009 // Save 1 in double_result - we need this several times later on. |
2011 __ movq(scratch, Immediate(1)); | 2010 __ movq(scratch, Immediate(1)); |
2012 __ cvtlsi2sd(double_result, scratch); | 2011 __ cvtlsi2sd(double_result, scratch); |
2013 | 2012 |
2014 if (exponent_type_ == ON_STACK) { | 2013 if (exponent_type_ == ON_STACK) { |
2015 Label base_is_smi, unpack_exponent; | 2014 Label base_is_smi, unpack_exponent; |
2016 // The exponent and base are supplied as arguments on the stack. | 2015 // The exponent and base are supplied as arguments on the stack. |
2017 // This can only happen if the stub is called from non-optimized code. | 2016 // This can only happen if the stub is called from non-optimized code. |
2018 // Load input parameters from stack. | 2017 // Load input parameters from stack. |
2019 __ movq(base, Operand(rsp, 2 * kPointerSize)); | 2018 __ movq(base, Operand(rsp, 2 * kPointerSize)); |
2020 __ movq(exponent, Operand(rsp, 1 * kPointerSize)); | 2019 __ movq(exponent, Operand(rsp, 1 * kPointerSize)); |
2021 __ JumpIfSmi(base, &base_is_smi, Label::kNear); | 2020 __ JumpIfSmi(base, &base_is_smi, Label::kNear); |
2022 __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset), | 2021 __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset), |
2023 Heap::kHeapNumberMapRootIndex); | 2022 Heap::kHeapNumberMapRootIndex); |
2024 __ j(not_equal, &generic_runtime); | 2023 __ j(not_equal, &call_runtime); |
2025 | 2024 |
2026 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); | 2025 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); |
2027 __ jmp(&unpack_exponent, Label::kNear); | 2026 __ jmp(&unpack_exponent, Label::kNear); |
2028 | 2027 |
2029 __ bind(&base_is_smi); | 2028 __ bind(&base_is_smi); |
2030 __ SmiToInteger32(base, base); | 2029 __ SmiToInteger32(base, base); |
2031 __ cvtlsi2sd(double_base, base); | 2030 __ cvtlsi2sd(double_base, base); |
2032 __ bind(&unpack_exponent); | 2031 __ bind(&unpack_exponent); |
2033 | 2032 |
2034 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); | 2033 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); |
2035 __ SmiToInteger32(exponent, exponent); | 2034 __ SmiToInteger32(exponent, exponent); |
2036 __ jmp(&int_exponent); | 2035 __ jmp(&int_exponent); |
2037 | 2036 |
2038 __ bind(&exponent_not_smi); | 2037 __ bind(&exponent_not_smi); |
2039 __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset), | 2038 __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset), |
2040 Heap::kHeapNumberMapRootIndex); | 2039 Heap::kHeapNumberMapRootIndex); |
2041 __ j(not_equal, &generic_runtime); | 2040 __ j(not_equal, &call_runtime); |
2042 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); | 2041 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); |
2043 } else if (exponent_type_ == TAGGED) { | 2042 } else if (exponent_type_ == TAGGED) { |
2044 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); | 2043 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); |
2045 __ SmiToInteger32(exponent, exponent); | 2044 __ SmiToInteger32(exponent, exponent); |
2046 __ jmp(&int_exponent); | 2045 __ jmp(&int_exponent); |
2047 | 2046 |
2048 __ bind(&exponent_not_smi); | 2047 __ bind(&exponent_not_smi); |
2049 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); | 2048 __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); |
2050 } | 2049 } |
2051 | 2050 |
2052 if (exponent_type_ != INTEGER) { | 2051 if (exponent_type_ != INTEGER) { |
2053 Label fast_power; | 2052 Label fast_power; |
2054 // Detect integer exponents stored as double. | 2053 // Detect integer exponents stored as double. |
2055 __ cvttsd2si(exponent, double_exponent); | 2054 __ cvttsd2si(exponent, double_exponent); |
2056 // Skip to runtime if possibly NaN (indicated by the indefinite integer). | 2055 // Skip to runtime if possibly NaN (indicated by the indefinite integer). |
2057 __ cmpl(exponent, Immediate(0x80000000u)); | 2056 __ cmpl(exponent, Immediate(0x80000000u)); |
2058 __ j(equal, &generic_runtime); | 2057 __ j(equal, &call_runtime); |
2059 __ cvtlsi2sd(double_scratch, exponent); | 2058 __ cvtlsi2sd(double_scratch, exponent); |
2060 // Already ruled out NaNs for exponent. | 2059 // Already ruled out NaNs for exponent. |
2061 __ ucomisd(double_exponent, double_scratch); | 2060 __ ucomisd(double_exponent, double_scratch); |
2062 __ j(equal, &int_exponent); | 2061 __ j(equal, &int_exponent); |
2063 | 2062 |
2064 if (exponent_type_ == ON_STACK) { | 2063 if (exponent_type_ == ON_STACK) { |
2065 // Detect square root case. Crankshaft detects constant +/-0.5 at | 2064 // Detect square root case. Crankshaft detects constant +/-0.5 at |
2066 // compile time and uses DoMathPowHalf instead. We then skip this check | 2065 // compile time and uses DoMathPowHalf instead. We then skip this check |
2067 // for non-constant cases of +/-0.5 as these hardly occur. | 2066 // for non-constant cases of +/-0.5 as these hardly occur. |
2068 Label continue_sqrt, continue_rsqrt, not_plus_half; | 2067 Label continue_sqrt, continue_rsqrt, not_plus_half; |
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2162 __ testb(rax, Immediate(0x5F)); // Check for all but precision exception. | 2161 __ testb(rax, Immediate(0x5F)); // Check for all but precision exception. |
2163 __ j(not_zero, &fast_power_failed, Label::kNear); | 2162 __ j(not_zero, &fast_power_failed, Label::kNear); |
2164 __ fstp_d(Operand(rsp, 0)); | 2163 __ fstp_d(Operand(rsp, 0)); |
2165 __ movsd(double_result, Operand(rsp, 0)); | 2164 __ movsd(double_result, Operand(rsp, 0)); |
2166 __ addq(rsp, Immediate(kDoubleSize)); | 2165 __ addq(rsp, Immediate(kDoubleSize)); |
2167 __ jmp(&done); | 2166 __ jmp(&done); |
2168 | 2167 |
2169 __ bind(&fast_power_failed); | 2168 __ bind(&fast_power_failed); |
2170 __ fninit(); | 2169 __ fninit(); |
2171 __ addq(rsp, Immediate(kDoubleSize)); | 2170 __ addq(rsp, Immediate(kDoubleSize)); |
2172 __ jmp(&generic_runtime); | 2171 __ jmp(&call_runtime); |
2173 } | 2172 } |
2174 | 2173 |
2175 // Calculate power with integer exponent. | 2174 // Calculate power with integer exponent. |
2176 __ bind(&int_exponent); | 2175 __ bind(&int_exponent); |
2177 const XMMRegister double_scratch2 = double_exponent; | 2176 const XMMRegister double_scratch2 = double_exponent; |
2178 // Back up exponent as we need to check if exponent is negative later. | 2177 // Back up exponent as we need to check if exponent is negative later. |
2179 __ movq(scratch, exponent); // Back up exponent. | 2178 __ movq(scratch, exponent); // Back up exponent. |
2180 __ movsd(double_scratch, double_base); // Back up base. | 2179 __ movsd(double_scratch, double_base); // Back up base. |
2181 __ movsd(double_scratch2, double_result); // Load double_exponent with 1. | 2180 __ movsd(double_scratch2, double_result); // Load double_exponent with 1. |
2182 | 2181 |
2183 // Get absolute value of exponent. | 2182 // Get absolute value of exponent. |
2184 Label while_true, no_multiply; | 2183 Label no_neg, while_true, no_multiply; |
2185 const uint32_t kClearSignBitMask = 0x7FFFFFFF; | 2184 __ testl(scratch, scratch); |
2186 __ andl(scratch, Immediate(kClearSignBitMask)); | 2185 __ j(positive, &no_neg, Label::kNear); |
Yang
2011/12/07 15:07:50
Ditto in ia32.
| |
2186 __ negl(scratch); | |
2187 __ bind(&no_neg); | |
2187 | 2188 |
2188 __ bind(&while_true); | 2189 __ bind(&while_true); |
2189 __ shrl(scratch, Immediate(1)); | 2190 __ shrl(scratch, Immediate(1)); |
2190 __ j(not_carry, &no_multiply, Label::kNear); | 2191 __ j(not_carry, &no_multiply, Label::kNear); |
2191 __ mulsd(double_result, double_scratch); | 2192 __ mulsd(double_result, double_scratch); |
2192 __ bind(&no_multiply); | 2193 __ bind(&no_multiply); |
2193 | 2194 |
2194 __ mulsd(double_scratch, double_scratch); | 2195 __ mulsd(double_scratch, double_scratch); |
2195 __ j(not_zero, &while_true); | 2196 __ j(not_zero, &while_true); |
2196 | 2197 |
2197 // scratch has the original value of the exponent - if the exponent is | 2198 // If the exponent is negative, return 1/result. |
2198 // negative, return 1/result. | |
2199 __ testl(exponent, exponent); | 2199 __ testl(exponent, exponent); |
2200 __ j(greater, &done); | 2200 __ j(greater, &done); |
2201 __ divsd(double_scratch2, double_result); | 2201 __ divsd(double_scratch2, double_result); |
2202 __ movsd(double_result, double_scratch2); | 2202 __ movsd(double_result, double_scratch2); |
2203 // Test whether result is zero. Bail out to check for subnormal result. | 2203 // Test whether result is zero. Bail out to check for subnormal result. |
2204 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | 2204 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. |
2205 __ xorps(double_scratch2, double_scratch2); | 2205 __ xorps(double_scratch2, double_scratch2); |
2206 __ ucomisd(double_scratch2, double_result); | 2206 __ ucomisd(double_scratch2, double_result); |
2207 __ j(equal, &double_int_runtime); | 2207 __ j(not_equal, &done); |
2208 __ cvtlsi2sd(double_exponent, exponent); | |
ulan
2011/12/07 16:03:04
Could you please add a comment explaining why we need this conversion? [text truncated in original capture]
| |
2208 | 2209 |
2209 // Returning or bailing out. | 2210 // Returning or bailing out. |
2211 Counters* counters = masm->isolate()->counters(); | |
2210 if (exponent_type_ == ON_STACK) { | 2212 if (exponent_type_ == ON_STACK) { |
2213 // The arguments are still on the stack. | |
2214 __ bind(&call_runtime); | |
2215 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | |
2216 | |
2211 // The stub is called from non-optimized code, which expects the result | 2217 // The stub is called from non-optimized code, which expects the result |
2212 // as heap number in eax. | 2218 // as heap number in eax. |
2213 __ bind(&done); | 2219 __ bind(&done); |
2214 __ AllocateHeapNumber(rax, rcx, &generic_runtime); | 2220 __ AllocateHeapNumber(rax, rcx, &call_runtime); |
2215 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result); | 2221 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result); |
2222 __ IncrementCounter(counters->math_pow(), 1); | |
2216 __ ret(2 * kPointerSize); | 2223 __ ret(2 * kPointerSize); |
2217 | |
2218 // The arguments are still on the stack. | |
2219 __ bind(&generic_runtime); | |
2220 __ bind(&double_int_runtime); | |
2221 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | |
2222 } else { | 2224 } else { |
2223 __ jmp(&done); | 2225 __ bind(&call_runtime); |
2224 | |
2225 Label return_from_runtime; | |
2226 StubRuntimeCallHelper callhelper; | |
2227 __ bind(&generic_runtime); | |
2228 // Move base to the correct argument register. Exponent is already in xmm1. | 2226 // Move base to the correct argument register. Exponent is already in xmm1. |
2229 __ movsd(xmm0, double_base); | 2227 __ movsd(xmm0, double_base); |
2230 ASSERT(double_exponent.is(xmm1)); | 2228 ASSERT(double_exponent.is(xmm1)); |
2231 { | 2229 { |
2232 AllowExternalCallThatCantCauseGC scope(masm); | 2230 AllowExternalCallThatCantCauseGC scope(masm); |
2233 __ PrepareCallCFunction(2); | 2231 __ PrepareCallCFunction(2); |
2234 __ CallCFunction( | 2232 __ CallCFunction( |
2235 ExternalReference::power_double_double_function(masm->isolate()), 2); | 2233 ExternalReference::power_double_double_function(masm->isolate()), 2); |
2236 } | 2234 }; |
ulan
2011/12/07 16:03:04
Redundant semicolon.
| |
2237 __ jmp(&return_from_runtime, Label::kNear); | |
2238 | |
2239 __ bind(&double_int_runtime); | |
2240 // Move base to the correct argument register. | |
2241 __ movsd(xmm0, double_base); | |
2242 // Exponent is already in the correct argument register: | |
2243 // edi (not rdi) on Linux and edx on Windows. | |
2244 { | |
2245 AllowExternalCallThatCantCauseGC scope(masm); | |
2246 __ PrepareCallCFunction(2); | |
2247 __ CallCFunction( | |
2248 ExternalReference::power_double_int_function(masm->isolate()), 2); | |
2249 } | |
2250 | |
2251 __ bind(&return_from_runtime); | |
2252 // Return value is in xmm0. | 2235 // Return value is in xmm0. |
2253 __ movsd(double_result, xmm0); | 2236 __ movsd(double_result, xmm0); |
2254 // Restore context register. | 2237 // Restore context register. |
2255 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); | 2238 __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
2256 | 2239 |
2257 __ bind(&done); | 2240 __ bind(&done); |
2241 __ IncrementCounter(counters->math_pow(), 1); | |
2258 __ ret(0); | 2242 __ ret(0); |
2259 } | 2243 } |
2260 } | 2244 } |
2261 | 2245 |
2262 | 2246 |
2263 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 2247 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
2264 // The key is in rdx and the parameter count is in rax. | 2248 // The key is in rdx and the parameter count is in rax. |
2265 | 2249 |
2266 // The displacement is used for skipping the frame pointer on the | 2250 // The displacement is used for skipping the frame pointer on the |
2267 // stack. It is the offset of the last parameter (if any) relative | 2251 // stack. It is the offset of the last parameter (if any) relative |
(...skipping 3945 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6213 xmm0, | 6197 xmm0, |
6214 &slow_elements); | 6198 &slow_elements); |
6215 __ ret(0); | 6199 __ ret(0); |
6216 } | 6200 } |
6217 | 6201 |
6218 #undef __ | 6202 #undef __ |
6219 | 6203 |
6220 } } // namespace v8::internal | 6204 } } // namespace v8::internal |
6221 | 6205 |
6222 #endif // V8_TARGET_ARCH_X64 | 6206 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |