Chromium Code Reviews

Side by Side Diff: src/x64/code-stubs-x64.cc

Issue 6602007: Add MathPowStub to x64 platform, and fix error in stub on ia32 platform. (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 9 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
(...skipping 1999 matching lines...)
2010 }
2011 __ SmiNeg(rax, rax, &done);
2012 __ jmp(&slow);  // zero, if not handled above, and Smi::kMinValue.
2013
2014 // Try floating point case.
2015 __ bind(&try_float);
2016 } else if (FLAG_debug_code) {
2017 __ AbortIfSmi(rax);
2018 }
2019
2020 - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
2021 - __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
2020 + __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
2021 +                Heap::kHeapNumberMapRootIndex);
2022 __ j(not_equal, &slow);
2023 // Operand is a float, negate its value by flipping sign bit.
2024 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
2025 __ movq(kScratchRegister, Immediate(0x01));
2026 __ shl(kScratchRegister, Immediate(63));
2027 __ xor_(rdx, kScratchRegister);  // Flip sign.
2028 // rdx is value to store.
2029 if (overwrite_ == UNARY_OVERWRITE) {
2030 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
2031 } else {
2032 __ AllocateHeapNumber(rcx, rbx, &slow);
2033 // rcx: allocated 'empty' number
2034 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
2035 __ movq(rax, rcx);
2036 }
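Note on the sign flip above: negating an IEEE 754 double only requires flipping bit 63, and the stub builds the 1 << 63 mask in kScratchRegister because x64 has no 64-bit XOR immediate. A minimal standalone C++ sketch of the same trick (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // Mirrors the movq/shl/xor_ sequence: reinterpret the double's bits,
    // flip the sign bit, reinterpret back. No FPU arithmetic is involved,
    // so it also maps +0.0 to -0.0 and preserves NaN payloads.
    double NegateBySignFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= uint64_t{1} << 63;
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }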
2037 } else if (op_ == Token::BIT_NOT) {
2038 if (include_smi_code_) {
2039 Label try_float;
2040 __ JumpIfNotSmi(rax, &try_float);
2041 __ SmiNot(rax, rax);
2042 __ jmp(&done);
2043 // Try floating point case.
2044 __ bind(&try_float);
2045 } else if (FLAG_debug_code) {
2046 __ AbortIfSmi(rax);
2047 }
2048
2049 // Check if the operand is a heap number.
2050 - __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
2051 - __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
2050 + __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
2051 +                Heap::kHeapNumberMapRootIndex);
2052 __ j(not_equal, &slow);
2053
2054 // Convert the heap number in rax to an untagged integer in rcx.
2055 IntegerConvert(masm, rax, rax);
2056
2057 // Do the bitwise operation and smi tag the result.
2058 __ notl(rax);
2059 __ Integer32ToSmi(rax, rax);
2060 }
2061
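For context, IntegerConvert applies ECMAScript's ToInt32 (truncate toward zero, wrap modulo 2^32), after which the complement and smi tagging are straightforward. A rough, simplified C++ sketch of those semantics (a hypothetical helper, not the actual IntegerConvert code):

    #include <cmath>
    #include <cstdint>

    // ECMA-262 ToInt32: NaN, +/-Infinity and +/-0 map to 0; finite values
    // are truncated toward zero and wrapped modulo 2^32.
    int32_t ToInt32(double number) {
      if (!std::isfinite(number) || number == 0.0) return 0;
      double wrapped = std::fmod(std::trunc(number), 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }

    // What the BIT_NOT slow path computes before re-tagging the result.
    int32_t BitNot(double number) { return ~ToInt32(number); }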
(...skipping 12 matching lines...)
2074 break;
2075 case Token::BIT_NOT:
2076 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2077 break;
2078 default:
2079 UNREACHABLE();
2080 }
2081 }
2082
2083
2084 void MathPowStub::Generate(MacroAssembler* masm) {
2085 // Registers are used as follows:
2086 // rdx = base
2087 // rax = exponent
2088 // rcx = temporary, result
2089
2090 CpuFeatures::Scope use_sse2(SSE2);
Lasse Reichstein 2011/02/28 13:21:41 No need to have SSE2 scopes in X64.
2091 Label allocate_return, call_runtime;
2092
2093 // Load input parameters.
2094 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2095 __ movq(rax, Operand(rsp, 1 * kPointerSize));
2096
2097 // Save 1 in xmm3 - we need this several times later on.
2098 __ movl(rcx, Immediate(1));
2099 __ cvtlsi2sd(xmm3, rcx);
2100
2101 Label exponent_nonsmi;
2102 Label base_nonsmi;
2103 // If the exponent is a heap number go to that specific case.
2104 __ JumpIfNotSmi(rax, &exponent_nonsmi);
2105 __ JumpIfNotSmi(rdx, &base_nonsmi);
2106
2107 // Optimized version when both exponent and base are smis.
2108 Label powi;
2109 __ SmiToInteger32(rdx, rdx);
2110 __ cvtlsi2sd(xmm0, rdx);
2111 __ jmp(&powi);
2112 // exponent is smi and base is a heapnumber.
2113 __ bind(&base_nonsmi);
2114 __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
2115 Heap::kHeapNumberMapRootIndex);
2116 __ j(not_equal, &call_runtime);
2117
2118 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2119
2120 // Optimized version of pow if exponent is a smi.
2121 // xmm0 contains the base.
2122 __ bind(&powi);
2123 __ SmiToInteger32(rax, rax);
2124
2125 // Save exponent in base as we need to check if exponent is negative later.
2126 // We know that base and exponent are in different registers.
2127 __ movq(rdx, rax);
2128
2129 // Get absolute value of exponent.
2130 NearLabel no_neg;
2131 __ cmpl(rax, Immediate(0));
2132 __ j(greater_equal, &no_neg);
2133 __ negl(rax);
2134 __ bind(&no_neg);
2135
2136 // Load xmm1 with 1.
2137 __ movsd(xmm1, xmm3);
2138 NearLabel while_true;
2139 NearLabel no_multiply;
2140
2141 __ bind(&while_true);
2142 __ shrl(rax, Immediate(1));
2143 __ j(not_carry, &no_multiply);
2144 __ mulsd(xmm1, xmm0);
2145 __ bind(&no_multiply);
2146 __ mulsd(xmm0, xmm0);
2147 __ j(not_zero, &while_true);
2148
2149 // base has the original value of the exponent - if the exponent is
2150 // negative return 1/result.
2151 __ testl(rdx, rdx);
2152 __ j(positive, &allocate_return);
2153 // Special case if xmm1 has reached infinity.
2154 __ movl(rcx, Immediate(0x7FB00000));
Lasse Reichstein 2011/02/28 13:21:41 Could you detect infinity by adding the value to i
William Hesse 2011/02/28 14:36:11 We now divide first, then compare to 0. On 2011/
2155 __ movd(xmm0, rcx);
2156 __ cvtss2sd(xmm0, xmm0);
2157 __ ucomisd(xmm0, xmm1);
2158 __ j(equal, &call_runtime);
2159 __ divsd(xmm3, xmm1);
2160 __ movsd(xmm1, xmm3);
Lasse Reichstein 2011/02/28 13:21:41 It's worth noticing that if xmm1 is very small, th
William Hesse 2011/02/28 14:36:11 Done.
2161 __ jmp(&allocate_return);
2162
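The shrl/mulsd loop above is exponentiation by squaring: each iteration shifts the exponent's low bit into the carry flag, multiplies it into the result when set, and squares the base. A minimal C++ sketch of the same algorithm, including the reciprocal step for negative exponents (illustrative, not the stub's code):

    #include <cstdlib>

    // Square-and-multiply, as in the shrl/j(not_carry)/mulsd loop above.
    double PowInteger(double base, int exponent) {
      double result = 1.0;                 // xmm1, seeded with 1.0 from xmm3
      unsigned bits = std::abs(exponent);  // rax after the negl; exponents
                                           // come from smis, so no overflow
      while (bits != 0) {
        if (bits & 1) result *= base;      // carry set: mulsd xmm1, xmm0
        base *= base;                      // mulsd xmm0, xmm0
        bits >>= 1;                        // shrl rax, Immediate(1)
      }
      // Negative exponent: take the reciprocal. The stub instead bails out
      // to the runtime when result is infinite, as discussed above.
      return exponent < 0 ? 1.0 / result : result;
    }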
2163 // exponent (or both) is a heapnumber - no matter what we should now work
Lasse Reichstein 2011/02/28 13:21:41 Capitalize exponent.
2164 // on doubles.
2165 __ bind(&exponent_nonsmi);
2166 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
2167 Heap::kHeapNumberMapRootIndex);
2168 __ j(not_equal, &call_runtime);
2169 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
2170 // Test if exponent is nan.
2171 __ ucomisd(xmm1, xmm1);
2172 __ j(parity_even, &call_runtime);
2173
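The self-comparison works because NaN is the only value that is unordered with itself; ucomisd reports "unordered" in the parity flag, so j(parity_even) takes the NaN branch. The C++ equivalent (illustrative):

    // Same test as ucomisd(x, x) + parity check: x != x holds only for NaN.
    bool IsNaN(double x) { return x != x; }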
2174 NearLabel base_not_smi;
2175 NearLabel handle_special_cases;
2176 __ JumpIfNotSmi(rdx, &base_not_smi);
2177 __ SmiToInteger32(rdx, rdx);
2178 __ cvtlsi2sd(xmm0, rdx);
2179 __ jmp(&handle_special_cases);
2180
2181 __ bind(&base_not_smi);
2182 __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
2183 Heap::kHeapNumberMapRootIndex);
2184 __ j(not_equal, &call_runtime);
2185 __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
2186 __ andl(rcx, Immediate(HeapNumber::kExponentMask));
2187 __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
2188 // base is NaN or +/-Infinity
2189 __ j(greater_equal, &call_runtime);
Lasse Reichstein 2011/02/28 13:21:41 If the value isn't -Infinity, won't the result be
William Hesse 2011/02/28 14:36:11 The value b^infinity depends on whether b is >, =, or < 1 in absolute value.
2190 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2191
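The kExponentOffset/kExponentMask check above classifies the base without any floating-point loads: a double is NaN or +/-Infinity exactly when all eleven exponent bits are set, and those bits sit in the upper word the stub reads. A standalone sketch of the same predicate (hypothetical helper, assuming the usual IEEE 754 layout):

    #include <cstdint>
    #include <cstring>

    // True iff x is NaN or +/-Infinity, i.e. the exponent field is all ones.
    bool IsNaNOrInfinity(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      const uint64_t kExponentMask = 0x7FF0000000000000ULL;
      return (bits & kExponentMask) == kExponentMask;
    }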
2192 // base is in xmm0 and exponent is in xmm1.
2193 __ bind(&handle_special_cases);
2194 NearLabel not_minus_half;
2195 // Test for -0.5.
2196 // Load xmm2 with -0.5.
2197 __ movl(rcx, Immediate(0xBF000000));
Lasse Reichstein 2011/02/28 13:21:41 Is this smaller than loading the exact representation?
William Hesse 2011/02/28 14:36:11 Changed. On 2011/02/28 13:21:41, Lasse Reichstein
2198 __ movd(xmm2, rcx);
2199 __ cvtss2sd(xmm2, xmm2);
2200 // xmm2 now has -0.5.
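About this constant load: 0xBF000000 is the single-precision bit pattern of -0.5f, which cvtss2sd widens exactly to -0.5; encoding a 4-byte float pattern plus a convert is shorter than materializing the 8-byte double pattern directly. What the movl/movd/cvtss2sd sequence computes, as a C++ sketch (illustrative):

    #include <cstdint>
    #include <cstring>

    // Builds a double constant from a 32-bit float bit pattern, e.g.
    // 0xBF000000 -> -0.5f -> -0.5. Values like +/-0.5 are exactly
    // representable in single precision, so the widening is lossless.
    double DoubleFromFloatBits(uint32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof(f));  // movd xmm2, rcx
      return static_cast<double>(f);      // cvtss2sd xmm2, xmm2
    }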
2201 __ ucomisd(xmm2, xmm1);
2202 __ j(not_equal, &not_minus_half);
2203
2204 // Calculates reciprocal of square root.
2205 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2206 __ xorpd(xmm1, xmm1);
2207 __ addsd(xmm1, xmm0);
2208 __ sqrtsd(xmm1, xmm1);
2209 __ divsd(xmm3, xmm1);
2210 __ movsd(xmm1, xmm3);
2211 __ jmp(&allocate_return);
2212
2213 // Test for 0.5.
2214 __ bind(&not_minus_half);
2215 // Load xmm2 with 0.5.
2216 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
2217 __ addsd(xmm2, xmm3);
2218 // xmm2 now has 0.5.
2219 __ ucomisd(xmm2, xmm1);
2220 __ j(not_equal, &call_runtime);
2221 // Calculates square root.
2222 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2223 __ xorpd(xmm1, xmm1);
2224 __ addsd(xmm1, xmm0);
2225 __ sqrtsd(xmm1, xmm1);
2226
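Both square-root paths first compute 0.0 + base: IEEE 754 addition gives (+0.0) + (-0.0) == +0.0, which normalizes a -0.0 base before sqrtsd, since sqrt(-0.0) is -0.0 but ECMA requires pow(-0, 0.5) to be +0. A condensed C++ sketch of the two fast paths (illustrative; the stub has already sent NaN/Infinity bases to the runtime by this point):

    #include <cmath>

    // x^0.5 and x^-0.5 via square root, with the -0.0 fix-up from the stub.
    double PowHalf(double base, bool exponent_is_minus_half) {
      double normalized = 0.0 + base;       // xorpd + addsd: -0.0 -> +0.0
      double root = std::sqrt(normalized);  // sqrtsd
      return exponent_is_minus_half ? 1.0 / root : root;  // divsd for -0.5
    }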
2227 __ bind(&allocate_return);
2228 __ AllocateHeapNumber(rcx, rax, &call_runtime);
2229 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
2230 __ movq(rax, rcx);
2231 __ ret(2 * kPointerSize);
2232
2233 __ bind(&call_runtime);
2234 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2235 }
2236
2237
2238 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2239 // The key is in rdx and the parameter count is in rax.
2240
2241 // The displacement is used for skipping the frame pointer on the
2242 // stack. It is the offset of the last parameter (if any) relative
2243 // to the frame pointer.
2244 static const int kDisplacement = 1 * kPointerSize;
2245
2246 // Check that the key is a smi.
2247 Label slow;
(...skipping 2841 matching lines...)
5089     FieldOperand(elements, PixelArray::kExternalPointerOffset));
5090 __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
5091 __ ret(0);  // Return value in eax.
5092 }
5093
5094 #undef __
5095
5096 } }  // namespace v8::internal
5097
5098 #endif  // V8_TARGET_ARCH_X64
