Chromium Code Reviews

Diff: src/x64/code-stubs-x64.cc

Issue 6602007: Add MathPowStub to x64 platform, and fix error in stub on ia32 platform. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 9 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
(...skipping 1999 matching lines...)
2010 }
2011 __ SmiNeg(rax, rax, &done);
2012 __ jmp(&slow);  // zero, if not handled above, and Smi::kMinValue.
2013
2014 // Try floating point case.
2015 __ bind(&try_float);
2016 } else if (FLAG_debug_code) {
2017 __ AbortIfSmi(rax);
2018 }
2019
-2020 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-2021 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+2020 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+2021                Heap::kHeapNumberMapRootIndex);
2022 __ j(not_equal, &slow);
2023 // Operand is a float, negate its value by flipping sign bit.
2024 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
2025 __ movq(kScratchRegister, Immediate(0x01));
2026 __ shl(kScratchRegister, Immediate(63));
2027 __ xor_(rdx, kScratchRegister);  // Flip sign.
2028 // rdx is value to store.
2029 if (overwrite_ == UNARY_OVERWRITE) {
2030 __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
2031 } else {
2032 __ AllocateHeapNumber(rcx, rbx, &slow);
2033 // rcx: allocated 'empty' number
2034 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
2035 __ movq(rax, rcx);
2036 }
2037 } else if (op_ == Token::BIT_NOT) {
2038 if (include_smi_code_) {
2039 Label try_float;
2040 __ JumpIfNotSmi(rax, &try_float);
2041 __ SmiNot(rax, rax);
2042 __ jmp(&done);
2043 // Try floating point case.
2044 __ bind(&try_float);
2045 } else if (FLAG_debug_code) {
2046 __ AbortIfSmi(rax);
2047 }
2048
2049 // Check if the operand is a heap number.
-2050 __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-2051 __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+2050 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+2051                Heap::kHeapNumberMapRootIndex);
2052 __ j(not_equal, &slow);
2053
2054 // Convert the heap number in rax to an untagged integer in rcx.
2055 IntegerConvert(masm, rax, rax);
2056
2057 // Do the bitwise operation and smi tag the result.
2058 __ notl(rax);
2059 __ Integer32ToSmi(rax, rax);
2060 }
2061
(...skipping 12 matching lines...)
2074 break;
2075 case Token::BIT_NOT:
2076 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2077 break;
2078 default:
2079 UNREACHABLE();
2080 }
2081 }
2082
2083
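As an aside on the two heap-number paths above: Token::SUB negates a double entirely in integer registers by XOR-ing the IEEE-754 sign bit (the 1 << 63 mask built in kScratchRegister), and Token::BIT_NOT first converts the heap number to a 32-bit integer before applying the bitwise not and re-tagging the result as a smi. A minimal standalone C++ sketch of the same bit manipulation (hypothetical helper names, not part of the patch):

#include <cstdint>
#include <cstring>

// Token::SUB path: negate a double by flipping the IEEE-754 sign bit,
// mirroring the (1 << 63) mask XORed into the value above.
double NegateBySignFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // flip only the sign bit
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

// Token::BIT_NOT path: truncate the double to a 32-bit integer, apply ~,
// and (in the stub) re-tag the result as a smi.  The plain cast here stands
// in for IntegerConvert, which also handles out-of-range doubles.
int32_t BitNotOfDouble(double value) {
  int32_t untagged = static_cast<int32_t>(value);
  return ~untagged;
}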
2084 void MathPowStub::Generate(MacroAssembler* masm) {
2085 // Registers are used as follows:
2086 // rdx = base
2087 // rax = exponent
2088 // rcx = temporary, result
2089
2090 Label allocate_return, call_runtime;
2091
2092 // Load input parameters.
2093 __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2094 __ movq(rax, Operand(rsp, 1 * kPointerSize));
2095
2096 // Save 1 in xmm3 - we need this several times later on.
2097 __ movl(rcx, Immediate(1));
2098 __ cvtlsi2sd(xmm3, rcx);
2099
2100 Label exponent_nonsmi;
2101 Label base_nonsmi;
2102 // If the exponent is a heap number go to that specific case.
2103 __ JumpIfNotSmi(rax, &exponent_nonsmi);
2104 __ JumpIfNotSmi(rdx, &base_nonsmi);
2105
2106 // Optimized version when both exponent and base are smis.
2107 Label powi;
2108 __ SmiToInteger32(rdx, rdx);
2109 __ cvtlsi2sd(xmm0, rdx);
2110 __ jmp(&powi);
2111 // Exponent is a smi and base is a heapnumber.
2112 __ bind(&base_nonsmi);
2113 __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
2114 Heap::kHeapNumberMapRootIndex);
2115 __ j(not_equal, &call_runtime);
2116
2117 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2118
2119 // Optimized version of pow if exponent is a smi.
2120 // xmm0 contains the base.
2121 __ bind(&powi);
2122 __ SmiToInteger32(rax, rax);
2123
2124 // Save exponent in base as we need to check if exponent is negative later.
2125 // We know that base and exponent are in different registers.
2126 __ movq(rdx, rax);
2127
2128 // Get absolute value of exponent.
2129 NearLabel no_neg;
2130 __ cmpl(rax, Immediate(0));
2131 __ j(greater_equal, &no_neg);
2132 __ negl(rax);
2133 __ bind(&no_neg);
2134
2135 // Load xmm1 with 1.
2136 __ movsd(xmm1, xmm3);
2137 NearLabel while_true;
2138 NearLabel no_multiply;
2139
2140 __ bind(&while_true);
2141 __ shrl(rax, Immediate(1));
2142 __ j(not_carry, &no_multiply);
2143 __ mulsd(xmm1, xmm0);
2144 __ bind(&no_multiply);
2145 __ mulsd(xmm0, xmm0);
2146 __ j(not_zero, &while_true);
2147
2148 // Base has the original value of the exponent - if the exponent is
2149 // negative return 1/result.
2150 __ testl(rdx, rdx);
2151 __ j(positive, &allocate_return);
2152 // Special case if xmm1 has reached infinity.
2153 __ divsd(xmm3, xmm1);
2154 __ movsd(xmm1, xmm3);
2155 __ xorpd(xmm0, xmm0);
2156 __ ucomisd(xmm0, xmm1);
2157 __ j(equal, &call_runtime);
2158
2159 __ jmp(&allocate_return);
2160
2161 // Exponent (or both) is a heapnumber - no matter what we should now work
2162 // on doubles.
2163 __ bind(&exponent_nonsmi);
2164 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
2165 Heap::kHeapNumberMapRootIndex);
2166 __ j(not_equal, &call_runtime);
2167 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
2168 // Test if exponent is nan.
2169 __ ucomisd(xmm1, xmm1);
2170 __ j(parity_even, &call_runtime);
2171
2172 NearLabel base_not_smi;
2173 NearLabel handle_special_cases;
2174 __ JumpIfNotSmi(rdx, &base_not_smi);
2175 __ SmiToInteger32(rdx, rdx);
2176 __ cvtlsi2sd(xmm0, rdx);
2177 __ jmp(&handle_special_cases);
2178
2179 __ bind(&base_not_smi);
2180 __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
2181 Heap::kHeapNumberMapRootIndex);
2182 __ j(not_equal, &call_runtime);
2183 __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
2184 __ andl(rcx, Immediate(HeapNumber::kExponentMask));
2185 __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
2186 // base is NaN or +/-Infinity
2187 __ j(greater_equal, &call_runtime);
2188 __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2189
2190 // base is in xmm0 and exponent is in xmm1.
2191 __ bind(&handle_special_cases);
2192 NearLabel not_minus_half;
2193 // Test for -0.5.
2194 // Load xmm2 with -0.5.
2195 __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
2196 __ movq(xmm2, rcx);
2197 // xmm2 now has -0.5.
2198 __ ucomisd(xmm2, xmm1);
2199 __ j(not_equal, &not_minus_half);
2200
2201 // Calculates reciprocal of square root.
2202 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2203 __ xorpd(xmm1, xmm1);
2204 __ addsd(xmm1, xmm0);
2205 __ sqrtsd(xmm1, xmm1);
2206 __ divsd(xmm3, xmm1);
2207 __ movsd(xmm1, xmm3);
2208 __ jmp(&allocate_return);
2209
2210 // Test for 0.5.
2211 __ bind(&not_minus_half);
2212 // Load xmm2 with 0.5.
2213 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
2214 __ addsd(xmm2, xmm3);
2215 // xmm2 now has 0.5.
2216 __ ucomisd(xmm2, xmm1);
2217 __ j(not_equal, &call_runtime);
2218 // Calculates square root.
2219 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2220 __ xorpd(xmm1, xmm1);
2221 __ addsd(xmm1, xmm0);
2222 __ sqrtsd(xmm1, xmm1);
2223
2224 __ bind(&allocate_return);
2225 __ AllocateHeapNumber(rcx, rax, &call_runtime);
2226 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
2227 __ movq(rax, rcx);
2228 __ ret(2 * kPointerSize);
2229
2230 __ bind(&call_runtime);
2231 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2232 }
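The powi fast path above is standard exponentiation by squaring: each iteration shifts the exponent right by one bit, multiplies the running result by the current base when the shifted-out bit was set, and squares the base; a negative exponent is handled afterwards by taking the reciprocal, with a bail-out to the runtime if the intermediate result reached infinity. A rough C++ equivalent of that loop (hypothetical names, for reference only):

#include <cstdint>

// Exponentiation by squaring for a smi exponent, mirroring the shrl/mulsd
// loop bound at while_true above.  Illustrative sketch, not V8 code.
double PowiSketch(double base, int32_t exponent) {
  int64_t e = exponent < 0 ? -static_cast<int64_t>(exponent) : exponent;
  double result = 1.0;
  while (e != 0) {
    if (e & 1) result *= base;  // the bit shifted out was 1: fold base in
    base *= base;               // square the base every round
    e >>= 1;
  }
  if (exponent < 0) {
    // Negative exponent: return the reciprocal.  The stub bails out to the
    // runtime here if the intermediate result overflowed to infinity.
    result = 1.0 / result;
  }
  return result;
}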
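The handle_special_cases block inlines only two non-smi exponents, -0.5 and 0.5, computing the reciprocal square root and the square root respectively; NaN exponents and NaN or infinite bases were already routed to the runtime by the earlier parity and kExponentMask checks. A hedged C++ sketch of that logic, including the +0.0 addition that the "ECMA spec requires +0" comments refer to (helper name is illustrative):

#include <cmath>

// Heap-number exponent path: only exponents of exactly -0.5 and 0.5 are
// handled inline; anything else falls through to the runtime call.
// Adding +0.0 before the square root turns a -0 base into +0, which is the
// "ECMA spec requires +0" fix noted in the comments above (sqrt(-0) is -0).
bool TryPowSpecialCases(double base, double exponent, double* result) {
  if (exponent == -0.5) {
    *result = 1.0 / std::sqrt(base + 0.0);  // reciprocal square root
    return true;
  }
  if (exponent == 0.5) {
    *result = std::sqrt(base + 0.0);        // plain square root
    return true;
  }
  return false;  // e.g. TailCallRuntime(Runtime::kMath_pow_cfunction)
}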
2235 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2236 // The key is in rdx and the parameter count is in rax.
2237
2238 // The displacement is used for skipping the frame pointer on the
2239 // stack. It is the offset of the last parameter (if any) relative
2240 // to the frame pointer.
2241 static const int kDisplacement = 1 * kPointerSize;
2242
2243 // Check that the key is a smi.
2244 Label slow;
(...skipping 2841 matching lines...)
5086 FieldOperand(elements, PixelArray::kExternalPointerOffset));
5087 __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
5088 __ ret(0);  // Return value in eax.
5089 }
5090
5091 #undef __
5092
5093 } }  // namespace v8::internal
5094
5095 #endif  // V8_TARGET_ARCH_X64
