Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 264973011: Update SmiShiftLeft, SmiShiftLogicalRight, SmiShiftArithmeticRight and SmiDiv to support x32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed comments (created 6 years, 6 months ago)
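Background for the SmiValuesAre32Bits()/SmiValuesAre31Bits() branches below: on x64 a smi keeps a full 32-bit payload shifted up by kSmiShift == 32, while on the x32 port pointers are 32 bits wide, so a smi is a 31-bit payload shifted by kSmiShift == 1. A minimal sketch of the two layouts (illustrative helpers, not V8 API):

    #include <cstdint>

    // x64 (SmiValuesAre32Bits): smi_shift == 32, payload is any int32.
    // x32 (SmiValuesAre31Bits): smi_shift == 1, payload must fit 31 bits,
    //                           i.e. lie in [-2^30, 2^30 - 1].
    int64_t TagSmi(int32_t value, int smi_shift) {
      return static_cast<int64_t>(value) << smi_shift;
    }
    int32_t UntagSmi(int64_t tagged, int smi_shift) {
      return static_cast<int32_t>(tagged >> smi_shift);
    }

The shift and divide helpers in this patch untag to a plain int32, operate there, and retag, bailing out to on_not_smi_result whenever the narrower 31-bit payload would overflow.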
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "v8.h"
 
 #if V8_TARGET_ARCH_X64
 
 #include "bootstrapper.h"
 #include "codegen.h"
(...skipping 250 matching lines...)
       Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
       andp(scratch, kScratchRegister);
     } else {
       Move(scratch, ExternalReference::new_space_mask(isolate()));
       andp(scratch, object);
     }
     Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpp(scratch, kScratchRegister);
     j(cc, branch, distance);
   } else {
-    ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
+    ASSERT(kPointerSize == kInt64Size
+        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+        : kPointerSize == kInt32Size);
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
     Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
          Assembler::RelocInfoNone());
     if (scratch.is(object)) {
       addp(scratch, kScratchRegister);
     } else {
       leap(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     andp(scratch,
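The reworked assertion only requires the new-space mask to be a 32-bit immediate when pointers are 64 bits wide; with 32-bit pointers (the x32 configuration) any mask fits by construction. Restated as a hypothetical predicate (not V8 code):

    #include <cstdint>

    // kPointerSize == kInt64Size: the mask must be encodable as an int32
    // immediate for andp(); kPointerSize == kInt32Size: always true.
    bool NewSpaceMaskIsEncodable(int pointer_size, int64_t mask) {
      return pointer_size == 8
                 ? mask == static_cast<int64_t>(static_cast<int32_t>(mask))
                 : pointer_size == 4;
    }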
(...skipping 1127 matching lines...)
   if (!(src.AddressUsesRegister(dst))) {
     movl(dst, Immediate(kSmiTagMask));
     andl(dst, src);
   } else {
     movl(dst, src);
     andl(dst, Immediate(kSmiTagMask));
   }
 }
 
 
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+                                         Label* on_valid,
+                                         Label::Distance near_jump) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(is_valid, on_valid, near_jump);
+}
+
+
 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                             Label* on_invalid,
                                             Label::Distance near_jump) {
   Condition is_valid = CheckInteger32ValidSmiValue(src);
   j(NegateCondition(is_valid), on_invalid, near_jump);
 }
 
 
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+                                             Label* on_valid,
+                                             Label::Distance near_jump) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(is_valid, on_valid, near_jump);
+}
+
+
 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                 Label* on_invalid,
                                                 Label::Distance near_jump) {
   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   j(NegateCondition(is_valid), on_invalid, near_jump);
 }
 
 
 void MacroAssembler::JumpIfSmi(Register src,
                                Label* on_smi,
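CheckInteger32ValidSmiValue() and CheckUInteger32ValidSmiValue() decide whether an untagged 32-bit result can be retagged; the new JumpIf*ValidSmiValue wrappers above branch on that condition, complementing the existing negated variants. What "valid" means in each configuration, as a hedged C++ restatement (the real helpers emit flag-setting machine code, not this):

    #include <cstdint>

    // Signed results: with 32-bit payloads every int32 is a smi; with
    // 31-bit payloads the value must lie in [-2^30, 2^30 - 1].
    bool Int32IsValidSmi(int32_t v, bool smis_are_32_bits) {
      return smis_are_32_bits || (v >= -(1 << 30) && v <= (1 << 30) - 1);
    }
    // Unsigned results (from logical right shifts): the value must also
    // read back as non-negative, so the bound tightens.
    bool UInt32IsValidSmi(uint32_t v, bool smis_are_32_bits) {
      return v <= (smis_are_32_bits ? 0x7fffffffu : 0x3fffffffu);
    }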
(...skipping 460 matching lines...)
   }
   SmiToInteger32(rax, src1);
   // We need to rule out dividing Smi::kMinValue by -1, since that would
   // overflow in idiv and raise an exception.
   // We combine this with negative zero test (negative zero only happens
   // when dividing zero by a negative number).
 
   // We overshoot a little and go to slow case if we divide min-value
   // by any negative value, not just -1.
   Label safe_div;
-  testl(rax, Immediate(0x7fffffff));
+  testl(rax, Immediate(~Smi::kMinValue));
   j(not_zero, &safe_div, Label::kNear);
   testp(src2, src2);
   if (src1.is(rax)) {
     j(positive, &safe_div, Label::kNear);
     movp(src1, kScratchRegister);
     jmp(on_not_smi_result, near_jump);
   } else {
     j(negative, on_not_smi_result, near_jump);
   }
   bind(&safe_div);
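Replacing the hard-coded 0x7fffffff with ~Smi::kMinValue keeps the "min-value or zero" test correct in both configurations: testl sets ZF exactly when rax has no bits outside kMinValue, i.e. when rax is 0 or Smi::kMinValue. The two masks, checked as compile-time arithmetic (kMinValue constants assumed from the two smi layouts):

    #include <cstdint>
    #include <limits>

    // 32-bit payloads: kMinValue == INT32_MIN, so the mask is the old
    // hard-coded constant.
    static_assert(~std::numeric_limits<int32_t>::min() == 0x7fffffff, "");
    // 31-bit payloads: kMinValue == -2^30.
    static_assert(~(-(1 << 30)) == 0x3fffffff, "");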
(...skipping 172 matching lines...)
       shlp(dst, Immediate(kSmiShift));
     } else {
       UNIMPLEMENTED();  // Not used.
     }
   }
 }
 
 
 void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                           Register src,
-                                          int shift_value) {
-  if (!dst.is(src)) {
-    movp(dst, src);
-  }
-  if (shift_value > 0) {
-    shlp(dst, Immediate(shift_value));
+                                          int shift_value,
+                                          Label* on_not_smi_result,
+                                          Label::Distance near_jump) {
+  if (SmiValuesAre32Bits()) {
+    if (!dst.is(src)) {
+      movp(dst, src);
+    }
+    if (shift_value > 0) {
+      // Shift amount specified by lower 5 bits, not six as the shl opcode.
+      shlq(dst, Immediate(shift_value & 0x1f));
+    }
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src)) {
+      UNIMPLEMENTED();  // Not used.
+    } else {
+      SmiToInteger32(dst, src);
+      shll(dst, Immediate(shift_value));
+      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+      Integer32ToSmi(dst, dst);
+    }
   }
 }
 
 
 void MacroAssembler::SmiShiftLogicalRightConstant(
     Register dst, Register src, int shift_value,
     Label* on_not_smi_result, Label::Distance near_jump) {
   // Logic right shift interprets its result as an *unsigned* number.
   if (dst.is(src)) {
     UNIMPLEMENTED();  // Not used.
   } else {
-    movp(dst, src);
     if (shift_value == 0) {
-      testp(dst, dst);
+      testp(src, src);
       j(negative, on_not_smi_result, near_jump);
     }
-    shrq(dst, Immediate(shift_value + kSmiShift));
-    shlq(dst, Immediate(kSmiShift));
+    if (SmiValuesAre32Bits()) {
+      movp(dst, src);
+      shrp(dst, Immediate(shift_value + kSmiShift));
+      shlp(dst, Immediate(kSmiShift));
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      SmiToInteger32(dst, src);
+      shrp(dst, Immediate(shift_value));
+      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+      Integer32ToSmi(dst, dst);
+    }
   }
 }
 
 
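Both constant-shift helpers now follow the same x32 pattern: untag with SmiToInteger32, shift the plain int32, test with JumpIf*NotValidSmiValue, and retag with Integer32ToSmi. A sketch of the 31-bit left-shift path under JS semantics (illustrative, not the emitted code):

    #include <cstdint>

    // JS << operates on int32 with wrap-around, which a 32-bit shll gives
    // us for free; the only extra work is checking that the wrapped result
    // still fits a 31-bit payload.
    bool SmiShiftLeftConstant31(int32_t value, int shift, int32_t* result) {
      int32_t shifted = static_cast<int32_t>(
          static_cast<uint32_t>(value) << (shift & 0x1f));
      if (shifted < -(1 << 30) || shifted > (1 << 30) - 1)
        return false;  // bail out to on_not_smi_result
      *result = static_cast<int32_t>(static_cast<uint32_t>(shifted) << 1);
      return true;     // retagged: kSmiShift == 1 in this configuration
    }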
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
-                                  Register src2) {
-  ASSERT(!dst.is(rcx));
-  // Untag shift amount.
-  if (!dst.is(src1)) {
-    movq(dst, src1);
+                                  Register src2,
+                                  Label* on_not_smi_result,
+                                  Label::Distance near_jump) {
+  if (SmiValuesAre32Bits()) {
+    ASSERT(!dst.is(rcx));
+    if (!dst.is(src1)) {
+      movp(dst, src1);
+    }
+    // Untag shift amount.
+    SmiToInteger32(rcx, src2);
+    // Shift amount specified by lower 5 bits, not six as the shl opcode.
+    andp(rcx, Immediate(0x1f));
+    shlq_cl(dst);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    ASSERT(!dst.is(kScratchRegister));
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    ASSERT(!dst.is(src2));
+    ASSERT(!dst.is(rcx));
+
+    if (src1.is(rcx) || src2.is(rcx)) {
+      movq(kScratchRegister, rcx);
+    }
+    if (dst.is(src1)) {
+      UNIMPLEMENTED();  // Not used.
+    } else {
+      Label valid_result;
+      SmiToInteger32(dst, src1);
+      SmiToInteger32(rcx, src2);
+      shll_cl(dst);
+      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+      // As src1 or src2 could not be dst, we do not need to restore them for
+      // clobbering dst.
+      if (src1.is(rcx) || src2.is(rcx)) {
+        if (src1.is(rcx)) {
+          movq(src1, kScratchRegister);
+        } else {
+          movq(src2, kScratchRegister);
+        }
+      }
+      jmp(on_not_smi_result, near_jump);
+      bind(&valid_result);
+      Integer32ToSmi(dst, dst);
+    }
   }
-  SmiToInteger32(rcx, src2);
-  // Shift amount specified by lower 5 bits, not six as the shl opcode.
-  andq(rcx, Immediate(0x1f));
-  shlq_cl(dst);
 }
 
 
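On the 32-bit-smi side, SmiShiftLeft still needs no bailout: the payload sits in the upper half of the tagged word, so shifting the whole quadword left by the masked count discards high payload bits (exactly JS int32 wrap-around) while the low 32 bits stay zero, leaving a well-formed smi. A sketch under that assumed layout:

    #include <cstdint>

    // tagged == payload << 32; count is masked to 5 bits as with shl.
    uint64_t SmiShiftLeft64(uint64_t tagged, int count) {
      return tagged << (count & 0x1f);  // always a valid tagged smi
    }

On the 31-bit side the result can overflow the payload, so rcx is stashed in kScratchRegister and restored only on the bailout path, where the slow case still needs the untouched operands.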
 void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                           Register src1,
                                           Register src2,
                                           Label* on_not_smi_result,
                                           Label::Distance near_jump) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src2));
   ASSERT(!dst.is(rcx));
-  // dst and src1 can be the same, because the one case that bails out
-  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
   if (src1.is(rcx) || src2.is(rcx)) {
     movq(kScratchRegister, rcx);
   }
-  if (!dst.is(src1)) {
-    movq(dst, src1);
-  }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  shrq_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shlq(dst, Immediate(kSmiShift));
-  testq(dst, dst);
-  if (src1.is(rcx) || src2.is(rcx)) {
-    Label positive_result;
-    j(positive, &positive_result, Label::kNear);
-    if (src1.is(rcx)) {
-      movq(src1, kScratchRegister);
-    } else {
-      movq(src2, kScratchRegister);
-    }
-    jmp(on_not_smi_result, near_jump);
-    bind(&positive_result);
-  } else {
-    // src2 was zero and src1 negative.
-    j(negative, on_not_smi_result, near_jump);
+  if (dst.is(src1)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    Label valid_result;
+    SmiToInteger32(dst, src1);
+    SmiToInteger32(rcx, src2);
+    shrl_cl(dst);
+    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+    // As src1 or src2 could not be dst, we do not need to restore them for
+    // clobbering dst.
+    if (src1.is(rcx) || src2.is(rcx)) {
+      if (src1.is(rcx)) {
+        movq(src1, kScratchRegister);
+      } else {
+        movq(src2, kScratchRegister);
+      }
+    }
+    jmp(on_not_smi_result, near_jump);
+    bind(&valid_result);
+    Integer32ToSmi(dst, dst);
   }
 }
 
 
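Logical right shift is the one direction that can fail even for a shift count of zero: JS >>> reinterprets the input as uint32, so a negative smi shifted by 0 yields a value with the top bit set, which neither smi configuration can represent (and 31-bit smis reject anything at or above 2^30). A restatement of the new shared path (assumed semantics, not the emitted code):

    #include <cstdint>

    bool SmiShiftLogicalRight31(int32_t value, int32_t count,
                                int32_t* result) {
      uint32_t shifted = static_cast<uint32_t>(value) >> (count & 0x1f);
      if (shifted > (1u << 30) - 1)
        return false;  // jump to on_not_smi_result
      *result = static_cast<int32_t>(shifted << 1);  // retag
      return true;
    }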
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
-  if (src1.is(rcx)) {
-    movp(kScratchRegister, src1);
-  } else if (src2.is(rcx)) {
-    movp(kScratchRegister, src2);
-  }
+
+  SmiToInteger32(rcx, src2);
   if (!dst.is(src1)) {
     movp(dst, src1);
   }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  sarp_cl(dst);  // Shift 32 + original rcx & 0x1f.
-  shlp(dst, Immediate(kSmiShift));
-  if (src1.is(rcx)) {
-    movp(src1, kScratchRegister);
-  } else if (src2.is(rcx)) {
-    movp(src2, kScratchRegister);
-  }
+  SmiToInteger32(dst, dst);
+  sarl_cl(dst);
+  Integer32ToSmi(dst, dst);
 }
 
 
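Arithmetic right shift can never leave the smi range, since a sign-propagating shift never grows the magnitude of its input, so the function keeps its bailout-free signature in both configurations and, with no slow path left to re-read the original operands, the old rcx save/restore is dropped. At the payload level the whole operation reduces to (sketch):

    #include <cstdint>

    int32_t SmiShiftArithmeticRight32(int32_t value, int32_t count) {
      return value >> (count & 0x1f);  // untag, sar, retag collapse to this
    }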
2209 void MacroAssembler::SelectNonSmi(Register dst, 2275 void MacroAssembler::SelectNonSmi(Register dst,
2210 Register src1, 2276 Register src1,
2211 Register src2, 2277 Register src2,
2212 Label* on_not_smis, 2278 Label* on_not_smis,
2213 Label::Distance near_jump) { 2279 Label::Distance near_jump) {
2214 ASSERT(!dst.is(kScratchRegister)); 2280 ASSERT(!dst.is(kScratchRegister));
2215 ASSERT(!src1.is(kScratchRegister)); 2281 ASSERT(!src1.is(kScratchRegister));
(...skipping 2961 matching lines...)
   if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
   movl(rax, dividend);
   shrl(rax, Immediate(31));
   addl(rdx, rax);
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64