Chromium Code Reviews

Side by Side Diff: src/x64/macro-assembler-x64.cc

Issue 264973011: Update SmiShiftLeft, SmiShiftLogicalRight, SmiShiftArithmeticRight and SmiDiv to support x32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 7 months ago
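
For context, a minimal sketch (not V8 code; names here are illustrative) of the two smi layouts this patch must handle: classic x64 keeps a 32-bit payload in the upper half of a 64-bit word (kSmiShift == 32), while the x32 port keeps a 31-bit payload above a one-bit tag (kSmiShift == 1), so the shift and divide helpers below must re-check that results still fit.

    #include <cstdint>

    // 64-bit smi layout: 32-bit payload in the upper word, low word zero.
    int64_t TagSmi32BitPayload(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // kSmiShift == 32
    }

    // x32 smi layout: 31-bit payload above a one-bit tag.
    int32_t TagSmi31BitPayload(int32_t value) {
      return value << 1;  // kSmiShift == 1; value must pass the check below
    }

    // A value is a valid 31-bit smi iff it fits in 31 signed bits.
    bool IsValid31BitSmiValue(int32_t value) {
      return value >= -(1 << 30) && value <= (1 << 30) - 1;
    }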
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "v8.h" 5 #include "v8.h"
6 6
7 #if V8_TARGET_ARCH_X64 7 #if V8_TARGET_ARCH_X64
8 8
9 #include "bootstrapper.h" 9 #include "bootstrapper.h"
10 #include "codegen.h" 10 #include "codegen.h"
(...skipping 250 matching lines...)
261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate())); 261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
262 andp(scratch, kScratchRegister); 262 andp(scratch, kScratchRegister);
263 } else { 263 } else {
264 Move(scratch, ExternalReference::new_space_mask(isolate())); 264 Move(scratch, ExternalReference::new_space_mask(isolate()));
265 andp(scratch, object); 265 andp(scratch, object);
266 } 266 }
267 Move(kScratchRegister, ExternalReference::new_space_start(isolate())); 267 Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
268 cmpp(scratch, kScratchRegister); 268 cmpp(scratch, kScratchRegister);
269 j(cc, branch, distance); 269 j(cc, branch, distance);
270 } else { 270 } else {
271 ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))); 271 ASSERT(kPointerSize == kInt64Size
272 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
273 : kPointerSize == kInt32Size);
272 intptr_t new_space_start = 274 intptr_t new_space_start =
273 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); 275 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
274 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), 276 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
275 Assembler::RelocInfoNone()); 277 Assembler::RelocInfoNone());
276 if (scratch.is(object)) { 278 if (scratch.is(object)) {
277 addp(scratch, kScratchRegister); 279 addp(scratch, kScratchRegister);
278 } else { 280 } else {
279 leap(scratch, Operand(object, kScratchRegister, times_1, 0)); 281 leap(scratch, Operand(object, kScratchRegister, times_1, 0));
280 } 282 }
281 andp(scratch, 283 andp(scratch,
(...skipping 1140 matching lines...)
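
The hunk above also rewrites the InNewSpace ASSERT so it holds on 32-bit-pointer (x32) builds, and emits one of two equivalent pointer tests; a sketch (not V8 code) of what the generated code computes, assuming the usual new-space mask semantics:

    #include <cstdint>

    // ExternalReference path: mask the object, compare against the start.
    bool InNewSpaceMaskCompare(uintptr_t object, uintptr_t start,
                               uintptr_t mask) {
      return (object & mask) == start;
    }

    // Immediate path: bias by -start first; the masked result must be zero.
    bool InNewSpaceBiasMask(uintptr_t object, uintptr_t start,
                            uintptr_t mask) {
      return ((object - start) & mask) == 0;
    }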
1422 if (!(src.AddressUsesRegister(dst))) { 1424 if (!(src.AddressUsesRegister(dst))) {
1423 movl(dst, Immediate(kSmiTagMask)); 1425 movl(dst, Immediate(kSmiTagMask));
1424 andl(dst, src); 1426 andl(dst, src);
1425 } else { 1427 } else {
1426 movl(dst, src); 1428 movl(dst, src);
1427 andl(dst, Immediate(kSmiTagMask)); 1429 andl(dst, Immediate(kSmiTagMask));
1428 } 1430 }
1429 } 1431 }
1430 1432
1431 1433
1434 void MacroAssembler::JumpIfValidSmiValue(Register src,
1435 Label* on_valid,
1436 Label::Distance near_jump) {
1437 Condition is_valid = CheckInteger32ValidSmiValue(src);
1438 j(is_valid, on_valid, near_jump);
1439 }
1440
1441
1432 void MacroAssembler::JumpIfNotValidSmiValue(Register src, 1442 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1433 Label* on_invalid, 1443 Label* on_invalid,
1434 Label::Distance near_jump) { 1444 Label::Distance near_jump) {
1435 Condition is_valid = CheckInteger32ValidSmiValue(src); 1445 Condition is_valid = CheckInteger32ValidSmiValue(src);
1436 j(NegateCondition(is_valid), on_invalid, near_jump); 1446 j(NegateCondition(is_valid), on_invalid, near_jump);
1437 } 1447 }
1438 1448
1439 1449
1450 void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1451 Label* on_valid,
1452 Label::Distance near_jump) {
1453 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1454 j(is_valid, on_valid, near_jump);
1455 }
1456
1457
1440 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, 1458 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1441 Label* on_invalid, 1459 Label* on_invalid,
1442 Label::Distance near_jump) { 1460 Label::Distance near_jump) {
1443 Condition is_valid = CheckUInteger32ValidSmiValue(src); 1461 Condition is_valid = CheckUInteger32ValidSmiValue(src);
1444 j(NegateCondition(is_valid), on_invalid, near_jump); 1462 j(NegateCondition(is_valid), on_invalid, near_jump);
1445 } 1463 }
1446 1464
1447 1465
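
The new JumpIfValidSmiValue and JumpIfUIntValidSmiValue helpers are the positive counterparts of the existing negative checks; they matter mainly on x32, where not every int32 fits in a smi. A sketch of the underlying predicates, assuming V8's smi ranges:

    #include <cstdint>

    // 31-bit (x32) smis: one bit of the int32 range is lost to the tag.
    bool ValidSmiValue31(int32_t v)   { return v >= -(1 << 30) && v < (1 << 30); }
    bool ValidUSmiValue31(uint32_t v) { return v < (1u << 30); }
    // 32-bit smis: every int32 is valid; a uint32 is valid below 2^31.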
1448 void MacroAssembler::JumpIfSmi(Register src, 1466 void MacroAssembler::JumpIfSmi(Register src,
1449 Label* on_smi, 1467 Label* on_smi,
(...skipping 460 matching lines...)
1910 } 1928 }
1911 SmiToInteger32(rax, src1); 1929 SmiToInteger32(rax, src1);
1912 // We need to rule out dividing Smi::kMinValue by -1, since that would 1930 // We need to rule out dividing Smi::kMinValue by -1, since that would
1913 // overflow in idiv and raise an exception. 1931 // overflow in idiv and raise an exception.
1914 // We combine this with negative zero test (negative zero only happens 1932 // We combine this with negative zero test (negative zero only happens
1915 // when dividing zero by a negative number). 1933 // when dividing zero by a negative number).
1916 1934
1917 // We overshoot a little and go to slow case if we divide min-value 1935 // We overshoot a little and go to slow case if we divide min-value
1918 // by any negative value, not just -1. 1936 // by any negative value, not just -1.
1919 Label safe_div; 1937 Label safe_div;
1920 testl(rax, Immediate(0x7fffffff)); 1938 testl(rax, Immediate(~Smi::kMinValue));
1921 j(not_zero, &safe_div, Label::kNear); 1939 j(not_zero, &safe_div, Label::kNear);
1922 testp(src2, src2); 1940 testp(src2, src2);
1923 if (src1.is(rax)) { 1941 if (src1.is(rax)) {
1924 j(positive, &safe_div, Label::kNear); 1942 j(positive, &safe_div, Label::kNear);
1925 movp(src1, kScratchRegister); 1943 movp(src1, kScratchRegister);
1926 jmp(on_not_smi_result, near_jump); 1944 jmp(on_not_smi_result, near_jump);
1927 } else { 1945 } else {
1928 j(negative, on_not_smi_result, near_jump); 1946 j(negative, on_not_smi_result, near_jump);
1929 } 1947 }
1930 bind(&safe_div); 1948 bind(&safe_div);
(...skipping 172 matching lines...)
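
In the SmiDiv hunk above, replacing the hard-coded 0x7fffffff with ~Smi::kMinValue makes the overshoot test layout-independent: the zero flag after testl(rax, Immediate(~Smi::kMinValue)) is set exactly when the untagged dividend is 0 or Smi::kMinValue, the two inputs that need the divisor sign check. A sketch:

    #include <cstdint>

    //   32-bit smis: kMinValue == 0x80000000, ~kMinValue == 0x7fffffff
    //   31-bit smis: kMinValue == 0xC0000000, ~kMinValue == 0x3fffffff
    bool NeedsDivisorSignCheck(int32_t dividend, int32_t smi_min_value) {
      return (dividend & ~smi_min_value) == 0;  // true only for 0, kMinValue
    }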
2103 shlp(dst, Immediate(kSmiShift)); 2121 shlp(dst, Immediate(kSmiShift));
2104 } else { 2122 } else {
2105 UNIMPLEMENTED(); // Not used. 2123 UNIMPLEMENTED(); // Not used.
2106 } 2124 }
2107 } 2125 }
2108 } 2126 }
2109 2127
2110 2128
2111 void MacroAssembler::SmiShiftLeftConstant(Register dst, 2129 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2112 Register src, 2130 Register src,
2113 int shift_value) { 2131 int shift_value,
2114 if (!dst.is(src)) { 2132 Label* on_not_smi_result,
2115 movp(dst, src); 2133 Label::Distance near_jump) {
2116 } 2134 if (SmiValuesAre32Bits()) {
2117 if (shift_value > 0) { 2135 if (!dst.is(src)) {
2118 shlp(dst, Immediate(shift_value)); 2136 movp(dst, src);
2137 }
2138 if (shift_value > 0) {
2139 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2140 shlq(dst, Immediate(shift_value & 0x1f));
2141 }
2142 } else {
2143 ASSERT(SmiValuesAre31Bits());
2144 if (dst.is(src)) {
2145 UNIMPLEMENTED(); // Not used.
2146 } else {
2147 movp(dst, src);
2148 SmiToInteger32(dst, dst);
Toon Verwaest 2014/06/02 09:49:08 Why not just SmiToInteger32(dst, src) ?
haitao.feng 2014/06/03 03:14:30 Done.
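The simplification Toon asks for collapses the copy and the untag into one call, since SmiToInteger32 already handles dst != src; sketched as a before/after of the lines in question:

    // Before (this patch set):
    movp(dst, src);
    SmiToInteger32(dst, dst);
    // After (as suggested, landed in a later patch set):
    SmiToInteger32(dst, src);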
2149 shll(dst, Immediate(shift_value));
2150 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2151 Integer32ToSmi(dst, dst);
2152 }
2119 } 2153 }
2120 } 2154 }
2121 2155
2122 2156
2123 void MacroAssembler::SmiShiftLogicalRightConstant( 2157 void MacroAssembler::SmiShiftLogicalRightConstant(
2124 Register dst, Register src, int shift_value, 2158 Register dst, Register src, int shift_value,
2125 Label* on_not_smi_result, Label::Distance near_jump) { 2159 Label* on_not_smi_result, Label::Distance near_jump) {
2126 // Logic right shift interprets its result as an *unsigned* number. 2160 // Logic right shift interprets its result as an *unsigned* number.
2127 if (dst.is(src)) { 2161 if (dst.is(src)) {
2128 UNIMPLEMENTED(); // Not used. 2162 UNIMPLEMENTED(); // Not used.
2129 } else { 2163 } else {
2130 movp(dst, src); 2164 movp(dst, src);
2131 if (shift_value == 0) { 2165 if (shift_value == 0) {
2132 testp(dst, dst); 2166 testp(dst, dst);
2133 j(negative, on_not_smi_result, near_jump); 2167 j(negative, on_not_smi_result, near_jump);
2134 } 2168 }
2135 shrq(dst, Immediate(shift_value + kSmiShift)); 2169 if (SmiValuesAre32Bits()) {
2136 shlq(dst, Immediate(kSmiShift)); 2170 shrp(dst, Immediate(shift_value + kSmiShift));
2171 shlp(dst, Immediate(kSmiShift));
2172 } else {
2173 ASSERT(SmiValuesAre31Bits());
2174 SmiToInteger32(dst, dst);
Toon Verwaest 2014/06/02 09:49:08 Same here
haitao.feng 2014/06/03 03:14:30 Done.
2175 shrp(dst, Immediate(shift_value));
2176 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2177 Integer32ToSmi(dst, dst);
2178 }
2137 } 2179 }
2138 } 2180 }
2139 2181
2140 2182
2141 void MacroAssembler::SmiShiftLeft(Register dst, 2183 void MacroAssembler::SmiShiftLeft(Register dst,
2142 Register src1, 2184 Register src1,
2143 Register src2) { 2185 Register src2,
2144 ASSERT(!dst.is(rcx)); 2186 Label* on_not_smi_result,
2145 // Untag shift amount. 2187 Label::Distance near_jump) {
2146 if (!dst.is(src1)) { 2188 if (SmiValuesAre32Bits()) {
2147 movq(dst, src1); 2189 ASSERT(!dst.is(rcx));
2190 // Untag shift amount.
2191 SmiToInteger32(rcx, src2);
2192 if (!dst.is(src1)) {
Toon Verwaest 2014/06/02 09:49:08 If src1 is rcx, doing SmiToInteger32 first will destroy src1 before it is copied into dst.
haitao.feng 2014/06/03 03:14:30 Done.
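The concern is ordering: as written, the untag into rcx runs before src1 is copied, so an rcx-aliased src1 would be clobbered. A sketch of the reorder the review asks for (the landed fix is in a later patch set not shown here):

    if (!dst.is(src1)) {
      movp(dst, src1);          // copy src1 before rcx is clobbered
    }
    SmiToInteger32(rcx, src2);  // only now untag the shift count into rcx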
2193 movp(dst, src1);
2194 }
2195 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2196 andp(rcx, Immediate(0x1f));
2197 shlq_cl(dst);
2198 } else {
2199 ASSERT(SmiValuesAre31Bits());
2200 ASSERT(!dst.is(kScratchRegister));
2201 ASSERT(!src1.is(kScratchRegister));
2202 ASSERT(!src2.is(kScratchRegister));
2203 ASSERT(!dst.is(src2));
2204 ASSERT(!dst.is(rcx));
2205
2206 if (src1.is(rcx) || src2.is(rcx)) {
2207 movp(kScratchRegister, rcx);
2208 }
2209 if (dst.is(src1)) {
2210 UNIMPLEMENTED(); // Not used.
2211 } else {
2212 Label valid_result;
2213 movp(dst, src1);
2214 SmiToInteger32(dst, dst);
2215 SmiToInteger32(rcx, src2);
2216 shll_cl(dst);
Toon Verwaest 2014/06/02 09:49:08 Seems like this code could be simplified? Why didn…
haitao.feng 2014/06/03 03:14:30 For 31-bit SMI, we might need to jump to the on_not_smi_result label.
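A worked example of the bailout haitao.feng describes: with 31-bit smis a left shift can yield a valid int32 that is no longer a valid smi, while the 32-bit layout wraps to int32 exactly like JavaScript's << and never needs the label.

    // value = 1 << 29 is a valid 31-bit smi value, but
    // value << 1 == 1 << 30 exceeds the 31-bit smi maximum (2^30 - 1),
    // so the code above jumps to on_not_smi_result.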
2217 JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2218 // As src1 or src2 could not be dst, we do not need to restore them for
2219 // clobbering dst.
2220 if (src1.is(rcx) || src2.is(rcx)) {
2221 if (src1.is(rcx)) {
2222 movp(src1, kScratchRegister);
2223 } else {
2224 movp(src2, kScratchRegister);
2225 }
2226 }
2227 jmp(on_not_smi_result, near_jump);
2228 bind(&valid_result);
2229 Integer32ToSmi(dst, dst);
2230 }
2148 } 2231 }
2149 SmiToInteger32(rcx, src2);
2150 // Shift amount specified by lower 5 bits, not six as the shl opcode.
2151 andq(rcx, Immediate(0x1f));
2152 shlq_cl(dst);
2153 } 2232 }
2154 2233
2155 2234
2156 void MacroAssembler::SmiShiftLogicalRight(Register dst, 2235 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2157 Register src1, 2236 Register src1,
2158 Register src2, 2237 Register src2,
2159 Label* on_not_smi_result, 2238 Label* on_not_smi_result,
2160 Label::Distance near_jump) { 2239 Label::Distance near_jump) {
2161 ASSERT(!dst.is(kScratchRegister)); 2240 ASSERT(!dst.is(kScratchRegister));
2162 ASSERT(!src1.is(kScratchRegister)); 2241 ASSERT(!src1.is(kScratchRegister));
2163 ASSERT(!src2.is(kScratchRegister)); 2242 ASSERT(!src2.is(kScratchRegister));
2243 ASSERT(!dst.is(src2));
2164 ASSERT(!dst.is(rcx)); 2244 ASSERT(!dst.is(rcx));
2165 // dst and src1 can be the same, because the one case that bails out
2166 // is a shift by 0, which leaves dst, and therefore src1, unchanged.
2167 if (src1.is(rcx) || src2.is(rcx)) { 2245 if (src1.is(rcx) || src2.is(rcx)) {
2168 movq(kScratchRegister, rcx); 2246 movp(kScratchRegister, rcx);
2169 } 2247 }
2170 if (!dst.is(src1)) { 2248 if (dst.is(src1)) {
2171 movq(dst, src1); 2249 UNIMPLEMENTED(); // Not used.
2172 } 2250 } else {
2173 SmiToInteger32(rcx, src2); 2251 Label valid_result;
2174 orl(rcx, Immediate(kSmiShift)); 2252 movp(dst, src1);
Toon Verwaest 2014/06/02 09:49:08 SmiToInteger32(dst, src1);
haitao.feng 2014/06/03 03:14:30 Done.
2175 shrq_cl(dst); // Shift is rcx modulo 0x1f + 32. 2253 SmiToInteger32(rcx, src2);
2176 shlq(dst, Immediate(kSmiShift)); 2254 SmiToInteger32(dst, dst);
2177 testq(dst, dst); 2255 shrl_cl(dst);
2178 if (src1.is(rcx) || src2.is(rcx)) { 2256 JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2179 Label positive_result; 2257 // As src1 or src2 could not be dst, we do not need to restore them for
2180 j(positive, &positive_result, Label::kNear); 2258 // clobbering dst.
2181 if (src1.is(rcx)) { 2259 if (src1.is(rcx) || src2.is(rcx)) {
2182 movq(src1, kScratchRegister); 2260 if (src1.is(rcx)) {
2183 } else { 2261 movp(src1, kScratchRegister);
2184 movq(src2, kScratchRegister); 2262 } else {
2185 } 2263 movp(src2, kScratchRegister);
2264 }
2265 }
2186 jmp(on_not_smi_result, near_jump); 2266 jmp(on_not_smi_result, near_jump);
2187 bind(&positive_result); 2267 bind(&valid_result);
2188 } else { 2268 Integer32ToSmi(dst, dst);
2189 // src2 was zero and src1 negative.
2190 j(negative, on_not_smi_result, near_jump);
2191 } 2269 }
2192 } 2270 }
2193 2271
2194 2272
2195 void MacroAssembler::SmiShiftArithmeticRight(Register dst, 2273 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2196 Register src1, 2274 Register src1,
2197 Register src2) { 2275 Register src2) {
2198 ASSERT(!dst.is(kScratchRegister)); 2276 ASSERT(!dst.is(kScratchRegister));
2199 ASSERT(!src1.is(kScratchRegister)); 2277 ASSERT(!src1.is(kScratchRegister));
2200 ASSERT(!src2.is(kScratchRegister)); 2278 ASSERT(!src2.is(kScratchRegister));
2201 ASSERT(!dst.is(rcx)); 2279 ASSERT(!dst.is(rcx));
2202 if (src1.is(rcx)) { 2280
2203 movp(kScratchRegister, src1); 2281 SmiToInteger32(rcx, src2);
2204 } else if (src2.is(rcx)) {
2205 movp(kScratchRegister, src2);
2206 }
2207 if (!dst.is(src1)) { 2282 if (!dst.is(src1)) {
2208 movp(dst, src1); 2283 movp(dst, src1);
2209 } 2284 }
2210 SmiToInteger32(rcx, src2); 2285 SmiToInteger32(dst, dst);
2211 orl(rcx, Immediate(kSmiShift)); 2286 sarl_cl(dst);
2212 sarp_cl(dst); // Shift 32 + original rcx & 0x1f. 2287 Integer32ToSmi(dst, dst);
2213 shlp(dst, Immediate(kSmiShift));
2214 if (src1.is(rcx)) {
2215 movp(src1, kScratchRegister);
2216 } else if (src2.is(rcx)) {
2217 movp(src2, kScratchRegister);
2218 }
2219 } 2288 }
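
The SmiShiftArithmeticRight rewrite above drops the old shift-by-(kSmiShift + n) trick (orl the amount with kSmiShift, then sarp_cl) in favor of untag/shift/retag, which works under both smi layouts. No on_not_smi_result label is needed because an arithmetic right shift never grows the magnitude; a sketch:

    #include <cstdint>

    // The result of sar on a valid smi value is always a valid smi value.
    int32_t SarSmiValue(int32_t value, int count) {
      return value >> (count & 0x1f);  // |result| <= |value|
    }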
2220 2289
2221 2290
2222 void MacroAssembler::SelectNonSmi(Register dst, 2291 void MacroAssembler::SelectNonSmi(Register dst,
2223 Register src1, 2292 Register src1,
2224 Register src2, 2293 Register src2,
2225 Label* on_not_smis, 2294 Label* on_not_smis,
2226 Label::Distance near_jump) { 2295 Label::Distance near_jump) {
2227 ASSERT(!dst.is(kScratchRegister)); 2296 ASSERT(!dst.is(kScratchRegister));
2228 ASSERT(!src1.is(kScratchRegister)); 2297 ASSERT(!src1.is(kScratchRegister));
(...skipping 3017 matching lines...)
5246 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); 5315 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5247 movl(rax, dividend); 5316 movl(rax, dividend);
5248 shrl(rax, Immediate(31)); 5317 shrl(rax, Immediate(31));
5249 addl(rdx, rax); 5318 addl(rdx, rax);
5250 } 5319 }
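
The final hunk is the tail of the division-by-constant helper: arithmetic-shift the high half of dividend * magic, then add the dividend's sign bit so the quotient rounds toward zero (the standard magic-number sequence). A sketch, assuming rdx holds the high 32 bits of the product:

    #include <cstdint>

    int32_t MagicDivTail(int32_t high_product, int32_t dividend, int shift) {
      int32_t q = high_product >> shift;               // sarl(rdx, ms.shift())
      q += static_cast<int32_t>(
          static_cast<uint32_t>(dividend) >> 31);      // shrl(rax, 31); addl
      return q;
    }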
5251 5320
5252 5321
5253 } } // namespace v8::internal 5322 } } // namespace v8::internal
5254 5323
5255 #endif // V8_TARGET_ARCH_X64 5324 #endif // V8_TARGET_ARCH_X64