Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(587)

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 13929015: Fix Windows build (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

// Keep around global pointers to these objects so that Valgrind won't complain.
static size_t* medium_handlers = NULL;
static size_t* small_handlers = NULL;


enum Direction { FORWARD, BACKWARD };
// Prefixed with MOVE_ to avoid colliding with the ALIGNED macro on Windows.
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
187 187
188 // Expects registers: 188 // Expects registers:
189 // esi - source, aligned if alignment == ALIGNED 189 // esi - source, aligned if alignment == ALIGNED
190 // edi - destination, always aligned 190 // edi - destination, always aligned
191 // ecx - count (copy size in bytes) 191 // ecx - count (copy size in bytes)
192 // edx - loop count (number of 64 byte chunks) 192 // edx - loop count (number of 64 byte chunks)
193 void MemMoveEmitMainLoop(MacroAssembler* masm, 193 void MemMoveEmitMainLoop(MacroAssembler* masm,
194 Label* move_last_15, 194 Label* move_last_15,
195 Direction direction, 195 Direction direction,
196 Alignment alignment) { 196 Alignment alignment) {
197 Register src = esi; 197 Register src = esi;
198 Register dst = edi; 198 Register dst = edi;
199 Register count = ecx; 199 Register count = ecx;
200 Register loop_count = edx; 200 Register loop_count = edx;
201 Label loop, move_last_31, move_last_63; 201 Label loop, move_last_31, move_last_63;
202 __ cmp(loop_count, 0); 202 __ cmp(loop_count, 0);
203 __ j(equal, &move_last_63); 203 __ j(equal, &move_last_63);
204 __ bind(&loop); 204 __ bind(&loop);
205 // Main loop. Copy in 64 byte chunks. 205 // Main loop. Copy in 64 byte chunks.
206 if (direction == BACKWARD) __ sub(src, Immediate(0x40)); 206 if (direction == BACKWARD) __ sub(src, Immediate(0x40));
207 __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0x00)); 207 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
208 __ movdq(alignment == ALIGNED, xmm1, Operand(src, 0x10)); 208 __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
209 __ movdq(alignment == ALIGNED, xmm2, Operand(src, 0x20)); 209 __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
210 __ movdq(alignment == ALIGNED, xmm3, Operand(src, 0x30)); 210 __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
211 if (direction == FORWARD) __ add(src, Immediate(0x40)); 211 if (direction == FORWARD) __ add(src, Immediate(0x40));
212 if (direction == BACKWARD) __ sub(dst, Immediate(0x40)); 212 if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
213 __ movdqa(Operand(dst, 0x00), xmm0); 213 __ movdqa(Operand(dst, 0x00), xmm0);
214 __ movdqa(Operand(dst, 0x10), xmm1); 214 __ movdqa(Operand(dst, 0x10), xmm1);
215 __ movdqa(Operand(dst, 0x20), xmm2); 215 __ movdqa(Operand(dst, 0x20), xmm2);
216 __ movdqa(Operand(dst, 0x30), xmm3); 216 __ movdqa(Operand(dst, 0x30), xmm3);
217 if (direction == FORWARD) __ add(dst, Immediate(0x40)); 217 if (direction == FORWARD) __ add(dst, Immediate(0x40));
218 __ dec(loop_count); 218 __ dec(loop_count);
219 __ j(not_zero, &loop); 219 __ j(not_zero, &loop);
220 // At most 63 bytes left to copy. 220 // At most 63 bytes left to copy.
221 __ bind(&move_last_63); 221 __ bind(&move_last_63);
222 __ test(count, Immediate(0x20)); 222 __ test(count, Immediate(0x20));
223 __ j(zero, &move_last_31); 223 __ j(zero, &move_last_31);
224 if (direction == BACKWARD) __ sub(src, Immediate(0x20)); 224 if (direction == BACKWARD) __ sub(src, Immediate(0x20));
225 __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0x00)); 225 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
226 __ movdq(alignment == ALIGNED, xmm1, Operand(src, 0x10)); 226 __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
227 if (direction == FORWARD) __ add(src, Immediate(0x20)); 227 if (direction == FORWARD) __ add(src, Immediate(0x20));
228 if (direction == BACKWARD) __ sub(dst, Immediate(0x20)); 228 if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
229 __ movdqa(Operand(dst, 0x00), xmm0); 229 __ movdqa(Operand(dst, 0x00), xmm0);
230 __ movdqa(Operand(dst, 0x10), xmm1); 230 __ movdqa(Operand(dst, 0x10), xmm1);
231 if (direction == FORWARD) __ add(dst, Immediate(0x20)); 231 if (direction == FORWARD) __ add(dst, Immediate(0x20));
232 // At most 31 bytes left to copy. 232 // At most 31 bytes left to copy.
233 __ bind(&move_last_31); 233 __ bind(&move_last_31);
234 __ test(count, Immediate(0x10)); 234 __ test(count, Immediate(0x10));
235 __ j(zero, move_last_15); 235 __ j(zero, move_last_15);
236 if (direction == BACKWARD) __ sub(src, Immediate(0x10)); 236 if (direction == BACKWARD) __ sub(src, Immediate(0x10));
237 __ movdq(alignment == ALIGNED, xmm0, Operand(src, 0)); 237 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
238 if (direction == FORWARD) __ add(src, Immediate(0x10)); 238 if (direction == FORWARD) __ add(src, Immediate(0x10));
239 if (direction == BACKWARD) __ sub(dst, Immediate(0x10)); 239 if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
240 __ movdqa(Operand(dst, 0), xmm0); 240 __ movdqa(Operand(dst, 0), xmm0);
241 if (direction == FORWARD) __ add(dst, Immediate(0x10)); 241 if (direction == FORWARD) __ add(dst, Immediate(0x10));
242 } 242 }
243 243
244 244
245 void MemMoveEmitPopAndReturn(MacroAssembler* masm) { 245 void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
246 __ pop(esi); 246 __ pop(esi);
247 __ pop(edi); 247 __ pop(edi);
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
332 __ add(dst, edx); 332 __ add(dst, edx);
333 __ add(src, edx); 333 __ add(src, edx);
334 __ sub(count, edx); 334 __ sub(count, edx);
335 // dst is now aligned. Main copy loop. 335 // dst is now aligned. Main copy loop.
336 __ mov(loop_count, count); 336 __ mov(loop_count, count);
337 __ shr(loop_count, 6); 337 __ shr(loop_count, 6);
338 // Check if src is also aligned. 338 // Check if src is also aligned.
339 __ test(src, Immediate(0xF)); 339 __ test(src, Immediate(0xF));
340 __ j(not_zero, &unaligned_source); 340 __ j(not_zero, &unaligned_source);
341 // Copy loop for aligned source and destination. 341 // Copy loop for aligned source and destination.
342 MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, ALIGNED); 342 MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
343 // At most 15 bytes to copy. Copy 16 bytes at end of string. 343 // At most 15 bytes to copy. Copy 16 bytes at end of string.
344 __ bind(&move_last_15); 344 __ bind(&move_last_15);
345 __ and_(count, 0xF); 345 __ and_(count, 0xF);
346 __ j(zero, &skip_last_move, Label::kNear); 346 __ j(zero, &skip_last_move, Label::kNear);
347 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 347 __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
348 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 348 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
349 __ bind(&skip_last_move); 349 __ bind(&skip_last_move);
350 MemMoveEmitPopAndReturn(&masm); 350 MemMoveEmitPopAndReturn(&masm);
351 351
352 // Copy loop for unaligned source and aligned destination. 352 // Copy loop for unaligned source and aligned destination.
353 __ bind(&unaligned_source); 353 __ bind(&unaligned_source);
354 MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, UNALIGNED); 354 MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
355 __ jmp(&move_last_15); 355 __ jmp(&move_last_15);
356 356
357 // Less than kMinMoveDistance offset between dst and src. 357 // Less than kMinMoveDistance offset between dst and src.
358 Label loop_until_aligned, last_15_much_overlap; 358 Label loop_until_aligned, last_15_much_overlap;
359 __ bind(&loop_until_aligned); 359 __ bind(&loop_until_aligned);
360 __ mov_b(eax, Operand(src, 0)); 360 __ mov_b(eax, Operand(src, 0));
361 __ inc(src); 361 __ inc(src);
362 __ mov_b(Operand(dst, 0), eax); 362 __ mov_b(Operand(dst, 0), eax);
363 __ inc(dst); 363 __ inc(dst);
364 __ dec(count); 364 __ dec(count);
365 __ bind(&forward_much_overlap); // Entry point into this block. 365 __ bind(&forward_much_overlap); // Entry point into this block.
366 __ test(dst, Immediate(0xF)); 366 __ test(dst, Immediate(0xF));
367 __ j(not_zero, &loop_until_aligned); 367 __ j(not_zero, &loop_until_aligned);
368 // dst is now aligned, src can't be. Main copy loop. 368 // dst is now aligned, src can't be. Main copy loop.
369 __ mov(loop_count, count); 369 __ mov(loop_count, count);
370 __ shr(loop_count, 6); 370 __ shr(loop_count, 6);
371 MemMoveEmitMainLoop(&masm, &last_15_much_overlap, FORWARD, UNALIGNED); 371 MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
372 FORWARD, MOVE_UNALIGNED);
372 __ bind(&last_15_much_overlap); 373 __ bind(&last_15_much_overlap);
373 __ and_(count, 0xF); 374 __ and_(count, 0xF);
374 __ j(zero, &pop_and_return); 375 __ j(zero, &pop_and_return);
375 __ cmp(count, kSmallCopySize); 376 __ cmp(count, kSmallCopySize);
376 __ j(below_equal, &small_size); 377 __ j(below_equal, &small_size);
377 __ jmp(&medium_size); 378 __ jmp(&medium_size);
378 } 379 }
379 380
380 { 381 {
381 // |dst| is a higher address than |src|. Copy backwards. 382 // |dst| is a higher address than |src|. Copy backwards.
(...skipping 15 matching lines...) Expand all
397 __ sub(dst, edx); 398 __ sub(dst, edx);
398 __ sub(src, edx); 399 __ sub(src, edx);
399 __ sub(count, edx); 400 __ sub(count, edx);
400 // dst is now aligned. Main copy loop. 401 // dst is now aligned. Main copy loop.
401 __ mov(loop_count, count); 402 __ mov(loop_count, count);
402 __ shr(loop_count, 6); 403 __ shr(loop_count, 6);
403 // Check if src is also aligned. 404 // Check if src is also aligned.
404 __ test(src, Immediate(0xF)); 405 __ test(src, Immediate(0xF));
405 __ j(not_zero, &unaligned_source); 406 __ j(not_zero, &unaligned_source);
406 // Copy loop for aligned source and destination. 407 // Copy loop for aligned source and destination.
407 MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, ALIGNED); 408 MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
408 // At most 15 bytes to copy. Copy 16 bytes at beginning of string. 409 // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
409 __ bind(&move_first_15); 410 __ bind(&move_first_15);
410 __ and_(count, 0xF); 411 __ and_(count, 0xF);
411 __ j(zero, &skip_last_move, Label::kNear); 412 __ j(zero, &skip_last_move, Label::kNear);
412 __ sub(src, count); 413 __ sub(src, count);
413 __ sub(dst, count); 414 __ sub(dst, count);
414 __ movdqu(xmm0, Operand(src, 0)); 415 __ movdqu(xmm0, Operand(src, 0));
415 __ movdqu(Operand(dst, 0), xmm0); 416 __ movdqu(Operand(dst, 0), xmm0);
416 __ bind(&skip_last_move); 417 __ bind(&skip_last_move);
417 MemMoveEmitPopAndReturn(&masm); 418 MemMoveEmitPopAndReturn(&masm);
418 419
419 // Copy loop for unaligned source and aligned destination. 420 // Copy loop for unaligned source and aligned destination.
420 __ bind(&unaligned_source); 421 __ bind(&unaligned_source);
421 MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, UNALIGNED); 422 MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
422 __ jmp(&move_first_15); 423 __ jmp(&move_first_15);
423 424
424 // Less than kMinMoveDistance offset between dst and src. 425 // Less than kMinMoveDistance offset between dst and src.
425 Label loop_until_aligned, first_15_much_overlap; 426 Label loop_until_aligned, first_15_much_overlap;
426 __ bind(&loop_until_aligned); 427 __ bind(&loop_until_aligned);
427 __ dec(src); 428 __ dec(src);
428 __ dec(dst); 429 __ dec(dst);
429 __ mov_b(eax, Operand(src, 0)); 430 __ mov_b(eax, Operand(src, 0));
430 __ mov_b(Operand(dst, 0), eax); 431 __ mov_b(Operand(dst, 0), eax);
431 __ dec(count); 432 __ dec(count);
432 __ bind(&backward_much_overlap); // Entry point into this block. 433 __ bind(&backward_much_overlap); // Entry point into this block.
433 __ test(dst, Immediate(0xF)); 434 __ test(dst, Immediate(0xF));
434 __ j(not_zero, &loop_until_aligned); 435 __ j(not_zero, &loop_until_aligned);
435 // dst is now aligned, src can't be. Main copy loop. 436 // dst is now aligned, src can't be. Main copy loop.
436 __ mov(loop_count, count); 437 __ mov(loop_count, count);
437 __ shr(loop_count, 6); 438 __ shr(loop_count, 6);
438 MemMoveEmitMainLoop(&masm, &first_15_much_overlap, BACKWARD, UNALIGNED); 439 MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
440 BACKWARD, MOVE_UNALIGNED);
439 __ bind(&first_15_much_overlap); 441 __ bind(&first_15_much_overlap);
440 __ and_(count, 0xF); 442 __ and_(count, 0xF);
441 __ j(zero, &pop_and_return); 443 __ j(zero, &pop_and_return);
442 // Small/medium handlers expect dst/src to point to the beginning. 444 // Small/medium handlers expect dst/src to point to the beginning.
443 __ sub(dst, count); 445 __ sub(dst, count);
444 __ sub(src, count); 446 __ sub(src, count);
445 __ cmp(count, kSmallCopySize); 447 __ cmp(count, kSmallCopySize);
446 __ j(below_equal, &small_size); 448 __ j(below_equal, &small_size);
447 __ jmp(&medium_size); 449 __ jmp(&medium_size);
448 } 450 }
(...skipping 763 matching lines...) Expand 10 before | Expand all | Expand 10 after
1212 Code* stub = GetCodeAgeStub(age, parity); 1214 Code* stub = GetCodeAgeStub(age, parity);
1213 CodePatcher patcher(sequence, young_length); 1215 CodePatcher patcher(sequence, young_length);
1214 patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); 1216 patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
1215 } 1217 }
1216 } 1218 }
1217 1219
1218 1220
1219 } } // namespace v8::internal 1221 } } // namespace v8::internal
1220 1222
1221 #endif // V8_TARGET_ARCH_IA32 1223 #endif // V8_TARGET_ARCH_IA32
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698