Chromium Code Reviews

Unified Diff: src/x64/codegen-x64.cc

Issue 18014003: Add X32 port into V8 (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 5 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 35 matching lines...)
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
   ASSERT(masm->has_frame());
   masm->set_has_frame(false);
 }
 
 
 #define __ masm.
+#define __k __
 
 
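Note: on x64, `__k` is nothing more than an alias for `__`. Judging from how the rest of this patch applies it, the marker tags emitted instructions that must stay quadword (64-bit) even on an X32 build, where pointers and smis shrink to 4 bytes. A minimal sketch of the intent, with the X32 side assumed rather than taken from this diff:

  // Illustrative only -- not the definitions from this CL.
  // On X32, pointers are 4 bytes but raw double bits are still 8 bytes,
  // so instructions that move double bits through general-purpose
  // registers must keep their 64-bit (quadword) width.
  #define __  masm.   // default operand width for the target
  #define __k __      // "keep 64-bit": on x64 the distinction collapses
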
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   size_t actual_size;
   // Allocate buffer in executable space.
   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                  &actual_size,
                                                  true));
   if (buffer == NULL) {
     // Fallback to library function if function cannot be created.
     switch (type) {
       case TranscendentalCache::SIN: return &sin;
       case TranscendentalCache::COS: return &cos;
       case TranscendentalCache::TAN: return &tan;
       case TranscendentalCache::LOG: return &log;
       default: UNIMPLEMENTED();
     }
   }
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   // xmm0: raw double input.
   // Move double input into registers.
-  __ push(rbx);
-  __ push(rdi);
-  __ movq(rbx, xmm0);
-  __ push(rbx);
+  __k push(rbx);
+  __k push(rdi);
+  __k movq(rbx, xmm0);
+  __k push(rbx);
   __ fld_d(Operand(rsp, 0));
   TranscendentalCacheStub::GenerateOperation(&masm, type);
   // The return value is expected to be in xmm0.
   __ fstp_d(Operand(rsp, 0));
-  __ pop(rbx);
-  __ movq(xmm0, rbx);
-  __ pop(rdi);
-  __ pop(rbx);
+  __k pop(rbx);
+  __k movq(xmm0, rbx);
+  __k pop(rdi);
+  __k pop(rbx);
   __ Ret();
 
   CodeDesc desc;
   masm.GetCode(&desc);
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
   OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
 
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &exp;
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &exp;
   ExternalReference::InitializeMathExpData();
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   // xmm0: raw double input.
   XMMRegister input = xmm0;
   XMMRegister result = xmm1;
-  __ push(rax);
-  __ push(rbx);
+  __k push(rax);
+  __k push(rbx);
 
   MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
 
-  __ pop(rbx);
-  __ pop(rax);
+  __k pop(rbx);
+  __k pop(rax);
   __ movsd(xmm0, result);
   __ Ret();
 
   CodeDesc desc;
   masm.GetCode(&desc);
   ASSERT(!RelocInfo::RequiresRelocation(desc));
 
   CPU::FlushICache(buffer, actual_size);
   OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
(...skipping 76 matching lines...)
   }
 
   Label valid_result;
   Label return_result;
   // If Invalid Operand or Zero Division exceptions are set,
   // return NaN.
   __ testb(rax, Immediate(5));
   __ j(zero, &valid_result);
   __ fstp(0);  // Drop result in st(0).
   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
-  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+  __k movq(rcx, kNaNValue, RelocInfo::NONE64);
   __ movq(Operand(rsp, kPointerSize), rcx);
   __ movsd(xmm0, Operand(rsp, kPointerSize));
   __ jmp(&return_result);
 
   // If result is valid, return that.
   __ bind(&valid_result);
   __ fstp_d(Operand(rsp, kPointerSize));
   __ movsd(xmm0, Operand(rsp, kPointerSize));
 
   // Clean up FPU stack and exceptions and return xmm0
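Note: the `testb(rax, Immediate(5))` above examines the x87 status word (presumably loaded into rax by an fnstsw in the elided lines) and branches on bits 0 and 2 -- the Invalid Operation and Zero Divide exception flags; when either fired, the canonical quiet-NaN pattern 0x7ff8000000000000 is materialized instead of the bogus result. The mask, spelled out:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint8_t kInvalidOperation = 1 << 0;  // x87 status word, IE flag
    const uint8_t kZeroDivide       = 1 << 2;  // x87 status word, ZE flag
    std::printf("%u\n", kInvalidOperation | kZeroDivide);  // 5, as in testb
  }
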
(...skipping 71 matching lines...)
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
   __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
   __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
   __ j(equal, &only_change_map);
 
   // Check backing store for COW-ness. For COW arrays we have to
   // allocate a new backing store.
   __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
+#ifndef V8_TARGET_ARCH_X32
   __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                  Heap::kFixedCOWArrayMapRootIndex);
   __ j(equal, &new_backing_store);
+#else
+  // Smi is 4-byte while double is 8-byte for X32.
+  __ jmp(&new_backing_store);
+#endif
   // Check if the backing store is in new-space. If not, we need to allocate
   // a new one since the old one is in pointer-space.
   // If in new space, we can reuse the old backing store because it is
   // the same size.
   __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
 
   __ movq(r14, r8);  // Destination array equals source array.
 
   // r8 : source FixedArray
   // r9 : elements array length
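Note: the new `#else` branch encodes the X32 constraint named in its comment: a smi element occupies 4 bytes on X32 while a double always needs 8, so a smi-only backing store can never be the right size for an in-place transition, and the code must jump straight to allocating a fresh backing store (the COW and new-space fast paths only make sense when both element kinds are 8 bytes wide, as on x64). A one-line size check, assuming an ILP32-style X32 toolchain:

  #include <cstdio>

  int main() {
    // x64: both are 8 bytes, so in-place reuse can work.
    // X32 (ILP32 on x86-64): a tagged slot is 4 bytes, a double stays 8.
    std::printf("slot: %zu bytes, double: %zu bytes\n",
                sizeof(void*), sizeof(double));
  }
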
(...skipping 12 matching lines...)
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 
   // Convert smis to doubles and holes to hole NaNs. The Array's length
   // remains unchanged.
   STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __k movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
   // r15: the-hole NaN
   __ jmp(&entry);
 
   // Allocate new backing store.
   __ bind(&new_backing_store);
   __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
   __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
   // Set backing store's map
   __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
   __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
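Note: `kHoleNanInt64` is the bit pattern of the one NaN that V8 reserves to mean "this slot is a hole" in a FixedDoubleArray. Since any NaN compares unequal to everything, including itself, the sentinel must be stored and later recognized via raw 64-bit integer moves and compares -- which is exactly why these uses carry the `__k` marker. A sketch of the encoding (the constant below is an assumption for illustration; the authoritative value is kHoleNanInt64 in V8's headers):

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  int main() {
    const uint64_t kHoleNan = 0x7FF7FFFFFFFFFFFFULL;  // assumed payload
    double d;
    std::memcpy(&d, &kHoleNan, sizeof d);
    std::printf("NaN? %d\n", d != d);  // 1: the sentinel is a NaN
    // A floating-point compare can never single it out; only an integer
    // compare of the raw bits identifies a hole reliably.
  }
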
(...skipping 36 matching lines...)
   __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
            xmm0);
   __ jmp(&entry);
   __ bind(&convert_hole);
 
   if (FLAG_debug_code) {
     __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
     __ Assert(equal, "object found in smi-only array");
   }
 
-  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
+  __k movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
   __ bind(&entry);
   __ decq(r9);
   __ j(not_sign, &loop);
 
   __ bind(&done);
 }
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
     MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
(...skipping 25 matching lines...)
   // r9 : number of elements
   __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
   __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
   // r11: destination FixedArray
   __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
   __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
   __ Integer32ToSmi(r14, r9);
   __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __k movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
   __ jmp(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ pop(rax);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   __ jmp(fail);
 
   // Box doubles into heap numbers.
   __ bind(&loop);
-  __ movq(r14, FieldOperand(r8,
-                            r9,
-                            times_8,
-                            FixedDoubleArray::kHeaderSize));
+  __k movq(r14, FieldOperand(r8,
+                             r9,
+                             times_8,
+                             FixedDoubleArray::kHeaderSize));
   // r9 : current element's index
   // r14: current element
-  __ cmpq(r14, rsi);
+  __k cmpq(r14, rsi);
   __ j(equal, &convert_hole);
 
   // Non-hole double, copy value into a heap number.
   __ AllocateHeapNumber(rax, r15, &gc_required);
   // rax: new heap number
-  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+  __k movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
   __ movq(FieldOperand(r11,
                        r9,
                        times_pointer_size,
                        FixedArray::kHeaderSize),
           rax);
   __ movq(r15, r9);
   __ RecordWriteArray(r11,
                       rax,
                       r15,
                       kDontSaveFPRegs,
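Note: the boxing loop above reloads each element's raw bits into r14 and walks the array with the same decq / j(not_sign) countdown seen in GenerateSmiToDouble: a bit-exact match against the hole sentinel in rsi routes to convert_hole, and every other element is boxed into a freshly allocated HeapNumber whose payload receives the raw 64-bit pattern. The same control flow in plain C++, every name invented for illustration:

  #include <cstdint>
  #include <cstring>
  #include <vector>

  struct HeapNumber { double value; };  // stand-in for V8's HeapNumber

  void DoubleToObject(const std::vector<uint64_t>& src,   // raw double bits
                      std::vector<const void*>* dst,
                      uint64_t hole_bits, const void* the_hole) {
    for (size_t i = src.size(); i-- > 0;) {   // backwards countdown
      if (src[i] == hole_bits) {              // raw-bit compare (the cmpq)
        (*dst)[i] = the_hole;                 // holes map to the-hole value
      } else {
        HeapNumber* boxed = new HeapNumber;   // AllocateHeapNumber
        std::memcpy(&boxed->value, &src[i], sizeof(double));
        (*dst)[i] = boxed;                    // plus a write barrier in V8
      }
    }
  }
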
(...skipping 163 matching lines...)
   __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
   __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
   __ j(above_equal, &done);
   __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
   __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
   __ mulsd(double_scratch, input);
   __ addsd(double_scratch, result);
   __ movq(temp2, double_scratch);
   __ subsd(double_scratch, result);
   __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
-  __ lea(temp1, Operand(temp2, 0x1ff800));
-  __ and_(temp2, Immediate(0x7ff));
-  __ shr(temp1, Immediate(11));
+  __k lea(temp1, Operand(temp2, 0x1ff800));
+  __k and_(temp2, Immediate(0x7ff));
+  __k shr(temp1, Immediate(11));
   __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
   __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
-  __ shl(temp1, Immediate(52));
-  __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+  __k shl(temp1, Immediate(52));
+  __k or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
   __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
   __ subsd(double_scratch, input);
   __ movsd(input, double_scratch);
   __ subsd(result, double_scratch);
   __ mulsd(input, double_scratch);
   __ mulsd(result, input);
-  __ movq(input, temp1);
+  __k movq(input, temp1);
   __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
   __ subsd(result, double_scratch);
   __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
   __ mulsd(result, input);
 
   __ bind(&done);
 }
 
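Note: this tail of EmitMathExp shows why the marked instructions must stay quadword: the routine assembles the bits of a binary64 value directly in a general-purpose register -- an 11-bit table index (and_ with 0x7ff), a biased exponent shifted into the IEEE-754 exponent field (shl by 52), or_-ed with an entry from the 2048-entry math_exp_log_table -- and then movq's the finished pattern into an XMM register. That is 64-bit integer arithmetic no matter what the pointer size is. The core trick in isolation, building 2^k by writing the exponent field directly:

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  double PowerOfTwo(int k) {
    // The exponent field of a binary64 starts at bit 52 (hence the
    // shl(temp1, Immediate(52)) above); 1023 is the exponent bias.
    uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
  }

  int main() {
    std::printf("%g %g %g\n", PowerOfTwo(0), PowerOfTwo(10), PowerOfTwo(-3));
    // prints: 1 1024 0.125
  }
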
+#undef __k
 #undef __
 
 
+#ifndef V8_TARGET_ARCH_X32
 static const int kNoCodeAgeSequenceLength = 6;
+#else
+static const int kNoCodeAgeSequenceLength = 17;
+#endif
 
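Note: kNoCodeAgeSequenceLength is the byte length of the frame-building prologue that code aging patches over -- the boilerplate stack-building sequence mentioned in the comment below (push rbp; movq rbp, rsp; push rsi; push rdi in V8 of this vintage). On x64 that encodes to exactly 6 bytes; the 17 bytes for X32 reflect the longer prologue the X32 port emits, whose instruction sequence is not visible in this diff. Byte accounting for the x64 case (encodings per the Intel SDM):

  #include <cstdio>

  int main() {
    const unsigned char kSequence[] = {
        0x55,              // push rbp
        0x48, 0x89, 0xE5,  // movq rbp, rsp (REX.W prefix + 89 /r)
        0x56,              // push rsi
        0x57,              // push rdi
    };
    std::printf("%zu\n", sizeof kSequence);  // 6 == kNoCodeAgeSequenceLength
  }
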
 static byte* GetNoCodeAgeSequence(uint32_t* length) {
   static bool initialized = false;
   static byte sequence[kNoCodeAgeSequenceLength];
   *length = kNoCodeAgeSequenceLength;
   if (!initialized) {
     // The sequence of instructions that is patched out for aging code is the
     // following boilerplate stack-building prologue that is found both in
     // FUNCTION and OPTIMIZED_FUNCTION code:
     CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
(...skipping 48 matching lines...)
          i++) {
       patcher.masm()->nop();
     }
   }
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64