OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
81 double ceiling(double x) { | 81 double ceiling(double x) { |
82 // Correct Mac OS X Leopard 'ceil' behavior. | 82 // Correct Mac OS X Leopard 'ceil' behavior. |
83 if (-1.0 < x && x < 0.0) { | 83 if (-1.0 < x && x < 0.0) { |
84 return -0.0; | 84 return -0.0; |
85 } else { | 85 } else { |
86 return ceil(x); | 86 return ceil(x); |
87 } | 87 } |
88 } | 88 } |
89 | 89 |
90 | 90 |
| 91 static Mutex* limit_mutex = NULL; |
| 92 |
| 93 |
91 void OS::Setup() { | 94 void OS::Setup() { |
92 // Seed the random number generator. | 95 // Seed the random number generator. |
93 // Convert the current time to a 64-bit integer first, before converting it | 96 // Convert the current time to a 64-bit integer first, before converting it |
94 // to an unsigned. Going directly will cause an overflow and the seed to be | 97 // to an unsigned. Going directly will cause an overflow and the seed to be |
95 // set to all ones. The seed will be identical for different instances that | 98 // set to all ones. The seed will be identical for different instances that |
96 // call this setup code within the same millisecond. | 99 // call this setup code within the same millisecond. |
97 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); | 100 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); |
98 srandom(static_cast<unsigned int>(seed)); | 101 srandom(static_cast<unsigned int>(seed)); |
| 102 limit_mutex = CreateMutex(); |
99 } | 103 } |
100 | 104 |
101 | 105 |
102 // We keep the lowest and highest addresses mapped as a quick way of | 106 // We keep the lowest and highest addresses mapped as a quick way of |
103 // determining that pointers are outside the heap (used mostly in assertions | 107 // determining that pointers are outside the heap (used mostly in assertions |
104 // and verification). The estimate is conservative, ie, not all addresses in | 108 // and verification). The estimate is conservative, ie, not all addresses in |
105 // 'allocated' space are actually allocated to our heap. The range is | 109 // 'allocated' space are actually allocated to our heap. The range is |
106 // [lowest, highest), inclusive on the low end and exclusive on the high end. | 110 // [lowest, highest), inclusive on the low end and exclusive on the high end. |
107 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | 111 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); |
108 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | 112 static void* highest_ever_allocated = reinterpret_cast<void*>(0); |
109 | 113 |
110 | 114 |
111 static void UpdateAllocatedSpaceLimits(void* address, int size) { | 115 static void UpdateAllocatedSpaceLimits(void* address, int size) { |
| 116 ASSERT(limit_mutex != NULL); |
| 117 ScopedLock lock(limit_mutex); |
| 118 |
112 lowest_ever_allocated = Min(lowest_ever_allocated, address); | 119 lowest_ever_allocated = Min(lowest_ever_allocated, address); |
113 highest_ever_allocated = | 120 highest_ever_allocated = |
114 Max(highest_ever_allocated, | 121 Max(highest_ever_allocated, |
115 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | 122 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
116 } | 123 } |
117 | 124 |
118 | 125 |
119 bool OS::IsOutsideAllocatedSpace(void* address) { | 126 bool OS::IsOutsideAllocatedSpace(void* address) { |
120 return address < lowest_ever_allocated || address >= highest_ever_allocated; | 127 return address < lowest_ever_allocated || address >= highest_ever_allocated; |
121 } | 128 } |
(...skipping 14 matching lines...) Expand all Loading... |
136 | 143 |
137 void* OS::Allocate(const size_t requested, | 144 void* OS::Allocate(const size_t requested, |
138 size_t* allocated, | 145 size_t* allocated, |
139 bool is_executable) { | 146 bool is_executable) { |
140 const size_t msize = RoundUp(requested, getpagesize()); | 147 const size_t msize = RoundUp(requested, getpagesize()); |
141 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); | 148 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
142 void* mbase = mmap(NULL, msize, prot, | 149 void* mbase = mmap(NULL, msize, prot, |
143 MAP_PRIVATE | MAP_ANON, | 150 MAP_PRIVATE | MAP_ANON, |
144 kMmapFd, kMmapFdOffset); | 151 kMmapFd, kMmapFdOffset); |
145 if (mbase == MAP_FAILED) { | 152 if (mbase == MAP_FAILED) { |
146 LOG(StringEvent("OS::Allocate", "mmap failed")); | 153 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); |
147 return NULL; | 154 return NULL; |
148 } | 155 } |
149 *allocated = msize; | 156 *allocated = msize; |
150 UpdateAllocatedSpaceLimits(mbase, msize); | 157 UpdateAllocatedSpaceLimits(mbase, msize); |
151 return mbase; | 158 return mbase; |
152 } | 159 } |
153 | 160 |
154 | 161 |
155 void OS::Free(void* address, const size_t size) { | 162 void OS::Free(void* address, const size_t size) { |
156 // TODO(1240712): munmap has a return value which is ignored here. | 163 // TODO(1240712): munmap has a return value which is ignored here. |
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
251 SEG_TEXT, | 258 SEG_TEXT, |
252 SECT_TEXT, | 259 SECT_TEXT, |
253 &size); | 260 &size); |
254 #else | 261 #else |
255 unsigned int size; | 262 unsigned int size; |
256 char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); | 263 char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); |
257 #endif | 264 #endif |
258 if (code_ptr == NULL) continue; | 265 if (code_ptr == NULL) continue; |
259 const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); | 266 const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); |
260 const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; | 267 const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; |
261 LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); | 268 LOG(Isolate::Current(), |
| 269 SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); |
262 } | 270 } |
263 #endif // ENABLE_LOGGING_AND_PROFILING | 271 #endif // ENABLE_LOGGING_AND_PROFILING |
264 } | 272 } |
265 | 273 |
266 | 274 |
267 void OS::SignalCodeMovingGC() { | 275 void OS::SignalCodeMovingGC() { |
268 } | 276 } |
269 | 277 |
270 | 278 |
271 uint64_t OS::CpuFeaturesImpliedByPlatform() { | 279 uint64_t OS::CpuFeaturesImpliedByPlatform() { |
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
417 bool ThreadHandle::IsSelf() const { | 425 bool ThreadHandle::IsSelf() const { |
418 return pthread_equal(data_->thread_, pthread_self()); | 426 return pthread_equal(data_->thread_, pthread_self()); |
419 } | 427 } |
420 | 428 |
421 | 429 |
422 bool ThreadHandle::IsValid() const { | 430 bool ThreadHandle::IsValid() const { |
423 return data_->thread_ != kNoThread; | 431 return data_->thread_ != kNoThread; |
424 } | 432 } |
425 | 433 |
426 | 434 |
427 Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) { | 435 Thread::Thread(Isolate* isolate) |
| 436 : ThreadHandle(ThreadHandle::INVALID), |
| 437 isolate_(isolate) { |
428 set_name("v8:<unknown>"); | 438 set_name("v8:<unknown>"); |
429 } | 439 } |
430 | 440 |
431 | 441 |
432 Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) { | 442 Thread::Thread(Isolate* isolate, const char* name) |
| 443 : ThreadHandle(ThreadHandle::INVALID), |
| 444 isolate_(isolate) { |
433 set_name(name); | 445 set_name(name); |
434 } | 446 } |
435 | 447 |
436 | 448 |
437 Thread::~Thread() { | 449 Thread::~Thread() { |
438 } | 450 } |
439 | 451 |
440 | 452 |
441 | 453 |
442 static void SetThreadName(const char* name) { | 454 static void SetThreadName(const char* name) { |
(...skipping 14 matching lines...) Expand all Loading... |
457 | 469 |
458 | 470 |
459 static void* ThreadEntry(void* arg) { | 471 static void* ThreadEntry(void* arg) { |
460 Thread* thread = reinterpret_cast<Thread*>(arg); | 472 Thread* thread = reinterpret_cast<Thread*>(arg); |
461 // This is also initialized by the first argument to pthread_create() but we | 473 // This is also initialized by the first argument to pthread_create() but we |
462 // don't know which thread will run first (the original thread or the new | 474 // don't know which thread will run first (the original thread or the new |
463 // one) so we initialize it here too. | 475 // one) so we initialize it here too. |
464 thread->thread_handle_data()->thread_ = pthread_self(); | 476 thread->thread_handle_data()->thread_ = pthread_self(); |
465 SetThreadName(thread->name()); | 477 SetThreadName(thread->name()); |
466 ASSERT(thread->IsValid()); | 478 ASSERT(thread->IsValid()); |
| 479 Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate()); |
467 thread->Run(); | 480 thread->Run(); |
468 return NULL; | 481 return NULL; |
469 } | 482 } |
470 | 483 |
471 | 484 |
472 void Thread::set_name(const char* name) { | 485 void Thread::set_name(const char* name) { |
473 strncpy(name_, name, sizeof(name_)); | 486 strncpy(name_, name, sizeof(name_)); |
474 name_[sizeof(name_) - 1] = '\0'; | 487 name_[sizeof(name_) - 1] = '\0'; |
475 } | 488 } |
476 | 489 |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
588 | 601 |
589 Semaphore* OS::CreateSemaphore(int count) { | 602 Semaphore* OS::CreateSemaphore(int count) { |
590 return new MacOSSemaphore(count); | 603 return new MacOSSemaphore(count); |
591 } | 604 } |
592 | 605 |
593 | 606 |
594 #ifdef ENABLE_LOGGING_AND_PROFILING | 607 #ifdef ENABLE_LOGGING_AND_PROFILING |
595 | 608 |
596 class Sampler::PlatformData : public Malloced { | 609 class Sampler::PlatformData : public Malloced { |
597 public: | 610 public: |
598 explicit PlatformData(Sampler* sampler) | 611 PlatformData() : profiled_thread_(mach_thread_self()) {} |
599 : sampler_(sampler), | 612 |
600 task_self_(mach_task_self()), | 613 ~PlatformData() { |
601 profiled_thread_(0), | 614 // Deallocate Mach port for thread. |
602 sampler_thread_(0) { | 615 mach_port_deallocate(mach_task_self(), profiled_thread_); |
603 } | 616 } |
604 | 617 |
605 Sampler* sampler_; | 618 thread_act_t profiled_thread() { return profiled_thread_; } |
| 619 |
| 620 private: |
606 // Note: for profiled_thread_ Mach primitives are used instead of PThread's | 621 // Note: for profiled_thread_ Mach primitives are used instead of PThread's |
607 // because the latter doesn't provide thread manipulation primitives required. | 622 // because the latter doesn't provide thread manipulation primitives required. |
608 // For details, consult "Mac OS X Internals" book, Section 7.3. | 623 // For details, consult "Mac OS X Internals" book, Section 7.3. |
609 mach_port_t task_self_; | |
610 thread_act_t profiled_thread_; | 624 thread_act_t profiled_thread_; |
611 pthread_t sampler_thread_; | 625 }; |
612 RuntimeProfilerRateLimiter rate_limiter_; | |
613 | 626 |
614 // Sampler thread handler. | 627 class SamplerThread : public Thread { |
615 void Runner() { | 628 public: |
616 while (sampler_->IsActive()) { | 629 explicit SamplerThread(int interval) : Thread(NULL), interval_(interval) {} |
617 if (rate_limiter_.SuspendIfNecessary()) continue; | 630 |
618 Sample(); | 631 static void AddActiveSampler(Sampler* sampler) { |
619 OS::Sleep(sampler_->interval_); | 632 ScopedLock lock(mutex_); |
| 633 SamplerRegistry::AddActiveSampler(sampler); |
| 634 if (instance_ == NULL) { |
| 635 instance_ = new SamplerThread(sampler->interval()); |
| 636 instance_->Start(); |
| 637 } else { |
| 638 ASSERT(instance_->interval_ == sampler->interval()); |
620 } | 639 } |
621 } | 640 } |
622 | 641 |
623 void Sample() { | 642 static void RemoveActiveSampler(Sampler* sampler) { |
624 if (sampler_->IsProfiling()) { | 643 ScopedLock lock(mutex_); |
625 TickSample sample_obj; | 644 SamplerRegistry::RemoveActiveSampler(sampler); |
626 TickSample* sample = CpuProfiler::TickSampleEvent(); | 645 if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { |
627 if (sample == NULL) sample = &sample_obj; | 646 RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown(); |
| 647 instance_->Join(); |
| 648 delete instance_; |
| 649 instance_ = NULL; |
| 650 } |
| 651 } |
628 | 652 |
629 if (KERN_SUCCESS != thread_suspend(profiled_thread_)) return; | 653 // Implement Thread::Run(). |
| 654 virtual void Run() { |
| 655 SamplerRegistry::State state = SamplerRegistry::GetState(); |
| 656 while (state != SamplerRegistry::HAS_NO_SAMPLERS) { |
| 657 bool cpu_profiling_enabled = |
| 658 (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); |
| 659 bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); |
| 660 // When CPU profiling is enabled both JavaScript and C++ code is |
| 661 // profiled. We must not suspend. |
| 662 if (!cpu_profiling_enabled) { |
| 663 if (rate_limiter_.SuspendIfNecessary()) continue; |
| 664 } |
| 665 if (cpu_profiling_enabled) { |
| 666 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { |
| 667 return; |
| 668 } |
| 669 } |
| 670 if (runtime_profiler_enabled) { |
| 671 if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { |
| 672 return; |
| 673 } |
| 674 } |
| 675 OS::Sleep(interval_); |
| 676 state = SamplerRegistry::GetState(); |
| 677 } |
| 678 } |
| 679 |
| 680 static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) { |
| 681 if (!sampler->isolate()->IsInitialized()) return; |
| 682 if (!sampler->IsProfiling()) return; |
| 683 SamplerThread* sampler_thread = |
| 684 reinterpret_cast<SamplerThread*>(raw_sampler_thread); |
| 685 sampler_thread->SampleContext(sampler); |
| 686 } |
| 687 |
| 688 static void DoRuntimeProfile(Sampler* sampler, void* ignored) { |
| 689 if (!sampler->isolate()->IsInitialized()) return; |
| 690 sampler->isolate()->runtime_profiler()->NotifyTick(); |
| 691 } |
| 692 |
| 693 void SampleContext(Sampler* sampler) { |
| 694 thread_act_t profiled_thread = sampler->platform_data()->profiled_thread(); |
| 695 TickSample sample_obj; |
| 696 TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate()); |
| 697 if (sample == NULL) sample = &sample_obj; |
| 698 |
| 699 if (KERN_SUCCESS != thread_suspend(profiled_thread)) return; |
630 | 700 |
631 #if V8_HOST_ARCH_X64 | 701 #if V8_HOST_ARCH_X64 |
632 thread_state_flavor_t flavor = x86_THREAD_STATE64; | 702 thread_state_flavor_t flavor = x86_THREAD_STATE64; |
633 x86_thread_state64_t state; | 703 x86_thread_state64_t state; |
634 mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; | 704 mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; |
635 #if __DARWIN_UNIX03 | 705 #if __DARWIN_UNIX03 |
636 #define REGISTER_FIELD(name) __r ## name | 706 #define REGISTER_FIELD(name) __r ## name |
637 #else | 707 #else |
638 #define REGISTER_FIELD(name) r ## name | 708 #define REGISTER_FIELD(name) r ## name |
639 #endif // __DARWIN_UNIX03 | 709 #endif // __DARWIN_UNIX03 |
640 #elif V8_HOST_ARCH_IA32 | 710 #elif V8_HOST_ARCH_IA32 |
641 thread_state_flavor_t flavor = i386_THREAD_STATE; | 711 thread_state_flavor_t flavor = i386_THREAD_STATE; |
642 i386_thread_state_t state; | 712 i386_thread_state_t state; |
643 mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; | 713 mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; |
644 #if __DARWIN_UNIX03 | 714 #if __DARWIN_UNIX03 |
645 #define REGISTER_FIELD(name) __e ## name | 715 #define REGISTER_FIELD(name) __e ## name |
646 #else | 716 #else |
647 #define REGISTER_FIELD(name) e ## name | 717 #define REGISTER_FIELD(name) e ## name |
648 #endif // __DARWIN_UNIX03 | 718 #endif // __DARWIN_UNIX03 |
649 #else | 719 #else |
650 #error Unsupported Mac OS X host architecture. | 720 #error Unsupported Mac OS X host architecture. |
651 #endif // V8_HOST_ARCH | 721 #endif // V8_HOST_ARCH |
652 | 722 |
653 if (thread_get_state(profiled_thread_, | 723 if (thread_get_state(profiled_thread, |
654 flavor, | 724 flavor, |
655 reinterpret_cast<natural_t*>(&state), | 725 reinterpret_cast<natural_t*>(&state), |
656 &count) == KERN_SUCCESS) { | 726 &count) == KERN_SUCCESS) { |
657 sample->state = Top::current_vm_state(); | 727 sample->state = sampler->isolate()->current_vm_state(); |
658 sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip)); | 728 sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip)); |
659 sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); | 729 sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); |
660 sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp)); | 730 sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp)); |
661 sampler_->SampleStack(sample); | 731 sampler->SampleStack(sample); |
662 sampler_->Tick(sample); | 732 sampler->Tick(sample); |
663 } | |
664 thread_resume(profiled_thread_); | |
665 } | 733 } |
666 if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick(); | 734 thread_resume(profiled_thread); |
667 } | 735 } |
| 736 |
| 737 const int interval_; |
| 738 RuntimeProfilerRateLimiter rate_limiter_; |
| 739 |
| 740 // Protects the process wide state below. |
| 741 static Mutex* mutex_; |
| 742 static SamplerThread* instance_; |
| 743 |
| 744 DISALLOW_COPY_AND_ASSIGN(SamplerThread); |
668 }; | 745 }; |
669 | 746 |
670 #undef REGISTER_FIELD | 747 #undef REGISTER_FIELD |
671 | 748 |
672 | 749 |
673 // Entry point for sampler thread. | 750 Mutex* SamplerThread::mutex_ = OS::CreateMutex(); |
674 static void* SamplerEntry(void* arg) { | 751 SamplerThread* SamplerThread::instance_ = NULL; |
675 Sampler::PlatformData* data = | |
676 reinterpret_cast<Sampler::PlatformData*>(arg); | |
677 data->Runner(); | |
678 return 0; | |
679 } | |
680 | 752 |
681 | 753 |
682 Sampler::Sampler(int interval) | 754 Sampler::Sampler(Isolate* isolate, int interval) |
683 : interval_(interval), | 755 : isolate_(isolate), |
| 756 interval_(interval), |
684 profiling_(false), | 757 profiling_(false), |
685 active_(false), | 758 active_(false), |
686 samples_taken_(0) { | 759 samples_taken_(0) { |
687 data_ = new PlatformData(this); | 760 data_ = new PlatformData; |
688 } | 761 } |
689 | 762 |
690 | 763 |
691 Sampler::~Sampler() { | 764 Sampler::~Sampler() { |
| 765 ASSERT(!IsActive()); |
692 delete data_; | 766 delete data_; |
693 } | 767 } |
694 | 768 |
695 | 769 |
696 void Sampler::Start() { | 770 void Sampler::Start() { |
697 // Do not start multiple threads for the same sampler. | |
698 ASSERT(!IsActive()); | 771 ASSERT(!IsActive()); |
699 data_->profiled_thread_ = mach_thread_self(); | |
700 | |
701 // Create sampler thread with high priority. | |
702 // According to POSIX spec, when SCHED_FIFO policy is used, a thread | |
703 // runs until it exits or blocks. | |
704 pthread_attr_t sched_attr; | |
705 sched_param fifo_param; | |
706 pthread_attr_init(&sched_attr); | |
707 pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED); | |
708 pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO); | |
709 fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO); | |
710 pthread_attr_setschedparam(&sched_attr, &fifo_param); | |
711 | |
712 SetActive(true); | 772 SetActive(true); |
713 pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_); | 773 SamplerThread::AddActiveSampler(this); |
714 } | 774 } |
715 | 775 |
716 | 776 |
717 void Sampler::Stop() { | 777 void Sampler::Stop() { |
718 // Setting active to false triggers termination of the sampler | 778 ASSERT(IsActive()); |
719 // thread. | 779 SamplerThread::RemoveActiveSampler(this); |
720 SetActive(false); | 780 SetActive(false); |
721 | |
722 // Wait for sampler thread to terminate. | |
723 Top::WakeUpRuntimeProfilerThreadBeforeShutdown(); | |
724 pthread_join(data_->sampler_thread_, NULL); | |
725 | |
726 // Deallocate Mach port for thread. | |
727 mach_port_deallocate(data_->task_self_, data_->profiled_thread_); | |
728 } | 781 } |
729 | 782 |
730 #endif // ENABLE_LOGGING_AND_PROFILING | 783 #endif // ENABLE_LOGGING_AND_PROFILING |
731 | 784 |
732 } } // namespace v8::internal | 785 } } // namespace v8::internal |
OLD | NEW |