OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 70 matching lines...) | |
81 // 0 is never a valid thread id on Solaris since the main thread is 1 and | 81 // 0 is never a valid thread id on Solaris since the main thread is 1 and |
82 // subsequent threads have their ids incremented from there. | 82 // subsequent threads have their ids incremented from there. |
83 static const pthread_t kNoThread = (pthread_t) 0; | 83 static const pthread_t kNoThread = (pthread_t) 0; |
84 | 84 |
85 | 85 |
86 double ceiling(double x) { | 86 double ceiling(double x) { |
87 return ceil(x); | 87 return ceil(x); |
88 } | 88 } |
89 | 89 |
90 | 90 |
91 static Mutex* limit_mutex = NULL; | |
91 void OS::Setup() { | 92 void OS::Setup() { |
92 // Seed the random number generator. | 93 // Seed the random number generator. |
93 // Convert the current time to a 64-bit integer first, before converting it | 94 // Convert the current time to a 64-bit integer first, before converting it |
94 // to an unsigned. Going directly will cause an overflow and the seed to be | 95 // to an unsigned. Going directly will cause an overflow and the seed to be |
95 // set to all ones. The seed will be identical for different instances that | 96 // set to all ones. The seed will be identical for different instances that |
96 // call this setup code within the same millisecond. | 97 // call this setup code within the same millisecond. |
97 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); | 98 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); |
98 srandom(static_cast<unsigned int>(seed)); | 99 srandom(static_cast<unsigned int>(seed)); |
100 limit_mutex = CreateMutex(); | |
99 } | 101 } |
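A minimal standalone sketch of the conversion trick the comment above describes (the timestamp value and variable names are illustrative, not taken from the V8 sources): the double is widened to uint64_t first because casting an out-of-range double straight to unsigned int is undefined behavior and, as the comment notes, tends to produce a useless constant seed.

```cpp
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  // A plausible TimeCurrentMillis() result; well above UINT_MAX.
  double now_ms = 1309417000123.0;

  // Casting now_ms directly to unsigned int would be undefined here, so
  // widen to uint64_t first; the uint64_t -> unsigned int cast then
  // truncates modulo 2^32, keeping the fast-changing low bits.
  uint64_t wide = static_cast<uint64_t>(now_ms);
  unsigned int seed = static_cast<unsigned int>(wide);

  srandom(seed);  // POSIX seeding call used by the Solaris port
  printf("seed = %u\n", seed);
  return 0;
}
```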
100 | 102 |
101 | 103 |
102 uint64_t OS::CpuFeaturesImpliedByPlatform() { | 104 uint64_t OS::CpuFeaturesImpliedByPlatform() { |
103 return 0; // Solaris runs on a lot of things. | 105 return 0; // Solaris runs on a lot of things. |
104 } | 106 } |
105 | 107 |
106 | 108 |
107 int OS::ActivationFrameAlignment() { | 109 int OS::ActivationFrameAlignment() { |
108 // GCC generates code that requires 16 byte alignment such as movdqa. | 110 // GCC generates code that requires 16 byte alignment such as movdqa. |
(...skipping 29 matching lines...) | |
138 // We keep the lowest and highest addresses mapped as a quick way of | 140 // We keep the lowest and highest addresses mapped as a quick way of |
139 // determining that pointers are outside the heap (used mostly in assertions | 141 // determining that pointers are outside the heap (used mostly in assertions |
140 // and verification). The estimate is conservative, i.e., not all addresses in | 142 // and verification). The estimate is conservative, i.e., not all addresses in |
141 // 'allocated' space are actually allocated to our heap. The range is | 143 // 'allocated' space are actually allocated to our heap. The range is |
142 // [lowest, highest), inclusive on the low end and exclusive on the high end. | 144 // [lowest, highest), inclusive on the low end and exclusive on the high end. |
143 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); | 145 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); |
144 static void* highest_ever_allocated = reinterpret_cast<void*>(0); | 146 static void* highest_ever_allocated = reinterpret_cast<void*>(0); |
145 | 147 |
146 | 148 |
147 static void UpdateAllocatedSpaceLimits(void* address, int size) { | 149 static void UpdateAllocatedSpaceLimits(void* address, int size) { |
150 ASSERT(limit_mutex != NULL); | |
151 ScopedLock lock(limit_mutex); | |
152 | |
148 lowest_ever_allocated = Min(lowest_ever_allocated, address); | 153 lowest_ever_allocated = Min(lowest_ever_allocated, address); |
149 highest_ever_allocated = | 154 highest_ever_allocated = |
150 Max(highest_ever_allocated, | 155 Max(highest_ever_allocated, |
151 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); | 156 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
152 } | 157 } |
153 | 158 |
154 | 159 |
155 bool OS::IsOutsideAllocatedSpace(void* address) { | 160 bool OS::IsOutsideAllocatedSpace(void* address) { |
156 return address < lowest_ever_allocated || address >= highest_ever_allocated; | 161 return address < lowest_ever_allocated || address >= highest_ever_allocated; |
157 } | 162 } |
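For readers outside the tree, here is a self-contained sketch of the range-tracking idea above, with plain integers standing in for V8's Min/Max helpers and the new limit_mutex (locking omitted): the bounds only ever widen, so the membership test is cheap but deliberately conservative.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

static uintptr_t lowest_ever  = UINTPTR_MAX;  // shrinks as chunks are mapped
static uintptr_t highest_ever = 0;            // grows as chunks are mapped

static void UpdateLimits(void* address, size_t size) {
  uintptr_t start = reinterpret_cast<uintptr_t>(address);
  if (start < lowest_ever) lowest_ever = start;
  if (start + size > highest_ever) highest_ever = start + size;
}

// True means "definitely not ours"; false only means "possibly ours",
// because unrelated mappings can fall inside the tracked range.
static bool IsOutsideAllocatedSpace(void* address) {
  uintptr_t p = reinterpret_cast<uintptr_t>(address);
  return p < lowest_ever || p >= highest_ever;
}

int main() {
  char block[64];  // stands in for a freshly mmapped chunk
  UpdateLimits(block, sizeof(block));
  assert(!IsOutsideAllocatedSpace(block));
  assert(IsOutsideAllocatedSpace(NULL));
  return 0;
}
```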
(...skipping 242 matching lines...) | |
400 } | 405 } |
401 | 406 |
402 | 407 |
403 static void* ThreadEntry(void* arg) { | 408 static void* ThreadEntry(void* arg) { |
404 Thread* thread = reinterpret_cast<Thread*>(arg); | 409 Thread* thread = reinterpret_cast<Thread*>(arg); |
405 // This is also initialized by the first argument to pthread_create() but we | 410 // This is also initialized by the first argument to pthread_create() but we |
406 // don't know which thread will run first (the original thread or the new | 411 // don't know which thread will run first (the original thread or the new |
407 // one) so we initialize it here too. | 412 // one) so we initialize it here too. |
408 thread->data()->thread_ = pthread_self(); | 413 thread->data()->thread_ = pthread_self(); |
409 ASSERT(thread->data()->thread_ != kNoThread); | 414 ASSERT(thread->data()->thread_ != kNoThread); |
410 Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate()); | |
411 thread->Run(); | 415 thread->Run(); |
412 return NULL; | 416 return NULL; |
413 } | 417 } |
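A small sketch of the benign double-initialization described in the comment above (the struct and function names are made up for illustration): the parent stores the new thread's id through pthread_create()'s out-parameter and the child stores the same id again via pthread_self(), so the field is valid no matter which thread is scheduled first.

```cpp
#include <pthread.h>
#include <stddef.h>

struct ThreadData {
  pthread_t thread_;
};

static void* Entry(void* arg) {
  ThreadData* data = static_cast<ThreadData*>(arg);
  // May run before or after the parent's write below; both store the same id.
  data->thread_ = pthread_self();
  return NULL;
}

int main() {
  ThreadData data;
  // The parent's write: pthread_create() fills in the same field.
  pthread_create(&data.thread_, NULL, Entry, &data);
  pthread_join(data.thread_, NULL);
  return 0;
}
```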
414 | 418 |
415 | 419 |
416 void Thread::set_name(const char* name) { | 420 void Thread::set_name(const char* name) { |
417 strncpy(name_, name, sizeof(name_)); | 421 strncpy(name_, name, sizeof(name_)); |
418 name_[sizeof(name_) - 1] = '\0'; | 422 name_[sizeof(name_) - 1] = '\0'; |
419 } | 423 } |
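The explicit terminator above matters because strncpy() leaves the destination unterminated when the source fills the buffer; a tiny sketch (the buffer size is arbitrary, not V8's actual name length):

```cpp
#include <cstdio>
#include <cstring>

static void SetName(char (&name)[16], const char* source) {
  strncpy(name, source, sizeof(name));
  name[sizeof(name) - 1] = '\0';  // strncpy() omits this when source >= 16 chars
}

int main() {
  char name[16];
  SetName(name, "a-deliberately-long-thread-name");
  printf("%s\n", name);  // safely truncated and terminated
  return 0;
}
```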
420 | 424 |
(...skipping 159 matching lines...) | |
580 } | 584 } |
581 | 585 |
582 | 586 |
583 Semaphore* OS::CreateSemaphore(int count) { | 587 Semaphore* OS::CreateSemaphore(int count) { |
584 return new SolarisSemaphore(count); | 588 return new SolarisSemaphore(count); |
585 } | 589 } |
586 | 590 |
587 | 591 |
588 #ifdef ENABLE_LOGGING_AND_PROFILING | 592 #ifdef ENABLE_LOGGING_AND_PROFILING |
589 | 593 |
590 static Sampler* active_sampler_ = NULL; | |
591 static pthread_t vm_tid_ = 0; | |
592 | |
593 | |
594 static pthread_t GetThreadID() { | 594 static pthread_t GetThreadID() { |
595 return pthread_self(); | 595 return pthread_self(); |
596 } | 596 } |
597 | 597 |
598 class Sampler::PlatformData : public Malloced { | |
599 public: | |
600 PlatformData() : vm_tid_(GetThreadID()) {} | |
601 | |
602 pthread_t vm_tid() const { return vm_tid_; } | |
603 | |
604 private: | |
605 pthread_t vm_tid_; | |
606 }; | |
607 | |
598 | 608 |
599 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { | 609 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { |
600 USE(info); | 610 USE(info); |
601 if (signal != SIGPROF) return; | 611 if (signal != SIGPROF) return; |
602 if (active_sampler_ == NULL || !active_sampler_->IsActive()) return; | 612 Isolate* isolate = Isolate::UncheckedCurrent(); |
603 if (vm_tid_ != GetThreadID()) return; | 613 if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) { |
614 // We require a fully initialized and entered isolate. | |
615 return; | |
616 } | |
617 if (v8::Locker::IsActive() && | |
618 !isolate->thread_manager()->IsLockedByCurrentThread()) { | |
619 return; | |
620 } | |
621 | |
622 Sampler* sampler = isolate->logger()->sampler(); | |
623 if (sampler == NULL || !sampler->IsActive()) return; | |
604 | 624 |
605 TickSample sample_obj; | 625 TickSample sample_obj; |
606 TickSample* sample = CpuProfiler::TickSampleEvent(); | 626 TickSample* sample = CpuProfiler::TickSampleEvent(isolate); |
607 if (sample == NULL) sample = &sample_obj; | 627 if (sample == NULL) sample = &sample_obj; |
608 | 628 |
609 // Extracting the sample from the context is extremely machine dependent. | 629 // Extracting the sample from the context is extremely machine dependent. |
610 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); | 630 ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); |
611 mcontext_t& mcontext = ucontext->uc_mcontext; | 631 mcontext_t& mcontext = ucontext->uc_mcontext; |
612 sample->state = Top::current_vm_state(); | 632 sample->state = isolate->current_vm_state(); |
613 | 633 |
614 sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); | 634 sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]); |
615 sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); | 635 sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]); |
616 sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); | 636 sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]); |
617 | 637 |
618 active_sampler_->SampleStack(sample); | 638 sampler->SampleStack(sample); |
619 active_sampler_->Tick(sample); | 639 sampler->Tick(sample); |
620 } | 640 } |
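As a standalone illustration of the mechanism the handler above relies on (the Solaris-specific REG_PC/REG_SP/REG_FP lookups are omitted): an SA_SIGINFO handler receives a ucontext_t* as its third argument, and, as with the SignalSender below, the sample is triggered by pthread_kill() rather than an itimer.

```cpp
#include <pthread.h>
#include <signal.h>
#include <ucontext.h>
#include <unistd.h>

static void Handler(int sig, siginfo_t* info, void* context) {
  (void) sig;
  (void) info;
  ucontext_t* ucontext = static_cast<ucontext_t*>(context);
  (void) ucontext;  // a real profiler reads the register set from here
  // Only async-signal-safe calls are allowed in a handler; write() is one.
  const char msg[] = "tick\n";
  write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int main() {
  struct sigaction sa;
  sa.sa_sigaction = Handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);

  pthread_kill(pthread_self(), SIGPROF);  // sample "this" thread once
  return 0;
}
```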
621 | 641 |
622 | 642 class SignalSender : public Thread { |
623 class Sampler::PlatformData : public Malloced { | |
624 public: | 643 public: |
625 enum SleepInterval { | 644 enum SleepInterval { |
626 FULL_INTERVAL, | 645 HALF_INTERVAL, |
627 HALF_INTERVAL | 646 FULL_INTERVAL |
628 }; | 647 }; |
629 | 648 |
630 explicit PlatformData(Sampler* sampler) | 649 explicit SignalSender(int interval) |
631 : sampler_(sampler), | 650 : Thread("SignalSender"), |
632 signal_handler_installed_(false), | 651 interval_(interval) {} |
633 vm_tgid_(getpid()), | 652 |
634 signal_sender_launched_(false) { | 653 static void AddActiveSampler(Sampler* sampler) { |
654 ScopedLock lock(mutex_); | |
655 SamplerRegistry::AddActiveSampler(sampler); | |
656 if (instance_ == NULL) { | |
657 // Install a signal handler. | |
658 struct sigaction sa; | |
659 sa.sa_sigaction = ProfilerSignalHandler; | |
660 sigemptyset(&sa.sa_mask); | |
661 sa.sa_flags = SA_RESTART | SA_SIGINFO; | |
662 signal_handler_installed_ = | |
663 (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); | |
664 | |
665 // Start a thread that sends SIGPROF signal to VM threads. | |
666 instance_ = new SignalSender(sampler->interval()); | |
667 instance_->Start(); | |
668 } else { | |
669 ASSERT(instance_->interval_ == sampler->interval()); | |
670 } | |
635 } | 671 } |
636 | 672 |
637 void SignalSender() { | 673 static void RemoveActiveSampler(Sampler* sampler) { |
638 while (sampler_->IsActive()) { | 674 ScopedLock lock(mutex_); |
639 if (rate_limiter_.SuspendIfNecessary()) continue; | 675 SamplerRegistry::RemoveActiveSampler(sampler); |
640 if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) { | 676 if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { |
641 SendProfilingSignal(); | 677 RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown(); |
678 instance_->Join(); | |
679 delete instance_; | |
680 instance_ = NULL; | |
681 | |
682 // Restore the old signal handler. | |
683 if (signal_handler_installed_) { | |
684 sigaction(SIGPROF, &old_signal_handler_, 0); | |
685 signal_handler_installed_ = false; | |
686 } | |
687 } | |
688 } | |
689 | |
690 // Implement Thread::Run(). | |
691 virtual void Run() { | |
Mads Ager (chromium), 2011/06/30 07:20:46:
The Run method has not been updated to completely
| |
692 SamplerRegistry::State state; | |
693 while ((state = SamplerRegistry::GetState()) != | |
694 SamplerRegistry::HAS_NO_SAMPLERS) { | |
695 bool cpu_profiling_enabled = | |
696 (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); | |
697 bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); | |
698 // When CPU profiling is enabled both JavaScript and C++ code is | |
699 // profiled. We must not suspend. | |
700 if (!cpu_profiling_enabled) { | |
701 if (rate_limiter_.SuspendIfNecessary()) continue; | |
702 } | |
703 if (cpu_profiling_enabled && runtime_profiler_enabled) { | |
704 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { | |
705 return; | |
706 } | |
642 Sleep(HALF_INTERVAL); | 707 Sleep(HALF_INTERVAL); |
643 RuntimeProfiler::NotifyTick(); | 708 if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { |
709 return; | |
710 } | |
644 Sleep(HALF_INTERVAL); | 711 Sleep(HALF_INTERVAL); |
645 } else { | 712 } else { |
646 if (sampler_->IsProfiling()) SendProfilingSignal(); | 713 if (cpu_profiling_enabled) { |
647 if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick(); | 714 if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, |
715 this)) { | |
716 return; | |
717 } | |
718 } | |
719 if (runtime_profiler_enabled) { | |
720 if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, | |
721 NULL)) { | |
722 return; | |
723 } | |
724 } | |
648 Sleep(FULL_INTERVAL); | 725 Sleep(FULL_INTERVAL); |
649 } | 726 } |
650 } | 727 } |
651 } | 728 } |
652 | 729 |
653 void SendProfilingSignal() { | 730 static void DoCpuProfile(Sampler* sampler, void* raw_sender) { |
731 if (!sampler->IsProfiling()) return; | |
732 SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender); | |
733 sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); | |
734 } | |
735 | |
736 static void DoRuntimeProfile(Sampler* sampler, void* ignored) { | |
737 if (!sampler->isolate()->IsInitialized()) return; | |
738 sampler->isolate()->runtime_profiler()->NotifyTick(); | |
739 } | |
740 | |
741 void SendProfilingSignal(pthread_t tid) { | |
654 if (!signal_handler_installed_) return; | 742 if (!signal_handler_installed_) return; |
655 pthread_kill(vm_tid_, SIGPROF); | 743 pthread_kill(tid, SIGPROF); |
656 } | 744 } |
657 | 745 |
658 void Sleep(SleepInterval full_or_half) { | 746 void Sleep(SleepInterval full_or_half) { |
659 // Convert ms to us and subtract 100 us to compensate for delays | 747 // Convert ms to us and subtract 100 us to compensate for delays |
660 // occurring during signal delivery. | 748 // occurring during signal delivery. |
661 useconds_t interval = sampler_->interval_ * 1000 - 100; | 749 useconds_t interval = interval_ * 1000 - 100; |
662 if (full_or_half == HALF_INTERVAL) interval /= 2; | 750 if (full_or_half == HALF_INTERVAL) interval /= 2; |
663 int result = usleep(interval); | 751 int result = usleep(interval); |
664 #ifdef DEBUG | 752 #ifdef DEBUG |
665 if (result != 0 && errno != EINTR) { | 753 if (result != 0 && errno != EINTR) { |
666 fprintf(stderr, | 754 fprintf(stderr, |
667 "SignalSender usleep error; interval = %u, errno = %d\n", | 755 "SignalSender usleep error; interval = %u, errno = %d\n", |
668 interval, | 756 interval, |
669 errno); | 757 errno); |
670 ASSERT(result == 0 || errno == EINTR); | 758 ASSERT(result == 0 || errno == EINTR); |
671 } | 759 } |
672 #endif | 760 #endif |
673 USE(result); | 761 USE(result); |
674 } | 762 } |
675 | 763 |
676 Sampler* sampler_; | 764 const int interval_; |
677 bool signal_handler_installed_; | |
678 struct sigaction old_signal_handler_; | |
679 int vm_tgid_; | |
680 bool signal_sender_launched_; | |
681 pthread_t signal_sender_thread_; | |
682 RuntimeProfilerRateLimiter rate_limiter_; | 765 RuntimeProfilerRateLimiter rate_limiter_; |
766 | |
767 // Protects the process wide state below. | |
768 static Mutex* mutex_; | |
769 static SignalSender* instance_; | |
770 static bool signal_handler_installed_; | |
771 static struct sigaction old_signal_handler_; | |
772 | |
773 DISALLOW_COPY_AND_ASSIGN(SignalSender); | |
683 }; | 774 }; |
684 | 775 |
685 | 776 Mutex* SignalSender::mutex_ = OS::CreateMutex(); |
686 static void* SenderEntry(void* arg) { | 777 SignalSender* SignalSender::instance_ = NULL; |
687 Sampler::PlatformData* data = | 778 struct sigaction SignalSender::old_signal_handler_; |
688 reinterpret_cast<Sampler::PlatformData*>(arg); | 779 bool SignalSender::signal_handler_installed_ = false; |
689 data->SignalSender(); | |
690 return 0; | |
691 } | |
692 | 780 |
693 | 781 |
694 Sampler::Sampler(Isolate* isolate, int interval) | 782 Sampler::Sampler(Isolate* isolate, int interval) |
695 : isolate_(isolate), | 783 : isolate_(isolate), |
696 interval_(interval), | 784 interval_(interval), |
697 profiling_(false), | 785 profiling_(false), |
698 active_(false), | 786 active_(false), |
699 samples_taken_(0) { | 787 samples_taken_(0) { |
700 data_ = new PlatformData(this); | 788 data_ = new PlatformData; |
701 } | 789 } |
702 | 790 |
703 | 791 |
704 Sampler::~Sampler() { | 792 Sampler::~Sampler() { |
705 ASSERT(!data_->signal_sender_launched_); | 793 ASSERT(!IsActive()); |
706 delete data_; | 794 delete data_; |
707 } | 795 } |
708 | 796 |
709 | 797 |
710 void Sampler::Start() { | 798 void Sampler::Start() { |
711 // There can only be one active sampler at the time on POSIX | |
712 // platforms. | |
713 ASSERT(!IsActive()); | 799 ASSERT(!IsActive()); |
714 vm_tid_ = GetThreadID(); | |
715 | |
716 // Request profiling signals. | |
717 struct sigaction sa; | |
718 sa.sa_sigaction = ProfilerSignalHandler; | |
719 sigemptyset(&sa.sa_mask); | |
720 sa.sa_flags = SA_RESTART | SA_SIGINFO; | |
721 data_->signal_handler_installed_ = | |
722 sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0; | |
723 | |
724 // Start a thread that sends SIGPROF signal to VM thread. | |
725 // Sending the signal ourselves instead of relying on itimer provides | |
726 // much better accuracy. | |
727 SetActive(true); | 800 SetActive(true); |
728 if (pthread_create( | 801 SignalSender::AddActiveSampler(this); |
729 &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) { | |
730 data_->signal_sender_launched_ = true; | |
731 } | |
732 | |
733 // Set this sampler as the active sampler. | |
734 active_sampler_ = this; | |
735 } | 802 } |
736 | 803 |
737 | 804 |
738 void Sampler::Stop() { | 805 void Sampler::Stop() { |
806 ASSERT(IsActive()); | |
807 SignalSender::RemoveActiveSampler(this); | |
739 SetActive(false); | 808 SetActive(false); |
740 | |
741 // Wait for signal sender termination (it will exit after setting | |
742 // active_ to false). | |
743 if (data_->signal_sender_launched_) { | |
744 Top::WakeUpRuntimeProfilerThreadBeforeShutdown(); | |
745 pthread_join(data_->signal_sender_thread_, NULL); | |
746 data_->signal_sender_launched_ = false; | |
747 } | |
748 | |
749 // Restore old signal handler | |
750 if (data_->signal_handler_installed_) { | |
751 sigaction(SIGPROF, &data_->old_signal_handler_, 0); | |
752 data_->signal_handler_installed_ = false; | |
753 } | |
754 | |
755 // This sampler is no longer the active sampler. | |
756 active_sampler_ = NULL; | |
757 } | 809 } |
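Schematically, the new Start()/Stop() pair only toggles the active flag and (de)registers with the shared SignalSender, which exists exactly while at least one sampler is registered. The sketch below uses made-up names and omits locking; it is not the real V8 API, just the underlying pattern.

```cpp
#include <set>

class FakeSampler;  // stands in for v8::internal::Sampler

class FakeSender {  // stands in for the SignalSender singleton above
 public:
  static void AddActiveSampler(FakeSampler* sampler) {
    if (samplers_.empty()) {
      InstallHandlerAndStartThread();  // first sampler: set everything up
    }
    samplers_.insert(sampler);
  }
  static void RemoveActiveSampler(FakeSampler* sampler) {
    samplers_.erase(sampler);
    if (samplers_.empty()) {
      JoinThreadAndRestoreHandler();   // last sampler: tear everything down
    }
  }

 private:
  static void InstallHandlerAndStartThread() { /* sigaction + thread start */ }
  static void JoinThreadAndRestoreHandler()  { /* join + restore sigaction */ }
  static std::set<FakeSampler*> samplers_;
};

std::set<FakeSampler*> FakeSender::samplers_;
```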
758 | 810 |
759 #endif // ENABLE_LOGGING_AND_PROFILING | 811 #endif // ENABLE_LOGGING_AND_PROFILING |
760 | 812 |
761 } } // namespace v8::internal | 813 } } // namespace v8::internal |