Index: src/cpu-profiler.cc
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 3cbac77858d5560feb3cab420f08c9e461f2422e..0187ebff80e9e1dd26008d7b2516e2613d7edf0f 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -45,10 +45,12 @@ static const int kTickSamplesBufferChunksCount = 16;
 static const int kProfilerStackSize = 64 * KB;
 
 
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, int interval_in_useconds)
     : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
+      sampler_(sampler),
       running_(true),
+      interval_in_useconds_(interval_in_useconds),
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
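The matching declaration change in src/cpu-profiler.h is not part of this hunk. Judging only from the initializer list above, the header side would read roughly as follows (a sketch under that assumption, not the actual patch):

    // Sketch of the assumed src/cpu-profiler.h change; member order
    // mirrors the initializer list in the hunk above.
    class ProfilerEventsProcessor : public Thread {
     public:
      ProfilerEventsProcessor(ProfileGenerator* generator,
                              Sampler* sampler,
                              int interval_in_useconds);
      // ...
     private:
      ProfileGenerator* generator_;
      Sampler* sampler_;                // Not owned; polled from Run().
      bool running_;
      const int interval_in_useconds_;  // Sampling period in microseconds.
      // ...
    };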
@@ -206,8 +208,8 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
 }
 
 
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
-  while (true) {
+bool ProfilerEventsProcessor::ProcessTicks(int64_t stop_time, unsigned dequeue_order) {
+  while (stop_time == -1 || OS::Ticks() < stop_time) {
     if (!ticks_from_vm_buffer_.IsEmpty()
         && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
       TickSampleEventRecord record;
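ProcessTicks now takes a deadline in microseconds: a stop_time of -1 preserves the old unbounded behavior (used for the final drain at the end of Run() below), while a real deadline makes the loop give up once OS::Ticks() passes it and report false. A minimal standalone sketch of that contract, with std::chrono standing in for V8's OS::Ticks(); the names here are illustrative, not V8's:

    #include <chrono>
    #include <cstdint>
    #include <queue>

    // Stand-in for V8's OS::Ticks(): current time in microseconds.
    static int64_t NowUs() {
      using namespace std::chrono;
      return duration_cast<microseconds>(
          steady_clock::now().time_since_epoch()).count();
    }

    // Drain queued work until it runs out or the deadline expires.
    // stop_time == -1 means "no deadline" (the pre-patch behavior).
    // Returns true if the work ran out, false if time did.
    static bool DrainUntil(std::queue<int>* work, int64_t stop_time) {
      while (stop_time == -1 || NowUs() < stop_time) {
        if (work->empty()) return true;
        work->pop();  // Process one record.
      }
      return false;
    }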
@@ -236,26 +238,35 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
       return true;
     }
   }
+  return false;
 }
 
 
-void ProfilerEventsProcessor::Run() {
-  unsigned dequeue_order = 0;
-
-  while (running_) {
+void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order) {
+  while (OS::Ticks() < stop_time) {
     // Process ticks until we have any.
-    if (ProcessTicks(dequeue_order)) {
+    if (ProcessTicks(stop_time, *dequeue_order)) {
       // All ticks of the current dequeue_order are processed,
       // proceed to the next code event.
-      ProcessCodeEvent(&dequeue_order);
+      ProcessCodeEvent(dequeue_order);
     }
-    YieldCPU();
+  }
+}
+
+
+void ProfilerEventsProcessor::Run() {
+  unsigned dequeue_order = 0;
+
+  while (running_) {
+    int64_t stop_time = OS::Ticks() + interval_in_useconds_;
+    sampler_->DoSample();
+    ProcessEventsQueue(stop_time, &dequeue_order);
   }
 
   // Process remaining tick events.
   ticks_buffer_.FlushResidualRecords();
   // Perform processing until we have tick events, skip remaining code events.
-  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+  while (ProcessTicks(-1, dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
 }
 
 
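This hunk is the heart of the change: instead of a timer or signal interrupting the VM while the processor thread yields, the processor thread itself now paces the profiler. Each pass of Run() takes exactly one sample via sampler_->DoSample() and then spends the remainder of the interval consuming tick and code events. A compact, self-contained model of that loop, with std::chrono and std::thread replacing V8's OS and Thread layers (names are illustrative):

    #include <atomic>
    #include <chrono>
    #include <thread>

    int main() {
      using Clock = std::chrono::steady_clock;
      std::atomic<bool> running(true);
      const auto interval = std::chrono::microseconds(1000);  // ~1 ms period.

      // Models ProfilerEventsProcessor::Run() after the patch.
      std::thread processor([&] {
        while (running.load()) {
          Clock::time_point stop_time = Clock::now() + interval;
          // sampler_->DoSample() would capture one stack sample here.
          while (Clock::now() < stop_time) {
            // ProcessEventsQueue(): consume tick/code events until
            // the deadline, bounding this iteration's length.
          }
        }
      });

      std::this_thread::sleep_for(std::chrono::milliseconds(10));
      running.store(false);
      processor.join();
      return 0;
    }

Note that the patched loop no longer calls YieldCPU(); the deadline check inside ProcessEventsQueue (and ProcessTicks) is what bounds each iteration instead.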
@@ -486,13 +497,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
   if (processor_ == NULL) {
     Isolate* isolate = Isolate::Current();
 
+    Sampler* sampler = isolate->logger()->sampler();
     // Disable logging when using the new implementation.
     saved_logging_nesting_ = isolate->logger()->logging_nesting_;
     isolate->logger()->logging_nesting_ = 0;
     generator_ = new ProfileGenerator(profiles_);
-    processor_ = new ProfilerEventsProcessor(generator_);
+    processor_ = new ProfilerEventsProcessor(generator_, sampler, FLAG_cpu_profiler_sampling_interval);
     NoBarrier_Store(&is_profiling_, true);
-    processor_->Start();
     // Enumerate stuff we already have in the heap.
     if (isolate->heap()->HasBeenSetUp()) {
       if (!FLAG_prof_browser_mode) {
@@ -505,12 +516,12 @@ void CpuProfiler::StartProcessorIfNotStarted() {
       isolate->logger()->LogAccessorCallbacks();
     }
     // Enable stack sampling.
-    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
     if (!sampler->IsActive()) {
       sampler->Start();
       need_to_stop_sampler_ = true;
     }
     sampler->IncreaseProfilingDepth();
+    processor_->Start();
   }
 }
 
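Taken together, the last two hunks reorder startup so the processor thread only runs once the sampler it polls is live: the sampler is now fetched through the logger's sampler() accessor rather than a reinterpret_cast of ticker_, and processor_->Start() moves from right after construction to the very end of setup. A condensed paraphrase of the patched control flow (not verbatim source):

    // Inside CpuProfiler::StartProcessorIfNotStarted(), after the patch:
    Sampler* sampler = isolate->logger()->sampler();
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(
        generator_, sampler, FLAG_cpu_profiler_sampling_interval);
    NoBarrier_Store(&is_profiling_, true);
    // ... enumerate code objects already in the heap ...
    if (!sampler->IsActive()) {
      sampler->Start();
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
    processor_->Start();  // Last: Run() calls sampler_->DoSample() right away.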