Chromium Code Reviews

Index: src/cpu-profiler.cc
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index b3800f58771af4383e97e2ceefd15afc67856af7..fdfe438ec7bb0abe817c55a62a0e318d43515c85 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -52,18 +52,18 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
-      enqueue_order_(0) {
+      last_recorded_code_event_id_(0), last_processed_code_event_id_(0) {
 }

 void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
-  event.generic.order = ++enqueue_order_;
+  event.generic.order = ++last_recorded_code_event_id_;
   events_buffer_.Enqueue(event);
 }
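
A side note on the renamed counter pair: the single enqueue_order_ is split into a producer-side id (last_recorded_code_event_id_, stamped onto every enqueued event) and a consumer-side cursor (last_processed_code_event_id_, advanced as events are applied). A minimal standalone sketch of that handshake, with hypothetical types and a plain std::queue standing in for the profiler's lock-free buffers:

#include <cstdio>
#include <queue>

struct Event {
  unsigned order;    // stamped by the producer at enqueue time
  const char* name;
};

class EventsProcessor {
 public:
  void Enqueue(Event event) {
    event.order = ++last_recorded_code_event_id_;  // producer-side stamp
    events_.push(event);
  }
  bool ProcessCodeEvent() {
    if (events_.empty()) return false;
    last_processed_code_event_id_ = events_.front().order;  // consumer cursor
    events_.pop();
    return true;
  }
  unsigned last_processed() const { return last_processed_code_event_id_; }

 private:
  std::queue<Event> events_;
  unsigned last_recorded_code_event_id_ = 0;
  unsigned last_processed_code_event_id_ = 0;
};

int main() {
  EventsProcessor p;
  p.Enqueue({0, "CodeCreateEvent"});
  p.Enqueue({0, "CodeMoveEvent"});
  while (p.ProcessCodeEvent()) { }
  std::printf("last processed id: %u\n", p.last_processed());  // prints 2
}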

 void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
-  TickSampleEventRecord record(enqueue_order_);
+  TickSampleEventRecord record(last_recorded_code_event_id_);
   TickSample* sample = &record.sample;
   sample->state = isolate->current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
@@ -76,7 +76,21 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
 }

-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
+void ProfilerEventsProcessor::Start() {
+  last_processed_code_event_id_ = 0;
+  while (ProcessCodeEvent()) { }
yurys
2013/07/05 13:10:00
Why do we have to do this processing on the main thread?

loislo
2013/07/05 13:15:37
reverted
+  StartSynchronously();
+}
+
+
+void ProfilerEventsProcessor::Stop() {
yurys
2013/07/05 13:10:00
StopAndJoin or StopSynchronously.

loislo
2013/07/05 13:15:37
Done.
+  if (!running_) return;
+  running_ = false;
+  Join();
+}
+
+
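As the review thread above notes, Stop() now subsumes the join: it flips the running_ flag so Run() can fall out of its loop, then blocks until the thread has fully exited. A minimal sketch of that stop-and-join pattern using std::thread (hypothetical names, not the V8 thread classes):

#include <atomic>
#include <thread>

class Worker {
 public:
  void Start() {
    running_ = true;
    thread_ = std::thread([this] { Run(); });
  }
  void Stop() {
    if (!running_) return;  // tolerate a second Stop()
    running_ = false;       // ask Run() to leave its loop
    thread_.join();         // wait for the drain in Run() to finish
  }

 private:
  void Run() {
    while (running_) { /* process queued events, then yield */ }
    /* drain remaining records here before exiting */
  }
  std::atomic<bool> running_{false};
  std::thread thread_;
};

int main() {
  Worker w;
  w.Start();
  w.Stop();  // safe even immediately after Start()
}
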
+bool ProfilerEventsProcessor::ProcessCodeEvent() {
   CodeEventsContainer record;
   if (events_buffer_.Dequeue(&record)) {
     switch (record.generic.type) {
@@ -90,17 +104,18 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order)
 #undef PROFILER_TYPE_CASE
       default: return true;  // Skip record.
     }
-    *dequeue_order = record.generic.order;
+    last_processed_code_event_id_ = record.generic.order;
     return true;
   }
   return false;
 }

-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
+bool ProfilerEventsProcessor::ProcessTicks() {
   while (true) {
     if (!ticks_from_vm_buffer_.IsEmpty()
-        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
+        && ticks_from_vm_buffer_.Peek()->order ==
+           last_processed_code_event_id_) {
       TickSampleEventRecord record;
       ticks_from_vm_buffer_.Dequeue(&record);
       generator_->RecordTickSample(record.sample);
@@ -115,38 +130,35 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order)
     // will get far behind, a record may be modified right under its
     // feet.
     TickSampleEventRecord record = *rec;
-    if (record.order == dequeue_order) {
-      // A paranoid check to make sure that we don't get a memory overrun
-      // in case of frames_count having a wild value.
-      if (record.sample.frames_count < 0
-          || record.sample.frames_count > TickSample::kMaxFramesCount)
-        record.sample.frames_count = 0;
-      generator_->RecordTickSample(record.sample);
-      ticks_buffer_.FinishDequeue();
-    } else {
-      return true;
-    }
+    if (record.order != last_processed_code_event_id_) return true;
+
+    // A paranoid check to make sure that we don't get a memory overrun
+    // in case of frames_count having a wild value.
+    if (record.sample.frames_count < 0
+        || record.sample.frames_count > TickSample::kMaxFramesCount)
+      record.sample.frames_count = 0;
+    generator_->RecordTickSample(record.sample);
+    ticks_buffer_.FinishDequeue();
   }
 }
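
The invariant ProcessTicks() maintains: a tick is only attributed to the profile once its recorded id matches the id of the last code event the processor has applied, so a sample can never reference code the generator has not seen yet. A toy illustration of that matching rule (hypothetical types, a std::deque in place of the sampling buffers):

#include <cstdio>
#include <deque>

struct TickRecord {
  unsigned order;  // id of the last code event recorded before this tick
};

// Returns true once the front tick belongs to a later code event (the
// caller should advance to the next code event), false when the buffer
// is drained and the caller should yield and retry.
bool ProcessTicks(std::deque<TickRecord>* ticks,
                  unsigned last_processed_code_event_id) {
  while (!ticks->empty()) {
    if (ticks->front().order != last_processed_code_event_id) return true;
    std::printf("attributing tick for event %u\n",
                last_processed_code_event_id);
    ticks->pop_front();
  }
  return false;
}

int main() {
  std::deque<TickRecord> ticks = {{1}, {1}, {2}};
  ProcessTicks(&ticks, 1);  // consumes both id-1 ticks, stops at id 2
}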

 void ProfilerEventsProcessor::Run() {
-  unsigned dequeue_order = 0;
-
   while (running_) {
     // Process ticks until we have any.
-    if (ProcessTicks(dequeue_order)) {
-      // All ticks of the current dequeue_order are processed,
+    if (ProcessTicks()) {
+      // All ticks of the current last_processed_code_event_id_ are processed,
       // proceed to the next code event.
-      ProcessCodeEvent(&dequeue_order);
+      ProcessCodeEvent();
     }
     YieldCPU();
   }

   // Process remaining tick events.
   ticks_buffer_.FlushResidualRecords();
-  // Perform processing until we have tick events, skip remaining code events.
-  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+  do {
+    ProcessTicks();
+  } while (ProcessCodeEvent());
 }
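
The shape change in the shutdown drain matters: the old while (A && B) loop stopped the moment ProcessTicks() reported an empty buffer, deliberately skipping any code events still queued, while the new do/while walks every remaining code event and gives the ticks ordered behind each one a chance to be attributed. A runnable contrast of the two loops, with plain queues as hypothetical stand-ins for the profiler's buffers:

#include <cstdio>
#include <deque>
#include <queue>

static std::queue<int> ticks({1, 1, 2});     // tick ids awaiting attribution
static std::queue<int> code_events({1, 2});  // code-event ids still queued
static int current_id = 0;

static bool ProcessTicks() {
  while (!ticks.empty()) {
    if (ticks.front() != current_id) return true;  // wait for a later event
    std::printf("tick attributed to event %d\n", current_id);
    ticks.pop();
  }
  return false;  // tick buffer drained
}

static bool ProcessCodeEvent() {
  if (code_events.empty()) return false;
  current_id = code_events.front();  // "process" the event
  code_events.pop();
  return true;
}

int main() {
  // Old form: while (ProcessTicks() && ProcessCodeEvent()) { }
  // New form: one more ProcessTicks() pass after every code event.
  do {
    ProcessTicks();
  } while (ProcessCodeEvent());
}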

@@ -445,7 +457,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
     generator_ = new ProfileGenerator(profiles_);
     processor_ = new ProfilerEventsProcessor(generator_);
     is_profiling_ = true;
-    processor_->StartSynchronously();
     // Enumerate stuff we already have in the heap.
     ASSERT(isolate_->heap()->HasBeenSetUp());
     if (!FLAG_prof_browser_mode) {
@@ -454,6 +465,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
       logger->LogCompiledFunctions();
       logger->LogAccessorCallbacks();
       LogBuiltins();
+      processor_->Start();
loislo
2013/07/05 13:15:37
reverted
       // Enable stack sampling.
       Sampler* sampler = logger->sampler();
       sampler->IncreaseProfilingDepth();
@@ -505,7 +517,6 @@ void CpuProfiler::StopProcessor() {
   }
   is_profiling_ = false;
   processor_->Stop();
-  processor_->Join();
   delete processor_;
   delete generator_;
   processor_ = NULL;