Index: src/cpu-profiler.cc |
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc |
index 3cbac77858d5560feb3cab420f08c9e461f2422e..02d41a1fb616e2b06b20ad8d0a965f8ab21f5159 100644 |
--- a/src/cpu-profiler.cc |
+++ b/src/cpu-profiler.cc |
@@ -45,10 +45,11 @@ static const int kTickSamplesBufferChunksCount = 16; |
static const int kProfilerStackSize = 64 * KB; |
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) |
- : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), |
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, int interval) |
+ : CpuProfilerThread(sampler), |
generator_(generator), |
running_(true), |
+ interval_(interval), |
ticks_buffer_(sizeof(TickSampleEventRecord), |
kTickSamplesBufferChunkSize, |
kTickSamplesBufferChunksCount), |
@@ -184,7 +185,7 @@ void ProfilerEventsProcessor::AddCurrentStack() { |
} |
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { |
+bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned& dequeue_order) { |
if (!events_buffer_.IsEmpty()) { |
CodeEventsContainer record; |
events_buffer_.Dequeue(&record); |
@@ -199,15 +200,15 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { |
#undef PROFILER_TYPE_CASE |
default: return true; // Skip record. |
} |
- *dequeue_order = record.generic.order; |
+ dequeue_order = record.generic.order; |
return true; |
} |
return false; |
} |
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { |
- while (true) { |
+bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order, int64_t start, int64_t time_limit) { |
+ while (time_limit == -1 || OS::Ticks() - start < time_limit) { |
if (!ticks_from_vm_buffer_.IsEmpty() |
&& ticks_from_vm_buffer_.Peek()->order == dequeue_order) { |
TickSampleEventRecord record; |
@@ -236,26 +237,36 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { |
return true; |
} |
} |
+ return false; |
} |
-void ProfilerEventsProcessor::Run() { |
- unsigned dequeue_order = 0; |
- |
- while (running_) { |
+void ProfilerEventsProcessor::ProcessEventsQueue(unsigned& dequeue_order, int64_t time_limit) { |
+ int64_t start = OS::Ticks(); |
+  while (OS::Ticks() - start < time_limit) { |
[Inline review comment — caseq, 2012/08/16 14:39:22: "I would just use absolute time limit, so that we c…" — remainder truncated in extraction. NOTE(review): the opening "{" of this while loop appears to have been lost where this comment was spliced into the diff; restored above.]
// Process ticks until we have any. |
- if (ProcessTicks(dequeue_order)) { |
+ if (ProcessTicks(dequeue_order, start, time_limit)) { |
// All ticks of the current dequeue_order are processed, |
// proceed to the next code event. |
- ProcessCodeEvent(&dequeue_order); |
+ ProcessCodeEvent(dequeue_order); |
} |
YieldCPU(); |
[Inline review comment — caseq, 2012/08/16 14:39:22: "So we're running the loop with 100% usage until it…" — remainder truncated in extraction. NOTE(review): a context line with the closing "}" of the while loop in ProcessEventsQueue appears to have been lost where this comment was spliced into the diff — verify against the original patch set.]
+} |
+ |
+ |
+void ProfilerEventsProcessor::Run() { |
+ unsigned dequeue_order = 0; |
+ |
+ while (running_) { |
+ int64_t start = OS::Ticks(); |
+ DoCpuProfile(); |
+ ProcessEventsQueue(dequeue_order, static_cast<int64_t>(interval_) - (OS::Ticks() - start)); |
} |
// Process remaining tick events. |
ticks_buffer_.FlushResidualRecords(); |
// Perform processing until we have tick events, skip remaining code events. |
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } |
+ while (ProcessTicks(dequeue_order, -1, -1) && ProcessCodeEvent(dequeue_order)) { } |
} |
@@ -486,11 +497,18 @@ void CpuProfiler::StartProcessorIfNotStarted() { |
if (processor_ == NULL) { |
Isolate* isolate = Isolate::Current(); |
+ // Enable stack sampling. |
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_); |
+ if (!sampler->IsActive()) { |
+ sampler->Start(); |
+ need_to_stop_sampler_ = true; |
+ } |
+ sampler->IncreaseProfilingDepth(); |
// Disable logging when using the new implementation. |
saved_logging_nesting_ = isolate->logger()->logging_nesting_; |
isolate->logger()->logging_nesting_ = 0; |
generator_ = new ProfileGenerator(profiles_); |
- processor_ = new ProfilerEventsProcessor(generator_); |
+ processor_ = new ProfilerEventsProcessor(generator_, sampler, sampler->interval() * 1000); |
NoBarrier_Store(&is_profiling_, true); |
processor_->Start(); |
// Enumerate stuff we already have in the heap. |
@@ -504,13 +522,6 @@ void CpuProfiler::StartProcessorIfNotStarted() { |
isolate->logger()->LogCompiledFunctions(); |
isolate->logger()->LogAccessorCallbacks(); |
} |
- // Enable stack sampling. |
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_); |
- if (!sampler->IsActive()) { |
- sampler->Start(); |
- need_to_stop_sampler_ = true; |
- } |
- sampler->IncreaseProfilingDepth(); |
} |
} |