Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(100)

Unified Diff: src/cpu-profiler.cc

Issue 10857035: Moving cpu profiling into its own thread. (Closed) Base URL: http://git.chromium.org/external/v8.git@master
Patch Set: added a flag to switch between old and new versions Created 8 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/cpu-profiler.cc
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 3cbac77858d5560feb3cab420f08c9e461f2422e..467bb131dd15317a31cf09a8cf274be68ffc64df 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -45,10 +45,11 @@ static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
- : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, int interval)
+ : CpuProfilerThread(sampler),
generator_(generator),
running_(true),
+ interval_(interval),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
@@ -184,7 +185,7 @@ void ProfilerEventsProcessor::AddCurrentStack() {
}
-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
+bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned& dequeue_order) {
caseq 2012/08/17 12:56:35 Output parameters should be pointers, not references.
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
events_buffer_.Dequeue(&record);
@@ -199,15 +200,15 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
#undef PROFILER_TYPE_CASE
default: return true; // Skip record.
}
- *dequeue_order = record.generic.order;
+ dequeue_order = record.generic.order;
return true;
}
return false;
}
-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
- while (true) {
+bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order, int64_t start, int64_t time_limit) {
caseq 2012/08/17 12:56:35 just pass absolute time limit instead of start and time_limit.
+ while (time_limit == -1 || OS::Ticks() - start < time_limit) {
if (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
TickSampleEventRecord record;
@@ -236,6 +237,18 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
return true;
}
}
+ return false;
+}
+
+
+void ProfilerEventsProcessor::ProcessEventsQueue(unsigned& dequeue_order, int64_t start, int64_t time_limit) {
caseq 2012/08/17 12:56:35 ditto
+ while (OS::Ticks() - start < time_limit)
+ // Process ticks until we have any.
+ if (ProcessTicks(dequeue_order, start, time_limit)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
}
@@ -243,19 +256,25 @@ void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
+ if (FLAG_sample_stack_in_postprocessor_thread) {
+ int64_t start = OS::Ticks();
+ DoSample();
+ ProcessEventsQueue(dequeue_order, start, static_cast<int64_t>(interval_));
+ } else {
+ // Process ticks until we have any.
+ if (ProcessTicks(dequeue_order, -1, -1)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ YieldCPU();
}
- YieldCPU();
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
// Perform processing until we have tick events, skip remaining code events.
- while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+ while (ProcessTicks(dequeue_order, -1, -1) && ProcessCodeEvent(dequeue_order)) { }
}
@@ -486,11 +505,18 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
+ // Enable stack sampling.
+ Sampler* sampler = isolate->logger()->sampler();
+ if (!sampler->IsActive()) {
+ sampler->Start();
+ need_to_stop_sampler_ = true;
+ }
+ sampler->IncreaseProfilingDepth();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
+ processor_ = new ProfilerEventsProcessor(generator_, sampler, sampler->interval() * 1000);
caseq 2012/08/17 12:56:35 Do we have to pass interval explicitly?
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
@@ -504,13 +530,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
isolate->logger()->LogCompiledFunctions();
isolate->logger()->LogAccessorCallbacks();
}
- // Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
- sampler->IncreaseProfilingDepth();
}
}
« no previous file with comments | « src/cpu-profiler.h ('k') | src/flag-definitions.h » ('j') | src/platform-linux.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698