Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 27 matching lines...) | |
| 38 | 38 |
| 39 namespace v8 { | 39 namespace v8 { |
| 40 namespace internal { | 40 namespace internal { |
| 41 | 41 |
| 42 static const int kEventsBufferSize = 256 * KB; | 42 static const int kEventsBufferSize = 256 * KB; |
| 43 static const int kTickSamplesBufferChunkSize = 64 * KB; | 43 static const int kTickSamplesBufferChunkSize = 64 * KB; |
| 44 static const int kTickSamplesBufferChunksCount = 16; | 44 static const int kTickSamplesBufferChunksCount = 16; |
| 45 static const int kProfilerStackSize = 64 * KB; | 45 static const int kProfilerStackSize = 64 * KB; |
| 46 | 46 |
| 47 | 47 |
| 48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) | 48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, int interval_in_useconds) |
Jakob Kummerow, 2012/09/04 11:10:51: long line
| 49 : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), | 49 : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), |
| 50 generator_(generator), | 50 generator_(generator), |
| | 51 sampler_(sampler), |
| 51 running_(true), | 52 running_(true), |
| | 53 interval_in_useconds_(interval_in_useconds), |
| 52 ticks_buffer_(sizeof(TickSampleEventRecord), | 54 ticks_buffer_(sizeof(TickSampleEventRecord), |
| 53 kTickSamplesBufferChunkSize, | 55 kTickSamplesBufferChunkSize, |
| 54 kTickSamplesBufferChunksCount), | 56 kTickSamplesBufferChunksCount), |
| 55 enqueue_order_(0) { | 57 enqueue_order_(0) { |
| 56 } | 58 } |
| 57 | 59 |
| 58 | 60 |
| 59 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, | 61 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, |
| 60 const char* prefix, | 62 const char* prefix, |
| 61 String* name, | 63 String* name, |
| (...skipping 137 matching lines...) | |
| 199 #undef PROFILER_TYPE_CASE | 201 #undef PROFILER_TYPE_CASE |
| 200 default: return true; // Skip record. | 202 default: return true; // Skip record. |
| 201 } | 203 } |
| 202 *dequeue_order = record.generic.order; | 204 *dequeue_order = record.generic.order; |
| 203 return true; | 205 return true; |
| 204 } | 206 } |
| 205 return false; | 207 return false; |
| 206 } | 208 } |
| 207 | 209 |
| 208 | 210 |
| 209 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { | 211 bool ProfilerEventsProcessor::ProcessTicks(int64_t stop_time, unsigned dequeue_order) { |
Jakob Kummerow, 2012/09/04 11:10:51: long line
| 210 while (true) { | 212 while (stop_time == -1 || OS::Ticks() < stop_time) { |
| 211 if (!ticks_from_vm_buffer_.IsEmpty() | 213 if (!ticks_from_vm_buffer_.IsEmpty() |
| 212 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { | 214 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { |
| 213 TickSampleEventRecord record; | 215 TickSampleEventRecord record; |
| 214 ticks_from_vm_buffer_.Dequeue(&record); | 216 ticks_from_vm_buffer_.Dequeue(&record); |
| 215 generator_->RecordTickSample(record.sample); | 217 generator_->RecordTickSample(record.sample); |
| 216 } | 218 } |
| 217 | 219 |
| 218 const TickSampleEventRecord* rec = | 220 const TickSampleEventRecord* rec = |
| 219 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); | 221 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); |
| 220 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); | 222 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); |
| 221 // Make a local copy of tick sample record to ensure that it won't | 223 // Make a local copy of tick sample record to ensure that it won't |
| 222 // be modified as we are processing it. This is possible as the | 224 // be modified as we are processing it. This is possible as the |
| 223 // sampler writes w/o any sync to the queue, so if the processor | 225 // sampler writes w/o any sync to the queue, so if the processor |
| 224 // will get far behind, a record may be modified right under its | 226 // will get far behind, a record may be modified right under its |
| 225 // feet. | 227 // feet. |
| 226 TickSampleEventRecord record = *rec; | 228 TickSampleEventRecord record = *rec; |
| 227 if (record.order == dequeue_order) { | 229 if (record.order == dequeue_order) { |
| 228 // A paranoid check to make sure that we don't get a memory overrun | 230 // A paranoid check to make sure that we don't get a memory overrun |
| 229 // in case of frames_count having a wild value. | 231 // in case of frames_count having a wild value. |
| 230 if (record.sample.frames_count < 0 | 232 if (record.sample.frames_count < 0 |
| 231 || record.sample.frames_count > TickSample::kMaxFramesCount) | 233 || record.sample.frames_count > TickSample::kMaxFramesCount) |
| 232 record.sample.frames_count = 0; | 234 record.sample.frames_count = 0; |
| 233 generator_->RecordTickSample(record.sample); | 235 generator_->RecordTickSample(record.sample); |
| 234 ticks_buffer_.FinishDequeue(); | 236 ticks_buffer_.FinishDequeue(); |
| 235 } else { | 237 } else { |
| 236 return true; | 238 return true; |
| 237 } | 239 } |
| 238 } | 240 } |
| | 241 return false; |
| | 242 } |
| | 243 |
| | 244 |
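For orientation: the new ProcessTicks takes a stop_time deadline, and the final call in Run() below passes -1 as a "no deadline" sentinel. A minimal self-contained sketch of that convention, with hypothetical names standing in for the queue and the clock:

```cpp
#include <cstdint>
#include <queue>

// Hypothetical stand-in for the deadline convention used by ProcessTicks:
// stop_time == -1 means "drain without a time limit"; otherwise stop once
// the clock reaches stop_time. Returns true if the queue drained in time.
bool DrainUntil(std::queue<int>* records, int64_t stop_time, int64_t now) {
  while (stop_time == -1 || now < stop_time) {
    if (records->empty()) return true;  // Everything processed in time.
    records->pop();                     // Process one record.
    ++now;                              // Stand-in for OS::Ticks() advancing.
  }
  return false;                         // Deadline hit with work remaining.
}
```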
| | 245 void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order) { |
Jakob Kummerow, 2012/09/04 11:10:51: long line
| | 246 while (OS::Ticks() < stop_time) { |
| | 247 // Process ticks until we have any. |
Jakob Kummerow, 2012/09/04 11:10:51: This comment doesn't make sense. "Process ticks until we have any."
| | 248 if (ProcessTicks(stop_time, *dequeue_order)) { |
| | 249 // All ticks of the current dequeue_order are processed, |
| | 250 // proceed to the next code event. |
| | 251 ProcessCodeEvent(dequeue_order); |
| | 252 } |
| | 253 } |
| 239 } | 254 } |
| 240 | 255 |
| 241 | 256 |
| 242 void ProfilerEventsProcessor::Run() { | 257 void ProfilerEventsProcessor::Run() { |
| 243 unsigned dequeue_order = 0; | 258 unsigned dequeue_order = 0; |
| 244 | 259 |
| 245 while (running_) { | 260 while (running_) { |
| 246 // Process ticks until we have any. | 261 int64_t stop_time = OS::Ticks() + interval_in_useconds_; |
| 247 if (ProcessTicks(dequeue_order)) { | 262 sampler_->DoSample(); |
| 248 // All ticks of the current dequeue_order are processed, | 263 ProcessEventsQueue(stop_time, &dequeue_order); |
| 249 // proceed to the next code event. | |
| 250 ProcessCodeEvent(&dequeue_order); | |
| 251 } | |
| 252 YieldCPU(); | |
| 253 } | 264 } |
| 254 | 265 |
| 255 // Process remaining tick events. | 266 // Process remaining tick events. |
| 256 ticks_buffer_.FlushResidualRecords(); | 267 ticks_buffer_.FlushResidualRecords(); |
| 257 // Perform processing until we have tick events, skip remaining code events. | 268 // Perform processing until we have tick events, skip remaining code events. |
| 258 while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } | 269 while (ProcessTicks(-1, dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } |
| 259 } | 270 } |
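The net effect of this hunk: sampling moves onto the processor thread itself. Instead of busy-looping and calling YieldCPU(), each iteration now takes one sample via sampler_->DoSample() and then spends the rest of the sampling interval draining the event queues. A self-contained sketch of that loop shape, using std::chrono stand-ins rather than the V8 API:

```cpp
#include <atomic>
#include <chrono>
#include <functional>

// Sketch of the new Run() structure. do_sample stands in for
// sampler_->DoSample(); process_until stands in for ProcessEventsQueue().
void RunLoop(
    const std::atomic<bool>& running,
    const std::function<void()>& do_sample,
    const std::function<void(std::chrono::steady_clock::time_point)>&
        process_until) {
  // Hypothetical value; the CL takes the interval (in microseconds) from
  // FLAG_cpu_profiler_sampling_interval.
  const auto interval = std::chrono::microseconds(1000);
  while (running.load()) {
    const auto stop_time = std::chrono::steady_clock::now() + interval;
    do_sample();               // One stack sample per iteration.
    process_until(stop_time);  // Drain events until the next deadline.
  }
}
```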
| 260 | 271 |
| 261 | 272 |
| 262 void CpuProfiler::StartProfiling(const char* title) { | 273 void CpuProfiler::StartProfiling(const char* title) { |
| 263 ASSERT(Isolate::Current()->cpu_profiler() != NULL); | 274 ASSERT(Isolate::Current()->cpu_profiler() != NULL); |
| 264 Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); | 275 Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); |
| 265 } | 276 } |
| 266 | 277 |
| 267 | 278 |
| 268 void CpuProfiler::StartProfiling(String* title) { | 279 void CpuProfiler::StartProfiling(String* title) { |
| (...skipping 210 matching lines...) | |
| 479 | 490 |
| 480 void CpuProfiler::StartCollectingProfile(String* title) { | 491 void CpuProfiler::StartCollectingProfile(String* title) { |
| 481 StartCollectingProfile(profiles_->GetName(title)); | 492 StartCollectingProfile(profiles_->GetName(title)); |
| 482 } | 493 } |
| 483 | 494 |
| 484 | 495 |
| 485 void CpuProfiler::StartProcessorIfNotStarted() { | 496 void CpuProfiler::StartProcessorIfNotStarted() { |
| 486 if (processor_ == NULL) { | 497 if (processor_ == NULL) { |
| 487 Isolate* isolate = Isolate::Current(); | 498 Isolate* isolate = Isolate::Current(); |
| 488 | 499 |
| | 500 Sampler* sampler = isolate->logger()->sampler(); |
| 489 // Disable logging when using the new implementation. | 501 // Disable logging when using the new implementation. |
| 490 saved_logging_nesting_ = isolate->logger()->logging_nesting_; | 502 saved_logging_nesting_ = isolate->logger()->logging_nesting_; |
| 491 isolate->logger()->logging_nesting_ = 0; | 503 isolate->logger()->logging_nesting_ = 0; |
| 492 generator_ = new ProfileGenerator(profiles_); | 504 generator_ = new ProfileGenerator(profiles_); |
| 493 processor_ = new ProfilerEventsProcessor(generator_); | 505 processor_ = new ProfilerEventsProcessor(generator_, sampler, FLAG_cpu_profiler_sampling_interval); |
Jakob Kummerow, 2012/09/04 11:10:51: long line.
| 494 NoBarrier_Store(&is_profiling_, true); | 506 NoBarrier_Store(&is_profiling_, true); |
| 495 processor_->Start(); | |
| 496 // Enumerate stuff we already have in the heap. | 507 // Enumerate stuff we already have in the heap. |
| 497 if (isolate->heap()->HasBeenSetUp()) { | 508 if (isolate->heap()->HasBeenSetUp()) { |
| 498 if (!FLAG_prof_browser_mode) { | 509 if (!FLAG_prof_browser_mode) { |
| 499 bool saved_log_code_flag = FLAG_log_code; | 510 bool saved_log_code_flag = FLAG_log_code; |
| 500 FLAG_log_code = true; | 511 FLAG_log_code = true; |
| 501 isolate->logger()->LogCodeObjects(); | 512 isolate->logger()->LogCodeObjects(); |
| 502 FLAG_log_code = saved_log_code_flag; | 513 FLAG_log_code = saved_log_code_flag; |
| 503 } | 514 } |
| 504 isolate->logger()->LogCompiledFunctions(); | 515 isolate->logger()->LogCompiledFunctions(); |
| 505 isolate->logger()->LogAccessorCallbacks(); | 516 isolate->logger()->LogAccessorCallbacks(); |
| 506 } | 517 } |
| 507 // Enable stack sampling. | 518 // Enable stack sampling. |
| 508 Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_); | |
| 509 if (!sampler->IsActive()) { | 519 if (!sampler->IsActive()) { |
| 510 sampler->Start(); | 520 sampler->Start(); |
| 511 need_to_stop_sampler_ = true; | 521 need_to_stop_sampler_ = true; |
| 512 } | 522 } |
| 513 sampler->IncreaseProfilingDepth(); | 523 sampler->IncreaseProfilingDepth(); |
| | 524 processor_->Start(); |
| 514 } | 525 } |
| 515 } | 526 } |
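To summarize the reshuffling in StartProcessorIfNotStarted(): the sampler is now fetched via isolate->logger()->sampler() instead of a reinterpret_cast of ticker_, and processor_->Start() moves below the sampler setup so the processor thread only begins calling DoSample() once the sampler is active. A condensed paraphrase of the new flow (elisions marked; not the verbatim committed code):

```cpp
// Condensed paraphrase of the new startup order shown in the diff.
Sampler* sampler = isolate->logger()->sampler();  // Replaces reinterpret_cast.
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(
    generator_, sampler, FLAG_cpu_profiler_sampling_interval);
NoBarrier_Store(&is_profiling_, true);
// ... enumerate code objects already on the heap, as before ...
if (!sampler->IsActive()) {
  sampler->Start();                 // Enable stack sampling.
  need_to_stop_sampler_ = true;
}
sampler->IncreaseProfilingDepth();
processor_->Start();  // Started last: this thread now drives sampling.
```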
| 516 | 527 |
| 517 | 528 |
| 518 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { | 529 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { |
| 519 const double actual_sampling_rate = generator_->actual_sampling_rate(); | 530 const double actual_sampling_rate = generator_->actual_sampling_rate(); |
| 520 StopProcessorIfLastProfile(title); | 531 StopProcessorIfLastProfile(title); |
| 521 CpuProfile* result = | 532 CpuProfile* result = |
| 522 profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken, | 533 profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken, |
| 523 title, | 534 title, |
| (...skipping 49 matching lines...) | |
| 573 | 584 |
| 574 void CpuProfiler::TearDown() { | 585 void CpuProfiler::TearDown() { |
| 575 Isolate* isolate = Isolate::Current(); | 586 Isolate* isolate = Isolate::Current(); |
| 576 if (isolate->cpu_profiler() != NULL) { | 587 if (isolate->cpu_profiler() != NULL) { |
| 577 delete isolate->cpu_profiler(); | 588 delete isolate->cpu_profiler(); |
| 578 } | 589 } |
| 579 isolate->set_cpu_profiler(NULL); | 590 isolate->set_cpu_profiler(NULL); |
| 580 } | 591 } |
| 581 | 592 |
| 582 } } // namespace v8::internal | 593 } } // namespace v8::internal |