OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 27 matching lines...) Expand all Loading... | |
38 | 38 |
39 namespace v8 { | 39 namespace v8 { |
40 namespace internal { | 40 namespace internal { |
41 | 41 |
42 static const int kEventsBufferSize = 256 * KB; | 42 static const int kEventsBufferSize = 256 * KB; |
43 static const int kTickSamplesBufferChunkSize = 64 * KB; | 43 static const int kTickSamplesBufferChunkSize = 64 * KB; |
44 static const int kTickSamplesBufferChunksCount = 16; | 44 static const int kTickSamplesBufferChunksCount = 16; |
45 static const int kProfilerStackSize = 64 * KB; | 45 static const int kProfilerStackSize = 64 * KB; |
46 | 46 |
47 | 47 |
48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) | 48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator, Sampler* sampler, int interval) |
49 : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), | 49 : CpuProfilerThread(sampler), |
50 generator_(generator), | 50 generator_(generator), |
51 running_(true), | 51 running_(true), |
52 interval_(interval), | |
52 ticks_buffer_(sizeof(TickSampleEventRecord), | 53 ticks_buffer_(sizeof(TickSampleEventRecord), |
53 kTickSamplesBufferChunkSize, | 54 kTickSamplesBufferChunkSize, |
54 kTickSamplesBufferChunksCount), | 55 kTickSamplesBufferChunksCount), |
55 enqueue_order_(0) { | 56 enqueue_order_(0) { |
56 } | 57 } |
57 | 58 |
58 | 59 |
59 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, | 60 void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, |
60 const char* prefix, | 61 const char* prefix, |
61 String* name, | 62 String* name, |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
177 sample->pc = reinterpret_cast<Address>(sample); // Not NULL. | 178 sample->pc = reinterpret_cast<Address>(sample); // Not NULL. |
178 for (StackTraceFrameIterator it(isolate); | 179 for (StackTraceFrameIterator it(isolate); |
179 !it.done() && sample->frames_count < TickSample::kMaxFramesCount; | 180 !it.done() && sample->frames_count < TickSample::kMaxFramesCount; |
180 it.Advance()) { | 181 it.Advance()) { |
181 sample->stack[sample->frames_count++] = it.frame()->pc(); | 182 sample->stack[sample->frames_count++] = it.frame()->pc(); |
182 } | 183 } |
183 ticks_from_vm_buffer_.Enqueue(record); | 184 ticks_from_vm_buffer_.Enqueue(record); |
184 } | 185 } |
185 | 186 |
186 | 187 |
187 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { | 188 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned& dequeue_order) { |
caseq
2012/08/17 12:56:35
Output parameters should be pointers, not references.
| |
188 if (!events_buffer_.IsEmpty()) { | 189 if (!events_buffer_.IsEmpty()) { |
189 CodeEventsContainer record; | 190 CodeEventsContainer record; |
190 events_buffer_.Dequeue(&record); | 191 events_buffer_.Dequeue(&record); |
191 switch (record.generic.type) { | 192 switch (record.generic.type) { |
192 #define PROFILER_TYPE_CASE(type, clss) \ | 193 #define PROFILER_TYPE_CASE(type, clss) \ |
193 case CodeEventRecord::type: \ | 194 case CodeEventRecord::type: \ |
194 record.clss##_.UpdateCodeMap(generator_->code_map()); \ | 195 record.clss##_.UpdateCodeMap(generator_->code_map()); \ |
195 break; | 196 break; |
196 | 197 |
197 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) | 198 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) |
198 | 199 |
199 #undef PROFILER_TYPE_CASE | 200 #undef PROFILER_TYPE_CASE |
200 default: return true; // Skip record. | 201 default: return true; // Skip record. |
201 } | 202 } |
202 *dequeue_order = record.generic.order; | 203 dequeue_order = record.generic.order; |
203 return true; | 204 return true; |
204 } | 205 } |
205 return false; | 206 return false; |
206 } | 207 } |
207 | 208 |
208 | 209 |
209 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { | 210 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order, int64_t start, int64_t time_limit) {
caseq
2012/08/17 12:56:35
just pass absolute time limit instead of start and time_limit.
| |
210 while (true) { | 211 while (time_limit == -1 || OS::Ticks() - start < time_limit) { |
211 if (!ticks_from_vm_buffer_.IsEmpty() | 212 if (!ticks_from_vm_buffer_.IsEmpty() |
212 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { | 213 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { |
213 TickSampleEventRecord record; | 214 TickSampleEventRecord record; |
214 ticks_from_vm_buffer_.Dequeue(&record); | 215 ticks_from_vm_buffer_.Dequeue(&record); |
215 generator_->RecordTickSample(record.sample); | 216 generator_->RecordTickSample(record.sample); |
216 } | 217 } |
217 | 218 |
218 const TickSampleEventRecord* rec = | 219 const TickSampleEventRecord* rec = |
219 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); | 220 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); |
220 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); | 221 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); |
221 // Make a local copy of tick sample record to ensure that it won't | 222 // Make a local copy of tick sample record to ensure that it won't |
222 // be modified as we are processing it. This is possible as the | 223 // be modified as we are processing it. This is possible as the |
223 // sampler writes w/o any sync to the queue, so if the processor | 224 // sampler writes w/o any sync to the queue, so if the processor |
224 // will get far behind, a record may be modified right under its | 225 // will get far behind, a record may be modified right under its |
225 // feet. | 226 // feet. |
226 TickSampleEventRecord record = *rec; | 227 TickSampleEventRecord record = *rec; |
227 if (record.order == dequeue_order) { | 228 if (record.order == dequeue_order) { |
228 // A paranoid check to make sure that we don't get a memory overrun | 229 // A paranoid check to make sure that we don't get a memory overrun |
229 // in case of frames_count having a wild value. | 230 // in case of frames_count having a wild value. |
230 if (record.sample.frames_count < 0 | 231 if (record.sample.frames_count < 0 |
231 || record.sample.frames_count > TickSample::kMaxFramesCount) | 232 || record.sample.frames_count > TickSample::kMaxFramesCount) |
232 record.sample.frames_count = 0; | 233 record.sample.frames_count = 0; |
233 generator_->RecordTickSample(record.sample); | 234 generator_->RecordTickSample(record.sample); |
234 ticks_buffer_.FinishDequeue(); | 235 ticks_buffer_.FinishDequeue(); |
235 } else { | 236 } else { |
236 return true; | 237 return true; |
237 } | 238 } |
238 } | 239 } |
240 return false; | |
239 } | 241 } |
240 | 242 |
241 | 243 |
244 void ProfilerEventsProcessor::ProcessEventsQueue(unsigned& dequeue_order, int64_t start, int64_t time_limit) { |
caseq
2012/08/17 12:56:35
ditto
| |
245 while (OS::Ticks() - start < time_limit) | |
246 // Process ticks until we have any. | |
247 if (ProcessTicks(dequeue_order, start, time_limit)) { | |
248 // All ticks of the current dequeue_order are processed, | |
249 // proceed to the next code event. | |
250 ProcessCodeEvent(dequeue_order); | |
251 } | |
252 } | |
253 | |
254 | |
242 void ProfilerEventsProcessor::Run() { | 255 void ProfilerEventsProcessor::Run() { |
243 unsigned dequeue_order = 0; | 256 unsigned dequeue_order = 0; |
244 | 257 |
245 while (running_) { | 258 while (running_) { |
246 // Process ticks until we have any. | 259 if (FLAG_sample_stack_in_postprocessor_thread) { |
247 if (ProcessTicks(dequeue_order)) { | 260 int64_t start = OS::Ticks(); |
248 // All ticks of the current dequeue_order are processed, | 261 DoSample(); |
249 // proceed to the next code event. | 262 ProcessEventsQueue(dequeue_order, start, static_cast<int64_t>(interval_)); |
250 ProcessCodeEvent(&dequeue_order); | 263 } else { |
264 // Process ticks until we have any. | |
265 if (ProcessTicks(dequeue_order, -1, -1)) { | |
266 // All ticks of the current dequeue_order are processed, | |
267 // proceed to the next code event. | |
268 ProcessCodeEvent(dequeue_order); | |
269 } | |
270 YieldCPU(); | |
251 } | 271 } |
252 YieldCPU(); | |
253 } | 272 } |
254 | 273 |
255 // Process remaining tick events. | 274 // Process remaining tick events. |
256 ticks_buffer_.FlushResidualRecords(); | 275 ticks_buffer_.FlushResidualRecords(); |
257 // Perform processing until we have tick events, skip remaining code events. | 276 // Perform processing until we have tick events, skip remaining code events. |
258 while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } | 277 while (ProcessTicks(dequeue_order, -1, -1) && ProcessCodeEvent(dequeue_order)) { } |
259 } | 278 } |
260 | 279 |
261 | 280 |
262 void CpuProfiler::StartProfiling(const char* title) { | 281 void CpuProfiler::StartProfiling(const char* title) { |
263 ASSERT(Isolate::Current()->cpu_profiler() != NULL); | 282 ASSERT(Isolate::Current()->cpu_profiler() != NULL); |
264 Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); | 283 Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); |
265 } | 284 } |
266 | 285 |
267 | 286 |
268 void CpuProfiler::StartProfiling(String* title) { | 287 void CpuProfiler::StartProfiling(String* title) { |
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
479 | 498 |
480 void CpuProfiler::StartCollectingProfile(String* title) { | 499 void CpuProfiler::StartCollectingProfile(String* title) { |
481 StartCollectingProfile(profiles_->GetName(title)); | 500 StartCollectingProfile(profiles_->GetName(title)); |
482 } | 501 } |
483 | 502 |
484 | 503 |
485 void CpuProfiler::StartProcessorIfNotStarted() { | 504 void CpuProfiler::StartProcessorIfNotStarted() { |
486 if (processor_ == NULL) { | 505 if (processor_ == NULL) { |
487 Isolate* isolate = Isolate::Current(); | 506 Isolate* isolate = Isolate::Current(); |
488 | 507 |
508 // Enable stack sampling. | |
509 Sampler* sampler = isolate->logger()->sampler(); | |
510 if (!sampler->IsActive()) { | |
511 sampler->Start(); | |
512 need_to_stop_sampler_ = true; | |
513 } | |
514 sampler->IncreaseProfilingDepth(); | |
489 // Disable logging when using the new implementation. | 515 // Disable logging when using the new implementation. |
490 saved_logging_nesting_ = isolate->logger()->logging_nesting_; | 516 saved_logging_nesting_ = isolate->logger()->logging_nesting_; |
491 isolate->logger()->logging_nesting_ = 0; | 517 isolate->logger()->logging_nesting_ = 0; |
492 generator_ = new ProfileGenerator(profiles_); | 518 generator_ = new ProfileGenerator(profiles_); |
493 processor_ = new ProfilerEventsProcessor(generator_); | 519 processor_ = new ProfilerEventsProcessor(generator_, sampler, sampler->interval() * 1000); |
caseq
2012/08/17 12:56:35
Do we have to pass interval explicitly?
| |
494 NoBarrier_Store(&is_profiling_, true); | 520 NoBarrier_Store(&is_profiling_, true); |
495 processor_->Start(); | 521 processor_->Start(); |
496 // Enumerate stuff we already have in the heap. | 522 // Enumerate stuff we already have in the heap. |
497 if (isolate->heap()->HasBeenSetUp()) { | 523 if (isolate->heap()->HasBeenSetUp()) { |
498 if (!FLAG_prof_browser_mode) { | 524 if (!FLAG_prof_browser_mode) { |
499 bool saved_log_code_flag = FLAG_log_code; | 525 bool saved_log_code_flag = FLAG_log_code; |
500 FLAG_log_code = true; | 526 FLAG_log_code = true; |
501 isolate->logger()->LogCodeObjects(); | 527 isolate->logger()->LogCodeObjects(); |
502 FLAG_log_code = saved_log_code_flag; | 528 FLAG_log_code = saved_log_code_flag; |
503 } | 529 } |
504 isolate->logger()->LogCompiledFunctions(); | 530 isolate->logger()->LogCompiledFunctions(); |
505 isolate->logger()->LogAccessorCallbacks(); | 531 isolate->logger()->LogAccessorCallbacks(); |
506 } | 532 } |
507 // Enable stack sampling. | |
508 Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_); | |
509 if (!sampler->IsActive()) { | |
510 sampler->Start(); | |
511 need_to_stop_sampler_ = true; | |
512 } | |
513 sampler->IncreaseProfilingDepth(); | |
514 } | 533 } |
515 } | 534 } |
516 | 535 |
517 | 536 |
518 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { | 537 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { |
519 const double actual_sampling_rate = generator_->actual_sampling_rate(); | 538 const double actual_sampling_rate = generator_->actual_sampling_rate(); |
520 StopProcessorIfLastProfile(title); | 539 StopProcessorIfLastProfile(title); |
521 CpuProfile* result = | 540 CpuProfile* result = |
522 profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken, | 541 profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken, |
523 title, | 542 title, |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
573 | 592 |
574 void CpuProfiler::TearDown() { | 593 void CpuProfiler::TearDown() { |
575 Isolate* isolate = Isolate::Current(); | 594 Isolate* isolate = Isolate::Current(); |
576 if (isolate->cpu_profiler() != NULL) { | 595 if (isolate->cpu_profiler() != NULL) { |
577 delete isolate->cpu_profiler(); | 596 delete isolate->cpu_profiler(); |
578 } | 597 } |
579 isolate->set_cpu_profiler(NULL); | 598 isolate->set_cpu_profiler(NULL); |
580 } | 599 } |
581 | 600 |
582 } } // namespace v8::internal | 601 } } // namespace v8::internal |
OLD | NEW |