OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 34 matching lines...) | |
45 static const int kProfilerStackSize = 64 * KB; | 45 static const int kProfilerStackSize = 64 * KB; |
46 | 46 |
47 | 47 |
48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) | 48 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) |
49 : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), | 49 : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), |
50 generator_(generator), | 50 generator_(generator), |
51 running_(true), | 51 running_(true), |
52 ticks_buffer_(sizeof(TickSampleEventRecord), | 52 ticks_buffer_(sizeof(TickSampleEventRecord), |
53 kTickSamplesBufferChunkSize, | 53 kTickSamplesBufferChunkSize, |
54 kTickSamplesBufferChunksCount), | 54 kTickSamplesBufferChunksCount), |
55 enqueue_order_(0) { | 55 last_recorded_code_event_id_(0), last_processed_code_event_id_(0) { |
56 } | 56 } |
57 | 57 |
58 | 58 |
59 void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) { | 59 void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) { |
60 event.generic.order = ++enqueue_order_; | 60 event.generic.order = ++last_recorded_code_event_id_; |
61 events_buffer_.Enqueue(event); | 61 events_buffer_.Enqueue(event); |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) { | 65 void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) { |
66 TickSampleEventRecord record(enqueue_order_); | 66 TickSampleEventRecord record(last_recorded_code_event_id_); |
67 TickSample* sample = &record.sample; | 67 TickSample* sample = &record.sample; |
68 sample->state = isolate->current_vm_state(); | 68 sample->state = isolate->current_vm_state(); |
69 sample->pc = reinterpret_cast<Address>(sample); // Not NULL. | 69 sample->pc = reinterpret_cast<Address>(sample); // Not NULL. |
70 for (StackTraceFrameIterator it(isolate); | 70 for (StackTraceFrameIterator it(isolate); |
71 !it.done() && sample->frames_count < TickSample::kMaxFramesCount; | 71 !it.done() && sample->frames_count < TickSample::kMaxFramesCount; |
72 it.Advance()) { | 72 it.Advance()) { |
73 sample->stack[sample->frames_count++] = it.frame()->pc(); | 73 sample->stack[sample->frames_count++] = it.frame()->pc(); |
74 } | 74 } |
75 ticks_from_vm_buffer_.Enqueue(record); | 75 ticks_from_vm_buffer_.Enqueue(record); |
76 } | 76 } |
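The two counters introduced by this patch encode an ordering invariant: every code event gets a monotonically increasing id (last_recorded_code_event_id_), each tick sample is stamped with the id current when it was taken, and the processor only consumes a sample once it has applied all code events up to that id to the code map. A minimal sketch of that invariant, using simplified stand-in types and queues rather than V8's real structures:

```cpp
// Simplified sketch of the code-event / tick-sample ordering used above.
// Types and queues are illustrative stand-ins, not V8's real structures.
#include <cstdint>
#include <queue>

struct CodeEvent { uint64_t id; };
struct Tick { uint64_t code_event_id; };  // id of the last recorded code event

uint64_t last_recorded = 0;   // producer side: bumped per code event
uint64_t last_processed = 0;  // consumer side: advanced as events are applied

std::queue<CodeEvent> code_events;
std::queue<Tick> ticks;

void RecordCodeEvent() { code_events.push({++last_recorded}); }
void RecordTick() { ticks.push({last_recorded}); }

void ProcessOneStep() {
  // Consume ticks stamped with the id we have already fully processed;
  // once the next tick is newer, apply one more code event and repeat.
  if (!ticks.empty() && ticks.front().code_event_id == last_processed) {
    ticks.pop();  // symbolize this sample against the current code map
  } else if (!code_events.empty()) {
    last_processed = code_events.front().id;
    code_events.pop();
  }
}
```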
77 | 77 |
78 | 78 |
79 bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) { | 79 void ProfilerEventsProcessor::Start() { |
80 last_processed_code_event_id_ = 0; | |
81 while (ProcessCodeEvent()) { } | |
yurys 2013/07/05 13:10:00
Why do we have to do this processing on the main thread?
loislo 2013/07/05 13:15:37
reverted
| |
82 StartSynchronously(); | |
83 } | |
84 | |
85 | |
86 void ProfilerEventsProcessor::Stop() { | |
yurys 2013/07/05 13:10:00
StopAndJoin or StopSynchronously.
loislo 2013/07/05 13:15:37
Done.
| |
87 if (!running_) return; | |
88 running_ = false; | |
89 Join(); | |
90 } | |
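The reviewer's point is that Stop() here both clears the running flag and joins the thread, so a name like StopAndJoin or StopSynchronously describes it better. A generic sketch of that shutdown pattern, written with std::thread and std::atomic rather than V8's internal Thread class:

```cpp
// Generic stop-and-join shutdown, analogous to the Stop() above; uses the
// standard library instead of V8's Thread/StartSynchronously/Join helpers.
#include <atomic>
#include <thread>

class Worker {
 public:
  void Start() {
    running_ = true;
    thread_ = std::thread([this] { Run(); });
  }
  void StopSynchronously() {
    if (!running_) return;
    running_ = false;   // ask the loop to exit...
    thread_.join();     // ...and block until it actually has
  }

 private:
  void Run() {
    while (running_) {
      // process events, then yield
    }
  }
  std::atomic<bool> running_{false};
  std::thread thread_;
};
```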
91 | |
92 | |
93 bool ProfilerEventsProcessor::ProcessCodeEvent() { | |
80 CodeEventsContainer record; | 94 CodeEventsContainer record; |
81 if (events_buffer_.Dequeue(&record)) { | 95 if (events_buffer_.Dequeue(&record)) { |
82 switch (record.generic.type) { | 96 switch (record.generic.type) { |
83 #define PROFILER_TYPE_CASE(type, clss) \ | 97 #define PROFILER_TYPE_CASE(type, clss) \ |
84 case CodeEventRecord::type: \ | 98 case CodeEventRecord::type: \ |
85 record.clss##_.UpdateCodeMap(generator_->code_map()); \ | 99 record.clss##_.UpdateCodeMap(generator_->code_map()); \ |
86 break; | 100 break; |
87 | 101 |
88 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) | 102 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE) |
89 | 103 |
90 #undef PROFILER_TYPE_CASE | 104 #undef PROFILER_TYPE_CASE |
91 default: return true; // Skip record. | 105 default: return true; // Skip record. |
92 } | 106 } |
93 *dequeue_order = record.generic.order; | 107 last_processed_code_event_id_ = record.generic.order; |
94 return true; | 108 return true; |
95 } | 109 } |
96 return false; | 110 return false; |
97 } | 111 } |
98 | 112 |
99 | 113 |
100 bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { | 114 bool ProfilerEventsProcessor::ProcessTicks() { |
101 while (true) { | 115 while (true) { |
102 if (!ticks_from_vm_buffer_.IsEmpty() | 116 if (!ticks_from_vm_buffer_.IsEmpty() |
103 && ticks_from_vm_buffer_.Peek()->order == dequeue_order) { | 117 && ticks_from_vm_buffer_.Peek()->order == |
118 last_processed_code_event_id_) { | |
104 TickSampleEventRecord record; | 119 TickSampleEventRecord record; |
105 ticks_from_vm_buffer_.Dequeue(&record); | 120 ticks_from_vm_buffer_.Dequeue(&record); |
106 generator_->RecordTickSample(record.sample); | 121 generator_->RecordTickSample(record.sample); |
107 } | 122 } |
108 | 123 |
109 const TickSampleEventRecord* rec = | 124 const TickSampleEventRecord* rec = |
110 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); | 125 TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); |
111 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); | 126 if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); |
112 // Make a local copy of tick sample record to ensure that it won't | 127 // Make a local copy of tick sample record to ensure that it won't |
113 // be modified as we are processing it. This is possible as the | 128 // be modified as we are processing it. This is possible as the |
114 // sampler writes w/o any sync to the queue, so if the processor | 129 // sampler writes w/o any sync to the queue, so if the processor |
115 // will get far behind, a record may be modified right under its | 130 // will get far behind, a record may be modified right under its |
116 // feet. | 131 // feet. |
117 TickSampleEventRecord record = *rec; | 132 TickSampleEventRecord record = *rec; |
118 if (record.order == dequeue_order) { | 133 if (record.order != last_processed_code_event_id_) return true; |
119 // A paranoid check to make sure that we don't get a memory overrun | 134 |
120 // in case of frames_count having a wild value. | 135 // A paranoid check to make sure that we don't get a memory overrun |
121 if (record.sample.frames_count < 0 | 136 // in case of frames_count having a wild value. |
122 || record.sample.frames_count > TickSample::kMaxFramesCount) | 137 if (record.sample.frames_count < 0 |
123 record.sample.frames_count = 0; | 138 || record.sample.frames_count > TickSample::kMaxFramesCount) |
124 generator_->RecordTickSample(record.sample); | 139 record.sample.frames_count = 0; |
125 ticks_buffer_.FinishDequeue(); | 140 generator_->RecordTickSample(record.sample); |
126 } else { | 141 ticks_buffer_.FinishDequeue(); |
127 return true; | |
128 } | |
129 } | 142 } |
130 } | 143 } |
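The comment in ProcessTicks explains why each record is copied out of the ring buffer before use: the sampler writes into the queue without synchronization, so a lagging consumer can observe a record being overwritten mid-read. A small sketch of that copy-then-validate pattern, with hypothetical names and bounds rather than the real SamplingCircularQueue API:

```cpp
// Copy-then-validate read of a slot that an unsynchronized producer may
// overwrite at any time. Names and bounds are illustrative, not V8's.
struct RawSample {
  unsigned order;
  int frames_count;
  void* stack[255];
};

const int kMaxFrames = 255;

bool ConsumeSample(const RawSample* slot, unsigned expected_order,
                   void (*record)(const RawSample&)) {
  if (slot == NULL) return false;
  RawSample local = *slot;   // copy first: the slot may change under our feet
  if (local.order != expected_order) return false;  // newer than our code map
  // Paranoid clamp in case frames_count was torn by a concurrent write.
  if (local.frames_count < 0 || local.frames_count > kMaxFrames)
    local.frames_count = 0;
  record(local);             // only the private copy is used from here on
  return true;
}
```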
131 | 144 |
132 | 145 |
133 void ProfilerEventsProcessor::Run() { | 146 void ProfilerEventsProcessor::Run() { |
134 unsigned dequeue_order = 0; | |
135 | |
136 while (running_) { | 147 while (running_) { |
137 // Process ticks until we have any. | 148 // Process ticks until we have any. |
138 if (ProcessTicks(dequeue_order)) { | 149 if (ProcessTicks()) { |
139 // All ticks of the current dequeue_order are processed, | 150 // All ticks of the current last_processed_code_event_id_ are processed, |
140 // proceed to the next code event. | 151 // proceed to the next code event. |
141 ProcessCodeEvent(&dequeue_order); | 152 ProcessCodeEvent(); |
142 } | 153 } |
143 YieldCPU(); | 154 YieldCPU(); |
144 } | 155 } |
145 | 156 |
146 // Process remaining tick events. | 157 // Process remaining tick events. |
147 ticks_buffer_.FlushResidualRecords(); | 158 ticks_buffer_.FlushResidualRecords(); |
148 // Perform processing until we have tick events, skip remaining code events. | 159 do { |
149 while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } | 160 ProcessTicks(); |
161 } while (ProcessCodeEvent()); | |
150 } | 162 } |
151 | 163 |
152 | 164 |
153 int CpuProfiler::GetProfilesCount() { | 165 int CpuProfiler::GetProfilesCount() { |
154 // The count of profiles doesn't depend on a security token. | 166 // The count of profiles doesn't depend on a security token. |
155 return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length(); | 167 return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length(); |
156 } | 168 } |
157 | 169 |
158 | 170 |
159 CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) { | 171 CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) { |
(...skipping 278 matching lines...) | |
438 | 450 |
439 void CpuProfiler::StartProcessorIfNotStarted() { | 451 void CpuProfiler::StartProcessorIfNotStarted() { |
440 if (processor_ == NULL) { | 452 if (processor_ == NULL) { |
441 Logger* logger = isolate_->logger(); | 453 Logger* logger = isolate_->logger(); |
442 // Disable logging when using the new implementation. | 454 // Disable logging when using the new implementation. |
443 saved_logging_nesting_ = logger->logging_nesting_; | 455 saved_logging_nesting_ = logger->logging_nesting_; |
444 logger->logging_nesting_ = 0; | 456 logger->logging_nesting_ = 0; |
445 generator_ = new ProfileGenerator(profiles_); | 457 generator_ = new ProfileGenerator(profiles_); |
446 processor_ = new ProfilerEventsProcessor(generator_); | 458 processor_ = new ProfilerEventsProcessor(generator_); |
447 is_profiling_ = true; | 459 is_profiling_ = true; |
448 processor_->StartSynchronously(); | |
449 // Enumerate stuff we already have in the heap. | 460 // Enumerate stuff we already have in the heap. |
450 ASSERT(isolate_->heap()->HasBeenSetUp()); | 461 ASSERT(isolate_->heap()->HasBeenSetUp()); |
451 if (!FLAG_prof_browser_mode) { | 462 if (!FLAG_prof_browser_mode) { |
452 logger->LogCodeObjects(); | 463 logger->LogCodeObjects(); |
453 } | 464 } |
454 logger->LogCompiledFunctions(); | 465 logger->LogCompiledFunctions(); |
455 logger->LogAccessorCallbacks(); | 466 logger->LogAccessorCallbacks(); |
456 LogBuiltins(); | 467 LogBuiltins(); |
468 processor_->Start(); | |
loislo 2013/07/05 13:15:37
reverted
| |
457 // Enable stack sampling. | 469 // Enable stack sampling. |
458 Sampler* sampler = logger->sampler(); | 470 Sampler* sampler = logger->sampler(); |
459 sampler->IncreaseProfilingDepth(); | 471 sampler->IncreaseProfilingDepth(); |
460 if (!sampler->IsActive()) { | 472 if (!sampler->IsActive()) { |
461 sampler->Start(); | 473 sampler->Start(); |
462 need_to_stop_sampler_ = true; | 474 need_to_stop_sampler_ = true; |
463 } | 475 } |
464 } | 476 } |
465 } | 477 } |
466 | 478 |
(...skipping 31 matching lines...) | |
498 void CpuProfiler::StopProcessor() { | 510 void CpuProfiler::StopProcessor() { |
499 Logger* logger = isolate_->logger(); | 511 Logger* logger = isolate_->logger(); |
500 Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_); | 512 Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_); |
501 sampler->DecreaseProfilingDepth(); | 513 sampler->DecreaseProfilingDepth(); |
502 if (need_to_stop_sampler_) { | 514 if (need_to_stop_sampler_) { |
503 sampler->Stop(); | 515 sampler->Stop(); |
504 need_to_stop_sampler_ = false; | 516 need_to_stop_sampler_ = false; |
505 } | 517 } |
506 is_profiling_ = false; | 518 is_profiling_ = false; |
507 processor_->Stop(); | 519 processor_->Stop(); |
508 processor_->Join(); | |
509 delete processor_; | 520 delete processor_; |
510 delete generator_; | 521 delete generator_; |
511 processor_ = NULL; | 522 processor_ = NULL; |
512 generator_ = NULL; | 523 generator_ = NULL; |
513 logger->logging_nesting_ = saved_logging_nesting_; | 524 logger->logging_nesting_ = saved_logging_nesting_; |
514 } | 525 } |
515 | 526 |
516 | 527 |
517 void CpuProfiler::LogBuiltins() { | 528 void CpuProfiler::LogBuiltins() { |
518 Builtins* builtins = isolate_->builtins(); | 529 Builtins* builtins = isolate_->builtins(); |
519 ASSERT(builtins->is_initialized()); | 530 ASSERT(builtins->is_initialized()); |
520 for (int i = 0; i < Builtins::builtin_count; i++) { | 531 for (int i = 0; i < Builtins::builtin_count; i++) { |
521 CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN); | 532 CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN); |
522 ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_; | 533 ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_; |
523 Builtins::Name id = static_cast<Builtins::Name>(i); | 534 Builtins::Name id = static_cast<Builtins::Name>(i); |
524 rec->start = builtins->builtin(id)->address(); | 535 rec->start = builtins->builtin(id)->address(); |
525 rec->builtin_id = id; | 536 rec->builtin_id = id; |
526 processor_->Enqueue(evt_rec); | 537 processor_->Enqueue(evt_rec); |
527 } | 538 } |
528 } | 539 } |
529 | 540 |
530 | 541 |
531 } } // namespace v8::internal | 542 } } // namespace v8::internal |