
Unified Diff: src/cpu-profiler.cc

Issue 18058008: CPUProfiler: Improve line numbers support in profiler. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: cosmetic changes (created 7 years, 5 months ago)
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 34 matching lines...)
static const int kProfilerStackSize = 64 * KB;


ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      running_(true),
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
-      enqueue_order_(0) {
+      last_code_event_id_(0), last_processed_code_event_id_(0) {
}


void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
-  event.generic.order = ++enqueue_order_;
+  event.generic.order = ++last_code_event_id_;
  events_buffer_.Enqueue(event);
}
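
Note: the renames in this hunk split the old enqueue_order_ counter into two ids with distinct owners. last_code_event_id_ is bumped on the producer side every time a code event is enqueued, while last_processed_code_event_id_ trails it on the processor thread as events are applied to the code map. A tick sample records the producer id current at sampling time, which tells the processor how far the code map must have advanced before the tick's addresses can be resolved. A minimal standalone sketch of that handshake follows; the V8 counter names are kept, everything else is illustrative.

    // Sketch of the id handshake (V8 counter names kept; the
    // surrounding types are illustrative, not the real V8 classes).
    #include <cassert>

    static unsigned last_code_event_id = 0;            // Bumped by producers.
    static unsigned last_processed_code_event_id = 0;  // Advanced by the processor.

    struct TickRecord {
      explicit TickRecord(unsigned order) : order(order) {}
      unsigned order;  // Newest code event id that existed at sampling time.
    };

    static unsigned EnqueueCodeEvent() { return ++last_code_event_id; }
    static void ProcessCodeEvent(unsigned id) { last_processed_code_event_id = id; }

    int main() {
      unsigned id = EnqueueCodeEvent();     // Some code object was created/moved.
      TickRecord tick(last_code_event_id);  // Tick taken after that event.
      ProcessCodeEvent(id);                 // Code map now reflects the event.
      // Only now do the tick's addresses resolve against a current code map.
      assert(tick.order == last_processed_code_event_id);
      return 0;
    }
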


void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
-  TickSampleEventRecord record(enqueue_order_);
+  TickSampleEventRecord record(last_code_event_id_);
  TickSample* sample = &record.sample;
  sample->state = isolate->current_vm_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  for (StackTraceFrameIterator it(isolate);
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    sample->stack[sample->frames_count++] = it.frame()->pc();
  }
  ticks_from_vm_buffer_.Enqueue(record);
}


-bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
+void ProfilerEventsProcessor::StopSynchronously() {
+  if (!running_) return;
+  running_ = false;
+  Join();
+}
+
+
+bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
-    *dequeue_order = record.generic.order;
+    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}
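
Note: the new StopSynchronously() folds the previous Stop()/Join() pair into a single call, so no caller can request a stop and forget to wait for it; the running_ guard also makes a second call a harmless no-op. Below is a runnable analogue using std::thread rather than the V8 Thread wrapper (the class and names are illustrative); the point is the contract that Run() has fully returned by the time the call completes.

    // Runnable analogue of StopSynchronously() (illustrative, not V8 code).
    #include <atomic>
    #include <thread>

    class Processor {
     public:
      void Start() { thread_ = std::thread([this] { Run(); }); }
      void StopSynchronously() {
        if (!running_.exchange(false)) return;  // Idempotent, like the guard above.
        thread_.join();                         // Block until Run() returns.
      }
     private:
      void Run() {
        while (running_) { /* drain queues, yield */ }
        // The final drain happens here, before join() unblocks the caller.
      }
      std::atomic<bool> running_{true};
      std::thread thread_;
    };

    int main() {
      Processor p;
      p.Start();
      p.StopSynchronously();  // After this line the worker is guaranteed gone.
      return 0;
    }
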


-bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
+bool ProfilerEventsProcessor::ProcessTicks() {
  while (true) {
    if (!ticks_from_vm_buffer_.IsEmpty()
-        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
+        && ticks_from_vm_buffer_.Peek()->order ==
+           last_processed_code_event_id_) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible as the
    // sampler writes w/o any sync to the queue, so if the processor
    // will get far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
-    if (record.order == dequeue_order) {
-      // A paranoid check to make sure that we don't get a memory overrun
-      // in case of frames_count having a wild value.
-      if (record.sample.frames_count < 0
-          || record.sample.frames_count > TickSample::kMaxFramesCount)
-        record.sample.frames_count = 0;
-      generator_->RecordTickSample(record.sample);
-      ticks_buffer_.FinishDequeue();
-    } else {
-      return true;
-    }
+    if (record.order != last_processed_code_event_id_) return true;
+
+    // A paranoid check to make sure that we don't get a memory overrun
+    // in case of frames_count having a wild value.
+    if (record.sample.frames_count < 0
+        || record.sample.frames_count > TickSample::kMaxFramesCount)
+      record.sample.frames_count = 0;
+    generator_->RecordTickSample(record.sample);
+    ticks_buffer_.FinishDequeue();
  }
}
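
Note: the comment above describes a copy-then-validate pattern: the sampler writes into ticks_buffer_ without synchronization, so the processor snapshots the record before using it and clamps frames_count on the copy, bounding the damage from a torn read to one discarded sample. A simplified sketch of the same pattern (illustrative names; the real record type lives in cpu-profiler.h):

    // Copy-then-validate on a slot that another thread may be
    // overwriting: snapshot first, then range-check the snapshot.
    #include <cstring>

    const int kMaxFramesCount = 64;

    struct Sample {
      int frames_count;
      void* stack[kMaxFramesCount];
    };

    void ProcessSlot(const Sample* slot) {
      Sample copy;
      std::memcpy(&copy, slot, sizeof(copy));  // Snapshot before validating.
      if (copy.frames_count < 0 || copy.frames_count > kMaxFramesCount)
        copy.frames_count = 0;                 // The "paranoid check" above.
      // ... attribute copy.stack[0] .. copy.stack[copy.frames_count - 1] ...
    }

    int main() {
      Sample s = { 2, { 0 } };
      ProcessSlot(&s);
      return 0;
    }
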


void ProfilerEventsProcessor::Run() {
-  unsigned dequeue_order = 0;
-
  while (running_) {
    // Process ticks until we have any.
-    if (ProcessTicks(dequeue_order)) {
-      // All ticks of the current dequeue_order are processed,
+    if (ProcessTicks()) {
+      // All ticks of the current last_processed_code_event_id_ are processed,
      // proceed to the next code event.
-      ProcessCodeEvent(&dequeue_order);
+      ProcessCodeEvent();
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
-  // Perform processing until we have tick events, skip remaining code events.
-  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+  do {
+    ProcessTicks();
+  } while (ProcessCodeEvent());
}
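
Note: the shutdown drain changes shape here. The old while loop stopped as soon as ProcessTicks() reported no pending ticks and, per its comment, deliberately skipped remaining code events; the new do/while applies every remaining code event and re-runs ProcessTicks() at the top of each iteration, so ticks tagged with later event ids are still attributed during shutdown. A toy model of the new drain (plain queues stand in for the real buffers; all names illustrative):

    // Each iteration consumes the ticks tagged with the current event id,
    // then applies the next code event; the loop ends only when no code
    // events remain, so ticks tagged with the last id are not stranded.
    #include <cstdio>
    #include <queue>

    static std::queue<unsigned> code_events;  // Each entry: the event's id.
    static std::queue<unsigned> ticks;        // Each entry: the id it was tagged with.
    static unsigned last_processed_code_event_id = 0;

    static void ProcessTicks() {
      while (!ticks.empty() && ticks.front() == last_processed_code_event_id) {
        std::printf("tick attributed at id %u\n", ticks.front());
        ticks.pop();
      }
    }

    static bool ProcessCodeEvent() {
      if (code_events.empty()) return false;
      last_processed_code_event_id = code_events.front();
      code_events.pop();
      return true;
    }

    int main() {
      code_events.push(1);
      code_events.push(2);
      ticks.push(1);
      ticks.push(2);  // Needs both code events applied before attribution.
      do {
        ProcessTicks();
      } while (ProcessCodeEvent());
      return 0;  // Both ticks were printed; nothing is stranded.
    }
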


int CpuProfiler::GetProfilesCount() {
  // The count of profiles doesn't depend on a security token.
  return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length();
}


CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
(...skipping 338 matching lines...)

void CpuProfiler::StopProcessor() {
  Logger* logger = isolate_->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  is_profiling_ = false;
-  processor_->Stop();
-  processor_->Join();
+  processor_->StopSynchronously();
  delete processor_;
  delete generator_;
  processor_ = NULL;
  generator_ = NULL;
  logger->logging_nesting_ = saved_logging_nesting_;
}


void CpuProfiler::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  ASSERT(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    rec->start = builtins->builtin(id)->address();
    rec->builtin_id = id;
    processor_->Enqueue(evt_rec);
  }
}


} }  // namespace v8::internal
