OLD | NEW |
---|---|
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include <cstdio> | 5 #include <cstdio> |
6 | 6 |
7 #include "platform/utils.h" | 7 #include "platform/utils.h" |
8 | 8 |
9 #include "vm/atomic.h" | |
9 #include "vm/isolate.h" | 10 #include "vm/isolate.h" |
10 #include "vm/json_stream.h" | 11 #include "vm/json_stream.h" |
11 #include "vm/native_symbol.h" | 12 #include "vm/native_symbol.h" |
12 #include "vm/object.h" | 13 #include "vm/object.h" |
13 #include "vm/os.h" | 14 #include "vm/os.h" |
14 #include "vm/profiler.h" | 15 #include "vm/profiler.h" |
15 #include "vm/signal_handler.h" | 16 #include "vm/signal_handler.h" |
16 | 17 |
17 namespace dart { | 18 namespace dart { |
18 | 19 |
19 // Notes on locking and signal handling: | |
20 // | |
21 // The ProfilerManager has a single monitor (monitor_). This monitor guards | |
22 // access to the schedule list of isolates (isolates_, isolates_size_, etc). | |
23 // | |
24 // Each isolate has a mutex (profiler_data_mutex_) which protects access | |
25 // to the isolate's profiler data. | |
26 // | |
27 // Locks can be taken in this order: | |
28 // 1. ProfilerManager::monitor_ | |
29 // 2. isolate->profiler_data_mutex_ | |
30 // In other words, it is not acceptable to take ProfilerManager::monitor_ | |
31 // after grabbing isolate->profiler_data_mutex_. | |
32 // | |
33 // ProfileManager::monitor_ taking entry points: | |
34 // InitOnce, Shutdown | |
35 // ProfilerManager::monitor_ | |
36 // ScheduleIsolate, DescheduleIsolate. | |
37 // ProfilerManager::monitor_, isolate->profiler_data_mutex_ | |
38 // ThreadMain | |
39 // isolate->profiler_data_mutex_ taking entry points: | |
40 // SetupIsolateForProfiling, FreeIsolateForProfiling. | |
41 // ProfilerManager::monitor_, isolate->profiler_data_mutex_ | |
42 // ScheduleIsolate, DescheduleIsolate. | |
43 // ProfilerManager::monitor_, isolate->profiler_data_mutex_ | |
44 // ProfileSignalAction | |
45 // isolate->profiler_data_mutex_ | |
46 // ProfilerManager::monitor_, isolate->profiler_data_mutex_ | |
47 // | |
48 // Signal handling and locking: | |
49 // On OSes with pthreads (Android, Linux, and Mac) we use signal delivery | |
50 // to interrupt the isolate running thread for sampling. After a thread | |
51 // is sent the SIGPROF, it is removed from the scheduled isolate list. | |
52 // Inside the signal handler, after the sample is taken, the isolate is | |
53 // added to the scheduled isolate list again. The side effect of this is | |
54 // that the signal handler must be able to acquire the isolate profiler data | |
55 // mutex and the profile manager monitor. When an isolate running thread | |
56 // (potential signal target) calls into an entry point which acquires | |
57 // ProfileManager::monitor_ signal delivery must be blocked. An example is | |
58 // ProfilerManager::ScheduleIsolate, which blocks signal delivery while | |
59 // scheduling the isolate. | |
60 // | |
61 | 20 |
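The lock-ordering rule spelled out in the removed notes above reduces to: any path that needs both locks must take ProfilerManager::monitor_ before isolate->profiler_data_mutex_, never the reverse. A minimal standalone sketch of that discipline, with std::mutex standing in for the VM's Monitor/Mutex classes (the function names below are illustrative, not VM entry points):

```cpp
#include <mutex>

// Stand-ins for ProfilerManager::monitor_ and isolate->profiler_data_mutex_.
std::mutex manager_monitor;
std::mutex profiler_data_mutex;

// A ScheduleIsolate-like path: manager lock first, then the per-isolate lock.
void ScheduleLikePath() {
  std::lock_guard<std::mutex> manager(manager_monitor);
  std::lock_guard<std::mutex> data(profiler_data_mutex);
  // ... update the scheduled-isolate list and the isolate's profiler data ...
}

// A sampler-like path: once profiler_data_mutex is held, the code must not
// try to take manager_monitor afterwards, or it can deadlock against the
// path above, which holds manager_monitor while waiting for profiler_data_mutex.
void SamplerLikePath() {
  std::lock_guard<std::mutex> data(profiler_data_mutex);
  // ... touch only this isolate's profiler data ...
}
```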
62 // Notes on stack frame walking: | 21 // Notes on stack frame walking: |
63 // | 22 // |
64 // The sampling profiler will collect up to Sample::kNumStackFrames stack frames | 23 // The sampling profiler will collect up to Sample::kNumStackFrames stack frames |
65 // The stack frame walking code uses the frame pointer to traverse the stack. | 24 // The stack frame walking code uses the frame pointer to traverse the stack. |
66 // If the VM is compiled without frame pointers (which is the default on | 25 // If the VM is compiled without frame pointers (which is the default on |
67 // recent GCC versions with optimization enabled) the stack walking code will | 26 // recent GCC versions with optimization enabled) the stack walking code will |
68 // fail (sometimes leading to a crash). | 27 // fail (sometimes leading to a crash). |
69 // | 28 // |
70 | 29 |
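The frame-pointer walk described in the notes above can be sketched independently of the VM: with frame pointers enabled, each frame holds the caller's saved frame pointer and a return address, so the walker hops from frame to frame until it runs out of budget or leaves the sampled thread's stack bounds. The Frame layout and WalkFramePointers function below are illustrative assumptions, not the VM's ProfilerSampleStackWalker:

```cpp
#include <cstddef>
#include <cstdint>

// Assumed frame layout when compiling with frame pointers: the saved caller
// frame pointer sits at *fp and the return address at *(fp + 1).
struct Frame {
  Frame*    caller_fp;
  uintptr_t return_pc;
};

// Collect up to max_frames return addresses into pcs, starting from the
// interrupted pc/fp. The stack bounds keep a corrupt frame pointer from
// sending the walker into unmapped memory.
size_t WalkFramePointers(uintptr_t pc, Frame* fp,
                         uintptr_t stack_lower, uintptr_t stack_upper,
                         uintptr_t* pcs, size_t max_frames) {
  size_t count = 0;
  if (count < max_frames) {
    pcs[count++] = pc;  // the interrupted PC is the leaf frame
  }
  while (count < max_frames) {
    uintptr_t fp_addr = reinterpret_cast<uintptr_t>(fp);
    if (fp == nullptr ||
        fp_addr < stack_lower ||
        fp_addr + sizeof(Frame) > stack_upper) {
      break;  // frame pointer left the stack bounds; stop walking
    }
    pcs[count++] = fp->return_pc;
    Frame* next = fp->caller_fp;
    if (next <= fp) {
      break;  // stacks grow down, so each caller frame must sit higher
    }
    fp = next;
  }
  return count;
}
```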
71 DEFINE_FLAG(bool, profile, false, "Enable Sampling Profiler"); | 30 DEFINE_FLAG(bool, profile, true, "Enable Sampling Profiler"); |
72 DEFINE_FLAG(bool, trace_profiled_isolates, false, "Trace profiled isolates."); | 31 DEFINE_FLAG(bool, trace_profiled_isolates, false, "Trace profiled isolates."); |
73 | 32 |
74 bool ProfilerManager::initialized_ = false; | 33 bool Profiler::initialized_ = false; |
75 bool ProfilerManager::shutdown_ = false; | 34 Monitor* Profiler::monitor_ = NULL; |
76 bool ProfilerManager::thread_running_ = false; | 35 SampleBuffer* Profiler::sample_buffer_ = NULL; |
77 Monitor* ProfilerManager::monitor_ = NULL; | |
78 Monitor* ProfilerManager::start_stop_monitor_ = NULL; | |
79 Isolate** ProfilerManager::isolates_ = NULL; | |
80 intptr_t ProfilerManager::isolates_capacity_ = 0; | |
81 intptr_t ProfilerManager::isolates_size_ = 0; | |
82 | 36 |
83 | 37 void Profiler::InitOnce() { |
84 void ProfilerManager::InitOnce() { | |
85 #if defined(USING_SIMULATOR) | |
86 // Force disable of profiling on simulator. | |
87 FLAG_profile = false; | |
88 #endif | |
89 #if defined(TARGET_OS_WINDOWS) | |
90 // Force disable of profiling on Windows. | |
91 FLAG_profile = false; | |
92 #endif | |
93 if (!FLAG_profile) { | 38 if (!FLAG_profile) { |
94 return; | 39 return; |
95 } | 40 } |
41 ASSERT(!initialized_); | |
42 initialized_ = true; | |
43 monitor_ = new Monitor(); | |
44 sample_buffer_ = new SampleBuffer(); | |
96 NativeSymbolResolver::InitOnce(); | 45 NativeSymbolResolver::InitOnce(); |
97 ASSERT(!initialized_); | |
98 monitor_ = new Monitor(); | |
99 start_stop_monitor_ = new Monitor(); | |
100 initialized_ = true; | |
101 ResizeIsolates(16); | |
102 if (FLAG_trace_profiled_isolates) { | |
103 OS::Print("ProfilerManager starting up.\n"); | |
104 } | |
105 { | |
106 ScopedMonitor startup_lock(start_stop_monitor_); | |
107 Thread::Start(ThreadMain, 0); | |
108 while (!thread_running_) { | |
109 // Wait until profiler thread has started up. | |
110 startup_lock.Wait(); | |
111 } | |
112 } | |
113 if (FLAG_trace_profiled_isolates) { | |
114 OS::Print("ProfilerManager running.\n"); | |
115 } | |
116 } | 46 } |
117 | 47 |
118 | 48 |
119 void ProfilerManager::Shutdown() { | 49 void Profiler::Shutdown() { |
120 if (!FLAG_profile) { | 50 if (!FLAG_profile) { |
121 return; | 51 return; |
122 } | 52 } |
123 ASSERT(initialized_); | 53 ASSERT(initialized_); |
124 if (FLAG_trace_profiled_isolates) { | |
125 OS::Print("ProfilerManager shutting down.\n"); | |
126 } | |
127 intptr_t size_at_shutdown = 0; | |
128 { | |
129 ScopedSignalBlocker ssb; | |
130 { | |
131 ScopedMonitor lock(monitor_); | |
132 shutdown_ = true; | |
133 size_at_shutdown = isolates_size_; | |
134 isolates_size_ = 0; | |
135 free(isolates_); | |
136 isolates_ = NULL; | |
137 lock.Notify(); | |
138 } | |
139 } | |
140 NativeSymbolResolver::ShutdownOnce(); | 54 NativeSymbolResolver::ShutdownOnce(); |
141 { | |
142 ScopedMonitor shutdown_lock(start_stop_monitor_); | |
143 while (thread_running_) { | |
144 // Wait until profiler thread has exited. | |
145 shutdown_lock.Wait(); | |
146 } | |
147 } | |
148 if (FLAG_trace_profiled_isolates) { | |
149 OS::Print("ProfilerManager shut down (%" Pd ").\n", size_at_shutdown); | |
150 } | |
151 } | 55 } |
152 | 56 |
153 | 57 |
154 void ProfilerManager::SetupIsolateForProfiling(Isolate* isolate) { | 58 void Profiler::InitIsolateForProfiling(Isolate* isolate, bool shared_buffer) { |
155 if (!FLAG_profile) { | 59 if (!FLAG_profile) { |
156 return; | 60 return; |
157 } | 61 } |
158 ASSERT(isolate != NULL); | 62 ASSERT(isolate != NULL); |
63 ASSERT(sample_buffer_ != NULL); | |
64 MonitorLocker ml(monitor_); | |
159 { | 65 { |
160 ScopedSignalBlocker ssb; | 66 MutexLocker profiler_data_lock(isolate->profiler_data_mutex()); |
161 { | 67 SampleBuffer* sample_buffer = sample_buffer_; |
162 ScopedMutex profiler_data_lock(isolate->profiler_data_mutex()); | 68 if (!shared_buffer) { |
163 SampleBuffer* sample_buffer = new SampleBuffer(); | 69 sample_buffer = new SampleBuffer(); |
164 ASSERT(sample_buffer != NULL); | 70 } |
165 IsolateProfilerData* profiler_data = | 71 IsolateProfilerData* profiler_data = |
166 new IsolateProfilerData(isolate, sample_buffer); | 72 new IsolateProfilerData(sample_buffer, !shared_buffer); |
167 ASSERT(profiler_data != NULL); | 73 ASSERT(profiler_data != NULL); |
168 profiler_data->set_sample_interval_micros(1000); | 74 isolate->set_profiler_data(profiler_data); |
169 isolate->set_profiler_data(profiler_data); | 75 if (FLAG_trace_profiled_isolates) { |
170 if (FLAG_trace_profiled_isolates) { | 76 OS::Print("Profiler Setup %p %s\n", isolate, isolate->name()); |
171 OS::Print("ProfilerManager Setup Isolate %p %s %p\n", | |
172 isolate, | |
173 isolate->name(), | |
174 reinterpret_cast<void*>(Thread::GetCurrentThreadId())); | |
175 } | |
176 } | 77 } |
177 } | 78 } |
178 } | 79 } |
179 | 80 |
180 | 81 |
181 void ProfilerManager::FreeIsolateProfilingData(Isolate* isolate) { | 82 void Profiler::ShutdownIsolateForProfiling(Isolate* isolate) { |
siva 2013/12/11 02:52:21: The names InitIsolateForProfiling and ShutdownIsol…
Cutch 2013/12/11 17:44:56: Done.
182 ScopedMutex profiler_data_lock(isolate->profiler_data_mutex()); | |
183 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
184 if (profiler_data == NULL) { | |
185 // Already freed. | |
186 return; | |
187 } | |
188 isolate->set_profiler_data(NULL); | |
189 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
190 ASSERT(sample_buffer != NULL); | |
191 profiler_data->set_sample_buffer(NULL); | |
192 delete sample_buffer; | |
193 delete profiler_data; | |
194 if (FLAG_trace_profiled_isolates) { | |
195 OS::Print("ProfilerManager Shutdown Isolate %p %s %p\n", | |
196 isolate, | |
197 isolate->name(), | |
198 reinterpret_cast<void*>(Thread::GetCurrentThreadId())); | |
199 } | |
200 } | |
201 | |
202 | |
203 void ProfilerManager::ShutdownIsolateForProfiling(Isolate* isolate) { | |
204 ASSERT(isolate != NULL); | 83 ASSERT(isolate != NULL); |
205 if (!FLAG_profile) { | 84 if (!FLAG_profile) { |
206 return; | 85 return; |
207 } | 86 } |
87 // We do not have a current isolate. | |
88 ASSERT(Isolate::Current() == NULL); | |
89 MonitorLocker ml(monitor_); | |
208 { | 90 { |
209 ScopedSignalBlocker ssb; | 91 MutexLocker profiler_data_lock(isolate->profiler_data_mutex()); |
210 FreeIsolateProfilingData(isolate); | 92 IsolateProfilerData* profiler_data = isolate->profiler_data(); |
211 } | 93 if (profiler_data == NULL) { |
212 } | 94 // Already freed. |
213 | |
214 | |
215 void ProfilerManager::ScheduleIsolateHelper(Isolate* isolate) { | |
216 ScopedMonitor lock(monitor_); | |
217 { | |
218 if (shutdown_) { | |
219 // Shutdown. | |
220 return; | 95 return; |
221 } | 96 } |
222 ScopedMutex profiler_data_lock(isolate->profiler_data_mutex()); | 97 isolate->set_profiler_data(NULL); |
223 IsolateProfilerData* profiler_data = isolate->profiler_data(); | 98 profiler_data->set_sample_buffer(NULL); |
siva 2013/12/11 02:52:21: if shared_buffer is not true how is the per isolat…
Cutch 2013/12/11 17:44:56: IsolateProfilerData tracks whether it owns the buf…
224 if (profiler_data == NULL) { | 99 delete profiler_data; |
225 return; | 100 if (FLAG_trace_profiled_isolates) { |
226 } | 101 OS::Print("Profiler Shutdown %p %s\n", isolate, isolate->name()); |
227 profiler_data->Scheduled(OS::GetCurrentTimeMicros(), | |
228 Thread::GetCurrentThreadId()); | |
229 } | |
230 intptr_t i = FindIsolate(isolate); | |
231 if (i >= 0) { | |
232 // Already scheduled. | |
233 return; | |
234 } | |
235 AddIsolate(isolate); | |
236 lock.Notify(); | |
237 } | |
238 | |
239 | |
240 void ProfilerManager::ScheduleIsolate(Isolate* isolate, bool inside_signal) { | |
241 if (!FLAG_profile) { | |
242 return; | |
243 } | |
244 ASSERT(initialized_); | |
245 ASSERT(isolate != NULL); | |
246 if (!inside_signal) { | |
247 ScopedSignalBlocker ssb; | |
248 { | |
249 ScheduleIsolateHelper(isolate); | |
250 } | |
251 } else { | |
252 // Do not need a signal blocker inside a signal handler. | |
253 { | |
254 ScheduleIsolateHelper(isolate); | |
255 } | 102 } |
256 } | 103 } |
257 } | 104 } |
258 | 105 |
259 | 106 |
260 void ProfilerManager::DescheduleIsolate(Isolate* isolate) { | 107 void Profiler::BeginExecution(Isolate* isolate) { |
108 if (isolate == NULL) { | |
109 return; | |
110 } | |
261 if (!FLAG_profile) { | 111 if (!FLAG_profile) { |
262 return; | 112 return; |
263 } | 113 } |
264 ASSERT(initialized_); | 114 IsolateProfilerData* profiler_data = isolate->profiler_data(); |
265 ASSERT(isolate != NULL); | 115 if (profiler_data == NULL) { |
266 { | 116 return; |
267 ScopedSignalBlocker ssb; | |
268 { | |
269 ScopedMonitor lock(monitor_); | |
270 if (shutdown_) { | |
271 // Shutdown. | |
272 return; | |
273 } | |
274 intptr_t i = FindIsolate(isolate); | |
275 if (i < 0) { | |
276 // Not scheduled. | |
277 return; | |
278 } | |
279 { | |
280 ScopedMutex profiler_data_lock(isolate->profiler_data_mutex()); | |
281 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
282 if (profiler_data != NULL) { | |
283 profiler_data->Descheduled(); | |
284 } | |
285 } | |
286 RemoveIsolate(i); | |
287 lock.Notify(); | |
288 } | |
289 } | 117 } |
118 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
119 if (sample_buffer == NULL) { | |
120 return; | |
121 } | |
siva 2013/12/11 02:52:21: Are the two checks above more an assertion, i.e:…
Cutch 2013/12/11 17:44:56: No. BeginExecution can be called before an isolate…
122 Sample* sample = sample_buffer->ReserveSample(); | |
123 sample->Init(Sample::kIsolateStart, isolate, OS::GetCurrentTimeMicros(), | |
124 Thread::GetCurrentThreadId()); | |
125 ThreadInterrupter::Register(RecordSampleInterruptCallback, isolate); | |
290 } | 126 } |
291 | 127 |
292 | 128 |
293 void PrintToJSONStream(Isolate* isolate, JSONStream* stream) { | 129 void Profiler::EndExecution(Isolate* isolate) { |
130 if (isolate == NULL) { | |
131 return; | |
132 } | |
133 if (!FLAG_profile) { | |
134 return; | |
135 } | |
136 ThreadInterrupter::Unregister(); | |
137 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
138 if (profiler_data == NULL) { | |
139 return; | |
140 } | |
141 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
142 if (sample_buffer == NULL) { | |
143 return; | |
144 } | |
siva 2013/12/11 02:52:21: Ditto comment about ASSERT vs hard checks.
145 Sample* sample = sample_buffer->ReserveSample(); | |
146 sample->Init(Sample::kIsolateStop, isolate, OS::GetCurrentTimeMicros(), | |
147 Thread::GetCurrentThreadId()); | |
148 } | |
149 | |
150 | |
151 void Profiler::RecordTickInterruptCallback(const InterruptedThreadState& state, | |
152 void* data) { | |
153 Isolate* isolate = reinterpret_cast<Isolate*>(data); | |
154 if (isolate == NULL) { | |
155 return; | |
156 } | |
157 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
158 if (profiler_data == NULL) { | |
159 return; | |
160 } | |
161 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
162 if (sample_buffer == NULL) { | |
163 return; | |
164 } | |
165 Sample* sample = sample_buffer->ReserveSample(); | |
166 sample->Init(Sample::kIsolateSample, isolate, OS::GetCurrentTimeMicros(), | |
167 state.tid); | |
168 } | |
169 | |
170 | |
171 void Profiler::RecordSampleInterruptCallback( | |
172 const InterruptedThreadState& state, | |
173 void* data) { | |
174 Isolate* isolate = reinterpret_cast<Isolate*>(data); | |
175 if (isolate == NULL) { | |
176 return; | |
177 } | |
178 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
179 if (profiler_data == NULL) { | |
180 return; | |
181 } | |
182 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
183 if (sample_buffer == NULL) { | |
184 return; | |
185 } | |
186 Sample* sample = sample_buffer->ReserveSample(); | |
187 sample->Init(Sample::kIsolateSample, isolate, OS::GetCurrentTimeMicros(), | |
188 state.tid); | |
189 uintptr_t stack_lower = 0; | |
190 uintptr_t stack_upper = 0; | |
191 isolate->GetStackBounds(&stack_lower, &stack_upper); | |
192 if ((stack_lower == 0) || (stack_upper == 0)) { | |
193 stack_lower = 0; | |
194 stack_upper = 0; | |
195 } | |
196 ProfilerSampleStackWalker stackWalker(sample, stack_lower, stack_upper, | |
197 state.pc, state.fp, state.sp); | |
198 stackWalker.walk(); | |
199 } | |
200 | |
201 | |
202 void Profiler::PrintToJSONStream(Isolate* isolate, JSONStream* stream) { | |
294 ASSERT(isolate == Isolate::Current()); | 203 ASSERT(isolate == Isolate::Current()); |
295 { | |
296 // We can't get signals here. | |
297 } | |
298 UNIMPLEMENTED(); | 204 UNIMPLEMENTED(); |
299 } | 205 } |
300 | 206 |
301 | 207 |
302 void ProfilerManager::ResizeIsolates(intptr_t new_capacity) { | |
303 ASSERT(new_capacity < kMaxProfiledIsolates); | |
304 ASSERT(new_capacity > isolates_capacity_); | |
305 Isolate* isolate = NULL; | |
306 isolates_ = reinterpret_cast<Isolate**>( | |
307 realloc(isolates_, sizeof(isolate) * new_capacity)); | |
308 isolates_capacity_ = new_capacity; | |
309 } | |
310 | |
311 | |
312 void ProfilerManager::AddIsolate(Isolate* isolate) { | |
313 // Must be called with monitor_ locked. | |
314 if (isolates_ == NULL) { | |
315 // We are shutting down. | |
316 return; | |
317 } | |
318 if (isolates_size_ == isolates_capacity_) { | |
319 ResizeIsolates(isolates_capacity_ == 0 ? 16 : isolates_capacity_ * 2); | |
320 } | |
321 isolates_[isolates_size_] = isolate; | |
322 isolates_size_++; | |
323 } | |
324 | |
325 | |
326 intptr_t ProfilerManager::FindIsolate(Isolate* isolate) { | |
327 // Must be called with monitor_ locked. | |
328 if (isolates_ == NULL) { | |
329 // We are shutting down. | |
330 return -1; | |
331 } | |
332 for (intptr_t i = 0; i < isolates_size_; i++) { | |
333 if (isolates_[i] == isolate) { | |
334 return i; | |
335 } | |
336 } | |
337 return -1; | |
338 } | |
339 | |
340 | |
341 void ProfilerManager::RemoveIsolate(intptr_t i) { | |
342 // Must be called with monitor_ locked. | |
343 if (isolates_ == NULL) { | |
344 // We are shutting down. | |
345 return; | |
346 } | |
347 ASSERT(i < isolates_size_); | |
348 intptr_t last = isolates_size_ - 1; | |
349 if (i != last) { | |
350 isolates_[i] = isolates_[last]; | |
351 } | |
352 // Mark last as NULL. | |
353 isolates_[last] = NULL; | |
354 // Pop. | |
355 isolates_size_--; | |
356 } | |
357 | |
358 | |
359 static char* FindSymbolName(uintptr_t pc, bool* native_symbol) { | 208 static char* FindSymbolName(uintptr_t pc, bool* native_symbol) { |
360 // TODO(johnmccutchan): Differentiate between symbols which can't be found | 209 // TODO(johnmccutchan): Differentiate between symbols which can't be found |
361 // and symbols which were GCed. (Heap::CodeContains). | 210 // and symbols which were GCed. (Heap::CodeContains). |
362 ASSERT(native_symbol != NULL); | 211 ASSERT(native_symbol != NULL); |
363 const char* symbol_name = "Unknown"; | 212 const char* symbol_name = "Unknown"; |
364 *native_symbol = false; | 213 *native_symbol = false; |
214 if (pc == 0) { | |
215 return const_cast<char*>(Sample::kNoFrame); | |
216 } | |
365 const Code& code = Code::Handle(Code::LookupCode(pc)); | 217 const Code& code = Code::Handle(Code::LookupCode(pc)); |
366 if (code.IsNull()) { | 218 if (code.IsNull()) { |
367 // Possibly a native symbol. | 219 // Possibly a native symbol. |
368 char* native_name = NativeSymbolResolver::LookupSymbolName(pc); | 220 char* native_name = NativeSymbolResolver::LookupSymbolName(pc); |
369 if (native_name != NULL) { | 221 if (native_name != NULL) { |
370 symbol_name = native_name; | 222 symbol_name = native_name; |
371 *native_symbol = true; | 223 *native_symbol = true; |
372 } | 224 } |
373 } else { | 225 } else { |
374 const Function& function = Function::Handle(code.function()); | 226 const Function& function = Function::Handle(code.function()); |
375 if (!function.IsNull()) { | 227 if (!function.IsNull()) { |
376 const String& name = String::Handle(function.QualifiedUserVisibleName()); | 228 const String& name = String::Handle(function.QualifiedUserVisibleName()); |
377 if (!name.IsNull()) { | 229 if (!name.IsNull()) { |
378 symbol_name = name.ToCString(); | 230 symbol_name = name.ToCString(); |
379 } | 231 } |
380 } | 232 } |
381 } | 233 } |
382 return const_cast<char*>(symbol_name); | 234 return const_cast<char*>(symbol_name); |
383 } | 235 } |
384 | 236 |
385 | 237 |
386 void ProfilerManager::WriteTracing(Isolate* isolate, const char* name, | 238 void Profiler::WriteTracingSample(Isolate* isolate, intptr_t pid, |
387 Dart_Port port) { | 239 Sample* sample, JSONArray& events) { |
388 ASSERT(isolate == Isolate::Current()); | 240 Sample::SampleType type = sample->type; |
389 { | 241 intptr_t tid = Thread::ThreadIdToIntPtr(sample->tid); |
390 ScopedSignalBlocker ssb; | 242 double timestamp = static_cast<double>(sample->timestamp); |
391 { | 243 const char* isolate_name = isolate->name(); |
392 ScopedMutex profiler_data_lock(isolate->profiler_data_mutex()); | 244 switch (type) { |
393 IsolateProfilerData* profiler_data = isolate->profiler_data(); | 245 case Sample::kIsolateStart: { |
394 if (profiler_data == NULL) { | 246 JSONObject begin(&events); |
395 return; | 247 begin.AddProperty("ph", "B"); |
396 } | 248 begin.AddProperty("tid", tid); |
397 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | 249 begin.AddProperty("pid", pid); |
398 ASSERT(sample_buffer != NULL); | 250 begin.AddProperty("name", isolate_name); |
399 JSONStream stream(10 * MB); | 251 begin.AddProperty("ts", timestamp); |
400 intptr_t tid = reinterpret_cast<intptr_t>(sample_buffer); | 252 } |
401 intptr_t pid = 1; | 253 break; |
402 { | 254 case Sample::kIsolateStop: { |
403 JSONArray events(&stream); | 255 JSONObject begin(&events); |
256 begin.AddProperty("ph", "E"); | |
257 begin.AddProperty("tid", tid); | |
258 begin.AddProperty("pid", pid); | |
259 begin.AddProperty("name", isolate_name); | |
260 begin.AddProperty("ts", timestamp); | |
261 } | |
262 break; | |
263 case Sample::kIsolateSample: | |
264 // Write "B" events. | |
265 for (int i = Sample::kNumStackFrames - 1; i >= 0; i--) { | |
266 bool native_symbol = false; | |
267 char* symbol_name = FindSymbolName(sample->pcs[i], &native_symbol); | |
404 { | 268 { |
405 JSONObject thread_name(&events); | 269 JSONObject begin(&events); |
406 thread_name.AddProperty("name", "thread_name"); | 270 begin.AddProperty("ph", "B"); |
407 thread_name.AddProperty("ph", "M"); | 271 begin.AddProperty("tid", tid); |
408 thread_name.AddProperty("tid", tid); | 272 begin.AddProperty("pid", pid); |
409 thread_name.AddProperty("pid", pid); | 273 begin.AddProperty("name", symbol_name); |
410 { | 274 begin.AddProperty("ts", timestamp); |
411 JSONObject args(&thread_name, "args"); | |
412 args.AddProperty("name", name); | |
413 } | |
414 } | 275 } |
415 { | 276 if (native_symbol) { |
416 JSONObject process_name(&events); | 277 NativeSymbolResolver::FreeSymbolName(symbol_name); |
417 process_name.AddProperty("name", "process_name"); | |
418 process_name.AddProperty("ph", "M"); | |
419 process_name.AddProperty("tid", tid); | |
420 process_name.AddProperty("pid", pid); | |
421 { | |
422 JSONObject args(&process_name, "args"); | |
423 args.AddProperty("name", "Dart VM"); | |
424 } | |
425 } | |
426 uint64_t last_time = 0; | |
427 for (Sample* i = sample_buffer->FirstSample(); | |
428 i != sample_buffer->LastSample(); | |
429 i = sample_buffer->NextSample(i)) { | |
430 if (last_time == 0) { | |
431 last_time = i->timestamp; | |
432 } | |
433 intptr_t delta = i->timestamp - last_time; | |
434 { | |
435 double percentage = static_cast<double>(i->cpu_usage) / | |
436 static_cast<double>(delta) * 100.0; | |
437 if (percentage != percentage) { | |
438 percentage = 0.0; | |
439 } | |
440 percentage = percentage < 0.0 ? 0.0 : percentage; | |
441 percentage = percentage > 100.0 ? 100.0 : percentage; | |
442 { | |
443 JSONObject cpu_usage(&events); | |
444 cpu_usage.AddProperty("name", "CPU Usage"); | |
445 cpu_usage.AddProperty("ph", "C"); | |
446 cpu_usage.AddProperty("tid", tid); | |
447 cpu_usage.AddProperty("pid", pid); | |
448 cpu_usage.AddProperty("ts", static_cast<double>(last_time)); | |
449 { | |
450 JSONObject args(&cpu_usage, "args"); | |
451 args.AddProperty("CPU", percentage); | |
452 } | |
453 } | |
454 { | |
455 JSONObject cpu_usage(&events); | |
456 cpu_usage.AddProperty("name", "CPU Usage"); | |
457 cpu_usage.AddProperty("ph", "C"); | |
458 cpu_usage.AddProperty("tid", tid); | |
459 cpu_usage.AddProperty("pid", pid); | |
460 cpu_usage.AddProperty("ts", static_cast<double>(i->timestamp)); | |
461 { | |
462 JSONObject args(&cpu_usage, "args"); | |
463 args.AddProperty("CPU", percentage); | |
464 } | |
465 } | |
466 } | |
467 for (int j = 0; j < Sample::kNumStackFrames; j++) { | |
468 if (i->pcs[j] == 0) { | |
469 continue; | |
470 } | |
471 bool native_symbol = false; | |
472 char* symbol_name = FindSymbolName(i->pcs[j], &native_symbol); | |
473 { | |
474 JSONObject begin(&events); | |
475 begin.AddProperty("ph", "B"); | |
476 begin.AddProperty("tid", tid); | |
477 begin.AddProperty("pid", pid); | |
478 begin.AddProperty("name", symbol_name); | |
479 begin.AddProperty("ts", static_cast<double>(last_time)); | |
480 } | |
481 if (native_symbol) { | |
482 NativeSymbolResolver::FreeSymbolName(symbol_name); | |
483 } | |
484 } | |
485 for (int j = Sample::kNumStackFrames-1; j >= 0; j--) { | |
486 if (i->pcs[j] == 0) { | |
487 continue; | |
488 } | |
489 bool native_symbol = false; | |
490 char* symbol_name = FindSymbolName(i->pcs[j], &native_symbol); | |
491 { | |
492 JSONObject end(&events); | |
493 end.AddProperty("ph", "E"); | |
494 end.AddProperty("tid", tid); | |
495 end.AddProperty("pid", pid); | |
496 end.AddProperty("name", symbol_name); | |
497 end.AddProperty("ts", static_cast<double>(i->timestamp)); | |
498 } | |
499 if (native_symbol) { | |
500 NativeSymbolResolver::FreeSymbolName(symbol_name); | |
501 } | |
502 } | |
503 last_time = i->timestamp; | |
504 } | 278 } |
505 } | 279 } |
506 char fname[1024]; | 280 // Write "E" events. |
507 #if defined(TARGET_OS_WINDOWS) | 281 for (int i = 0; i < Sample::kNumStackFrames; i++) { |
508 snprintf(fname, sizeof(fname)-1, "c:\\tmp\\isolate-%d.prof", | 282 bool native_symbol = false; |
509 static_cast<int>(port)); | 283 char* symbol_name = FindSymbolName(sample->pcs[i], &native_symbol); |
510 #else | 284 { |
511 snprintf(fname, sizeof(fname)-1, "/tmp/isolate-%d.prof", | 285 JSONObject begin(&events); |
512 static_cast<int>(port)); | 286 begin.AddProperty("ph", "E"); |
513 #endif | 287 begin.AddProperty("tid", tid); |
514 printf("%s\n", fname); | 288 begin.AddProperty("pid", pid); |
515 FILE* f = fopen(fname, "wb"); | 289 begin.AddProperty("name", symbol_name); |
516 ASSERT(f != NULL); | 290 begin.AddProperty("ts", timestamp); |
517 fputs(stream.ToCString(), f); | 291 } |
518 fclose(f); | 292 if (native_symbol) { |
519 } | 293 NativeSymbolResolver::FreeSymbolName(symbol_name); |
294 } | |
295 } | |
296 break; | |
297 default: | |
298 UNIMPLEMENTED(); | |
520 } | 299 } |
521 } | 300 } |
522 | 301 |
523 | 302 |
524 IsolateProfilerData::IsolateProfilerData(Isolate* isolate, | 303 void Profiler::WriteTracing(Isolate* isolate) { |
525 SampleBuffer* sample_buffer) { | 304 // We will be looking up code objects within the isolate. |
526 isolate_ = isolate; | 305 ASSERT(Isolate::Current() != NULL); |
306 // We do not want to be interrupted while processing the buffer. | |
307 EndExecution(isolate); | |
308 Dart_Port port = isolate->main_port(); | |
309 MutexLocker profiler_data_lock(isolate->profiler_data_mutex()); | |
310 IsolateProfilerData* profiler_data = isolate->profiler_data(); | |
311 if (profiler_data == NULL) { | |
312 return; | |
313 } | |
314 SampleBuffer* sample_buffer = profiler_data->sample_buffer(); | |
315 ASSERT(sample_buffer != NULL); | |
316 JSONStream stream(10 * MB); | |
317 intptr_t pid = OS::ProcessId(); | |
318 { | |
319 JSONArray events(&stream); | |
320 { | |
321 JSONObject process_name(&events); | |
322 process_name.AddProperty("name", "process_name"); | |
323 process_name.AddProperty("ph", "M"); | |
324 process_name.AddProperty("pid", pid); | |
325 { | |
326 JSONObject args(&process_name, "args"); | |
327 args.AddProperty("name", "Dart VM"); | |
328 } | |
329 } | |
330 for (intptr_t i = 0; i < sample_buffer->capacity(); i++) { | |
331 Sample* sample = sample_buffer->GetSample(i); | |
332 if (sample->isolate != isolate) { | |
333 continue; | |
334 } | |
335 if (sample->timestamp == 0) { | |
336 continue; | |
337 } | |
338 WriteTracingSample(isolate, pid, sample, events); | |
339 } | |
340 } | |
341 char fname[1024]; | |
342 #if defined(TARGET_OS_WINDOWS) | |
343 snprintf(fname, sizeof(fname)-1, "c:\\tmp\\isolate-%d-%d.prof", | |
344 static_cast<int>(pid), static_cast<int>(port)); | |
345 #else | |
346 snprintf(fname, sizeof(fname)-1, "/tmp/isolate-%d-%d.prof", | |
347 static_cast<int>(pid), static_cast<int>(port)); | |
348 #endif | |
349 FILE* f = fopen(fname, "wb"); | |
350 ASSERT(f != NULL); | |
351 fputs(stream.ToCString(), f); | |
352 fclose(f); | |
siva 2013/12/11 02:52:21: We don't operate on files directly like this insid…
Cutch 2013/12/11 17:44:56: Done.
353 BeginExecution(isolate); | |
354 } | |
355 | |
356 | |
357 IsolateProfilerData::IsolateProfilerData(SampleBuffer* sample_buffer, | |
358 bool own_sample_buffer) { | |
527 sample_buffer_ = sample_buffer; | 359 sample_buffer_ = sample_buffer; |
528 timer_expiration_micros_ = kNoExpirationTime; | 360 own_sample_buffer_ = own_sample_buffer; |
529 last_sampled_micros_ = 0; | |
530 thread_id_ = 0; | |
531 } | 361 } |
532 | 362 |
533 | 363 |
534 IsolateProfilerData::~IsolateProfilerData() { | 364 IsolateProfilerData::~IsolateProfilerData() { |
535 } | 365 if (own_sample_buffer_) { |
536 | 366 delete sample_buffer_; |
537 | 367 sample_buffer_ = NULL; |
538 void IsolateProfilerData::SampledAt(int64_t current_time) { | 368 own_sample_buffer_ = false; |
539 last_sampled_micros_ = current_time; | 369 } |
540 } | |
541 | |
542 | |
543 void IsolateProfilerData::Scheduled(int64_t current_time, ThreadId thread_id) { | |
544 timer_expiration_micros_ = current_time + sample_interval_micros_; | |
545 thread_id_ = thread_id; | |
546 Thread::GetThreadCpuUsage(thread_id_, &cpu_usage_); | |
547 } | |
548 | |
549 | |
550 void IsolateProfilerData::Descheduled() { | |
551 // TODO(johnmccutchan): Track when we ran for a fraction of our sample | |
552 // interval and incorporate the time difference when scheduling the | |
553 // isolate again. | |
554 cpu_usage_ = kDescheduledCpuUsage; | |
555 timer_expiration_micros_ = kNoExpirationTime; | |
556 Sample* sample = sample_buffer_->ReserveSample(); | |
557 ASSERT(sample != NULL); | |
558 sample->timestamp = OS::GetCurrentTimeMicros(); | |
559 sample->cpu_usage = 0; | |
560 sample->vm_tags = Sample::kIdle; | |
561 } | 370 } |
562 | 371 |
563 | 372 |
564 const char* Sample::kLookupSymbol = "Symbol Not Looked Up"; | 373 const char* Sample::kLookupSymbol = "Symbol Not Looked Up"; |
565 const char* Sample::kNoSymbol = "No Symbol Found"; | 374 const char* Sample::kNoSymbol = "No Symbol Found"; |
375 const char* Sample::kNoFrame = "<no frame>"; | |
566 | 376 |
567 Sample::Sample() { | 377 void Sample::Init(SampleType type, Isolate* isolate, int64_t timestamp, |
568 timestamp = 0; | 378 ThreadId tid) { |
569 cpu_usage = 0; | 379 this->timestamp = timestamp; |
570 for (int i = 0; i < kNumStackFrames; i++) { | 380 this->tid = tid; |
381 this->isolate = isolate; | |
382 for (intptr_t i = 0; i < kNumStackFrames; i++) { | |
571 pcs[i] = 0; | 383 pcs[i] = 0; |
572 } | 384 } |
573 vm_tags = kIdle; | 385 this->type = type; |
386 vm_tags = 0; | |
574 runtime_tags = 0; | 387 runtime_tags = 0; |
575 } | 388 } |
576 | 389 |
577 | |
578 SampleBuffer::SampleBuffer(intptr_t capacity) { | 390 SampleBuffer::SampleBuffer(intptr_t capacity) { |
579 start_ = 0; | |
580 end_ = 0; | |
581 capacity_ = capacity; | 391 capacity_ = capacity; |
582 samples_ = reinterpret_cast<Sample*>(calloc(capacity, sizeof(Sample))); | 392 samples_ = reinterpret_cast<Sample*>(calloc(capacity, sizeof(Sample))); |
393 cursor_ = 0; | |
583 } | 394 } |
584 | 395 |
585 | 396 |
586 SampleBuffer::~SampleBuffer() { | 397 SampleBuffer::~SampleBuffer() { |
587 if (samples_ != NULL) { | 398 if (samples_ != NULL) { |
588 free(samples_); | 399 free(samples_); |
589 samples_ = NULL; | 400 samples_ = NULL; |
590 start_ = 0; | 401 cursor_ = 0; |
591 end_ = 0; | |
592 capacity_ = 0; | 402 capacity_ = 0; |
593 } | 403 } |
594 } | 404 } |
595 | 405 |
596 | 406 |
597 Sample* SampleBuffer::ReserveSample() { | 407 Sample* SampleBuffer::ReserveSample() { |
598 ASSERT(samples_ != NULL); | 408 ASSERT(samples_ != NULL); |
599 intptr_t index = end_; | 409 uintptr_t cursor = AtomicOperations::FetchAndIncrement(&cursor_); |
siva 2013/12/11 02:52:21: what happens when cursor overflows? Also there is…
Cutch 2013/12/11 17:44:56: We wrap around back to zero.
600 end_ = WrapIncrement(end_); | 410 // Map back into sample buffer range. |
601 if (end_ == start_) { | 411 cursor = cursor % capacity_; |
602 start_ = WrapIncrement(start_); | 412 return &samples_[cursor]; |
603 } | |
604 ASSERT(index >= 0); | |
605 ASSERT(index < capacity_); | |
606 // Reset. | |
607 samples_[index] = Sample(); | |
608 return &samples_[index]; | |
609 } | 413 } |
610 | 414 |
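The new ReserveSample replaces the old start_/end_ ring bookkeeping with a single monotonically increasing cursor taken modulo the buffer capacity, which is the wrap-around behavior the review thread above refers to. A minimal sketch of that reservation scheme, with std::atomic standing in for the VM's AtomicOperations::FetchAndIncrement (the RingBuffer/Slot names are illustrative):

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Slot {
  int64_t timestamp = 0;  // stand-in for the real Sample fields
};

class RingBuffer {
 public:
  explicit RingBuffer(size_t capacity) : slots_(capacity) {}

  // Hand out the next slot; once the cursor passes capacity the buffer wraps
  // and the oldest samples are silently overwritten.
  Slot* Reserve() {
    // fetch_add returns the pre-increment value, like FetchAndIncrement.
    uint64_t ticket = cursor_.fetch_add(1, std::memory_order_relaxed);
    return &slots_[ticket % slots_.size()];
  }

 private:
  std::vector<Slot> slots_;
  std::atomic<uint64_t> cursor_{0};
};
```

With a 64-bit cursor, overflow is not a practical concern; the trade-off of the scheme is simply that the oldest samples are overwritten once the buffer wraps.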
611 | 415 |
612 Sample* SampleBuffer::FirstSample() const { | |
613 return &samples_[start_]; | |
614 } | |
615 | |
616 | |
617 Sample* SampleBuffer::NextSample(Sample* sample) const { | |
618 ASSERT(sample >= &samples_[0]); | |
619 ASSERT(sample < &samples_[capacity_]); | |
620 intptr_t index = sample - samples_; | |
621 index = WrapIncrement(index); | |
622 return &samples_[index]; | |
623 } | |
624 | |
625 | |
626 Sample* SampleBuffer::LastSample() const { | |
627 return &samples_[end_]; | |
628 } | |
629 | |
630 | |
631 intptr_t SampleBuffer::WrapIncrement(intptr_t i) const { | |
632 return (i + 1) % capacity_; | |
633 } | |
634 | |
635 | |
636 ProfilerSampleStackWalker::ProfilerSampleStackWalker(Sample* sample, | 416 ProfilerSampleStackWalker::ProfilerSampleStackWalker(Sample* sample, |
637 uintptr_t stack_lower, | 417 uintptr_t stack_lower, |
638 uintptr_t stack_upper, | 418 uintptr_t stack_upper, |
639 uintptr_t pc, | 419 uintptr_t pc, |
640 uintptr_t fp, | 420 uintptr_t fp, |
641 uintptr_t sp) : | 421 uintptr_t sp) : |
642 sample_(sample), | 422 sample_(sample), |
643 stack_lower_(stack_lower), | 423 stack_lower_(stack_lower), |
644 stack_upper_(stack_upper), | 424 stack_upper_(stack_upper), |
645 original_pc_(pc), | 425 original_pc_(pc), |
646 original_fp_(fp), | 426 original_fp_(fp), |
647 original_sp_(sp), | 427 original_sp_(sp), |
648 lower_bound_(stack_lower) { | 428 lower_bound_(stack_lower) { |
649 ASSERT(sample_ != NULL); | 429 ASSERT(sample_ != NULL); |
650 } | 430 } |
651 | 431 |
652 | 432 |
653 int ProfilerSampleStackWalker::walk() { | 433 int ProfilerSampleStackWalker::walk() { |
654 uword* pc = reinterpret_cast<uword*>(original_pc_); | 434 uword* pc = reinterpret_cast<uword*>(original_pc_); |
435 #define WALK_STACK | |
655 #if defined(WALK_STACK) | 436 #if defined(WALK_STACK) |
656 uword* fp = reinterpret_cast<uword*>(original_fp_); | 437 uword* fp = reinterpret_cast<uword*>(original_fp_); |
657 uword* previous_fp = fp; | 438 uword* previous_fp = fp; |
658 if (original_sp_ < lower_bound_) { | 439 if (original_sp_ < lower_bound_) { |
659 // The stack pointer gives us a better lower bound than | 440 // The stack pointer gives us a better lower bound than |
660 // the isolates stack limit. | 441 // the isolates stack limit. |
661 lower_bound_ = original_sp_; | 442 lower_bound_ = original_sp_; |
662 } | 443 } |
663 int i = 0; | 444 int i = 0; |
664 for (; i < Sample::kNumStackFrames; i++) { | 445 for (; i < Sample::kNumStackFrames; i++) { |
(...skipping 36 matching lines...)
701 return false; | 482 return false; |
702 } | 483 } |
703 uintptr_t cursor = reinterpret_cast<uintptr_t>(fp); | 484 uintptr_t cursor = reinterpret_cast<uintptr_t>(fp); |
704 cursor += sizeof(fp); | 485 cursor += sizeof(fp); |
705 bool r = cursor >= lower_bound_ && cursor < stack_upper_; | 486 bool r = cursor >= lower_bound_ && cursor < stack_upper_; |
706 return r; | 487 return r; |
707 } | 488 } |
708 | 489 |
709 | 490 |
710 } // namespace dart | 491 } // namespace dart |