OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/profiler/stack_sampling_profiler.h" | 5 #include "base/profiler/stack_sampling_profiler.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <map> | |
8 #include <utility> | 9 #include <utility> |
9 | 10 |
11 #include "base/atomic_sequence_num.h" | |
12 #include "base/atomicops.h" | |
10 #include "base/bind.h" | 13 #include "base/bind.h" |
11 #include "base/bind_helpers.h" | 14 #include "base/bind_helpers.h" |
12 #include "base/callback.h" | 15 #include "base/callback.h" |
13 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
14 #include "base/location.h" | 17 #include "base/location.h" |
15 #include "base/macros.h" | 18 #include "base/macros.h" |
19 #include "base/memory/ptr_util.h" | |
20 #include "base/memory/singleton.h" | |
16 #include "base/profiler/native_stack_sampler.h" | 21 #include "base/profiler/native_stack_sampler.h" |
17 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
23 #include "base/threading/thread.h" | |
24 #include "base/threading/thread_restrictions.h" | |
18 #include "base/threading/thread_task_runner_handle.h" | 25 #include "base/threading/thread_task_runner_handle.h" |
19 #include "base/timer/elapsed_timer.h" | 26 #include "base/timer/elapsed_timer.h" |
20 | 27 |
21 namespace base { | 28 namespace base { |
22 | 29 |
23 namespace { | 30 namespace { |
24 | 31 |
25 // Used to ensure only one profiler is running at a time. | 32 // This value is used when there is no collection in progress and thus no ID |
26 LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER; | 33 // for referencing the active collection to the SamplingThread. |
27 | 34 const int NULL_COLLECTION_ID = -1; |
28 // AsyncRunner ---------------------------------------------------------------- | |
29 | |
30 // Helper class to allow a profiler to be run completely asynchronously from the | |
31 // initiator, without being concerned with the profiler's lifetime. | |
32 class AsyncRunner { | |
33 public: | |
34 // Sets up a profiler and arranges for it to be deleted on its completed | |
35 // callback. | |
36 static void Run(PlatformThreadId thread_id, | |
37 const StackSamplingProfiler::SamplingParams& params, | |
38 const StackSamplingProfiler::CompletedCallback& callback); | |
39 | |
40 private: | |
41 AsyncRunner(); | |
42 | |
43 // Runs the callback and deletes the AsyncRunner instance. |profiles| is not | |
44 // const& because it must be passed with std::move. | |
45 static void RunCallbackAndDeleteInstance( | |
46 std::unique_ptr<AsyncRunner> object_to_be_deleted, | |
47 const StackSamplingProfiler::CompletedCallback& callback, | |
48 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
49 StackSamplingProfiler::CallStackProfiles profiles); | |
50 | |
51 std::unique_ptr<StackSamplingProfiler> profiler_; | |
52 | |
53 DISALLOW_COPY_AND_ASSIGN(AsyncRunner); | |
54 }; | |
55 | |
56 // static | |
57 void AsyncRunner::Run( | |
58 PlatformThreadId thread_id, | |
59 const StackSamplingProfiler::SamplingParams& params, | |
60 const StackSamplingProfiler::CompletedCallback &callback) { | |
61 std::unique_ptr<AsyncRunner> runner(new AsyncRunner); | |
62 AsyncRunner* temp_ptr = runner.get(); | |
63 temp_ptr->profiler_.reset( | |
64 new StackSamplingProfiler(thread_id, params, | |
65 Bind(&AsyncRunner::RunCallbackAndDeleteInstance, | |
66 Passed(&runner), callback, | |
67 ThreadTaskRunnerHandle::Get()))); | |
68 // The callback won't be called until after Start(), so temp_ptr will still | |
69 // be valid here. | |
70 temp_ptr->profiler_->Start(); | |
71 } | |
72 | |
73 AsyncRunner::AsyncRunner() {} | |
74 | |
75 void AsyncRunner::RunCallbackAndDeleteInstance( | |
76 std::unique_ptr<AsyncRunner> object_to_be_deleted, | |
77 const StackSamplingProfiler::CompletedCallback& callback, | |
78 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
79 StackSamplingProfiler::CallStackProfiles profiles) { | |
80 callback.Run(std::move(profiles)); | |
81 // Delete the instance on the original calling thread. | |
82 task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release()); | |
83 } | |
84 | 35 |
85 void ChangeAtomicFlags(subtle::Atomic32* flags, | 36 void ChangeAtomicFlags(subtle::Atomic32* flags, |
86 subtle::Atomic32 set, | 37 subtle::Atomic32 set, |
87 subtle::Atomic32 clear) { | 38 subtle::Atomic32 clear) { |
88 DCHECK(set != 0 || clear != 0); | 39 DCHECK(set != 0 || clear != 0); |
89 DCHECK_EQ(0, set & clear); | 40 DCHECK_EQ(0, set & clear); |
90 | 41 |
91 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); | 42 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); |
92 while (true) { | 43 while (true) { |
93 subtle::Atomic32 existing = | 44 subtle::Atomic32 existing = |
(...skipping 59 matching lines...) |
153 StackSamplingProfiler::CallStackProfile | 104 StackSamplingProfiler::CallStackProfile |
154 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { | 105 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { |
155 return CallStackProfile(*this); | 106 return CallStackProfile(*this); |
156 } | 107 } |
157 | 108 |
158 StackSamplingProfiler::CallStackProfile::CallStackProfile( | 109 StackSamplingProfiler::CallStackProfile::CallStackProfile( |
159 const CallStackProfile& other) = default; | 110 const CallStackProfile& other) = default; |
160 | 111 |
161 // StackSamplingProfiler::SamplingThread -------------------------------------- | 112 // StackSamplingProfiler::SamplingThread -------------------------------------- |
162 | 113 |
163 StackSamplingProfiler::SamplingThread::SamplingThread( | 114 class StackSamplingProfiler::SamplingThread : public Thread { |
164 std::unique_ptr<NativeStackSampler> native_sampler, | 115 public: |
116 class TestAPI { | |
117 public: | |
118 // Disables inherent idle-shutdown behavior. | |
119 static void DisableIdleShutdown(); | |
120 | |
121 // Begins an idle shutdown as if the idle-timer had expired. | |
122 static void ShutdownIfIdle(); | |
123 }; | |
124 | |
125 struct CollectionContext { | |
126 CollectionContext(PlatformThreadId target, | |
127 const SamplingParams& params, | |
128 const CompletedCallback& callback, | |
129 WaitableEvent* finished, | |
130 std::unique_ptr<NativeStackSampler> sampler) | |
131 : collection_id(next_collection_id_.GetNext()), | |
132 target(target), | |
133 params(params), | |
134 callback(callback), | |
135 finished(finished), | |
136 native_sampler(std::move(sampler)) {} | |
137 ~CollectionContext() {} | |
138 | |
139 // An identifier for this collection, used to uniquely identify it to | |
140 // outside interests. | |
141 const int collection_id; | |
142 | |
143 const PlatformThreadId target; // ID of the thread being sampled. |
144 const SamplingParams params; // Information about how to sample. | |
145 const CompletedCallback callback; // Callback made when sampling complete. | |
146 WaitableEvent* const finished; // Signaled when all sampling complete. | |
147 | |
148 // Platform-specific module that does the actual sampling. | |
149 std::unique_ptr<NativeStackSampler> native_sampler; | |
150 | |
151 // The absolute time for the next sample. | |
152 Time next_sample_time; | |
153 | |
154 // The time that a profile was started, for calculating the total duration. | |
155 Time profile_start_time; | |
156 | |
157 // Counters that indicate the current position along the acquisition. | |
158 int burst = 0; | |
159 int sample = 0; | |
160 | |
161 // The collected stack samples. The active profile is always at the back(). | |
162 CallStackProfiles profiles; | |
163 | |
164 private: | |
165 static StaticAtomicSequenceNumber next_collection_id_; | |
166 }; | |
167 | |
168 // Gets the single instance of this class. | |
169 static SamplingThread* GetInstance(); | |
170 | |
171 // Starts the thread. | |
172 void Start(); | |
173 | |
174 // Adds a new CollectionContext to the thread. This can be called externally | |
175 // from any thread. This returns an ID that can later be used to stop | |
176 // the sampling. | |
177 int Add(std::unique_ptr<CollectionContext> collection); | |
178 | |
179 // Removes an active collection based on its ID, forcing it to run its | |
180 // callback if any data has been collected. This can be called externally | |
181 // from any thread. | |
182 void Remove(int id); | |
183 | |
184 private: | |
185 friend class TestAPI; | |
186 friend struct DefaultSingletonTraits<SamplingThread>; | |
187 | |
188 // The different states in which the sampling-thread can be. | |
189 enum ThreadExecutionState { | |
190 // The thread is not running because it has never been started. It will be | |
191 // started when a sampling request is received. | |
192 NOT_STARTED, | |
193 | |
194 // The thread is running and processing tasks. This is the state when any | |
195 // sampling requests are active and during the "idle" period afterward | |
196 // before the thread is stopped. | |
197 RUNNING, | |
198 | |
199 // Once all sampling requests have finished and the "idle" period has | |
200 // expired, the thread will be set to this state and its shutdown | |
201 // initiated. A call to Stop() must be made to ensure the previous thread | |
202 // has completely exited before calling Start() and moving back to the | |
203 // RUNNING state. | |
204 EXITING, | |
205 }; | |
206 | |
207 SamplingThread(); | |
208 ~SamplingThread() override; | |
209 | |
210 // Get task runner that is usable from the outside. | |
211 scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd(); | |
212 scoped_refptr<SingleThreadTaskRunner> GetTaskRunner( | |
213 ThreadExecutionState* out_state); | |
214 | |
215 // Get task runner that is usable from the sampling thread itself. | |
216 scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread(); | |
217 | |
218 // Finishes a collection and reports collected data via callback. | |
219 void FinishCollection(CollectionContext* collection); | |
220 | |
221 // Records a single sample of a collection. | |
222 void RecordSample(CollectionContext* collection); | |
223 | |
224 // Check if the sampling thread is idle and begin a shutdown if so. | |
225 void ScheduleShutdownIfIdle(); | |
226 | |
227 // These methods are tasks that get posted to the internal message queue. | |
228 void AddCollectionTask(std::unique_ptr<CollectionContext> collection); | |
229 void RemoveCollectionTask(int id); | |
230 void PerformCollectionTask(int id); | |
231 void ShutdownTask(int add_events); | |
232 | |
233 // Updates the |next_sample_time| time based on configured parameters. | |
234 bool UpdateNextSampleTime(CollectionContext* collection); | |
235 | |
236 // Thread: | |
237 void CleanUp() override; | |
238 | |
239 // The task-runner for the sampling thread and some information about it. | |
240 // This must always be accessed while holding the lock. The saved task-runner | |
241 // can be freely used by any calling thread. | |
242 Lock thread_execution_state_lock_; // Protects all thread_execution_state_* | |
243 ThreadExecutionState thread_execution_state_ = NOT_STARTED; | |
244 scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_; | |
245 bool thread_execution_state_disable_idle_shutdown_for_testing_ = false; | |
246 | |
247 // A counter that notes adds of new collection requests. It is incremented | |
248 // when changes occur so that delayed shutdown tasks are able to detect if | |
249 // something new has happened while they were waiting. Like all "execution_state" |
250 // vars, this must be accessed while holding |thread_execution_state_lock_|. | |
251 int thread_execution_state_add_events_ = 0; | |
252 | |
253 // A map of IDs to collection contexts. Because this class is a singleton | |
254 // that is never destroyed, context objects will never be destructed except | |
255 // by explicit action. Thus, it's acceptable to pass unretained pointers | |
256 // to these objects when posting tasks. | |
257 std::map<int, std::unique_ptr<CollectionContext>> active_collections_; | |
258 | |
259 DISALLOW_COPY_AND_ASSIGN(SamplingThread); | |
260 }; | |
261 | |
262 void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() { | |
263 SamplingThread* sampler = SamplingThread::GetInstance(); | |
264 DCHECK(sampler); | |
265 | |
266 { | |
267 AutoLock lock(sampler->thread_execution_state_lock_); | |
268 sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true; | |
269 } | |
270 } | |
271 | |
272 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownIfIdle() { | |
273 SamplingThread* sampler = SamplingThread::GetInstance(); | |
274 DCHECK(sampler); | |
275 | |
276 ThreadExecutionState state; | |
277 scoped_refptr<SingleThreadTaskRunner> task_runner = | |
278 sampler->GetTaskRunner(&state); | |
279 DCHECK_EQ(RUNNING, state); | |
280 DCHECK(task_runner); | |
281 | |
282 int add_events; | |
283 { | |
284 AutoLock lock(sampler->thread_execution_state_lock_); | |
285 add_events = sampler->thread_execution_state_add_events_; | |
286 } | |
287 | |
288 // ShutdownTask will check if the thread is idle and skip the shutdown if not. | |
289 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::ShutdownTask, | |
290 Unretained(sampler), add_events)); | |
291 } | |
292 | |
293 StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread:: | |
294 CollectionContext::next_collection_id_; | |
295 | |
296 StackSamplingProfiler::SamplingThread::SamplingThread() | |
297 : Thread("Chrome_SamplingProfilerThread") {} | |
298 | |
299 StackSamplingProfiler::SamplingThread::~SamplingThread() { | |
300 Stop(); | |
301 } | |
302 | |
303 StackSamplingProfiler::SamplingThread* | |
304 StackSamplingProfiler::SamplingThread::GetInstance() { | |
305 return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get(); | |
306 } | |
307 | |
308 void StackSamplingProfiler::SamplingThread::Start() { | |
309 Thread::Options options; | |
310 // Use a higher priority for a more accurate sampling interval. | |
311 options.priority = ThreadPriority::DISPLAY; | |
312 Thread::StartWithOptions(options); | |
313 } | |
314 | |
315 int StackSamplingProfiler::SamplingThread::Add( | |
316 std::unique_ptr<CollectionContext> collection) { | |
317 int id = collection->collection_id; | |
318 scoped_refptr<SingleThreadTaskRunner> task_runner = | |
319 GetOrCreateTaskRunnerForAdd(); | |
320 | |
321 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::AddCollectionTask, | |
322 Unretained(this), Passed(&collection))); | |
323 | |
324 return id; | |
325 } | |
326 | |
327 void StackSamplingProfiler::SamplingThread::Remove(int id) { | |
328 ThreadExecutionState state; | |
329 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state); | |
330 DCHECK_NE(NOT_STARTED, state); | |
331 if (state != RUNNING) | |
332 return; | |
333 DCHECK(task_runner); | |
334 | |
335 // This can fail if the thread were to exit between acquisition of the task | |
336 // runner above and the call below. In that case, however, everything has | |
337 // stopped so there's no need to try to stop it. | |
338 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask, | |
339 Unretained(this), id)); | |
340 } | |
341 | |
342 scoped_refptr<SingleThreadTaskRunner> | |
343 StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() { | |
344 AutoLock lock(thread_execution_state_lock_); | |
345 | |
346 // The increment of the "add events" count is why this method is to be only | |
347 // called from "add". | |
348 ++thread_execution_state_add_events_; | |
349 | |
350 if (thread_execution_state_ == RUNNING) { | |
351 DCHECK(thread_execution_state_task_runner_); | |
352 // This shouldn't be called from the sampling thread as it's inefficient. | |
353 // Use GetTaskRunnerOnSamplingThread() instead. | |
354 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId()); | |
355 return thread_execution_state_task_runner_; | |
356 } | |
357 | |
358 if (thread_execution_state_ == EXITING) { | |
359 // The previous instance has only been partially cleaned up. It is necessary | |
360 // to call Stop() before Start(). | |
361 Stop(); | |
362 } | |
363 | |
364 // The thread is not running. Start it and get associated runner. The task- | |
365 // runner has to be saved for future use because though it can be used from | |
366 // any thread, it can be acquired via task_runner() only on the created | |
367 // thread and the thread that creates it (i.e. this thread). | |
368 Start(); | |
369 thread_execution_state_ = RUNNING; | |
370 thread_execution_state_task_runner_ = Thread::task_runner(); | |
371 | |
372 // Detach the sampling thread from the "sequence" (i.e. thread) that | |
373 // started it so that it can be self-managed or stopped by another thread. | |
374 DetachFromSequence(); | |
375 | |
376 return thread_execution_state_task_runner_; | |
377 } | |
378 | |
379 scoped_refptr<SingleThreadTaskRunner> | |
380 StackSamplingProfiler::SamplingThread::GetTaskRunner( | |
381 ThreadExecutionState* out_state) { | |
382 AutoLock lock(thread_execution_state_lock_); | |
383 if (out_state) | |
384 *out_state = thread_execution_state_; | |
385 if (thread_execution_state_ == RUNNING) { | |
386 // This shouldn't be called from the sampling thread as it's inefficient. | |
387 // Use GetTaskRunnerOnSamplingThread() instead. | |
388 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId()); | |
389 DCHECK(thread_execution_state_task_runner_); | |
390 } else { | |
391 DCHECK(!thread_execution_state_task_runner_); | |
392 } | |
393 | |
394 return thread_execution_state_task_runner_; | |
395 } | |
396 | |
397 scoped_refptr<SingleThreadTaskRunner> | |
398 StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() { | |
399 // This should be called only from the sampling thread as it has limited | |
400 // accessibility. | |
401 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId()); | |
402 | |
403 return Thread::task_runner(); | |
404 } | |
405 | |
406 void StackSamplingProfiler::SamplingThread::FinishCollection( | |
407 CollectionContext* collection) { | |
408 // If there is no duration for the final profile (because it was stopped), | |
409 // calculate it now. | |
410 if (!collection->profiles.empty() && | |
411 collection->profiles.back().profile_duration == TimeDelta()) { | |
412 collection->profiles.back().profile_duration = | |
413 Time::Now() - collection->profile_start_time; | |
414 } | |
415 | |
416 // Extract some information so callback and event-signalling can still be | |
417 // done after the collection has been removed from the list of "active" ones. | |
418 // This allows the controlling object (and tests using it) to be confident |
419 // that collection is fully finished when those things occur. | |
420 const CompletedCallback callback = collection->callback; | |
421 CallStackProfiles profiles = std::move(collection->profiles); | |
422 WaitableEvent* finished = collection->finished; | |
423 | |
424 // Remove this collection from the map of known ones. The |collection| | |
425 // parameter is invalid after this point. | |
426 size_t count = active_collections_.erase(collection->collection_id); | |
427 DCHECK_EQ(1U, count); | |
428 | |
429 // Run the associated callback, passing the collected profiles. It's okay to | |
430 // move them because this collection is about to be deleted. | |
Mike Wittman (2017/03/21 16:50:38): This last sentence is no longer relevant and can be removed.
bcwhite (2017/03/22 17:48:54): Done.
431 callback.Run(std::move(profiles)); | |
432 | |
433 // Signal that this collection is finished. | |
434 finished->Signal(); | |
435 } | |
436 | |
437 void StackSamplingProfiler::SamplingThread::RecordSample( | |
438 CollectionContext* collection) { | |
439 DCHECK(collection->native_sampler); | |
440 | |
441 // If this is the first sample of a burst, a new Profile needs to be created | |
442 // and filled. | |
443 if (collection->sample == 0) { | |
444 collection->profiles.push_back(CallStackProfile()); | |
445 CallStackProfile& profile = collection->profiles.back(); | |
446 profile.sampling_period = collection->params.sampling_interval; | |
447 collection->profile_start_time = Time::Now(); | |
448 collection->native_sampler->ProfileRecordingStarting(&profile.modules); | |
449 } | |
450 | |
451 // The currently active profile being captured. | |
452 CallStackProfile& profile = collection->profiles.back(); | |
453 | |
454 // Record a single sample. | |
455 profile.samples.push_back(Sample()); | |
456 collection->native_sampler->RecordStackSample(&profile.samples.back()); | |
457 | |
458 // If this is the last sample of a burst, record the total time. | |
459 if (collection->sample == collection->params.samples_per_burst - 1) { | |
460 profile.profile_duration = Time::Now() - collection->profile_start_time; | |
461 collection->native_sampler->ProfileRecordingStopped(); | |
462 } | |
463 } | |
464 | |
465 void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() { | |
466 if (!active_collections_.empty()) | |
467 return; | |
468 | |
469 int add_events; | |
470 { | |
471 AutoLock lock(thread_execution_state_lock_); | |
472 if (thread_execution_state_disable_idle_shutdown_for_testing_) | |
473 return; | |
474 add_events = thread_execution_state_add_events_; | |
475 } | |
476 | |
477 GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
478 FROM_HERE, | |
479 Bind(&SamplingThread::ShutdownTask, Unretained(this), add_events), | |
480 TimeDelta::FromSeconds(60)); | |
481 } | |
482 | |
483 void StackSamplingProfiler::SamplingThread::AddCollectionTask( | |
484 std::unique_ptr<CollectionContext> collection) { | |
485 const int collection_id = collection->collection_id; | |
486 const TimeDelta initial_delay = collection->params.initial_delay; | |
487 | |
488 active_collections_.insert( | |
489 std::make_pair(collection_id, std::move(collection))); | |
490 | |
491 GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
492 FROM_HERE, | |
493 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), | |
494 collection_id), | |
495 initial_delay); | |
496 | |
497 // Another increment of "add events" serves to invalidate any pending | |
498 // shutdown tasks that may have been initiated between the Add() and this | |
499 // task running. | |
500 { | |
501 AutoLock lock(thread_execution_state_lock_); | |
502 ++thread_execution_state_add_events_; | |
503 } | |
504 } | |
505 | |
506 void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) { | |
507 auto found = active_collections_.find(id); | |
508 if (found == active_collections_.end()) | |
509 return; | |
510 | |
511 FinishCollection(found->second.get()); | |
512 ScheduleShutdownIfIdle(); | |
513 } | |
514 | |
515 void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) { | |
516 auto found = active_collections_.find(id); | |
517 | |
518 // The task won't be found if it has been stopped. | |
519 if (found == active_collections_.end()) | |
520 return; | |
521 | |
522 CollectionContext* collection = found->second.get(); | |
523 | |
524 // Handle first-run with no "next time". | |
525 if (collection->next_sample_time == Time()) | |
526 collection->next_sample_time = Time::Now(); | |
527 | |
528 // Do the collection of a single sample. | |
529 RecordSample(collection); | |
530 | |
531 // Update the time of the next sample recording. | |
532 if (UpdateNextSampleTime(collection)) { | |
533 bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
534 FROM_HERE, | |
535 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id), | |
536 std::max(collection->next_sample_time - Time::Now(), TimeDelta())); | |
537 DCHECK(success); | |
538 } else { | |
539 // All capturing has completed so finish the collection. Let object expire. | |
540 // The |collection| variable will be invalid after this call. | |
541 FinishCollection(collection); | |
542 ScheduleShutdownIfIdle(); | |
543 } | |
544 } | |
545 | |
546 void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) { | |
547 // Stop here if not idle. This check must be made here because a shutdown |
548 // task can be posted via the TestAPI, which has no way to verify that the |
549 // thread is idle before posting it. |
550 if (!active_collections_.empty()) | |
551 return; | |
552 | |
553 // Holding this lock ensures that any attempt to start another job will | |
554 // get postponed until thread_execution_state_task_runner_ is cleared, thus | |
555 // eliminating the race. | |
556 AutoLock lock(thread_execution_state_lock_); | |
557 | |
558 // If the current count of creation requests doesn't match the passed count | |
559 // then other tasks have been created since this was posted. Abort shutdown. | |
560 if (thread_execution_state_add_events_ != add_events) | |
561 return; | |
562 | |
563 // There can be no new AddCollectionTasks at this point because creating | |
564 // those always increments "add events". There may be other requests, like | |
565 // Remove, but it's okay to schedule the thread to stop once they've been | |
566 // executed (i.e. "soon"). | |
567 StopSoon(); | |
568 | |
569 // StopSoon will have set the owning sequence (again) so it must be detached | |
570 // (again) in order for Stop/Start to be called (again) should more work | |
571 // come in. Holding the |thread_execution_state_lock_| ensures the necessary | |
572 // happens-after with regard to this detach and future Thread API calls. | |
573 DetachFromSequence(); | |
574 | |
575 // Set the thread_state variable so the thread will be restarted when new | |
576 // work comes in. Remove the thread_execution_state_task_runner_ to avoid | |
577 // confusion. | |
578 thread_execution_state_ = EXITING; | |
579 thread_execution_state_task_runner_ = nullptr; | |
580 } | |
581 | |
582 bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime( | |
583 CollectionContext* collection) { | |
584 // This will keep a consistent average interval between samples but will | |
585 // result in constant series of acquisitions, thus nearly locking out the | |
586 // target thread, if the interval is smaller than the time it takes to | |
587 // actually acquire the sample. Anything sampling that quickly is going | |
588 // to be a problem anyway so don't worry about it. | |
589 if (++collection->sample < collection->params.samples_per_burst) { | |
590 collection->next_sample_time += collection->params.sampling_interval; | |
591 return true; | |
592 } | |
593 | |
594 if (++collection->burst < collection->params.bursts) { | |
595 collection->sample = 0; | |
596 collection->next_sample_time += collection->params.burst_interval; | |
597 return true; | |
598 } | |
599 | |
600 return false; | |
601 } | |
602 | |
603 void StackSamplingProfiler::SamplingThread::CleanUp() { | |
604 // There should be no collections remaining when the thread stops. | |
605 DCHECK(active_collections_.empty()); | |
606 | |
607 // Let the parent clean up. | |
608 Thread::CleanUp(); | |
609 } | |
610 | |
611 // StackSamplingProfiler ------------------------------------------------------ | |
612 | |
613 // static | |
614 bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() { | |
615 return SamplingThread::GetInstance()->IsRunning(); | |
616 } | |
617 | |
618 // static | |
619 void StackSamplingProfiler::TestAPI::DisableIdleShutdown() { | |
620 SamplingThread::TestAPI::DisableIdleShutdown(); | |
621 } | |
622 | |
623 // static | |
624 void StackSamplingProfiler::TestAPI::InitiateSamplingThreadIdleShutdown() { | |
625 SamplingThread::TestAPI::ShutdownIfIdle(); | |
626 } | |
627 | |
628 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; | |
629 | |
630 StackSamplingProfiler::StackSamplingProfiler( | |
165 const SamplingParams& params, | 631 const SamplingParams& params, |
166 const CompletedCallback& completed_callback) | 632 const CompletedCallback& callback, |
167 : native_sampler_(std::move(native_sampler)), | 633 NativeStackSamplerTestDelegate* test_delegate) |
168 params_(params), | 634 : StackSamplingProfiler(base::PlatformThread::CurrentId(), |
169 stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC, | 635 params, |
170 WaitableEvent::InitialState::NOT_SIGNALED), | 636 callback, |
171 completed_callback_(completed_callback) {} | 637 test_delegate) {} |
172 | |
173 StackSamplingProfiler::SamplingThread::~SamplingThread() {} | |
174 | |
175 void StackSamplingProfiler::SamplingThread::ThreadMain() { | |
176 PlatformThread::SetName("Chrome_SamplingProfilerThread"); | |
177 | |
178 // For now, just ignore any requests to profile while another profiler is | |
179 // working. | |
180 if (!concurrent_profiling_lock.Get().Try()) | |
181 return; | |
182 | |
183 CallStackProfiles profiles; | |
184 CollectProfiles(&profiles); | |
185 concurrent_profiling_lock.Get().Release(); | |
186 completed_callback_.Run(std::move(profiles)); | |
187 } | |
188 | |
189 // Depending on how long the sampling takes and the length of the sampling | |
190 // interval, a burst of samples could take arbitrarily longer than | |
191 // samples_per_burst * sampling_interval. In this case, we (somewhat | |
192 // arbitrarily) honor the number of samples requested rather than strictly | |
193 // adhering to the sampling intervals. Once we have established users for the | |
194 // StackSamplingProfiler and the collected data to judge, we may go the other | |
195 // way or make this behavior configurable. | |
196 void StackSamplingProfiler::SamplingThread::CollectProfile( | |
197 CallStackProfile* profile, | |
198 TimeDelta* elapsed_time, | |
199 bool* was_stopped) { | |
200 ElapsedTimer profile_timer; | |
201 native_sampler_->ProfileRecordingStarting(&profile->modules); | |
202 profile->sampling_period = params_.sampling_interval; | |
203 *was_stopped = false; | |
204 TimeDelta previous_elapsed_sample_time; | |
205 for (int i = 0; i < params_.samples_per_burst; ++i) { | |
206 if (i != 0) { | |
207 // Always wait, even if for 0 seconds, so we can observe a signal on | |
208 // stop_event_. | |
209 if (stop_event_.TimedWait( | |
210 std::max(params_.sampling_interval - previous_elapsed_sample_time, | |
211 TimeDelta()))) { | |
212 *was_stopped = true; | |
213 break; | |
214 } | |
215 } | |
216 ElapsedTimer sample_timer; | |
217 profile->samples.push_back(Sample()); | |
218 native_sampler_->RecordStackSample(&profile->samples.back()); | |
219 previous_elapsed_sample_time = sample_timer.Elapsed(); | |
220 } | |
221 | |
222 *elapsed_time = profile_timer.Elapsed(); | |
223 profile->profile_duration = *elapsed_time; | |
224 native_sampler_->ProfileRecordingStopped(); | |
225 } | |
226 | |
227 // In an analogous manner to CollectProfile() and samples exceeding the expected | |
228 // total sampling time, bursts may also exceed the burst_interval. We adopt the | |
229 // same wait-and-see approach here. | |
230 void StackSamplingProfiler::SamplingThread::CollectProfiles( | |
231 CallStackProfiles* profiles) { | |
232 if (stop_event_.TimedWait(params_.initial_delay)) | |
233 return; | |
234 | |
235 TimeDelta previous_elapsed_profile_time; | |
236 for (int i = 0; i < params_.bursts; ++i) { | |
237 if (i != 0) { | |
238 // Always wait, even if for 0 seconds, so we can observe a signal on | |
239 // stop_event_. | |
240 if (stop_event_.TimedWait( | |
241 std::max(params_.burst_interval - previous_elapsed_profile_time, | |
242 TimeDelta()))) | |
243 return; | |
244 } | |
245 | |
246 CallStackProfile profile; | |
247 bool was_stopped = false; | |
248 CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped); | |
249 if (!profile.samples.empty()) | |
250 profiles->push_back(std::move(profile)); | |
251 | |
252 if (was_stopped) | |
253 return; | |
254 } | |
255 } | |
256 | |
257 void StackSamplingProfiler::SamplingThread::Stop() { | |
258 stop_event_.Signal(); | |
259 } | |
260 | |
261 // StackSamplingProfiler ------------------------------------------------------ | |
262 | |
263 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; | |
264 | |
265 StackSamplingProfiler::StackSamplingProfiler( | |
266 PlatformThreadId thread_id, | |
267 const SamplingParams& params, | |
268 const CompletedCallback& callback) | |
269 : StackSamplingProfiler(thread_id, params, callback, nullptr) {} | |
270 | 638 |
271 StackSamplingProfiler::StackSamplingProfiler( | 639 StackSamplingProfiler::StackSamplingProfiler( |
272 PlatformThreadId thread_id, | 640 PlatformThreadId thread_id, |
273 const SamplingParams& params, | 641 const SamplingParams& params, |
274 const CompletedCallback& callback, | 642 const CompletedCallback& callback, |
275 NativeStackSamplerTestDelegate* test_delegate) | 643 NativeStackSamplerTestDelegate* test_delegate) |
276 : thread_id_(thread_id), params_(params), completed_callback_(callback), | 644 : thread_id_(thread_id), |
277 test_delegate_(test_delegate) { | 645 params_(params), |
278 } | 646 completed_callback_(callback), |
647 // The event starts "signaled" so code knows it's safe to start thread. | |
648 profiling_inactive_(WaitableEvent::ResetPolicy::MANUAL, | |
649 WaitableEvent::InitialState::SIGNALED), | |
650 collection_id_(NULL_COLLECTION_ID), | |
651 test_delegate_(test_delegate) {} | |
279 | 652 |
280 StackSamplingProfiler::~StackSamplingProfiler() { | 653 StackSamplingProfiler::~StackSamplingProfiler() { |
654 // Stop is immediate but asynchronous. There is a non-zero probability that | |
655 // one more sample will be taken after this call returns. | |
281 Stop(); | 656 Stop(); |
282 if (!sampling_thread_handle_.is_null()) | 657 |
283 PlatformThread::Join(sampling_thread_handle_); | 658 // The behavior of sampling a thread that has exited is undefined and could |
284 } | 659 // cause Bad Things(tm) to occur. The safety model provided by this class is |
285 | 660 // that an instance of this object is expected to live at least as long as |
286 // static | 661 // the thread it is sampling. However, because the sampling is performed |
287 void StackSamplingProfiler::StartAndRunAsync( | 662 // asynchronously by the SamplingThread, there is no way to guarantee this |
288 PlatformThreadId thread_id, | 663 // is true without waiting for it to signal that it has finished. |
289 const SamplingParams& params, | 664 // |
290 const CompletedCallback& callback) { | 665 // The wait time should, at most, be only as long as it takes to collect one |
291 CHECK(ThreadTaskRunnerHandle::Get()); | 666 // sample (~200us) or none at all if sampling has already completed. |
292 AsyncRunner::Run(thread_id, params, callback); | 667 ThreadRestrictions::ScopedAllowWait allow_wait; |
668 profiling_inactive_.Wait(); | |
293 } | 669 } |
294 | 670 |
295 void StackSamplingProfiler::Start() { | 671 void StackSamplingProfiler::Start() { |
296 if (completed_callback_.is_null()) | 672 if (completed_callback_.is_null()) |
297 return; | 673 return; |
298 | 674 |
299 std::unique_ptr<NativeStackSampler> native_sampler = | 675 std::unique_ptr<NativeStackSampler> native_sampler = |
300 NativeStackSampler::Create(thread_id_, &RecordAnnotations, | 676 NativeStackSampler::Create(thread_id_, &RecordAnnotations, |
301 test_delegate_); | 677 test_delegate_); |
678 | |
302 if (!native_sampler) | 679 if (!native_sampler) |
303 return; | 680 return; |
304 | 681 |
305 sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_, | 682 // Wait for profiling to be "inactive", then reset it for the upcoming run. |
306 completed_callback_)); | 683 profiling_inactive_.Wait(); |
307 if (!PlatformThread::Create(0, sampling_thread_.get(), | 684 profiling_inactive_.Reset(); |
308 &sampling_thread_handle_)) | 685 |
309 sampling_thread_.reset(); | 686 DCHECK_EQ(NULL_COLLECTION_ID, collection_id_); |
687 collection_id_ = SamplingThread::GetInstance()->Add( | |
688 MakeUnique<SamplingThread::CollectionContext>( | |
689 thread_id_, params_, completed_callback_, &profiling_inactive_, | |
690 std::move(native_sampler))); | |
691 DCHECK_NE(NULL_COLLECTION_ID, collection_id_); | |
310 } | 692 } |
311 | 693 |
312 void StackSamplingProfiler::Stop() { | 694 void StackSamplingProfiler::Stop() { |
313 if (sampling_thread_) | 695 SamplingThread::GetInstance()->Remove(collection_id_); |
314 sampling_thread_->Stop(); | 696 collection_id_ = NULL_COLLECTION_ID; |
315 } | 697 } |
316 | 698 |
317 // static | 699 // static |
318 void StackSamplingProfiler::SetProcessMilestone(int milestone) { | 700 void StackSamplingProfiler::SetProcessMilestone(int milestone) { |
319 DCHECK_LE(0, milestone); | 701 DCHECK_LE(0, milestone); |
320 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); | 702 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); |
321 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); | 703 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); |
322 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); | 704 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); |
323 } | 705 } |
324 | 706 |
(...skipping 45 matching lines...) |
370 } | 752 } |
371 | 753 |
372 bool operator<(const StackSamplingProfiler::Frame &a, | 754 bool operator<(const StackSamplingProfiler::Frame &a, |
373 const StackSamplingProfiler::Frame &b) { | 755 const StackSamplingProfiler::Frame &b) { |
374 return (a.module_index < b.module_index) || | 756 return (a.module_index < b.module_index) || |
375 (a.module_index == b.module_index && | 757 (a.module_index == b.module_index && |
376 a.instruction_pointer < b.instruction_pointer); | 758 a.instruction_pointer < b.instruction_pointer); |
377 } | 759 } |
378 | 760 |
379 } // namespace base | 761 } // namespace base |
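For context on the change above: the removed code ran each profiler on its own dedicated thread (AsyncRunner plus a per-profiler ThreadMain loop), while the added code routes every collection through a single shared SamplingThread that shuts itself down when idle. The client-facing API is unchanged, so existing callers keep working. Below is a minimal usage sketch of that API as it stands after this CL; the function names, parameter values, and callback body are illustrative assumptions rather than code from this change.

// Hypothetical caller of StackSamplingProfiler (not part of this CL).
#include "base/bind.h"
#include "base/profiler/stack_sampling_profiler.h"
#include "base/time/time.h"

void OnProfilesCollected(
    base::StackSamplingProfiler::CallStackProfiles profiles) {
  // Consume the collected call-stack profiles, e.g. hand them to metrics code.
}

void ProfileCurrentThread() {
  base::StackSamplingProfiler::SamplingParams params;
  params.initial_delay = base::TimeDelta::FromMilliseconds(0);
  params.bursts = 1;
  params.samples_per_burst = 300;
  params.sampling_interval = base::TimeDelta::FromMilliseconds(100);

  // Uses the current-thread constructor added by this CL. Start() registers a
  // CollectionContext with the shared SamplingThread; sampling then happens
  // asynchronously on that thread.
  base::StackSamplingProfiler profiler(params, base::Bind(&OnProfilesCollected),
                                       nullptr /* test_delegate */);
  profiler.Start();

  // ... run the work to be profiled ...

  // Stop() ends the collection (the callback still runs if any samples were
  // taken); the destructor then waits on profiling_inactive_, which is why it
  // is safe for the sampled thread to exit once the profiler is destroyed.
  profiler.Stop();
}

As the destructor comment in the diff notes, that final wait should be at most about the time needed for one sample (~200us), or nothing at all if the collection has already completed.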