Chromium Code Reviews

Unified Diff: base/profiler/stack_sampling_profiler.cc

Issue 2554123002: Support parallel captures from the StackSamplingProfiler. (Closed)
Patch Set: merged synchronized-stop CL (created 3 years, 10 months ago)
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/profiler/stack_sampling_profiler.h"

 #include <algorithm>
+#include <map>
 #include <utility>

+#include "base/atomic_sequence_num.h"
+#include "base/atomicops.h"
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/callback.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/singleton.h"
 #include "base/profiler/native_stack_sampler.h"
 #include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/timer/elapsed_timer.h"

 namespace base {

 namespace {

-// Used to ensure only one profiler is running at a time.
-LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
-
-// AsyncRunner ----------------------------------------------------------------
-
-// Helper class to allow a profiler to be run completely asynchronously from the
-// initiator, without being concerned with the profiler's lifetime.
-class AsyncRunner {
- public:
-  // Sets up a profiler and arranges for it to be deleted on its completed
-  // callback.
-  static void Run(PlatformThreadId thread_id,
-                  const StackSamplingProfiler::SamplingParams& params,
-                  const StackSamplingProfiler::CompletedCallback& callback);
-
- private:
-  AsyncRunner();
-
-  // Runs the callback and deletes the AsyncRunner instance. |profiles| is not
-  // const& because it must be passed with std::move.
-  static void RunCallbackAndDeleteInstance(
-      std::unique_ptr<AsyncRunner> object_to_be_deleted,
-      const StackSamplingProfiler::CompletedCallback& callback,
-      scoped_refptr<SingleThreadTaskRunner> task_runner,
-      StackSamplingProfiler::CallStackProfiles profiles);
-
-  std::unique_ptr<StackSamplingProfiler> profiler_;
-
-  DISALLOW_COPY_AND_ASSIGN(AsyncRunner);
-};
-
-// static
-void AsyncRunner::Run(
-    PlatformThreadId thread_id,
-    const StackSamplingProfiler::SamplingParams& params,
-    const StackSamplingProfiler::CompletedCallback &callback) {
-  std::unique_ptr<AsyncRunner> runner(new AsyncRunner);
-  AsyncRunner* temp_ptr = runner.get();
-  temp_ptr->profiler_.reset(
-      new StackSamplingProfiler(thread_id, params,
-                                Bind(&AsyncRunner::RunCallbackAndDeleteInstance,
-                                     Passed(&runner), callback,
-                                     ThreadTaskRunnerHandle::Get())));
-  // The callback won't be called until after Start(), so temp_ptr will still
-  // be valid here.
-  temp_ptr->profiler_->Start();
-}
-
-AsyncRunner::AsyncRunner() {}
-
-void AsyncRunner::RunCallbackAndDeleteInstance(
-    std::unique_ptr<AsyncRunner> object_to_be_deleted,
-    const StackSamplingProfiler::CompletedCallback& callback,
-    scoped_refptr<SingleThreadTaskRunner> task_runner,
-    StackSamplingProfiler::CallStackProfiles profiles) {
-  callback.Run(std::move(profiles));
-  // Delete the instance on the original calling thread.
-  task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
-}
-
 void ChangeAtomicFlags(subtle::Atomic32* flags,
                        subtle::Atomic32 set,
                        subtle::Atomic32 clear) {
   DCHECK(set != 0 || clear != 0);
   DCHECK_EQ(0, set & clear);

   subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
   while (true) {
     subtle::Atomic32 existing =
         subtle::NoBarrier_CompareAndSwap(flags, bits, (bits | set) & ~clear);
(...skipping 58 matching lines...)
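
[Editor's note: ChangeAtomicFlags() applies set/clear bit masks with a compare-and-swap retry loop whose tail falls in the elided lines above. A standalone sketch of the same lock-free read-modify-write idiom, using std::atomic in place of base's subtle:: wrappers; all names here are illustrative, not Chrome's:]

#include <atomic>
#include <cassert>
#include <cstdint>

// Atomically sets the bits in |set| and clears the bits in |clear|.
// Retries until the compare-exchange observes no concurrent modification.
void ChangeFlags(std::atomic<uint32_t>* flags, uint32_t set, uint32_t clear) {
  assert(set != 0 || clear != 0);
  assert((set & clear) == 0);  // A bit cannot be both set and cleared.

  uint32_t bits = flags->load(std::memory_order_relaxed);
  // compare_exchange_weak reloads |bits| with the current value on failure,
  // so each retry computes the new value from fresh state.
  while (!flags->compare_exchange_weak(bits, (bits | set) & ~clear,
                                       std::memory_order_relaxed)) {
  }
}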
 StackSamplingProfiler::CallStackProfile
 StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
   return CallStackProfile(*this);
 }

 StackSamplingProfiler::CallStackProfile::CallStackProfile(
     const CallStackProfile& other) = default;

 // StackSamplingProfiler::SamplingThread --------------------------------------

-StackSamplingProfiler::SamplingThread::SamplingThread(
-    std::unique_ptr<NativeStackSampler> native_sampler,
-    const SamplingParams& params,
-    const CompletedCallback& completed_callback)
-    : native_sampler_(std::move(native_sampler)),
-      params_(params),
-      stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
-                  WaitableEvent::InitialState::NOT_SIGNALED),
-      completed_callback_(completed_callback) {}
-
-StackSamplingProfiler::SamplingThread::~SamplingThread() {}
-
-void StackSamplingProfiler::SamplingThread::ThreadMain() {
-  PlatformThread::SetName("Chrome_SamplingProfilerThread");
-
-  // For now, just ignore any requests to profile while another profiler is
-  // working.
-  if (!concurrent_profiling_lock.Get().Try())
-    return;
-
-  CallStackProfiles profiles;
-  CollectProfiles(&profiles);
-  concurrent_profiling_lock.Get().Release();
-  completed_callback_.Run(std::move(profiles));
-}
-
+class StackSamplingProfiler::SamplingThread : public Thread {
+ public:
+  struct CollectionContext {
+    CollectionContext(PlatformThreadId target,
+                      const SamplingParams& params,
+                      const CompletedCallback& callback,
+                      WaitableEvent* finished,
+                      std::unique_ptr<NativeStackSampler> sampler)
+        : collection_id(next_collection_id_.GetNext()),
+          target(target),
+          params(params),
+          callback(callback),
+          finished(finished),
+          native_sampler(std::move(sampler)) {}
+    ~CollectionContext() {}
+
+    // An identifier for this collection, used to uniquely identify it to
+    // outside interests.
+    const int collection_id;
+
+    Time next_sample_time;    // The absolute time for the next sample.
+    PlatformThreadId target;  // The thread being sampled.
Mike Wittman 2017/02/13 22:35:57 const
bcwhite 2017/02/14 14:33:01 Done.
+    SamplingParams params;    // Information about how to sample.
Mike Wittman 2017/02/13 22:35:57 const
bcwhite 2017/02/14 14:33:01 Done.
+    CompletedCallback callback;  // Callback made when sampling is complete.
Mike Wittman 2017/02/13 22:35:57 const
bcwhite 2017/02/14 14:33:02 Done.
+    WaitableEvent* finished;  // Signaled when all sampling is complete.
Mike Wittman 2017/02/13 22:35:57 WaitableEvent* const
bcwhite 2017/02/14 14:33:02 Done.
+
+    // Platform-specific module that does the actual sampling.
+    std::unique_ptr<NativeStackSampler> native_sampler;
Mike Wittman 2017/02/13 22:35:57 Move all const state before all mutable state, to
Mike Wittman 2017/02/13 22:35:57 const
bcwhite 2017/02/14 14:33:01 Done.
bcwhite 2017/02/14 14:33:02 Done.
+
+    // Counters that indicate the current position along the acquisition.
+    int burst = 0;
+    int sample = 0;
+
+    // The time that a profile was started, for calculating the total duration.
+    Time profile_start_time;
+
+    // The collected stack samples. The active profile is always at the back().
+    CallStackProfiles profiles;
+
+   private:
+    static StaticAtomicSequenceNumber next_collection_id_;
+  };
+
+  // Gets the single instance of this class.
+  static SamplingThread* GetInstance();
+
+  // Starts the thread.
+  void Start();
+
+  // Adds a new CollectionContext to the thread. This can be called externally
+  // from any thread. It returns an ID that can later be used to stop
+  // the sampling.
+  int Add(std::unique_ptr<CollectionContext> collection);
+
+  // Removes an active collection based on its ID, forcing it to run its
+  // callback if any data has been collected. This can be called externally
+  // from any thread.
+  void Remove(int id);
+
+  // Removes all active collections and stops the underlying thread.
+  void Shutdown();
+
+  // Begins an idle shutdown as if the idle timer had expired.
+  void ShutdownIfIdle();
+
+  // Undoes the "permanent" effect of Shutdown() so the thread can restart.
+  void UndoShutdown();
+
+  // Sets the number of milliseconds to wait after becoming idle before
+  // shutting down. Set to zero to disable idle shutdown.
+  void SetIdleShutdownTime(int shutdown_ms);
+
+ private:
+  SamplingThread();
+  ~SamplingThread() override;
+  friend struct DefaultSingletonTraits<SamplingThread>;
+
+  // Gets a task runner that is usable from the outside.
+  scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunner();
+  scoped_refptr<SingleThreadTaskRunner> GetTaskRunner();
+
+  // Gets the task runner that is usable from the sampling thread itself.
+  scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
+
+  // Finishes a collection and reports collected data via callback.
+  void FinishCollection(CollectionContext* collection);
+
+  // Records a single sample of a collection.
+  void RecordSample(CollectionContext* collection);
+
+  // Checks whether the sampling thread is idle and, if so, schedules shutdown.
+  void CheckForIdle();
+
+  // These methods are tasks that get posted to the internal message queue.
+  void AddCollectionTask(std::unique_ptr<CollectionContext> collection_ptr);
+  void RemoveCollectionTask(int id);
+  void PerformCollectionTask(int id);
+  void ShutdownTask();
+
+  // Updates |next_sample_time| based on the configured parameters.
+  bool UpdateNextSampleTime(CollectionContext* collection);
+
+  // Thread:
+  void CleanUp() override;
+
+  // The task runner for the sampling thread and some information about it.
+  // These must always be accessed while holding the lock. The saved task
+  // runner can be freely used by any calling thread.
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+  bool task_runner_forced_shutdown_ = false;
+  int task_runner_create_requests_ = 0;
+  TimeDelta task_runner_idle_shutdown_time_ = TimeDelta::FromSeconds(5);
+  Lock task_runner_lock_;
+
+  // A map of IDs to collection contexts. Because this class is a singleton
+  // that is never destroyed, context objects will never be destructed except
+  // by explicit action. Thus, it's acceptable to pass unretained pointers
+  // to these objects when posting tasks.
+  std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplingThread);
+};
+
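[Editor's note: each CollectionContext takes its ID from a StaticAtomicSequenceNumber, so callers hold an integer handle rather than a pointer, and Remove() is safe to call from any thread even for a collection that already finished. A minimal sketch of the same handle-registry idiom in standard C++; the real code confines the map to the sampling thread via posted tasks, while this sketch uses a mutex for brevity, and all names are illustrative:]

#include <atomic>
#include <map>
#include <memory>
#include <mutex>

struct Job { /* per-collection state would live here */ };

class JobRegistry {
 public:
  // Registers a job and returns a handle usable from any thread.
  int Add(std::unique_ptr<Job> job) {
    int id = next_id_.fetch_add(1, std::memory_order_relaxed);
    std::lock_guard<std::mutex> lock(mutex_);
    jobs_[id] = std::move(job);
    return id;
  }

  // Safe to call with a stale ID: a finished job is simply not found.
  void Remove(int id) {
    std::lock_guard<std::mutex> lock(mutex_);
    jobs_.erase(id);
  }

 private:
  std::atomic<int> next_id_{1};
  std::mutex mutex_;
  std::map<int, std::unique_ptr<Job>> jobs_;
};
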
+StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread::
+    CollectionContext::next_collection_id_;
+
+StackSamplingProfiler::SamplingThread::SamplingThread()
+    : Thread("Chrome_SamplingProfilerThread") {}
+
+StackSamplingProfiler::SamplingThread::~SamplingThread() {
+  Thread::Stop();
+}
+
+StackSamplingProfiler::SamplingThread*
+StackSamplingProfiler::SamplingThread::GetInstance() {
+  return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
+}
+
+void StackSamplingProfiler::SamplingThread::Start() {
+  Thread::Options options;
+  // Use a higher priority for a more accurate sampling interval.
+  options.priority = ThreadPriority::DISPLAY;
+  Thread::StartWithOptions(options);
+}
+
+int StackSamplingProfiler::SamplingThread::Add(
+    std::unique_ptr<CollectionContext> collection) {
+  int id = collection->collection_id;
+  scoped_refptr<SingleThreadTaskRunner> task_runner = GetOrCreateTaskRunner();
+
+  // There may be no task runner if the sampling thread has been permanently
+  // shut down.
+  if (task_runner) {
+    task_runner->PostTask(
+        FROM_HERE, Bind(&SamplingThread::AddCollectionTask, Unretained(this),
+                        Passed(&collection)));
+  }
+  return id;
+}
+
+void StackSamplingProfiler::SamplingThread::Remove(int id) {
+  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
+  if (!task_runner)
+    return;  // Everything has already stopped.
+
+  // This can fail if the thread were to exit between acquisition of the task
+  // runner above and the call below. In that case, however, everything has
+  // stopped so there's no need to try to stop it.
+  task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask,
+                                        Unretained(this), id));
+}
+
+void StackSamplingProfiler::SamplingThread::Shutdown() {
+  // Record that a shutdown has been requested so nothing can cause it to
+  // start up again.
+  {
+    AutoLock lock(task_runner_lock_);
+    task_runner_forced_shutdown_ = true;
+  }
+
+  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
+  if (!task_runner)
+    return;  // Everything has already stopped.
+
+  // This can fail if the thread were to exit between acquisition of the task
+  // runner above and the call below. In that case, however, everything has
+  // stopped so there's no need to do anything.
+  task_runner->PostTask(FROM_HERE,
+                        Bind(&SamplingThread::ShutdownTask, Unretained(this)));
+
+  // Now that a task has been posted, calling Stop() will block until that
+  // task has been executed.
+  Stop();
+}
+
+void StackSamplingProfiler::SamplingThread::ShutdownIfIdle() {
+  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
+  if (!task_runner)
+    return;  // Everything has already stopped.
+
+  // ShutdownTask will check if the thread is idle and skip the shutdown if
+  // it is not.
+  task_runner->PostTask(FROM_HERE,
+                        Bind(&SamplingThread::ShutdownTask, Unretained(this)));
+}
+
+void StackSamplingProfiler::SamplingThread::UndoShutdown() {
+  AutoLock lock(task_runner_lock_);
+  task_runner_forced_shutdown_ = false;
+}
+
+void StackSamplingProfiler::SamplingThread::SetIdleShutdownTime(
+    int shutdown_ms) {
+  AutoLock lock(task_runner_lock_);
+  task_runner_idle_shutdown_time_ = TimeDelta::FromMilliseconds(shutdown_ms);
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunner() {
+  AutoLock lock(task_runner_lock_);
+  ++task_runner_create_requests_;
+  if (!task_runner_) {
+    // If a forced shutdown has been done, don't let the thread restart.
+    if (task_runner_forced_shutdown_)
+      return nullptr;
+    // If this is not the first time the sampling thread has been launched, the
+    // previous instance has only been partially cleaned up. It is necessary
+    // to call Stop() before Start(). This is safe even if the thread has never
+    // been started.
+    Stop();
+    // The thread is not running. Start it and get the associated task runner.
+    // The task runner has to be saved for future use because, though it can
+    // be used from any thread, it can be acquired via task_runner() only on
+    // the created thread or the thread that created it (i.e. this thread).
+    Start();
+    task_runner_ = Thread::task_runner();
+    // Detach the sampling thread from the "sequence" (i.e. thread) that
+    // started it so that it can be self-managed or stopped by another
+    // thread.
+    DetachFromSequence();
+  } else {
+    // This shouldn't be called from the sampling thread as it's inefficient.
+    // Use GetTaskRunnerOnSamplingThread() instead.
+    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+  }
+
+  return task_runner_;
+}
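
[Editor's note: GetOrCreateTaskRunner() starts the thread lazily on first use, caches its task runner under |task_runner_lock_|, and calls Stop() before Start() so a thread that shut down while idle can be relaunched. A simplified standalone analogue of this lazily started, restartable worker, with std::thread and a task queue standing in for base::Thread; all names are illustrative:]

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class RestartableWorker {
 public:
  // Posts work, (re)starting the worker thread on first use after any stop.
  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!running_) {
      if (worker_.joinable())
        worker_.join();  // Finish cleaning up a previously stopped instance.
      running_ = true;
      worker_ = std::thread([this] { Run(); });
    }
    tasks_.push(std::move(task));
    wake_.notify_one();
  }

  void Stop() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      running_ = false;
      wake_.notify_one();
    }
    if (worker_.joinable())
      worker_.join();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(mutex_);
    while (running_) {
      if (tasks_.empty()) {
        wake_.wait(lock);
        continue;
      }
      auto task = std::move(tasks_.front());
      tasks_.pop();
      lock.unlock();
      task();  // Run the task outside the lock.
      lock.lock();
    }
    // Pending tasks are dropped on stop; a real implementation would drain.
  }

  std::mutex mutex_;
  std::condition_variable wake_;
  std::queue<std::function<void()>> tasks_;
  bool running_ = false;
  std::thread worker_;
};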
+
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetTaskRunner() {
+  // This shouldn't be called from the sampling thread as it's inefficient. Use
+  // GetTaskRunnerOnSamplingThread() instead.
+  DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
+
+  AutoLock lock(task_runner_lock_);
+  return task_runner_;
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
+  // This should be called only from the sampling thread as it has limited
+  // accessibility.
+  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
+
+  return Thread::task_runner();
+}
+
+void StackSamplingProfiler::SamplingThread::FinishCollection(
+    CollectionContext* collection) {
+  // If there is no duration for the final profile (because it was stopped),
+  // calculate it now.
+  if (!collection->profiles.empty() &&
+      collection->profiles.back().profile_duration == TimeDelta()) {
+    collection->profiles.back().profile_duration =
+        Time::Now() - collection->profile_start_time;
+  }
+
+  // Run the associated callback, passing the collected profiles. It's okay to
+  // move them because this collection is about to be deleted.
+  collection->callback.Run(std::move(collection->profiles));
+
+  // Signal that this collection is finished.
+  collection->finished->Signal();
+
+  // Remove this collection from the map of known ones. This must be done
+  // last as the |collection| parameter is invalid after this point.
+  size_t count = active_collections_.erase(collection->collection_id);
+  DCHECK_EQ(1U, count);
+}
+
+void StackSamplingProfiler::SamplingThread::RecordSample(
+    CollectionContext* collection) {
+  DCHECK(collection->native_sampler);
+
+  // If this is the first sample of a burst, a new Profile needs to be created
+  // and filled.
+  if (collection->sample == 0) {
+    collection->profiles.push_back(CallStackProfile());
+    CallStackProfile& profile = collection->profiles.back();
+    profile.sampling_period = collection->params.sampling_interval;
+    collection->profile_start_time = Time::Now();
+    collection->native_sampler->ProfileRecordingStarting(&profile.modules);
+  }
+
+  // The currently active profile being captured.
+  CallStackProfile& profile = collection->profiles.back();
+
+  // Record a single sample.
+  profile.samples.push_back(Sample());
+  Sample& sample = profile.samples.back();
+  collection->native_sampler->RecordStackSample(&sample);
+
+  // If the thread was not suspendable, assume it has exited.
+  if (collection->native_sampler->thread_state() ==
+      NativeStackSampler::THREAD_EXITED) {
+    // Indicate that collection is complete so it stops below and finishes.
+    // The empty frame remains as an indicator during analysis that the
+    // thread exited.
+    collection->sample = collection->params.samples_per_burst - 1;
+    collection->burst = collection->params.bursts - 1;
+  }
+
+  // If this is the last sample of a burst, record the total time.
+  if (collection->sample == collection->params.samples_per_burst - 1) {
+    profile.profile_duration = Time::Now() - collection->profile_start_time;
+    collection->native_sampler->ProfileRecordingStopped();
+  }
+}
+
-// Depending on how long the sampling takes and the length of the sampling
-// interval, a burst of samples could take arbitrarily longer than
-// samples_per_burst * sampling_interval. In this case, we (somewhat
-// arbitrarily) honor the number of samples requested rather than strictly
-// adhering to the sampling intervals. Once we have established users for the
-// StackSamplingProfiler and the collected data to judge, we may go the other
-// way or make this behavior configurable.
-void StackSamplingProfiler::SamplingThread::CollectProfile(
-    CallStackProfile* profile,
-    TimeDelta* elapsed_time,
-    bool* was_stopped) {
-  ElapsedTimer profile_timer;
-  native_sampler_->ProfileRecordingStarting(&profile->modules);
-  profile->sampling_period = params_.sampling_interval;
-  *was_stopped = false;
-  TimeDelta previous_elapsed_sample_time;
-  for (int i = 0; i < params_.samples_per_burst; ++i) {
-    if (i != 0) {
-      // Always wait, even if for 0 seconds, so we can observe a signal on
-      // stop_event_.
-      if (stop_event_.TimedWait(
-              std::max(params_.sampling_interval - previous_elapsed_sample_time,
-                       TimeDelta()))) {
-        *was_stopped = true;
-        break;
-      }
-    }
-    ElapsedTimer sample_timer;
-    profile->samples.push_back(Sample());
-    native_sampler_->RecordStackSample(&profile->samples.back());
-    previous_elapsed_sample_time = sample_timer.Elapsed();
-  }
-
-  *elapsed_time = profile_timer.Elapsed();
-  profile->profile_duration = *elapsed_time;
-  native_sampler_->ProfileRecordingStopped();
-}
-
-// In an analogous manner to CollectProfile() and samples exceeding the expected
-// total sampling time, bursts may also exceed the burst_interval. We adopt the
-// same wait-and-see approach here.
-void StackSamplingProfiler::SamplingThread::CollectProfiles(
-    CallStackProfiles* profiles) {
-  if (stop_event_.TimedWait(params_.initial_delay))
-    return;
-
-  TimeDelta previous_elapsed_profile_time;
-  for (int i = 0; i < params_.bursts; ++i) {
-    if (i != 0) {
-      // Always wait, even if for 0 seconds, so we can observe a signal on
-      // stop_event_.
-      if (stop_event_.TimedWait(
-              std::max(params_.burst_interval - previous_elapsed_profile_time,
-                       TimeDelta())))
-        return;
-    }
-
-    CallStackProfile profile;
-    bool was_stopped = false;
-    CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
-    if (!profile.samples.empty())
-      profiles->push_back(std::move(profile));
-
-    if (was_stopped)
-      return;
-  }
-}
-
-void StackSamplingProfiler::SamplingThread::Stop() {
-  stop_event_.Signal();
-}
+void StackSamplingProfiler::SamplingThread::CheckForIdle() {
+  if (!active_collections_.empty())
+    return;
+
+  AutoLock lock(task_runner_lock_);
+  if (!task_runner_idle_shutdown_time_.is_zero()) {
+    GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+        FROM_HERE, Bind(&SamplingThread::ShutdownTask, Unretained(this)),
+        task_runner_idle_shutdown_time_);
+  }
+}
+
+void StackSamplingProfiler::SamplingThread::AddCollectionTask(
+    std::unique_ptr<CollectionContext> collection_ptr) {
+  // Ownership of the collection is going to be given to a map, but a pointer
+  // to it will be needed later.
+  CollectionContext* collection = collection_ptr.get();
+  active_collections_.insert(
+      std::make_pair(collection->collection_id, std::move(collection_ptr)));
+
+  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+      FROM_HERE, Bind(&SamplingThread::PerformCollectionTask, Unretained(this),
+                      collection->collection_id),
+      collection->params.initial_delay);
+}
+
+void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
+  auto found = active_collections_.find(id);
+  if (found == active_collections_.end())
+    return;
+
+  FinishCollection(found->second.get());
+  CheckForIdle();
+}
+
+void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
+  auto found = active_collections_.find(id);
+
+  // The task won't be found if it has been stopped.
+  if (found == active_collections_.end())
+    return;
+
+  CollectionContext* collection = found->second.get();
+
+  // Handle first-run with no "next time".
+  if (collection->next_sample_time == Time())
+    collection->next_sample_time = Time::Now();
+
+  // Do the collection of a single sample.
+  RecordSample(collection);
+
+  // Update the time of the next sample recording.
+  if (UpdateNextSampleTime(collection)) {
+    bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
+        FROM_HERE,
+        Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id),
+        std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
+    DCHECK(success);
+  } else {
+    // All capturing has completed so finish the collection. Let the object
+    // expire. The |collection| variable will be invalid after this call.
+    FinishCollection(collection);
+    CheckForIdle();
+  }
+}
+
+void StackSamplingProfiler::SamplingThread::ShutdownTask() {
+  // Holding this lock ensures that any attempt to start another job will
+  // get postponed until StopSoon can run, thus eliminating the race.
+  AutoLock lock(task_runner_lock_);
+
+  // If this is a forced, permanent shutdown, stop all active collections.
+  if (task_runner_forced_shutdown_) {
+    // FinishCollection will remove the entry, thus invalidating any iterator.
+    while (!active_collections_.empty())
+      FinishCollection(active_collections_.begin()->second.get());
+  } else {
+    // If active_collections_ is not empty, something new has arrived since
+    // this task got posted. Abort the shutdown so it can be processed.
+    if (!active_collections_.empty())
+      return;
+    // It's possible that a new AddCollectionTask has been posted after this
+    // task. Reset the "create requests" counter and try again after any other
+    // pending tasks.
+    if (task_runner_create_requests_ > 0 && task_runner_) {
+      task_runner_create_requests_ = 0;
+      task_runner_->PostTask(
+          FROM_HERE, Bind(&SamplingThread::ShutdownTask, Unretained(this)));
+      return;
+    }
+    // There can be no new AddCollectionTasks at this point because creating
+    // those always increments "create requests". There may be other requests,
+    // like Remove, but it's okay to schedule the thread to stop once they've
+    // been executed (i.e. "soon").
+  }
+
+  // Stop the underlying thread as soon as all immediate tasks are complete.
+  // Calling Stop() directly would result in deadlock.
+  StopSoon();
+
+  // StopSoon will have set the owning sequence (again) so it must be detached
+  // (again) in order for Stop/Start to be called (again) should more work
+  // come in. Holding the |task_runner_lock_| ensures the necessary happens-
+  // after with regard to this detach and future Thread API calls.
+  DetachFromSequence();
+
+  // Clear the task_runner_ variable so the thread will be restarted when
+  // new work comes in.
+  task_runner_ = nullptr;
+}
+
+bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
+    CollectionContext* collection) {
+  if (++collection->sample < collection->params.samples_per_burst) {
+    collection->next_sample_time += collection->params.sampling_interval;
+    return true;
+  }
+
+  // This will keep a consistent average interval between samples but will
+  // result in a constant series of acquisitions, thus nearly locking out the
+  // target thread, if the interval is smaller than the time it takes to
+  // actually acquire the sample. Anything sampling that quickly is going
+  // to be a problem anyway, so don't worry about it.
+  if (++collection->burst < collection->params.bursts) {
+    collection->sample = 0;
+    collection->next_sample_time += collection->params.burst_interval;
+    return true;
+  }
+
+  return false;
+}
+
+void StackSamplingProfiler::SamplingThread::CleanUp() {
+  // There should be no collections remaining when the thread stops.
+  DCHECK(active_collections_.empty());
+
+  // Let the parent clean up.
+  Thread::CleanUp();
+}
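
[Editor's note: each collection drives its own cadence. PerformCollectionTask re-posts itself with a delay of std::max(next_sample_time - Time::Now(), TimeDelta()), so a sample that overruns its slot clamps the next delay to zero and the schedule catches up rather than drifting. A minimal sketch of that fixed-rate reschedule policy, with std::chrono standing in for base::Time and all names illustrative:]

#include <algorithm>
#include <chrono>
#include <functional>
#include <thread>

using Clock = std::chrono::steady_clock;

// Runs |work| |count| times, aiming for a fixed |interval| between starts.
// If one run overshoots its slot, the next wait clamps to "now" and the
// schedule catches up instead of accumulating drift.
void RunFixedRate(const std::function<void()>& work, int count,
                  Clock::duration interval) {
  Clock::time_point next = Clock::now();
  for (int i = 0; i < count; ++i) {
    work();
    next += interval;
    std::this_thread::sleep_until(std::max(next, Clock::now()));
  }
}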

 // StackSamplingProfiler ------------------------------------------------------

 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;

 StackSamplingProfiler::SamplingParams::SamplingParams()
     : initial_delay(TimeDelta::FromMilliseconds(0)),
       bursts(1),
       burst_interval(TimeDelta::FromMilliseconds(10000)),
       samples_per_burst(300),
       sampling_interval(TimeDelta::FromMilliseconds(100)) {
 }
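
[Editor's note: a worked check of these defaults, stepping the same counters UpdateNextSampleTime() uses above. One burst of 300 samples spaced 100 ms apart puts the last sample at 29900 ms, roughly 30 seconds of profiling, and with bursts == 1 the 10-second burst_interval is never consulted. A self-contained verification:]

#include <cstdio>

int main() {
  // The SamplingParams defaults from the constructor above.
  const int bursts = 1, samples_per_burst = 300;
  const int sampling_interval_ms = 100, burst_interval_ms = 10000;

  // Mirror UpdateNextSampleTime()'s counter logic.
  int next_ms = 0, last_ms = 0, sample = 0, burst = 0;
  for (;;) {
    last_ms = next_ms;
    if (++sample < samples_per_burst) {
      next_ms += sampling_interval_ms;
    } else if (++burst < bursts) {
      sample = 0;
      next_ms += burst_interval_ms;
    } else {
      break;
    }
  }
  std::printf("last sample at %d ms\n", last_ms);  // Prints: 29900 ms.
  return 0;
}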

 StackSamplingProfiler::StackSamplingProfiler(
     PlatformThreadId thread_id,
     const SamplingParams& params,
     const CompletedCallback& callback)
     : StackSamplingProfiler(thread_id, params, callback, nullptr) {}

 StackSamplingProfiler::StackSamplingProfiler(
     PlatformThreadId thread_id,
     const SamplingParams& params,
     const CompletedCallback& callback,
     NativeStackSamplerTestDelegate* test_delegate)
-    : thread_id_(thread_id), params_(params), completed_callback_(callback),
-      test_delegate_(test_delegate) {
+    : thread_id_(thread_id),
+      params_(params),
+      completed_callback_(callback),
+      finished_event_(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED),
+      test_delegate_(test_delegate) {
+  native_sampler_ = NativeStackSampler::Create(thread_id_, &RecordAnnotations,
+                                               test_delegate_);
 }

 StackSamplingProfiler::~StackSamplingProfiler() {
   Stop();
-  if (!sampling_thread_handle_.is_null())
-    PlatformThread::Join(sampling_thread_handle_);
-}
-
-// static
-void StackSamplingProfiler::StartAndRunAsync(
-    PlatformThreadId thread_id,
-    const SamplingParams& params,
-    const CompletedCallback& callback) {
-  CHECK(ThreadTaskRunnerHandle::Get());
-  AsyncRunner::Run(thread_id, params, callback);
+  finished_event_.Wait();
 }

 void StackSamplingProfiler::Start() {
   if (completed_callback_.is_null())
     return;

-  std::unique_ptr<NativeStackSampler> native_sampler =
-      NativeStackSampler::Create(thread_id_, &RecordAnnotations,
-                                 test_delegate_);
-  if (!native_sampler)
+  if (!native_sampler_)
     return;

-  sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_,
-                                            completed_callback_));
-  if (!PlatformThread::Create(0, sampling_thread_.get(),
-                              &sampling_thread_handle_))
-    sampling_thread_.reset();
+  collection_id_ = SamplingThread::GetInstance()->Add(
+      MakeUnique<SamplingThread::CollectionContext>(
+          thread_id_, params_, completed_callback_, &finished_event_,
+          std::move(native_sampler_)));
 }

 void StackSamplingProfiler::Stop() {
-  if (sampling_thread_)
-    sampling_thread_->Stop();
+  SamplingThread::GetInstance()->Remove(collection_id_);
 }

 // static
+void StackSamplingProfiler::Shutdown() {
+  SamplingThread::GetInstance()->Shutdown();
+}
+
+// static
+void StackSamplingProfiler::UndoShutdownForTesting() {
+  SamplingThread::GetInstance()->UndoShutdown();
+}
+
+// static
+bool StackSamplingProfiler::IsSamplingThreadRunningForTesting() {
+  return SamplingThread::GetInstance()->IsRunning();
+}
+
+// static
+void StackSamplingProfiler::SetSamplingThreadIdleShutdownTimeForTesting(
+    int shutdown_ms) {
+  SamplingThread::GetInstance()->SetIdleShutdownTime(shutdown_ms);
+}
+
+// static
+void StackSamplingProfiler::InitiateSamplingThreadIdleShutdownForTesting() {
+  SamplingThread::GetInstance()->ShutdownIfIdle();
+}
+
+// static
 void StackSamplingProfiler::SetProcessMilestone(int milestone) {
   DCHECK_LE(0, milestone);
   DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
   DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
   ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
 }

 // static
 void StackSamplingProfiler::ResetAnnotationsForTesting() {
   subtle::NoBarrier_Store(&process_milestones_, 0u);
(...skipping 42 matching lines...)
 }

 bool operator<(const StackSamplingProfiler::Frame &a,
                const StackSamplingProfiler::Frame &b) {
   return (a.module_index < b.module_index) ||
          (a.module_index == b.module_index &&
           a.instruction_pointer < b.instruction_pointer);
 }

 }  // namespace base
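
[Editor's note: the point of this CL is that captures no longer exclude one another; collections from independent profilers are multiplexed onto the one sampling thread. A hypothetical usage sketch, not code from this CL, written against the public API shown above with placeholder thread IDs:]

#include "base/profiler/stack_sampling_profiler.h"

// Hypothetical illustration: two profilers capturing in parallel. Each
// collection gets its own ID on the shared sampling thread.
void ProfileTwoThreads(base::PlatformThreadId thread_a,
                       base::PlatformThreadId thread_b,
                       const base::StackSamplingProfiler::CompletedCallback&
                           callback) {
  base::StackSamplingProfiler::SamplingParams params;
  params.samples_per_burst = 100;
  params.sampling_interval = base::TimeDelta::FromMilliseconds(10);

  base::StackSamplingProfiler profiler_a(thread_a, params, callback);
  base::StackSamplingProfiler profiler_b(thread_b, params, callback);
  profiler_a.Start();
  profiler_b.Start();
  // Each callback runs when its own collection completes; the destructors
  // block on |finished_event_| until sampling has finished.
}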