Chromium Code Reviews

Side by Side Diff: base/profiler/stack_sampling_profiler.cc

Issue 2554123002: Support parallel captures from the StackSamplingProfiler. (Closed)
Patch Set: addressed review comments by wittman Created 3 years, 10 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/profiler/stack_sampling_profiler.h" 5 #include "base/profiler/stack_sampling_profiler.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <map>
8 #include <utility> 9 #include <utility>
9 10
11 #include "base/atomic_sequence_num.h"
12 #include "base/atomicops.h"
10 #include "base/bind.h" 13 #include "base/bind.h"
11 #include "base/bind_helpers.h" 14 #include "base/bind_helpers.h"
12 #include "base/callback.h" 15 #include "base/callback.h"
13 #include "base/lazy_instance.h" 16 #include "base/lazy_instance.h"
14 #include "base/location.h" 17 #include "base/location.h"
15 #include "base/macros.h" 18 #include "base/macros.h"
19 #include "base/memory/ptr_util.h"
20 #include "base/memory/singleton.h"
16 #include "base/profiler/native_stack_sampler.h" 21 #include "base/profiler/native_stack_sampler.h"
17 #include "base/synchronization/lock.h" 22 #include "base/synchronization/lock.h"
23 #include "base/threading/thread.h"
24 #include "base/threading/thread_restrictions.h"
18 #include "base/threading/thread_task_runner_handle.h" 25 #include "base/threading/thread_task_runner_handle.h"
19 #include "base/timer/elapsed_timer.h" 26 #include "base/timer/elapsed_timer.h"
20 27
21 namespace base { 28 namespace base {
22 29
23 namespace { 30 namespace {
24 31
25 // Used to ensure only one profiler is running at a time. 32 // This value is used when there is no collection in progress and thus no ID
26 LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER; 33 // for referencing the active collection to the SamplingThread.
27 34 const int NULL_COLLECTION_ID = -1;
28 // AsyncRunner ----------------------------------------------------------------
29
30 // Helper class to allow a profiler to be run completely asynchronously from the
31 // initiator, without being concerned with the profiler's lifetime.
32 class AsyncRunner {
33 public:
34 // Sets up a profiler and arranges for it to be deleted on its completed
35 // callback.
36 static void Run(PlatformThreadId thread_id,
37 const StackSamplingProfiler::SamplingParams& params,
38 const StackSamplingProfiler::CompletedCallback& callback);
39
40 private:
41 AsyncRunner();
42
43 // Runs the callback and deletes the AsyncRunner instance. |profiles| is not
44 // const& because it must be passed with std::move.
45 static void RunCallbackAndDeleteInstance(
46 std::unique_ptr<AsyncRunner> object_to_be_deleted,
47 const StackSamplingProfiler::CompletedCallback& callback,
48 scoped_refptr<SingleThreadTaskRunner> task_runner,
49 StackSamplingProfiler::CallStackProfiles profiles);
50
51 std::unique_ptr<StackSamplingProfiler> profiler_;
52
53 DISALLOW_COPY_AND_ASSIGN(AsyncRunner);
54 };
55
56 // static
57 void AsyncRunner::Run(
58 PlatformThreadId thread_id,
59 const StackSamplingProfiler::SamplingParams& params,
60 const StackSamplingProfiler::CompletedCallback &callback) {
61 std::unique_ptr<AsyncRunner> runner(new AsyncRunner);
62 AsyncRunner* temp_ptr = runner.get();
63 temp_ptr->profiler_.reset(
64 new StackSamplingProfiler(thread_id, params,
65 Bind(&AsyncRunner::RunCallbackAndDeleteInstance,
66 Passed(&runner), callback,
67 ThreadTaskRunnerHandle::Get())));
68 // The callback won't be called until after Start(), so temp_ptr will still
69 // be valid here.
70 temp_ptr->profiler_->Start();
71 }
72
73 AsyncRunner::AsyncRunner() {}
74
75 void AsyncRunner::RunCallbackAndDeleteInstance(
76 std::unique_ptr<AsyncRunner> object_to_be_deleted,
77 const StackSamplingProfiler::CompletedCallback& callback,
78 scoped_refptr<SingleThreadTaskRunner> task_runner,
79 StackSamplingProfiler::CallStackProfiles profiles) {
80 callback.Run(std::move(profiles));
81 // Delete the instance on the original calling thread.
82 task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
83 }
84 35
85 void ChangeAtomicFlags(subtle::Atomic32* flags, 36 void ChangeAtomicFlags(subtle::Atomic32* flags,
86 subtle::Atomic32 set, 37 subtle::Atomic32 set,
87 subtle::Atomic32 clear) { 38 subtle::Atomic32 clear) {
88 DCHECK(set != 0 || clear != 0); 39 DCHECK(set != 0 || clear != 0);
89 DCHECK_EQ(0, set & clear); 40 DCHECK_EQ(0, set & clear);
90 41
91 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); 42 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
92 while (true) { 43 while (true) {
93 subtle::Atomic32 existing = 44 subtle::Atomic32 existing =
(...skipping 59 matching lines...)
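The visible part of ChangeAtomicFlags loads the flag word and enters a retry loop: the standard compare-and-swap pattern for updating selected bits without a lock. Below is a minimal standalone sketch of that same pattern using std::atomic in place of Chromium's base::subtle API (ChangeFlags is a hypothetical name, not the Chromium function):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Sketch of the set-and-clear-bits pattern: retry the compare-exchange until
// no other thread has modified the value between the load and the swap.
void ChangeFlags(std::atomic<uint32_t>* flags, uint32_t set, uint32_t clear) {
  assert(set != 0 || clear != 0);
  assert((set & clear) == 0);  // A bit cannot be both set and cleared.

  uint32_t bits = flags->load(std::memory_order_relaxed);
  while (!flags->compare_exchange_weak(bits, (bits | set) & ~clear,
                                       std::memory_order_relaxed)) {
    // On failure, |bits| is refreshed with the current value; just retry.
  }
}
```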
153 StackSamplingProfiler::CallStackProfile 104 StackSamplingProfiler::CallStackProfile
154 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { 105 StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
155 return CallStackProfile(*this); 106 return CallStackProfile(*this);
156 } 107 }
157 108
158 StackSamplingProfiler::CallStackProfile::CallStackProfile( 109 StackSamplingProfiler::CallStackProfile::CallStackProfile(
159 const CallStackProfile& other) = default; 110 const CallStackProfile& other) = default;
160 111
161 // StackSamplingProfiler::SamplingThread -------------------------------------- 112 // StackSamplingProfiler::SamplingThread --------------------------------------
162 113
163 StackSamplingProfiler::SamplingThread::SamplingThread( 114 class StackSamplingProfiler::SamplingThread : public Thread {
164 std::unique_ptr<NativeStackSampler> native_sampler, 115 public:
165 const SamplingParams& params, 116 class TestAPI {
166 const CompletedCallback& completed_callback) 117 public:
167 : native_sampler_(std::move(native_sampler)), 118 // Disables inherent idle-shutdown behavior.
168 params_(params), 119 static void DisableIdleShutdown();
169 stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC, 120
170 WaitableEvent::InitialState::NOT_SIGNALED), 121 // Begins an idle shutdown as if the idle-timer had expired.
171 completed_callback_(completed_callback) {} 122 static void ShutdownIfIdle();
172 123 };
173 StackSamplingProfiler::SamplingThread::~SamplingThread() {} 124
174 125 struct CollectionContext {
175 void StackSamplingProfiler::SamplingThread::ThreadMain() { 126 CollectionContext(PlatformThreadId target,
176 PlatformThread::SetName("Chrome_SamplingProfilerThread"); 127 const SamplingParams& params,
177 128 const CompletedCallback& callback,
178 // For now, just ignore any requests to profile while another profiler is 129 WaitableEvent* finished,
179 // working. 130 std::unique_ptr<NativeStackSampler> sampler)
180 if (!concurrent_profiling_lock.Get().Try()) 131 : collection_id(next_collection_id_.GetNext()),
132 target(target),
133 params(params),
134 callback(callback),
135 finished(finished),
136 native_sampler(std::move(sampler)) {}
137 ~CollectionContext() {}
138
139 // An identifier for this collection, used to uniquely identify it to
140 // outside interests.
141 const int collection_id;
142
143 const PlatformThreadId target; // ID of the thread being sampled.
144 const SamplingParams params; // Information about how to sample.
145 const CompletedCallback callback; // Callback made when sampling complete.
146 WaitableEvent* const finished; // Signaled when all sampling complete.
147
148 // Platform-specific module that does the actual sampling.
149 std::unique_ptr<NativeStackSampler> native_sampler;
150
151 // The absolute time for the next sample.
152 Time next_sample_time;
153
154 // The time that a profile was started, for calculating the total duration.
155 Time profile_start_time;
156
157 // Counters that indicate the current position in the acquisition.
158 int burst = 0;
159 int sample = 0;
160
161 // The collected stack samples. The active profile is always at the back().
162 CallStackProfiles profiles;
163
164 private:
165 static StaticAtomicSequenceNumber next_collection_id_;
166 };
167
168 // Gets the single instance of this class.
169 static SamplingThread* GetInstance();
170
171 // Starts the thread.
172 void Start();
173
174 // Adds a new CollectionContext to the thread. This can be called externally
175 // from any thread. This returns an ID that can later be used to stop
176 // the sampling.
177 int Add(std::unique_ptr<CollectionContext> collection);
178
179 // Removes an active collection based on its ID, forcing it to run its
180 // callback if any data has been collected. This can be called externally
181 // from any thread.
182 void Remove(int id);
183
184 private:
185 friend class TestAPI;
186 friend struct DefaultSingletonTraits<SamplingThread>;
187
188 // The different states in which the sampling-thread can be.
189 enum ThreadExecutionState {
190 // The thread is not running, either because it has never been started or
191 // because it has exited. It will be started (or restarted) when a sampling
Mike Wittman 2017/02/27 23:27:34 As the code is currently, the state is only set to
bcwhite 2017/03/13 18:50:17 Done.
Mike Wittman 2017/03/14 18:57:33 I think it would be clearer to remove the "(or res
bcwhite 2017/03/16 15:56:25 Done.
192 // request is received.
193 NOT_STARTED,
194
195 // The thread is running and processing tasks. This is the state when any
196 // sampling requests are active and during the "idle" period afterward
197 // before the thread is stopped.
198 RUNNING,
199
200 // Once all sampling requests have finished and the "idle" period has
201 // expired, the thread will be set to this state and its shutdown
202 // initiated.
Mike Wittman 2017/02/27 23:27:34 We should mention that new profiling requests (whi
bcwhite 2017/03/13 18:50:17 Done.
203 EXITING,
204 };
205
206 SamplingThread();
207 ~SamplingThread() override;
208
209 // Gets the task runner that is usable from the outside.
210 scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunner();
211 scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
212 ThreadExecutionState* out_state);
213
214 // Gets the task runner that is usable from the sampling thread itself.
215 scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
216
217 // Finishes a collection and reports collected data via callback.
218 void FinishCollection(CollectionContext* collection);
219
220 // Records a single sample of a collection.
221 void RecordSample(CollectionContext* collection);
222
223 // Checks if the sampling thread is idle and begins a shutdown if so.
224 void ScheduleShutdownIfIdle();
225
226 // These methods are tasks that get posted to the internal message queue.
227 void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
228 void RemoveCollectionTask(int id);
229 void PerformCollectionTask(int id);
230 void ShutdownTask(int add_events);
231
232 // Updates |next_sample_time| based on the configured sampling parameters.
233 bool UpdateNextSampleTime(CollectionContext* collection);
234
235 // Thread:
236 void CleanUp() override;
237
238 // The task-runner for the sampling thread and some information about it.
239 // This must always be accessed while holding the lock. The saved task-runner
240 // can be freely used by any calling thread.
241 Lock task_runner_lock_;
242 scoped_refptr<SingleThreadTaskRunner> task_runner_;
243 ThreadExecutionState task_runner_thread_state_ = NOT_STARTED;
244 bool task_runner_disable_idle_shutdown_for_testing_ = false;
245
246 // A counter that tracks additions of new collection requests. It is
247 // incremented when changes occur so that delayed shutdown tasks can detect
248 // whether something new happened while they were waiting. Like all
249 // |task_runner_*| vars, this must be accessed while holding |task_runner_lock_|.
250 int task_runner_add_events_ = 0;
251
252 // A map of IDs to collection contexts. Because this class is a singleton
253 // that is never destroyed, context objects will never be destructed except
254 // by explicit action. Thus, it's acceptable to pass unretained pointers
255 // to these objects when posting tasks.
256 std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
257
258 DISALLOW_COPY_AND_ASSIGN(SamplingThread);
259 };
260
261 void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() {
262 SamplingThread* sampler = SamplingThread::GetInstance();
263 DCHECK(sampler);
264
265 {
266 AutoLock lock(sampler->task_runner_lock_);
267 sampler->task_runner_disable_idle_shutdown_for_testing_ = true;
268 }
269 }
270
271 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownIfIdle() {
272 SamplingThread* sampler = SamplingThread::GetInstance();
273 DCHECK(sampler);
274
275 ThreadExecutionState state;
276 scoped_refptr<SingleThreadTaskRunner> task_runner =
277 sampler->GetTaskRunner(&state);
278 DCHECK_EQ(RUNNING, state);
279 DCHECK(task_runner);
280
281 int add_events;
282 {
283 AutoLock lock(sampler->task_runner_lock_);
284 add_events = sampler->task_runner_add_events_;
285 }
286
287 // ShutdownTask will check if the thread is idle and skip the shutdown if not.
288 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::ShutdownTask,
289 Unretained(sampler), add_events));
290 }
291
292 StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread::
293 CollectionContext::next_collection_id_;
294
295 StackSamplingProfiler::SamplingThread::SamplingThread()
296 : Thread("Chrome_SamplingProfilerThread") {}
297
298 StackSamplingProfiler::SamplingThread::~SamplingThread() {
299 Stop();
300 }
301
302 StackSamplingProfiler::SamplingThread*
303 StackSamplingProfiler::SamplingThread::GetInstance() {
304 return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
305 }
306
307 void StackSamplingProfiler::SamplingThread::Start() {
308 Thread::Options options;
309 // Use a higher priority for a more accurate sampling interval.
310 options.priority = ThreadPriority::DISPLAY;
311 Thread::StartWithOptions(options);
312 }
313
314 int StackSamplingProfiler::SamplingThread::Add(
315 std::unique_ptr<CollectionContext> collection) {
316 int id = collection->collection_id;
317 scoped_refptr<SingleThreadTaskRunner> task_runner = GetOrCreateTaskRunner();
318
319 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::AddCollectionTask,
320 Unretained(this), Passed(&collection)));
321
322 return id;
323 }
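Add() reads the collection ID before posting because Passed(&collection) hands ownership of the move-only CollectionContext to the task, after which the local pointer is empty. Here is a standalone sketch of the same handoff using a C++14 move-capturing lambda and std::thread in place of Bind/Passed and the task runner (all names below are stand-ins, not the Chromium API):

```cpp
#include <memory>
#include <thread>
#include <utility>

// Hypothetical stand-in; the real CollectionContext also carries the
// sampling parameters, callback, and native sampler.
struct CollectionContext { int collection_id = 0; };

void AddCollectionTask(std::unique_ptr<CollectionContext> collection) {
  // Would register the collection and schedule its first sample.
}

int Add(std::unique_ptr<CollectionContext> collection) {
  int id = collection->collection_id;  // Read before ownership moves away.
  std::thread task([c = std::move(collection)]() mutable {
    AddCollectionTask(std::move(c));
  });
  task.detach();
  return id;
}
```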
324
325 void StackSamplingProfiler::SamplingThread::Remove(int id) {
326 ThreadExecutionState state;
327 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
328 if (state != RUNNING)
181 return; 329 return;
182 330 DCHECK(task_runner);
183 CallStackProfiles profiles; 331
184 CollectProfiles(&profiles); 332 // This can fail if the thread were to exit between acquisition of the task
185 concurrent_profiling_lock.Get().Release(); 333 // runner above and the call below. In that case, however, everything has
186 completed_callback_.Run(std::move(profiles)); 334 // stopped so there's no need to try to stop it.
187 } 335 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask,
188 336 Unretained(this), id));
189 // Depending on how long the sampling takes and the length of the sampling 337 }
190 // interval, a burst of samples could take arbitrarily longer than 338
191 // samples_per_burst * sampling_interval. In this case, we (somewhat 339 scoped_refptr<SingleThreadTaskRunner>
192 // arbitrarily) honor the number of samples requested rather than strictly 340 StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunner() {
Mike Wittman 2017/02/27 23:27:34 We should rename this to something like GetOrCreat
bcwhite 2017/03/13 18:50:17 Done. This is exactly why I didn't want these hel
Mike Wittman 2017/03/14 18:57:33 The most important values for Chrome code are read
193 // adhering to the sampling intervals. Once we have established users for the 341 AutoLock lock(task_runner_lock_);
194 // StackSamplingProfiler and the collected data to judge, we may go the other 342 ++task_runner_add_events_;
195 // way or make this behavior configurable. 343
196 void StackSamplingProfiler::SamplingThread::CollectProfile( 344 if (task_runner_thread_state_ == RUNNING) {
197 CallStackProfile* profile, 345 DCHECK(task_runner_);
198 TimeDelta* elapsed_time, 346 // This shouldn't be called from the sampling thread as it's inefficient.
199 bool* was_stopped) { 347 // Use GetTaskRunnerOnSamplingThread() instead.
200 ElapsedTimer profile_timer; 348 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
201 native_sampler_->ProfileRecordingStarting(&profile->modules); 349 return task_runner_;
202 profile->sampling_period = params_.sampling_interval; 350 }
203 *was_stopped = false; 351
204 TimeDelta previous_elapsed_sample_time; 352 if (task_runner_thread_state_ == EXITING) {
205 for (int i = 0; i < params_.samples_per_burst; ++i) { 353 // The previous instance has only been partially cleaned up. It is necessary
206 if (i != 0) { 354 // to call Stop() before Start(). This is safe even if the thread has never
Mike Wittman 2017/02/27 23:27:34 The last sentence is no longer relevant and can be
bcwhite 2017/03/13 18:50:17 Done.
207 // Always wait, even if for 0 seconds, so we can observe a signal on 355 // been started.
208 // stop_event_. 356 Stop();
209 if (stop_event_.TimedWait( 357 }
210 std::max(params_.sampling_interval - previous_elapsed_sample_time, 358
211 TimeDelta()))) { 359 // The thread is not running. Start it and get its associated runner. The task-
212 *was_stopped = true; 360 // runner has to be saved for future use because though it can be used from
213 break; 361 // any thread, it can be acquired via task_runner() only on the created
214 } 362 // thread and the thread that creates it (i.e. this thread).
215 } 363 Start();
216 ElapsedTimer sample_timer; 364 task_runner_thread_state_ = RUNNING;
217 profile->samples.push_back(Sample()); 365 task_runner_ = Thread::task_runner();
218 native_sampler_->RecordStackSample(&profile->samples.back()); 366
219 previous_elapsed_sample_time = sample_timer.Elapsed(); 367 // Detach the sampling thread from the "sequence" (i.e. thread) that
220 } 368 // started it so that it can be self-managed or stopped by another thread.
221 369 DetachFromSequence();
222 *elapsed_time = profile_timer.Elapsed(); 370
223 profile->profile_duration = *elapsed_time; 371 return task_runner_;
224 native_sampler_->ProfileRecordingStopped(); 372 }
225 } 373
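GetOrCreateTaskRunner() is the heart of the on-demand lifecycle: every call counts as an "add event", a RUNNING thread returns the cached runner, and an EXITING thread is fully stopped before being restarted. A minimal sketch of that state machine follows, with standard-library stand-ins (Worker, TaskRunner, and the globals are hypothetical, not base::Thread):

```cpp
#include <memory>
#include <mutex>

struct TaskRunner {};  // Stand-in for SingleThreadTaskRunner.

struct Worker {  // Stand-in for base::Thread.
  void Start() {}
  void Stop() {}
  std::shared_ptr<TaskRunner> task_runner() {
    return std::make_shared<TaskRunner>();
  }
};

enum ThreadExecutionState { NOT_STARTED, RUNNING, EXITING };

std::mutex g_lock;
ThreadExecutionState g_state = NOT_STARTED;
std::shared_ptr<TaskRunner> g_runner;
int g_add_events = 0;
Worker g_worker;

std::shared_ptr<TaskRunner> GetOrCreateTaskRunner() {
  std::lock_guard<std::mutex> lock(g_lock);
  ++g_add_events;  // Invalidates any pending idle-shutdown task.
  if (g_state == RUNNING)
    return g_runner;  // Fast path: thread already running.
  if (g_state == EXITING)
    g_worker.Stop();  // Finish tearing down the previous instance.
  g_worker.Start();   // Start (or restart) the thread...
  g_state = RUNNING;
  g_runner = g_worker.task_runner();  // ...and cache its runner for reuse.
  return g_runner;
}
```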
226 374 scoped_refptr<SingleThreadTaskRunner>
227 // In an analogous manner to CollectProfile() and samples exceeding the expected 375 StackSamplingProfiler::SamplingThread::GetTaskRunner(
228 // total sampling time, bursts may also exceed the burst_interval. We adopt the 376 ThreadExecutionState* out_state) {
229 // same wait-and-see approach here. 377 AutoLock lock(task_runner_lock_);
230 void StackSamplingProfiler::SamplingThread::CollectProfiles( 378 if (out_state)
231 CallStackProfiles* profiles) { 379 *out_state = task_runner_thread_state_;
232 if (stop_event_.TimedWait(params_.initial_delay)) 380 if (task_runner_thread_state_ != RUNNING) {
381 DCHECK(!task_runner_);
382 return nullptr;
383 }
384
385 // This shouldn't be called from the sampling thread as it's inefficient.
386 // Use GetTaskRunnerOnSamplingThread() instead.
387 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
388
389 return task_runner_;
390 }
391
392 scoped_refptr<SingleThreadTaskRunner>
393 StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
394 // This should be called only from the sampling thread as it has limited
395 // accessibility.
396 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
397
398 return Thread::task_runner();
399 }
400
401 void StackSamplingProfiler::SamplingThread::FinishCollection(
402 CollectionContext* collection) {
403 // If there is no duration for the final profile (because it was stopped),
404 // calculate it now.
405 if (!collection->profiles.empty() &&
406 collection->profiles.back().profile_duration == TimeDelta()) {
407 collection->profiles.back().profile_duration =
408 Time::Now() - collection->profile_start_time;
409 }
410
411 // Run the associated callback, passing the collected profiles. It's okay to
412 // move them because this collection is about to be deleted.
413 collection->callback.Run(std::move(collection->profiles));
414
415 // Signal that this collection is finished.
416 collection->finished->Signal();
417
418 // Remove this collection from the map of known ones. This must be done
419 // last as the |collection| parameter is invalid after this point.
420 size_t count = active_collections_.erase(collection->collection_id);
421 DCHECK_EQ(1U, count);
422 }
423
424 void StackSamplingProfiler::SamplingThread::RecordSample(
425 CollectionContext* collection) {
426 DCHECK(collection->native_sampler);
427
428 // If this is the first sample of a burst, a new Profile needs to be created
429 // and filled.
430 if (collection->sample == 0) {
431 collection->profiles.push_back(CallStackProfile());
432 CallStackProfile& profile = collection->profiles.back();
433 profile.sampling_period = collection->params.sampling_interval;
434 collection->profile_start_time = Time::Now();
435 collection->native_sampler->ProfileRecordingStarting(&profile.modules);
436 }
437
438 // The currently active profile being captured.
439 CallStackProfile& profile = collection->profiles.back();
440
441 // Record a single sample.
442 profile.samples.push_back(Sample());
443 collection->native_sampler->RecordStackSample(&profile.samples.back());
444
445 // If this is the last sample of a burst, record the total time.
446 if (collection->sample == collection->params.samples_per_burst - 1) {
447 profile.profile_duration = Time::Now() - collection->profile_start_time;
448 collection->native_sampler->ProfileRecordingStopped();
449 }
450 }
451
452 void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
453 if (!active_collections_.empty())
233 return; 454 return;
234 455
235 TimeDelta previous_elapsed_profile_time; 456 int add_events;
236 for (int i = 0; i < params_.bursts; ++i) { 457 {
237 if (i != 0) { 458 AutoLock lock(task_runner_lock_);
238 // Always wait, even if for 0 seconds, so we can observe a signal on 459 if (task_runner_disable_idle_shutdown_for_testing_)
239 // stop_event_.
240 if (stop_event_.TimedWait(
241 std::max(params_.burst_interval - previous_elapsed_profile_time,
242 TimeDelta())))
243 return;
244 }
245
246 CallStackProfile profile;
247 bool was_stopped = false;
248 CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
249 if (!profile.samples.empty())
250 profiles->push_back(std::move(profile));
251
252 if (was_stopped)
253 return; 460 return;
254 } 461 add_events = task_runner_add_events_;
255 } 462 }
256 463
257 void StackSamplingProfiler::SamplingThread::Stop() { 464 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
258 stop_event_.Signal(); 465 FROM_HERE,
466 Bind(&SamplingThread::ShutdownTask, Unretained(this), add_events),
467 TimeDelta::FromSeconds(60));
468 }
469
470 void StackSamplingProfiler::SamplingThread::AddCollectionTask(
471 std::unique_ptr<CollectionContext> collection) {
472 const int collection_id = collection->collection_id;
473 const TimeDelta initial_delay = collection->params.initial_delay;
474
475 active_collections_.insert(
476 std::make_pair(collection_id, std::move(collection)));
477
478 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
479 FROM_HERE, Bind(&SamplingThread::PerformCollectionTask, Unretained(this),
480 collection_id),
481 initial_delay);
482
483 // Another increment of "create requests" serves to invalidate any pending
484 // shutdown tasks that may have been initiated between the Add() call and
485 // the execution of this task.
486 {
487 AutoLock lock(task_runner_lock_);
488 ++task_runner_add_events_;
489 }
490 }
491
492 void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
493 auto found = active_collections_.find(id);
494 if (found == active_collections_.end())
495 return;
496
497 FinishCollection(found->second.get());
498 ScheduleShutdownIfIdle();
499 }
500
501 void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
502 auto found = active_collections_.find(id);
503
504 // The collection won't be found if it has already been removed (stopped).
505 if (found == active_collections_.end())
506 return;
507
508 CollectionContext* collection = found->second.get();
509
510 // Handle first-run with no "next time".
511 if (collection->next_sample_time == Time())
512 collection->next_sample_time = Time::Now();
513
514 // Do the collection of a single sample.
515 RecordSample(collection);
516
517 // Update the time of the next sample recording.
518 if (UpdateNextSampleTime(collection)) {
519 bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
520 FROM_HERE,
521 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id),
522 std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
523 DCHECK(success);
524 } else {
525 // All capturing has completed so finish the collection. Let object expire.
526 // The |collection| variable will be invalid after this call.
527 FinishCollection(collection);
528 ScheduleShutdownIfIdle();
529 }
530 }
531
532 void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
533 // Holding this lock ensures that any attempt to start another job will
534 // get postponed until task_runner_ is cleared, thus eliminating the race.
535 AutoLock lock(task_runner_lock_);
536
537 // If the current count of creation requests doesn't match the passed count
538 // then other tasks have been created since this was posted. Abort shutdown.
539 if (task_runner_add_events_ != add_events)
540 return;
541
542 // There can be no new AddCollectionTasks at this point because creating
543 // those always increments "create requests". There may be other requests,
544 // like Remove, but it's okay to schedule the thread to stop once they've
545 // been executed (i.e. "soon").
546 StopSoon();
547
548 // StopSoon will have set the owning sequence (again) so it must be detached
549 // (again) in order for Stop/Start to be called (again) should more work
550 // come in. Holding the |task_runner_lock_| ensures the necessary happens-
551 // after with regard to this detach and future Thread API calls.
552 DetachFromSequence();
553
554 // Set the thread_state variable so the thread will be restarted when new
555 // work comes in.
556 task_runner_thread_state_ = EXITING;
557 }
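The add-events counter acts as a generation stamp: ScheduleShutdownIfIdle() snapshots it when posting the delayed ShutdownTask, and the task aborts if the value has moved on. A sketch of the check, with an illustrative timeline (names and numbers are made up for the example):

```cpp
#include <mutex>

std::mutex g_lock;
int g_add_events = 0;  // Bumped by every Add(); guarded by g_lock.

// The core of ShutdownTask(): a stale stamp means new work arrived after the
// shutdown was scheduled, so the shutdown must be abandoned.
bool ShutdownStillValid(int add_events_at_post) {
  std::lock_guard<std::mutex> lock(g_lock);
  return g_add_events == add_events_at_post;
}

// Timeline: the last collection finishes with the counter at 7, and
// ShutdownTask(7) is posted with a 60 s delay. If an Add() bumps the counter
// to 8 in the meantime, ShutdownTask sees 8 != 7 and returns without calling
// StopSoon(), leaving the thread alive for the new collection.
```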
558
559 bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
560 CollectionContext* collection) {
561 // This will keep a consistent average interval between samples but will
562 // result in a constant series of acquisitions, thus nearly locking out the
563 // target thread, if the interval is smaller than the time it takes to
564 // actually acquire a sample. Anything sampling that quickly is going to
565 // be a problem anyway, so don't worry about it.
566 if (++collection->sample < collection->params.samples_per_burst) {
567 collection->next_sample_time += collection->params.sampling_interval;
568 return true;
569 }
570
571 if (++collection->burst < collection->params.bursts) {
572 collection->sample = 0;
573 collection->next_sample_time += collection->params.burst_interval;
574 return true;
575 }
576
577 return false;
578 }
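UpdateNextSampleTime() advances the deadline from the previous deadline rather than from Time::Now(), so per-sample jitter does not accumulate: a sample that runs 3 ms late leaves the following one scheduled 3 ms sooner, keeping the average interval constant. A standalone sketch of the same arithmetic with std::chrono (a sketch, not the Chromium types):

```cpp
#include <algorithm>
#include <chrono>

using Clock = std::chrono::steady_clock;

// Returns the delay to use for the next posted task. The deadline advances by
// a fixed |interval| from its previous value; the delay is clamped at zero,
// mirroring std::max(next_sample_time - Time::Now(), TimeDelta()).
Clock::duration NextDelay(Clock::time_point* next_sample_time,
                          Clock::duration interval) {
  *next_sample_time += interval;
  return std::max(*next_sample_time - Clock::now(), Clock::duration::zero());
}
```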
579
580 void StackSamplingProfiler::SamplingThread::CleanUp() {
581 // There should be no collections remaining when the thread stops.
582 DCHECK(active_collections_.empty());
583
584 // Let the parent clean up.
585 Thread::CleanUp();
259 } 586 }
260 587
261 // StackSamplingProfiler ------------------------------------------------------ 588 // StackSamplingProfiler ------------------------------------------------------
262 589
590 // static
591 bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() {
592 return SamplingThread::GetInstance()->IsRunning();
593 }
594
595 // static
596 void StackSamplingProfiler::TestAPI::DisableIdleShutdown() {
597 SamplingThread::TestAPI::DisableIdleShutdown();
598 }
599
600 // static
601 void StackSamplingProfiler::TestAPI::InitiateSamplingThreadIdleShutdown() {
602 SamplingThread::TestAPI::ShutdownIfIdle();
603 }
604
263 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; 605 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
264 606
265 StackSamplingProfiler::SamplingParams::SamplingParams() 607 StackSamplingProfiler::SamplingParams::SamplingParams()
266 : initial_delay(TimeDelta::FromMilliseconds(0)), 608 : initial_delay(TimeDelta::FromMilliseconds(0)),
267 bursts(1), 609 bursts(1),
268 burst_interval(TimeDelta::FromMilliseconds(10000)), 610 burst_interval(TimeDelta::FromMilliseconds(10000)),
269 samples_per_burst(300), 611 samples_per_burst(300),
270 sampling_interval(TimeDelta::FromMilliseconds(100)) { 612 sampling_interval(TimeDelta::FromMilliseconds(100)) {
271 } 613 }
272 614
273 StackSamplingProfiler::StackSamplingProfiler( 615 StackSamplingProfiler::StackSamplingProfiler(
274 PlatformThreadId thread_id,
275 const SamplingParams& params, 616 const SamplingParams& params,
276 const CompletedCallback& callback) 617 const CompletedCallback& callback,
277 : StackSamplingProfiler(thread_id, params, callback, nullptr) {} 618 NativeStackSamplerTestDelegate* test_delegate)
619 : StackSamplingProfiler(base::PlatformThread::CurrentId(),
620 params,
621 callback,
622 test_delegate) {}
278 623
279 StackSamplingProfiler::StackSamplingProfiler( 624 StackSamplingProfiler::StackSamplingProfiler(
280 PlatformThreadId thread_id, 625 PlatformThreadId thread_id,
281 const SamplingParams& params, 626 const SamplingParams& params,
282 const CompletedCallback& callback, 627 const CompletedCallback& callback,
283 NativeStackSamplerTestDelegate* test_delegate) 628 NativeStackSamplerTestDelegate* test_delegate)
284 : thread_id_(thread_id), params_(params), completed_callback_(callback), 629 : thread_id_(thread_id),
285 test_delegate_(test_delegate) { 630 params_(params),
286 } 631 completed_callback_(callback),
632 // The event starts "signaled" so code knows it's safe to start the thread.
633 profiling_inactive_(WaitableEvent::ResetPolicy::MANUAL,
634 WaitableEvent::InitialState::SIGNALED),
635 collection_id_(NULL_COLLECTION_ID),
636 test_delegate_(test_delegate) {}
287 637
288 StackSamplingProfiler::~StackSamplingProfiler() { 638 StackSamplingProfiler::~StackSamplingProfiler() {
639 // Stop is immediate but asynchronous. There is a non-zero probability that
640 // one more sample will be taken after this call returns.
289 Stop(); 641 Stop();
290 if (!sampling_thread_handle_.is_null())
291 PlatformThread::Join(sampling_thread_handle_);
292 }
293 642
294 // static 643 // The behavior of sampling a thread that has exited is undefined and could
295 void StackSamplingProfiler::StartAndRunAsync( 644 // cause Bad Things(tm) to occur. The safety model provided by this class is
296 PlatformThreadId thread_id, 645 // that an instance of this object is expected to live at least as long as
297 const SamplingParams& params, 646 // the thread it is sampling. However, because the sampling is performed
298 const CompletedCallback& callback) { 647 // asynchronously by the SamplingThread, there is no way to guarantee this
299 CHECK(ThreadTaskRunnerHandle::Get()); 648 // is true without waiting for it to signal that it has finished.
300 AsyncRunner::Run(thread_id, params, callback); 649 //
650 // The wait time should, at most, be only as long as it takes to collect one
651 // sample (~200us) or none at all if sampling has already completed.
652 ThreadRestrictions::ScopedAllowWait allow_wait;
653 profiling_inactive_.Wait();
301 } 654 }
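|profiling_inactive_| is a manual-reset event that begins signaled: Start() waits on it and resets it, and the sampling thread signals it again when the collection finishes, which is what lets this destructor block only while a sample is actually in flight. A minimal sketch of such an event with standard primitives (base::WaitableEvent is the real implementation):

```cpp
#include <condition_variable>
#include <mutex>

// Minimal manual-reset event offering the three operations the profiler
// needs. It starts "signaled" so the first Start() does not block.
class ManualResetEvent {
 public:
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return signaled_; });
  }
  void Reset() {
    std::lock_guard<std::mutex> lock(mutex_);
    signaled_ = false;
  }
  void Signal() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      signaled_ = true;
    }
    cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool signaled_ = true;  // Initially signaled, like |profiling_inactive_|.
};
```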
302 655
303 void StackSamplingProfiler::Start() { 656 void StackSamplingProfiler::Start() {
304 if (completed_callback_.is_null()) 657 if (completed_callback_.is_null())
305 return; 658 return;
306 659
307 std::unique_ptr<NativeStackSampler> native_sampler = 660 std::unique_ptr<NativeStackSampler> native_sampler =
308 NativeStackSampler::Create(thread_id_, &RecordAnnotations, 661 NativeStackSampler::Create(thread_id_, &RecordAnnotations,
309 test_delegate_); 662 test_delegate_);
663
310 if (!native_sampler) 664 if (!native_sampler)
311 return; 665 return;
312 666
313 sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_, 667 // Wait for profiling to be "inactive", then reset it for the upcoming run.
314 completed_callback_)); 668 profiling_inactive_.Wait();
315 if (!PlatformThread::Create(0, sampling_thread_.get(), 669 profiling_inactive_.Reset();
Mike Wittman 2017/02/27 23:27:34 We can avoid the manual reset by setting the reset
bcwhite 2017/03/13 18:50:17 True but only because there is currently nothing e
Mike Wittman 2017/03/14 18:57:33 Acknowledged.
316 &sampling_thread_handle_)) 670
317 sampling_thread_.reset(); 671 DCHECK_EQ(NULL_COLLECTION_ID, collection_id_);
672 collection_id_ = SamplingThread::GetInstance()->Add(
673 MakeUnique<SamplingThread::CollectionContext>(
674 thread_id_, params_, completed_callback_, &profiling_inactive_,
675 std::move(native_sampler)));
676 DCHECK_NE(NULL_COLLECTION_ID, collection_id_);
318 } 677 }
319 678
320 void StackSamplingProfiler::Stop() { 679 void StackSamplingProfiler::Stop() {
321 if (sampling_thread_) 680 SamplingThread::GetInstance()->Remove(collection_id_);
322 sampling_thread_->Stop(); 681 collection_id_ = NULL_COLLECTION_ID;
323 } 682 }
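With collections multiplexed onto the shared SamplingThread, several profilers can now run concurrently. A sketch of client usage under the new constructor signature follows (the surrounding function and callback plumbing are hypothetical; with the parameters shown, which match the defaults, one collection is 300 samples at 100 ms, about 30 seconds):

```cpp
#include <memory>

#include "base/profiler/stack_sampling_profiler.h"
#include "base/threading/platform_thread.h"

std::unique_ptr<base::StackSamplingProfiler> StartProfilingCurrentThread(
    const base::StackSamplingProfiler::CompletedCallback& callback) {
  base::StackSamplingProfiler::SamplingParams params;
  params.bursts = 1;
  params.samples_per_burst = 300;
  params.sampling_interval = base::TimeDelta::FromMilliseconds(100);

  std::unique_ptr<base::StackSamplingProfiler> profiler(
      new base::StackSamplingProfiler(base::PlatformThread::CurrentId(),
                                      params, callback, nullptr));
  profiler->Start();  // Adds a collection to the shared SamplingThread.

  // Deleting the profiler (or calling Stop()) removes its collection; the
  // destructor waits until any in-flight sample completes.
  return profiler;
}
```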
324 683
325 // static 684 // static
326 void StackSamplingProfiler::SetProcessMilestone(int milestone) { 685 void StackSamplingProfiler::SetProcessMilestone(int milestone) {
327 DCHECK_LE(0, milestone); 686 DCHECK_LE(0, milestone);
328 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); 687 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
329 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); 688 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
330 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); 689 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
331 } 690 }
332 691
(...skipping 45 matching lines...)
378 } 737 }
379 738
380 bool operator<(const StackSamplingProfiler::Frame &a, 739 bool operator<(const StackSamplingProfiler::Frame &a,
381 const StackSamplingProfiler::Frame &b) { 740 const StackSamplingProfiler::Frame &b) {
382 return (a.module_index < b.module_index) || 741 return (a.module_index < b.module_index) ||
383 (a.module_index == b.module_index && 742 (a.module_index == b.module_index &&
384 a.instruction_pointer < b.instruction_pointer); 743 a.instruction_pointer < b.instruction_pointer);
385 } 744 }
386 745
387 } // namespace base 746 } // namespace base