Chromium Code Reviews

Side by Side Diff: base/profiler/stack_sampling_profiler.cc

Issue 2554123002: Support parallel captures from the StackSamplingProfiler. (Closed)
Patch Set: support for death of thread-under-test | Created 3 years, 10 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/profiler/stack_sampling_profiler.h" 5 #include "base/profiler/stack_sampling_profiler.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <map>
8 #include <utility> 9 #include <utility>
9 10
11 #include "base/atomic_sequence_num.h"
12 #include "base/atomicops.h"
10 #include "base/bind.h" 13 #include "base/bind.h"
11 #include "base/bind_helpers.h" 14 #include "base/bind_helpers.h"
12 #include "base/callback.h" 15 #include "base/callback.h"
13 #include "base/lazy_instance.h" 16 #include "base/lazy_instance.h"
14 #include "base/location.h" 17 #include "base/location.h"
15 #include "base/macros.h" 18 #include "base/macros.h"
19 #include "base/memory/ptr_util.h"
20 #include "base/memory/singleton.h"
16 #include "base/profiler/native_stack_sampler.h" 21 #include "base/profiler/native_stack_sampler.h"
17 #include "base/synchronization/lock.h" 22 #include "base/synchronization/lock.h"
23 #include "base/threading/thread.h"
18 #include "base/threading/thread_task_runner_handle.h" 24 #include "base/threading/thread_task_runner_handle.h"
19 #include "base/timer/elapsed_timer.h" 25 #include "base/timer/elapsed_timer.h"
20 26
21 namespace base { 27 namespace base {
22 28
23 namespace { 29 namespace {
24 30
25 // Used to ensure only one profiler is running at a time.
26 LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
27
28 // AsyncRunner ---------------------------------------------------------------- 31 // AsyncRunner ----------------------------------------------------------------
29 32
30 // Helper class to allow a profiler to be run completely asynchronously from the 33 // Helper class to allow a profiler to be run completely asynchronously from the
31 // initiator, without being concerned with the profiler's lifetime. 34 // initiator, without being concerned with the profiler's lifetime.
32 class AsyncRunner { 35 class AsyncRunner {
33 public: 36 public:
34 // Sets up a profiler and arranges for it to be deleted on its completed 37 // Sets up a profiler and arranges for it to be deleted on its completed
35 // callback. 38 // callback.
36 static void Run(PlatformThreadId thread_id, 39 static void Run(PlatformThreadId thread_id,
37 const StackSamplingProfiler::SamplingParams& params, 40 const StackSamplingProfiler::SamplingParams& params,
(...skipping 115 matching lines...)
153 StackSamplingProfiler::CallStackProfile 156 StackSamplingProfiler::CallStackProfile
154 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { 157 StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
155 return CallStackProfile(*this); 158 return CallStackProfile(*this);
156 } 159 }
157 160
158 StackSamplingProfiler::CallStackProfile::CallStackProfile( 161 StackSamplingProfiler::CallStackProfile::CallStackProfile(
159 const CallStackProfile& other) = default; 162 const CallStackProfile& other) = default;
160 163
161 // StackSamplingProfiler::SamplingThread -------------------------------------- 164 // StackSamplingProfiler::SamplingThread --------------------------------------
162 165
163 StackSamplingProfiler::SamplingThread::SamplingThread( 166 class StackSamplingProfiler::SamplingThread : public Thread {
164 std::unique_ptr<NativeStackSampler> native_sampler, 167 public:
165 const SamplingParams& params, 168 struct CollectionContext {
166 const CompletedCallback& completed_callback) 169 CollectionContext(PlatformThreadId target,
167 : native_sampler_(std::move(native_sampler)), 170 const SamplingParams& params,
168 params_(params), 171 const CompletedCallback& callback,
169 stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC, 172 std::unique_ptr<NativeStackSampler> sampler)
170 WaitableEvent::InitialState::NOT_SIGNALED), 173 : collection_id(next_collection_id_.GetNext()),
171 completed_callback_(completed_callback) {} 174 target(target),
172 175 params(params),
173 StackSamplingProfiler::SamplingThread::~SamplingThread() {} 176 callback(callback),
174 177 native_sampler(std::move(sampler)) {}
175 void StackSamplingProfiler::SamplingThread::ThreadMain() { 178 ~CollectionContext() {}
176 PlatformThread::SetName("Chrome_SamplingProfilerThread"); 179
177 180 // An identifier for this collection, used to uniquely identify it to
178 // For now, just ignore any requests to profile while another profiler is 181 // outside interests.
179 // working. 182 const int collection_id;
180 if (!concurrent_profiling_lock.Get().Try()) 183
184 Time next_sample_time;
185
186 PlatformThreadId target;
187 SamplingParams params;
188 CompletedCallback callback;
189
190 std::unique_ptr<NativeStackSampler> native_sampler;
191
192 // Counters that indicate the current position along the acquisition.
193 int burst = 0;
194 int sample = 0;
195
196 // The time that a profile was started, for calculating the total duration.
197 Time profile_start_time;
198
199 // The collected stack samples. The active profile is always at the back().
200 CallStackProfiles profiles;
201
202 private:
203 static StaticAtomicSequenceNumber next_collection_id_;
204 };
205
206 // Gets the single instance of this class.
207 static SamplingThread* GetInstance();
208
209 // Starts the thread.
210 void Start();
211
212 // Adds a new CollectionContext to the thread. This can be called externally
213 // from any thread. This returns an ID that can later be used to stop
214 // the sampling.
215 int Add(std::unique_ptr<CollectionContext> collection);
216
217 // Removes an active collection based on its ID, forcing it to run its
218 // callback if any data has been collected. This can be called externally
219 // from any thread.
220 void Remove(int id);
221
222 // Removes all active collections and stops the underlying thread.
223 void Shutdown();
224
225 // Begins an idle shutdown as if the idle-timer had expired.
226 void ShutdownIfIdle();
227
228 // Undoes the "permanent" effect of Shutdown() so the thread can restart.
229 void UndoShutdown();
230
231 // Sets the number of ms to wait after becoming idle before shutting down.
232 // Set to zero to disable.
233 void SetIdleShutdownTime(int shutdown_ms);
234
235 private:
236 SamplingThread();
237 ~SamplingThread() override;
238 friend struct DefaultSingletonTraits<SamplingThread>;
239
240 // Get task runner that is usable from the outside.
241 scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunner();
242 scoped_refptr<SingleThreadTaskRunner> GetTaskRunner();
243
244 // Get task runner that is usable from the sampling thread itself.
245 scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
246
247 // Finishes a collection and reports collected data via callback.
248 void FinishCollection(CollectionContext* collection);
249
250 // Records a single sample of a collection.
251 void RecordSample(CollectionContext* collection);
252
253 // Check if the sampling thread is idle.
254 void CheckForIdle();
255
256 // These methods are tasks that get posted to the internal message queue.
257 void AddCollectionTask(std::unique_ptr<CollectionContext> collection_ptr);
258 void RemoveCollectionTask(int id);
259 void PerformCollectionTask(int id);
260 void ShutdownTask();
261
262 // Updates the |next_sample_time| time based on configured parameters.
263 bool UpdateNextSampleTime(CollectionContext* collection);
264
265 // Thread:
266 void CleanUp() override;
267
268 // The task-runner for the sampling thread and some information about it.
269 // This must always be accessed while holding the lock. The saved task-runner
270 // can be freely used by any calling thread.
271 scoped_refptr<SingleThreadTaskRunner> task_runner_;
272 bool task_runner_forced_shutdown_ = false;
273 int task_runner_create_requests_ = 0;
274 TimeDelta task_runner_idle_shutdown_time_ = TimeDelta::FromSeconds(5);
275 Lock task_runner_lock_;
276
277 // A map of IDs to collection contexts. Because this class is a singleton
278 // that is never destroyed, context objects will never be destructed except
279 // by explicit action. Thus, it's acceptable to pass unretained pointers
280 // to these objects when posting tasks.
281 std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
282
283 DISALLOW_COPY_AND_ASSIGN(SamplingThread);
284 };
285
286 StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread::
287 CollectionContext::next_collection_id_;
288
289 StackSamplingProfiler::SamplingThread::SamplingThread()
290 : Thread("Chrome_SamplingProfilerThread") {}
291
292 StackSamplingProfiler::SamplingThread::~SamplingThread() {
293 Thread::Stop();
294 }
295
296 StackSamplingProfiler::SamplingThread*
297 StackSamplingProfiler::SamplingThread::GetInstance() {
298 return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
299 }
300
301 void StackSamplingProfiler::SamplingThread::Start() {
302 Thread::Options options;
303 // Use a higher priority for a more accurate sampling interval.
304 options.priority = ThreadPriority::DISPLAY;
305 Thread::StartWithOptions(options);
306 }
307
308 int StackSamplingProfiler::SamplingThread::Add(
309 std::unique_ptr<CollectionContext> collection) {
310 int id = collection->collection_id;
311 scoped_refptr<SingleThreadTaskRunner> task_runner = GetOrCreateTaskRunner();
312
313 // There may be no task-runner if the sampling thread has been permanently
314 // shut down.
315 if (task_runner) {
316 task_runner->PostTask(
317 FROM_HERE, Bind(&SamplingThread::AddCollectionTask, Unretained(this),
318 Passed(&collection)));
319 }
320 return id;
321 }
322
323 void StackSamplingProfiler::SamplingThread::Remove(int id) {
324 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
325 if (!task_runner)
326 return; // Everything has already stopped.
327
328 // This can fail if the thread were to exit between acquisition of the task
329 // runner above and the call below. In that case, however, everything has
330 // stopped so there's no need to try to stop it.
331 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask,
332 Unretained(this), id));
333 }
334
335 void StackSamplingProfiler::SamplingThread::Shutdown() {
336 // Record that a shutdown has been requested so nothing can cause it to
337 // start up again.
338 {
339 AutoLock lock(task_runner_lock_);
340 task_runner_forced_shutdown_ = true;
341 }
342
343 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
344 if (!task_runner)
345 return; // Everything has already stopped.
346
347 // This can fail if the thread were to exit between acquisition of the task
348 // runner above and the call below. In that case, however, everything has
349 // stopped so there's no need to do anything.
350 task_runner->PostTask(FROM_HERE,
351 Bind(&SamplingThread::ShutdownTask, Unretained(this)));
352
353 // Now that a task has been posted, calling Stop() will block until that task
354 // has been executed.
355 Stop();
356 }
357
358 void StackSamplingProfiler::SamplingThread::ShutdownIfIdle() {
359 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner();
360 if (!task_runner)
361 return; // Everything has already stopped.
362
363 // ShutdownTask will check if the thread is idle and skip the shutdown if not.
364 task_runner->PostTask(FROM_HERE,
365 Bind(&SamplingThread::ShutdownTask, Unretained(this)));
366 }
367
368 void StackSamplingProfiler::SamplingThread::UndoShutdown() {
369 {
370 AutoLock lock(task_runner_lock_);
371 task_runner_forced_shutdown_ = false;
372 }
373 }
374
375 void StackSamplingProfiler::SamplingThread::SetIdleShutdownTime(
376 int shutdown_ms) {
377 AutoLock lock(task_runner_lock_);
378 task_runner_idle_shutdown_time_ = TimeDelta::FromMilliseconds(shutdown_ms);
379 }
380
381 scoped_refptr<SingleThreadTaskRunner>
382 StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunner() {
383 AutoLock lock(task_runner_lock_);
384 ++task_runner_create_requests_;
385 if (!task_runner_) {
386 // If a forced shutdown has been done, don't let it restart.
387 if (task_runner_forced_shutdown_)
388 return nullptr;
389 // If this is not the first time the sampling thread has been launched, the
390 // previous instance has only been partially cleaned up. It is necessary
391 // to call Stop() before Start(). This is safe even if the thread has never
392 // been started.
393 Stop();
394 // The thread is not running. Start it and get associated runner. The task-
395 // runner has to be saved for future use because though it can be used from
396 // any thread, it can be acquired via task_runner() only on the created
397 // thread and the thread that creates it (i.e. this thread).
398 Start();
399 task_runner_ = Thread::task_runner();
400 // Detach the sampling thread from the "sequence" (i.e. thread) that
401 // started it so that it can be self-managed or stopped by another
402 // thread.
403 DetachFromSequence();
404 } else {
405 // This shouldn't be called from the sampling thread as it's inefficient.
406 // Use GetTaskRunnerOnSamplingThread() instead.
407 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
408 }
409
410 return task_runner_;
411 }
412
413 scoped_refptr<SingleThreadTaskRunner>
414 StackSamplingProfiler::SamplingThread::GetTaskRunner() {
415 // This shouldn't be called from the sampling thread as it's inefficient. Use
416 // GetTaskRunnerOnSamplingThread() instead.
417 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
418
419 AutoLock lock(task_runner_lock_);
420 return task_runner_;
421 }
422
423 scoped_refptr<SingleThreadTaskRunner>
424 StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
425 // This should be called only from the sampling thread as it has limited
426 // accessibility.
427 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
428
429 return Thread::task_runner();
430 }
431
432 void StackSamplingProfiler::SamplingThread::FinishCollection(
433 CollectionContext* collection) {
434 // If there is no duration for the final profile (because it was stopped),
435 // calculate it now.
436 if (!collection->profiles.empty() &&
437 collection->profiles.back().profile_duration == TimeDelta()) {
438 collection->profiles.back().profile_duration =
439 Time::Now() - collection->profile_start_time;
440 }
441
442 // Run the associated callback, passing the collected profiles. It's okay to
443 // move them because this collection is about to be deleted.
444 collection->callback.Run(std::move(collection->profiles));
445
446 // Remove this collection from the map of known ones. This must be done
447 // last as the |collection| parameter is invalid after this point.
448 size_t count = active_collections_.erase(collection->collection_id);
449 DCHECK_EQ(1U, count);
450 }
451
452 void StackSamplingProfiler::SamplingThread::RecordSample(
453 CollectionContext* collection) {
454 DCHECK(collection->native_sampler);
455
456 // If this is the first sample of a burst, a new Profile needs to be created
457 // and filled.
458 if (collection->sample == 0) {
459 collection->profiles.push_back(CallStackProfile());
460 CallStackProfile& profile = collection->profiles.back();
461 profile.sampling_period = collection->params.sampling_interval;
462 collection->profile_start_time = Time::Now();
463 collection->native_sampler->ProfileRecordingStarting(&profile.modules);
464 }
465
466 // The currently active profile being captured.
467 CallStackProfile& profile = collection->profiles.back();
468
469 // Record a single sample.
470 profile.samples.push_back(Sample());
471 Sample& sample = profile.samples.back();
472 collection->native_sampler->RecordStackSample(&sample);
473
474 // An empty result indicates that the thread under test is gone.
Mike Wittman 2017/01/31 22:14:27 I don't think relying on SuspendThread to error ou
bcwhite 2017/02/01 14:47:29 While reuse of thread-ids is possible, I don't thi
Mike Wittman 2017/02/01 17:59:53 I think this is a serious concern, and requires a
bcwhite 2017/02/01 19:11:16 I don't understand. The StackSamplingProfiler lif
Mike Wittman 2017/02/01 20:26:51 That's true, the current interface allows an arbit
bcwhite 2017/02/01 20:37:59 If the call were made while the thread was suspend
Mike Wittman 2017/02/01 21:19:04 There would still be a race between the time the t
bcwhite 2017/02/01 22:01:47 The thread creation time would be captured during
Mike Wittman 2017/02/02 02:48:05 I am not convinced that we've enumerated and addre
bcwhite 2017/02/02 14:24:25 Undocumented? GetThreadTimes is a published and s
Mike Wittman 2017/02/02 19:22:31 Several points: 1. Running the check after the sa
bcwhite 2017/02/02 20:46:16 100ms? If the check happens immediately after the
Mike Wittman 2017/02/02 22:29:15 Are you presuming the GetThreadTimes call is made
bcwhite 2017/02/03 13:28:38 Okay, you've convinced me: If a thread were to exi
Mike Wittman 2017/02/03 17:24:15 No, sorry, I don't agree. Support for other OS's
bcwhite 2017/02/03 18:47:58 Correct. Or at least to a different CL.
Mike Wittman 2017/02/04 01:09:29 The complexity is inherent in this change: it adds
bcwhite 2017/02/04 02:07:01 Not true. I can remove completely everything I ad
Mike Wittman 2017/02/06 20:54:06 Even if the interface is updated to be the same ac
475 if (sample.frames.empty()) {
476 // Indicate that collection is complete so it stops below and finishes.
477 // The empty frame remains as an indicator during analysis that the
478 // thread exited.
479 collection->sample = collection->params.samples_per_burst - 1;
480 collection->burst = collection->params.bursts - 1;
481 }
482
483 // If this is the last sample of a burst, record the total time.
484 if (collection->sample == collection->params.samples_per_burst - 1) {
485 profile.profile_duration = Time::Now() - collection->profile_start_time;
486 collection->native_sampler->ProfileRecordingStopped();
487 }
488 }
489
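The review thread above questions whether an empty frame set from RecordStackSample is a reliable signal that the thread under test has exited, since thread ids can be reused. One approach raised in that discussion is to capture the target thread's creation time when profiling starts and later compare it via GetThreadTimes. Below is a minimal, Windows-only sketch of that idea; it is not part of this patch, and the struct and function names are invented for illustration.

    #include <windows.h>

    // Identity captured once, when profiling of the target thread begins.
    struct ThreadIdentity {
      FILETIME creation_time;
    };

    // Returns true if |thread| still refers to the originally profiled thread.
    // A recycled thread id would report a different creation time.
    bool IsSameThread(HANDLE thread, const ThreadIdentity& original) {
      FILETIME creation, exit_time, kernel, user;
      if (!::GetThreadTimes(thread, &creation, &exit_time, &kernel, &user))
        return false;  // Handle is no longer valid; treat the thread as gone.
      return creation.dwHighDateTime == original.creation_time.dwHighDateTime &&
             creation.dwLowDateTime == original.creation_time.dwLowDateTime;
    }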
490 void StackSamplingProfiler::SamplingThread::CheckForIdle() {
491 if (!active_collections_.empty())
181 return; 492 return;
182 493
183 CallStackProfiles profiles; 494 AutoLock lock(task_runner_lock_);
184 CollectProfiles(&profiles); 495 if (!task_runner_idle_shutdown_time_.is_zero()) {
185 concurrent_profiling_lock.Get().Release(); 496 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
186 completed_callback_.Run(std::move(profiles)); 497 FROM_HERE, Bind(&SamplingThread::ShutdownTask, Unretained(this)),
187 } 498 task_runner_idle_shutdown_time_);
188 499 }
189 // Depending on how long the sampling takes and the length of the sampling 500 }
190 // interval, a burst of samples could take arbitrarily longer than 501
191 // samples_per_burst * sampling_interval. In this case, we (somewhat 502 void StackSamplingProfiler::SamplingThread::AddCollectionTask(
192 // arbitrarily) honor the number of samples requested rather than strictly 503 std::unique_ptr<CollectionContext> collection_ptr) {
193 // adhering to the sampling intervals. Once we have established users for the 504 // Ownership of the collection is going to be given to a map but a pointer
194 // StackSamplingProfiler and the collected data to judge, we may go the other 505 // to it will be needed later.
195 // way or make this behavior configurable. 506 CollectionContext* collection = collection_ptr.get();
196 void StackSamplingProfiler::SamplingThread::CollectProfile( 507 active_collections_.insert(
197 CallStackProfile* profile, 508 std::make_pair(collection->collection_id, std::move(collection_ptr)));
198 TimeDelta* elapsed_time, 509
199 bool* was_stopped) { 510 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
200 ElapsedTimer profile_timer; 511 FROM_HERE, Bind(&SamplingThread::PerformCollectionTask, Unretained(this),
201 native_sampler_->ProfileRecordingStarting(&profile->modules); 512 collection->collection_id),
202 profile->sampling_period = params_.sampling_interval; 513 collection->params.initial_delay);
203 *was_stopped = false; 514 }
204 TimeDelta previous_elapsed_sample_time; 515
205 for (int i = 0; i < params_.samples_per_burst; ++i) { 516 void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
206 if (i != 0) { 517 auto found = active_collections_.find(id);
207 // Always wait, even if for 0 seconds, so we can observe a signal on 518 if (found == active_collections_.end())
208 // stop_event_. 519 return;
209 if (stop_event_.TimedWait( 520
210 std::max(params_.sampling_interval - previous_elapsed_sample_time, 521 FinishCollection(found->second.get());
211 TimeDelta()))) { 522 CheckForIdle();
212 *was_stopped = true; 523 }
213 break; 524
214 } 525 void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
526 auto found = active_collections_.find(id);
527
528 // The task won't be found if it has been stopped.
529 if (found == active_collections_.end())
530 return;
531
532 CollectionContext* collection = found->second.get();
533
534 // Handle first-run with no "next time".
535 if (collection->next_sample_time == Time())
536 collection->next_sample_time = Time::Now();
537
538 // Do the collection of a single sample.
539 RecordSample(collection);
540
541 // Update the time of the next sample recording.
542 if (UpdateNextSampleTime(collection)) {
543 bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
544 FROM_HERE,
545 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id),
546 std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
547 DCHECK(success);
548 } else {
549 // All capturing has completed so finish the collection. Let object expire.
550 // The |collection| variable will be invalid after this call.
551 FinishCollection(collection);
552 CheckForIdle();
553 }
554 }
555
556 void StackSamplingProfiler::SamplingThread::ShutdownTask() {
557 // Holding this lock ensures that any attempt to start another job will
558 // get postponed until StopSoon can run thus eliminating the race.
559 AutoLock lock(task_runner_lock_);
560
561 // If this is a forced, permanent shutdown, stop all active collections.
562 if (task_runner_forced_shutdown_) {
563 // FinishCollection will remove the entry thus invalidating any iterator.
564 while (!active_collections_.empty())
565 FinishCollection(active_collections_.begin()->second.get());
566 } else {
567 // If active_collections_ is not empty, something new has arrived since
568 // this task got posted. Abort the shutdown so it can be processed.
569 if (!active_collections_.empty())
570 return;
571 // It's possible that a new AddCollectionTask has been posted after this
572 // task. Reset the "create requests" counter and try again after any other
573 // pending tasks.
574 if (task_runner_create_requests_ > 0 && task_runner_) {
575 task_runner_create_requests_ = 0;
576 task_runner_->PostTask(
577 FROM_HERE, Bind(&SamplingThread::ShutdownTask, Unretained(this)));
578 return;
215 } 579 }
216 ElapsedTimer sample_timer; 580 // There can be no new AddCollectionTasks at this point because creating
217 profile->samples.push_back(Sample()); 581 // those always increments "create requests". There may be other requests,
218 native_sampler_->RecordStackSample(&profile->samples.back()); 582 // like Remove, but it's okay to schedule the thread to stop once they've
219 previous_elapsed_sample_time = sample_timer.Elapsed(); 583 // been executed (i.e. "soon").
220 } 584 }
221 585
222 *elapsed_time = profile_timer.Elapsed(); 586 // Stop the underlying thread as soon as all immediate tasks are complete.
223 profile->profile_duration = *elapsed_time; 587 // Calling Stop() directly would result in deadlock.
224 native_sampler_->ProfileRecordingStopped(); 588 StopSoon();
225 } 589
226 590 // StopSoon will have set the owning sequence (again) so it must be detached
227 // In an analogous manner to CollectProfile() and samples exceeding the expected 591 // (again) in order for Stop/Start to be called (again) should more work
228 // total sampling time, bursts may also exceed the burst_interval. We adopt the 592 // come in. Holding the |task_runner_lock_| ensures the necessary happens-
229 // same wait-and-see approach here. 593 // after with regard to this detach and future Thread API calls.
230 void StackSamplingProfiler::SamplingThread::CollectProfiles( 594 DetachFromSequence();
231 CallStackProfiles* profiles) { 595
232 if (stop_event_.TimedWait(params_.initial_delay)) 596 // Clear the task_runner_ variable so the thread will be restarted when
233 return; 597 // new work comes in.
234 598 task_runner_ = nullptr;
235 TimeDelta previous_elapsed_profile_time; 599 }
236 for (int i = 0; i < params_.bursts; ++i) { 600
237 if (i != 0) { 601 bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
238 // Always wait, even if for 0 seconds, so we can observe a signal on 602 CollectionContext* collection) {
239 // stop_event_. 603 if (++collection->sample < collection->params.samples_per_burst) {
240 if (stop_event_.TimedWait( 604 collection->next_sample_time += collection->params.sampling_interval;
241 std::max(params_.burst_interval - previous_elapsed_profile_time, 605 return true;
242 TimeDelta()))) 606 }
243 return; 607
244 } 608 // This will keep a consistent average interval between samples but will
245 609 // result in constant series of acquisitions, thus nearly locking out the
246 CallStackProfile profile; 610 // target thread, if the interval is smaller than the time it takes to
247 bool was_stopped = false; 611 // actually acquire the sample. Anything sampling that quickly is going
248 CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped); 612 // to be a problem anyway so don't worry about it.
249 if (!profile.samples.empty()) 613 if (++collection->burst < collection->params.bursts) {
250 profiles->push_back(std::move(profile)); 614 collection->sample = 0;
251 615 collection->next_sample_time += collection->params.burst_interval;
252 if (was_stopped) 616 return true;
253 return; 617 }
254 } 618
255 } 619 return false;
256 620 }
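As a concrete illustration of the catch-up behaviour described in the comment inside UpdateNextSampleTime above (illustrative only, not part of the patch): assume a 100 ms sampling_interval and a first sample that takes 250 ms to acquire, with later samples effectively instantaneous. next_sample_time advances by the configured interval each time, the delay passed to PostDelayedTask clamps at zero until the schedule catches up, and the average spacing settles back to 100 ms.

    sample index                  0       1        2        3        4
    next_sample_time              t0      t0+100   t0+200   t0+300   t0+400
    time actually recorded        t0      t0+250   t0+250   t0+300   t0+400
    delay posted for next sample  0 ms    0 ms     50 ms    100 ms   100 ms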
257 void StackSamplingProfiler::SamplingThread::Stop() { 621
258 stop_event_.Signal(); 622 void StackSamplingProfiler::SamplingThread::CleanUp() {
623 // There should be no collections remaining when the thread stops.
624 DCHECK(active_collections_.empty());
625
626 // Let the parent clean up.
627 Thread::CleanUp();
259 } 628 }
260 629
261 // StackSamplingProfiler ------------------------------------------------------ 630 // StackSamplingProfiler ------------------------------------------------------
262 631
263 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; 632 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
264 633
265 StackSamplingProfiler::SamplingParams::SamplingParams() 634 StackSamplingProfiler::SamplingParams::SamplingParams()
266 : initial_delay(TimeDelta::FromMilliseconds(0)), 635 : initial_delay(TimeDelta::FromMilliseconds(0)),
267 bursts(1), 636 bursts(1),
268 burst_interval(TimeDelta::FromMilliseconds(10000)), 637 burst_interval(TimeDelta::FromMilliseconds(10000)),
(...skipping 11 matching lines...)
280 PlatformThreadId thread_id, 649 PlatformThreadId thread_id,
281 const SamplingParams& params, 650 const SamplingParams& params,
282 const CompletedCallback& callback, 651 const CompletedCallback& callback,
283 NativeStackSamplerTestDelegate* test_delegate) 652 NativeStackSamplerTestDelegate* test_delegate)
284 : thread_id_(thread_id), params_(params), completed_callback_(callback), 653 : thread_id_(thread_id), params_(params), completed_callback_(callback),
285 test_delegate_(test_delegate) { 654 test_delegate_(test_delegate) {
286 } 655 }
287 656
288 StackSamplingProfiler::~StackSamplingProfiler() { 657 StackSamplingProfiler::~StackSamplingProfiler() {
289 Stop(); 658 Stop();
290 if (!sampling_thread_handle_.is_null())
291 PlatformThread::Join(sampling_thread_handle_);
292 } 659 }
293 660
294 // static 661 // static
295 void StackSamplingProfiler::StartAndRunAsync( 662 void StackSamplingProfiler::StartAndRunAsync(
296 PlatformThreadId thread_id, 663 PlatformThreadId thread_id,
297 const SamplingParams& params, 664 const SamplingParams& params,
298 const CompletedCallback& callback) { 665 const CompletedCallback& callback) {
299 CHECK(ThreadTaskRunnerHandle::Get()); 666 CHECK(ThreadTaskRunnerHandle::Get());
300 AsyncRunner::Run(thread_id, params, callback); 667 AsyncRunner::Run(thread_id, params, callback);
301 } 668 }
302 669
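For orientation, a hypothetical caller of this asynchronous entry point might look like the sketch below. It is not part of this CL: the SamplingParams field names follow their uses elsewhere in this file, CompletedCallback is assumed to be a base::Callback taking CallStackProfiles by value, and the caller must be on a thread with a ThreadTaskRunnerHandle per the CHECK above.

    #include "base/bind.h"
    #include "base/profiler/stack_sampling_profiler.h"
    #include "base/threading/platform_thread.h"
    #include "base/time/time.h"

    namespace {

    // Runs on the caller's task runner once all bursts have been collected.
    void OnProfilesCollected(
        base::StackSamplingProfiler::CallStackProfiles profiles) {
      // Consume or record the collected profiles here.
    }

    }  // namespace

    void ProfileCurrentThreadOnce() {
      base::StackSamplingProfiler::SamplingParams params;
      params.initial_delay = base::TimeDelta::FromMilliseconds(0);
      params.bursts = 1;
      params.samples_per_burst = 300;
      params.sampling_interval = base::TimeDelta::FromMilliseconds(100);

      base::StackSamplingProfiler::StartAndRunAsync(
          base::PlatformThread::CurrentId(), params,
          base::Bind(&OnProfilesCollected));
    }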
303 void StackSamplingProfiler::Start() { 670 void StackSamplingProfiler::Start() {
304 if (completed_callback_.is_null()) 671 if (completed_callback_.is_null())
305 return; 672 return;
306 673
307 std::unique_ptr<NativeStackSampler> native_sampler = 674 std::unique_ptr<NativeStackSampler> native_sampler =
308 NativeStackSampler::Create(thread_id_, &RecordAnnotations, 675 NativeStackSampler::Create(thread_id_, &RecordAnnotations,
309 test_delegate_); 676 test_delegate_);
310 if (!native_sampler) 677 if (!native_sampler)
311 return; 678 return;
312 679
313 sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_, 680 collection_id_ = SamplingThread::GetInstance()->Add(
314 completed_callback_)); 681 MakeUnique<SamplingThread::CollectionContext>(
315 if (!PlatformThread::Create(0, sampling_thread_.get(), 682 thread_id_, params_, completed_callback_, std::move(native_sampler)));
316 &sampling_thread_handle_))
317 sampling_thread_.reset();
318 } 683 }
319 684
320 void StackSamplingProfiler::Stop() { 685 void StackSamplingProfiler::Stop() {
321 if (sampling_thread_) 686 SamplingThread::GetInstance()->Remove(collection_id_);
322 sampling_thread_->Stop();
323 } 687 }
324 688
325 // static 689 // static
690 void StackSamplingProfiler::Shutdown() {
691 SamplingThread::GetInstance()->Shutdown();
692 }
693
694 // static
695 void StackSamplingProfiler::UndoShutdownForTesting() {
696 SamplingThread::GetInstance()->UndoShutdown();
697 }
698
699 // static
700 bool StackSamplingProfiler::IsSamplingThreadRunningForTesting() {
701 return SamplingThread::GetInstance()->IsRunning();
702 }
703
704 // static
705 void StackSamplingProfiler::SetSamplingThreadIdleShutdownTimeForTesting(
706 int shutdown_ms) {
707 SamplingThread::GetInstance()->SetIdleShutdownTime(shutdown_ms);
708 }
709
710 // static
711 void StackSamplingProfiler::InitiateSamplingThreadIdleShutdownForTesting() {
712 SamplingThread::GetInstance()->ShutdownIfIdle();
713 }
714
715 // static
326 void StackSamplingProfiler::SetProcessMilestone(int milestone) { 716 void StackSamplingProfiler::SetProcessMilestone(int milestone) {
327 DCHECK_LE(0, milestone); 717 DCHECK_LE(0, milestone);
328 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); 718 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
329 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); 719 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
330 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); 720 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
331 } 721 }
332 722
333 // static 723 // static
334 void StackSamplingProfiler::ResetAnnotationsForTesting() { 724 void StackSamplingProfiler::ResetAnnotationsForTesting() {
335 subtle::NoBarrier_Store(&process_milestones_, 0u); 725 subtle::NoBarrier_Store(&process_milestones_, 0u);
(...skipping 42 matching lines...)
378 } 768 }
379 769
380 bool operator<(const StackSamplingProfiler::Frame &a, 770 bool operator<(const StackSamplingProfiler::Frame &a,
381 const StackSamplingProfiler::Frame &b) { 771 const StackSamplingProfiler::Frame &b) {
382 return (a.module_index < b.module_index) || 772 return (a.module_index < b.module_index) ||
383 (a.module_index == b.module_index && 773 (a.module_index == b.module_index &&
384 a.instruction_pointer < b.instruction_pointer); 774 a.instruction_pointer < b.instruction_pointer);
385 } 775 }
386 776
387 } // namespace base 777 } // namespace base