Chromium Code Reviews
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/profiler/stack_sampling_profiler.h" | 5 #include "base/profiler/stack_sampling_profiler.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <map> | |
| 8 #include <utility> | 9 #include <utility> |
| 9 | 10 |
| 11 #include "base/atomic_sequence_num.h" | |
| 12 #include "base/atomicops.h" | |
| 10 #include "base/bind.h" | 13 #include "base/bind.h" |
| 11 #include "base/bind_helpers.h" | 14 #include "base/bind_helpers.h" |
| 12 #include "base/callback.h" | 15 #include "base/callback.h" |
| 13 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
| 14 #include "base/location.h" | 17 #include "base/location.h" |
| 15 #include "base/macros.h" | 18 #include "base/macros.h" |
| 19 #include "base/memory/ptr_util.h" | |
| 20 #include "base/memory/singleton.h" | |
| 16 #include "base/profiler/native_stack_sampler.h" | 21 #include "base/profiler/native_stack_sampler.h" |
| 17 #include "base/synchronization/lock.h" | 22 #include "base/synchronization/lock.h" |
| 23 #include "base/threading/thread.h" | |
| 24 #include "base/threading/thread_restrictions.h" | |
| 18 #include "base/threading/thread_task_runner_handle.h" | 25 #include "base/threading/thread_task_runner_handle.h" |
| 19 #include "base/timer/elapsed_timer.h" | 26 #include "base/timer/elapsed_timer.h" |
| 20 | 27 |
| 21 namespace base { | 28 namespace base { |
| 22 | 29 |
| 23 namespace { | 30 namespace { |
| 24 | 31 |
| 25 // Used to ensure only one profiler is running at a time. | 32 // This value is used when there is no collection in progress and thus no ID |
| 26 LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER; | 33 // for identifying the active collection to the SamplingThread. |
| 27 | 34 const int NULL_COLLECTION_ID = -1; |
| 28 // AsyncRunner ---------------------------------------------------------------- | |
| 29 | |
| 30 // Helper class to allow a profiler to be run completely asynchronously from the | |
| 31 // initiator, without being concerned with the profiler's lifetime. | |
| 32 class AsyncRunner { | |
| 33 public: | |
| 34 // Sets up a profiler and arranges for it to be deleted on its completed | |
| 35 // callback. | |
| 36 static void Run(PlatformThreadId thread_id, | |
| 37 const StackSamplingProfiler::SamplingParams& params, | |
| 38 const StackSamplingProfiler::CompletedCallback& callback); | |
| 39 | |
| 40 private: | |
| 41 AsyncRunner(); | |
| 42 | |
| 43 // Runs the callback and deletes the AsyncRunner instance. |profiles| is not | |
| 44 // const& because it must be passed with std::move. | |
| 45 static void RunCallbackAndDeleteInstance( | |
| 46 std::unique_ptr<AsyncRunner> object_to_be_deleted, | |
| 47 const StackSamplingProfiler::CompletedCallback& callback, | |
| 48 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
| 49 StackSamplingProfiler::CallStackProfiles profiles); | |
| 50 | |
| 51 std::unique_ptr<StackSamplingProfiler> profiler_; | |
| 52 | |
| 53 DISALLOW_COPY_AND_ASSIGN(AsyncRunner); | |
| 54 }; | |
| 55 | |
| 56 // static | |
| 57 void AsyncRunner::Run( | |
| 58 PlatformThreadId thread_id, | |
| 59 const StackSamplingProfiler::SamplingParams& params, | |
| 60 const StackSamplingProfiler::CompletedCallback &callback) { | |
| 61 std::unique_ptr<AsyncRunner> runner(new AsyncRunner); | |
| 62 AsyncRunner* temp_ptr = runner.get(); | |
| 63 temp_ptr->profiler_.reset( | |
| 64 new StackSamplingProfiler(thread_id, params, | |
| 65 Bind(&AsyncRunner::RunCallbackAndDeleteInstance, | |
| 66 Passed(&runner), callback, | |
| 67 ThreadTaskRunnerHandle::Get()))); | |
| 68 // The callback won't be called until after Start(), so temp_ptr will still | |
| 69 // be valid here. | |
| 70 temp_ptr->profiler_->Start(); | |
| 71 } | |
| 72 | |
| 73 AsyncRunner::AsyncRunner() {} | |
| 74 | |
| 75 void AsyncRunner::RunCallbackAndDeleteInstance( | |
| 76 std::unique_ptr<AsyncRunner> object_to_be_deleted, | |
| 77 const StackSamplingProfiler::CompletedCallback& callback, | |
| 78 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
| 79 StackSamplingProfiler::CallStackProfiles profiles) { | |
| 80 callback.Run(std::move(profiles)); | |
| 81 // Delete the instance on the original calling thread. | |
| 82 task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release()); | |
| 83 } | |
| 84 | 35 |
| 85 void ChangeAtomicFlags(subtle::Atomic32* flags, | 36 void ChangeAtomicFlags(subtle::Atomic32* flags, |
| 86 subtle::Atomic32 set, | 37 subtle::Atomic32 set, |
| 87 subtle::Atomic32 clear) { | 38 subtle::Atomic32 clear) { |
| 88 DCHECK(set != 0 || clear != 0); | 39 DCHECK(set != 0 || clear != 0); |
| 89 DCHECK_EQ(0, set & clear); | 40 DCHECK_EQ(0, set & clear); |
| 90 | 41 |
| 91 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); | 42 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); |
| 92 while (true) { | 43 while (true) { |
| 93 subtle::Atomic32 existing = | 44 subtle::Atomic32 existing = |
| (...skipping 59 matching lines...) | |
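The body of ChangeAtomicFlags visible above starts a compare-and-swap retry loop before the diff viewer elides the rest. As a standalone illustration of that technique, here is a sketch using plain C++11 std::atomic rather than Chromium's base/atomicops; ChangeFlags is a hypothetical name, not code from this CL:

#include <atomic>
#include <cstdint>

// Atomically set the bits in |set| and clear the bits in |clear|, retrying
// until the compare-exchange observes no concurrent modification.
void ChangeFlags(std::atomic<uint32_t>* flags, uint32_t set, uint32_t clear) {
  uint32_t bits = flags->load(std::memory_order_relaxed);
  while (!flags->compare_exchange_weak(bits, (bits | set) & ~clear,
                                       std::memory_order_relaxed)) {
    // On failure, |bits| is reloaded with the current value; retry.
  }
}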
| 153 StackSamplingProfiler::CallStackProfile | 104 StackSamplingProfiler::CallStackProfile |
| 154 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { | 105 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { |
| 155 return CallStackProfile(*this); | 106 return CallStackProfile(*this); |
| 156 } | 107 } |
| 157 | 108 |
| 158 StackSamplingProfiler::CallStackProfile::CallStackProfile( | 109 StackSamplingProfiler::CallStackProfile::CallStackProfile( |
| 159 const CallStackProfile& other) = default; | 110 const CallStackProfile& other) = default; |
| 160 | 111 |
| 161 // StackSamplingProfiler::SamplingThread -------------------------------------- | 112 // StackSamplingProfiler::SamplingThread -------------------------------------- |
| 162 | 113 |
| 163 StackSamplingProfiler::SamplingThread::SamplingThread( | 114 class StackSamplingProfiler::SamplingThread : public Thread { |
|
gab
2017/04/05 20:38:42
This is big enough to warrant its own file and uni
bcwhite
2017/04/06 18:40:18
Acknowledged.
| |
| 164 std::unique_ptr<NativeStackSampler> native_sampler, | 115 public: |
| 116 class TestAPI { | |
| 117 public: | |
| 118 // Resets the existing sampler. This will unfortunately create the object | |
| 119 // unnecessarily if it doesn't already exist but there's no way around that. | |
| 120 static void Reset(); | |
| 121 | |
| 122 // Disables inherent idle-shutdown behavior. | |
| 123 static void DisableIdleShutdown(); | |
| 124 | |
| 125 // Begins an idle shutdown as if the idle-timer had expired and waits for | |
| 126 // it to execute. Since the timer would have only been started at a time | |
| 127 // when the sampling thread actually was idle, this must be called only | |
| 128 // when it is known that there are no active sampling threads. If | |
| 129 // |simulate_intervening_add| is true then, when executed, the shutdown | |
| 130 // task will believe that a new collection has been added since it was | |
| 131 // posted. | |
| 132 static void ShutdownAssumingIdle(bool simulate_intervening_add); | |
| 133 | |
| 134 private: | |
| 135 // Calls the sampling thread's ShutdownTask and then signals an event. | |
| 136 static void ShutdownTaskAndSignalEvent(SamplingThread* sampler, | |
| 137 int add_events, | |
| 138 WaitableEvent* event); | |
| 139 }; | |
| 140 | |
| 141 struct CollectionContext { | |
| 142 CollectionContext(PlatformThreadId target, | |
| 143 const SamplingParams& params, | |
| 144 const CompletedCallback& callback, | |
| 145 WaitableEvent* finished, | |
| 146 std::unique_ptr<NativeStackSampler> sampler) | |
| 147 : collection_id(next_collection_id_.GetNext()), | |
| 148 target(target), | |
| 149 params(params), | |
| 150 callback(callback), | |
| 151 finished(finished), | |
| 152 native_sampler(std::move(sampler)) {} | |
| 153 ~CollectionContext() {} | |
| 154 | |
| 155 // An identifier for this collection, used to uniquely identify it to | |
| 156 // outside interests. | |
| 157 const int collection_id; | |
| 158 | |
| 159 const PlatformThreadId target; // ID of the thread being sampled. | |
| 160 const SamplingParams params; // Information about how to sample. | |
| 161 const CompletedCallback callback; // Callback made when sampling complete. | |
| 162 WaitableEvent* const finished; // Signaled when all sampling complete. | |
| 163 | |
| 164 // Platform-specific module that does the actual sampling. | |
| 165 std::unique_ptr<NativeStackSampler> native_sampler; | |
| 166 | |
| 167 // The absolute time for the next sample. | |
| 168 Time next_sample_time; | |
| 169 | |
| 170 // The time that a profile was started, for calculating the total duration. | |
| 171 Time profile_start_time; | |
| 172 | |
| 173 // Counters that indicate the current position along the acquisition. | |
| 174 int burst = 0; | |
| 175 int sample = 0; | |
| 176 | |
| 177 // The collected stack samples. The active profile is always at the back(). | |
| 178 CallStackProfiles profiles; | |
| 179 | |
| 180 private: | |
| 181 static StaticAtomicSequenceNumber next_collection_id_; | |
| 182 }; | |
| 183 | |
| 184 // Gets the single instance of this class. | |
| 185 static SamplingThread* GetInstance(); | |
| 186 | |
| 187 // Starts the thread. | |
| 188 void Start(); | |
| 189 | |
| 190 // Adds a new CollectionContext to the thread. This can be called externally | |
| 191 // from any thread. This returns an ID that can later be used to stop | |
| 192 // the sampling. | |
| 193 int Add(std::unique_ptr<CollectionContext> collection); | |
| 194 | |
| 195 // Removes an active collection based on its ID, forcing it to run its | |
| 196 // callback if any data has been collected. This can be called externally | |
| 197 // from any thread. | |
| 198 void Remove(int id); | |
| 199 | |
| 200 private: | |
| 201 friend class TestAPI; | |
| 202 friend struct DefaultSingletonTraits<SamplingThread>; | |
| 203 | |
| 204 // The different states in which the sampling-thread can be. | |
| 205 enum ThreadExecutionState { | |
| 206 // The thread is not running because it has never been started. It will be | |
| 207 // started when a sampling request is received. | |
| 208 NOT_STARTED, | |
| 209 | |
| 210 // The thread is running and processing tasks. This is the state when any | |
| 211 // sampling requests are active and during the "idle" period afterward | |
| 212 // before the thread is stopped. | |
| 213 RUNNING, | |
| 214 | |
| 215 // Once all sampling requests have finished and the "idle" period has | |
| 216 // expired, the thread will be set to this state and its shutdown | |
| 217 // initiated. A call to Stop() must be made to ensure the previous thread | |
| 218 // has completely exited before calling Start() and moving back to the | |
| 219 // RUNNING state. | |
| 220 EXITING, | |
| 221 }; | |
| 222 | |
| 223 SamplingThread(); | |
| 224 ~SamplingThread() override; | |
| 225 | |
| 226 // Gets a task runner that is usable from the outside. | |
| 227 scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd(); | |
| 228 scoped_refptr<SingleThreadTaskRunner> GetTaskRunner( | |
| 229 ThreadExecutionState* out_state); | |
| 230 | |
| 231 // Gets the task runner that is usable from the sampling thread itself. | |
| 232 scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread(); | |
| 233 | |
| 234 // Finishes a collection and reports collected data via callback. | |
| 235 void FinishCollection(CollectionContext* collection); | |
| 236 | |
| 237 // Records a single sample of a collection. | |
| 238 void RecordSample(CollectionContext* collection); | |
| 239 | |
| 240 // Check if the sampling thread is idle and begin a shutdown if so. | |
|
gab
2017/04/05 20:38:42
"begin a shutdown if so" sounds weird to me
bcwhite
2017/04/06 18:40:18
Done.
| |
| 241 void ScheduleShutdownIfIdle(); | |
| 242 | |
| 243 // These methods are tasks that get posted to the internal message queue. | |
| 244 void AddCollectionTask(std::unique_ptr<CollectionContext> collection); | |
| 245 void RemoveCollectionTask(int id); | |
| 246 void PerformCollectionTask(int id); | |
| 247 void ShutdownTask(int add_events); | |
| 248 | |
| 249 // Updates |next_sample_time| based on the configured parameters. | |
| 250 // Returns true if there is a next sample or false if sampling is complete. | |
| 251 bool UpdateNextSampleTime(CollectionContext* collection); | |
| 252 | |
| 253 // Thread: | |
| 254 void CleanUp() override; | |
| 255 | |
| 256 // A map of IDs to collection contexts. Because this class is a singleton | |
| 257 // that is never destroyed, context objects will never be destructed except | |
| 258 // by explicit action. Thus, it's acceptable to pass unretained pointers | |
| 259 // to these objects when posting tasks. | |
| 260 std::map<int, std::unique_ptr<CollectionContext>> active_collections_; | |
| 261 | |
| 262 // State maintained about the current execution (or non-execution) of | |
| 263 // the thread. This state must always be accessed while holding the | |
| 264 // lock. A copy of the task-runner is maintained here for use by any | |
| 265 // calling thread; this is necessary because Thread's accessor for it is | |
| 266 // not itself thread-safe. The lock is also used to order calls to the | |
| 267 // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that | |
| 268 // multiple threads may make those calls. | |
| 269 Lock thread_execution_state_lock_; // Protects all thread_execution_state_* | |
| 270 ThreadExecutionState thread_execution_state_ = NOT_STARTED; | |
| 271 scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_; | |
| 272 bool thread_execution_state_disable_idle_shutdown_for_testing_ = false; | |
| 273 | |
| 274 // A counter that notes adds of new collection requests. It is incremented | |
| 275 // when changes occur so that delayed shutdown tasks are able to detect if | |
| 276 // samething new has happened while it was waiting. Like all "execution_state" | |
| 277 // vars, this must be accessed while holding |thread_execution_state_lock_|. | |
| 278 int thread_execution_state_add_events_ = 0; | |
| 279 | |
| 280 DISALLOW_COPY_AND_ASSIGN(SamplingThread); | |
| 281 }; | |
| 282 | |
| 283 // static | |
| 284 void StackSamplingProfiler::SamplingThread::TestAPI::Reset() { | |
| 285 SamplingThread* sampler = SamplingThread::GetInstance(); | |
| 286 | |
| 287 ThreadExecutionState state; | |
| 288 { | |
| 289 AutoLock lock(sampler->thread_execution_state_lock_); | |
| 290 state = sampler->thread_execution_state_; | |
| 291 CHECK(sampler->active_collections_.empty()); | |
|
brettw
2017/04/03 21:42:51
Can this and the two non-debug assertions 2 functi
bcwhite
2017/04/04 12:59:31
I had them DCHECK originally but was told to conve
Alexei Svitkine (slow)
2017/04/04 15:44:26
If they're only called from tests, then what's the
bcwhite
2017/04/04 15:54:49
No problem. The question was whether there was an
Mike Wittman
2017/04/04 17:59:51
I did a spot check of several TestAPI definitions
gab
2017/04/05 20:38:42
Yes, this should be DCHECK, don't CHECK just to fo
bcwhite
2017/04/06 16:18:49
Mike, you asked for them to be CHECK and you're th
Mike Wittman
2017/04/06 16:55:41
Following the guidance from Brett and Gab is OK wi
bcwhite
2017/04/06 18:40:18
Done.
| |
| 292 } | |
| 293 | |
| 294 // Stop the thread and wait for it to exit. This has to be done by | |
| 295 // the thread itself because it has taken ownership of its own lifetime. | |
| 296 if (state == RUNNING) { | |
| 297 ShutdownAssumingIdle(false); | |
| 298 state = EXITING; | |
| 299 } | |
| 300 // Make sure thread is cleaned up since state will be reset to NOT_STARTED. | |
| 301 if (state == EXITING) | |
| 302 sampler->Stop(); | |
| 303 | |
| 304 // Reset internal variables to the just-initialized state. | |
| 305 { | |
| 306 AutoLock lock(sampler->thread_execution_state_lock_); | |
| 307 sampler->thread_execution_state_ = NOT_STARTED; | |
| 308 sampler->thread_execution_state_task_runner_ = nullptr; | |
| 309 sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false; | |
| 310 sampler->thread_execution_state_add_events_ = 0; | |
| 311 } | |
| 312 } | |
| 313 | |
| 314 // static | |
| 315 void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() { | |
| 316 SamplingThread* sampler = SamplingThread::GetInstance(); | |
| 317 | |
| 318 { | |
| 319 AutoLock lock(sampler->thread_execution_state_lock_); | |
| 320 sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true; | |
| 321 } | |
| 322 } | |
| 323 | |
| 324 // static | |
| 325 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownAssumingIdle( | |
| 326 bool simulate_intervening_add) { | |
| 327 SamplingThread* sampler = SamplingThread::GetInstance(); | |
| 328 | |
| 329 ThreadExecutionState state; | |
| 330 scoped_refptr<SingleThreadTaskRunner> task_runner = | |
| 331 sampler->GetTaskRunner(&state); | |
| 332 CHECK_EQ(RUNNING, state); | |
| 333 CHECK(task_runner); | |
| 334 | |
| 335 int add_events; | |
| 336 { | |
| 337 AutoLock lock(sampler->thread_execution_state_lock_); | |
| 338 add_events = sampler->thread_execution_state_add_events_; | |
| 339 if (simulate_intervening_add) | |
| 340 ++sampler->thread_execution_state_add_events_; | |
| 341 } | |
| 342 | |
| 343 WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL, | |
| 344 WaitableEvent::InitialState::NOT_SIGNALED); | |
| 345 // PostTaskAndReply won't work because the thread and its associated | |
| 346 // message loop may be shut down. | |
| 347 task_runner->PostTask(FROM_HERE, | |
| 348 Bind(&ShutdownTaskAndSignalEvent, Unretained(sampler), | |
| 349 add_events, Unretained(&executed))); | |
| 350 executed.Wait(); | |
| 351 } | |
| 352 | |
| 353 // static | |
| 354 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent( | |
|
gab
2017/04/05 20:38:42
You can just post two tasks in a row instead of a
bcwhite
2017/04/06 18:40:18
Wouldn't two successive posts create a race-condit
gab
2017/04/06 19:31:28
Ah, good point, yes. Hadn't realized ShutdownTask(
| |
| 355 SamplingThread* sampler, | |
| 356 int add_events, | |
| 357 WaitableEvent* event) { | |
| 358 sampler->ShutdownTask(add_events); | |
| 359 event->Signal(); | |
| 360 } | |
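For context, the post-then-signal pattern used above (instead of PostTaskAndReply, per the comment in ShutdownAssumingIdle) can be sketched in isolation. RunOnThreadAndWait and DoWorkAndSignal are hypothetical names; the base types are the same ones this file already uses:

namespace {

// Hypothetical helper: does the work on the target thread, then signals.
void DoWorkAndSignal(WaitableEvent* event) {
  // ... work that must execute on the target thread ...
  event->Signal();
}

}  // namespace

// Posts |DoWorkAndSignal| to |task_runner| and blocks the calling thread
// until the posted task has run and signaled the event.
void RunOnThreadAndWait(scoped_refptr<SingleThreadTaskRunner> task_runner) {
  WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
                     WaitableEvent::InitialState::NOT_SIGNALED);
  task_runner->PostTask(FROM_HERE,
                        Bind(&DoWorkAndSignal, Unretained(&done)));
  done.Wait();
}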
| 361 | |
| 362 StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread:: | |
| 363 CollectionContext::next_collection_id_; | |
| 364 | |
| 365 StackSamplingProfiler::SamplingThread::SamplingThread() | |
| 366 : Thread("Chrome_SamplingProfilerThread") {} | |
|
gab
2017/04/05 20:38:42
No need to prefix with "Chrome_", the thread names
bcwhite
2017/04/06 18:40:18
Done.
| |
| 367 | |
| 368 StackSamplingProfiler::SamplingThread::~SamplingThread() { | |
| 369 Stop(); | |
|
gab
2017/04/05 20:38:42
Not necessary, ~Thread() does this already so = de
bcwhite
2017/04/06 18:40:18
Done.
| |
| 370 } | |
| 371 | |
| 372 StackSamplingProfiler::SamplingThread* | |
| 373 StackSamplingProfiler::SamplingThread::GetInstance() { | |
| 374 return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get(); | |
| 375 } | |
| 376 | |
| 377 void StackSamplingProfiler::SamplingThread::Start() { | |
| 378 Thread::Options options; | |
| 379 // Use a higher priority for a more accurate sampling interval. | |
| 380 options.priority = ThreadPriority::DISPLAY; | |
|
gab
2017/04/05 20:38:42
Hmmm I don't think that's appropriate. On Android
bcwhite
2017/04/06 18:40:18
Right.
| |
| 381 Thread::StartWithOptions(options); | |
| 382 } | |
| 383 | |
| 384 int StackSamplingProfiler::SamplingThread::Add( | |
| 385 std::unique_ptr<CollectionContext> collection) { | |
| 386 int id = collection->collection_id; | |
| 387 scoped_refptr<SingleThreadTaskRunner> task_runner = | |
| 388 GetOrCreateTaskRunnerForAdd(); | |
| 389 | |
| 390 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::AddCollectionTask, | |
| 391 Unretained(this), Passed(&collection))); | |
| 392 | |
| 393 return id; | |
| 394 } | |
| 395 | |
| 396 void StackSamplingProfiler::SamplingThread::Remove(int id) { | |
| 397 ThreadExecutionState state; | |
| 398 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state); | |
| 399 if (state != RUNNING) | |
| 400 return; | |
| 401 DCHECK(task_runner); | |
| 402 | |
| 403 // This can fail if the thread were to exit between acquisition of the task | |
| 404 // runner above and the call below. In that case, however, everything has | |
| 405 // stopped so there's no need to try to stop it. | |
| 406 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask, | |
| 407 Unretained(this), id)); | |
| 408 } | |
| 409 | |
| 410 scoped_refptr<SingleThreadTaskRunner> | |
| 411 StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() { | |
| 412 AutoLock lock(thread_execution_state_lock_); | |
| 413 | |
| 414 // The increment of the "add events" count is why this method must be | |
| 415 // called only from Add(). | |
| 416 ++thread_execution_state_add_events_; | |
| 417 | |
| 418 if (thread_execution_state_ == RUNNING) { | |
| 419 DCHECK(thread_execution_state_task_runner_); | |
| 420 // This shouldn't be called from the sampling thread as it's inefficient. | |
| 421 // Use GetTaskRunnerOnSamplingThread() instead. | |
| 422 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId()); | |
| 423 return thread_execution_state_task_runner_; | |
| 424 } | |
| 425 | |
| 426 if (thread_execution_state_ == EXITING) { | |
| 427 // The previous instance has only been partially cleaned up. It is necessary | |
| 428 // to call Stop() before Start(). | |
| 429 Stop(); | |
| 430 } | |
| 431 | |
| 432 // The thread is not running. Start it and get associated runner. The task- | |
| 433 // runner has to be saved for future use because though it can be used from | |
| 434 // any thread, it can be acquired via task_runner() only on the created | |
| 435 // thread and the thread that creates it (i.e. this thread). | |
|
gab
2017/04/05 20:38:42
Add " for thread-safety reasons which are alleviat
bcwhite
2017/04/06 18:40:18
Done.
| |
| 436 Start(); | |
| 437 thread_execution_state_ = RUNNING; | |
| 438 thread_execution_state_task_runner_ = Thread::task_runner(); | |
| 439 | |
| 440 // Detach the sampling thread from the "sequence" (i.e. thread) that | |
| 441 // started it so that it can be self-managed or stopped by another thread. | |
| 442 DetachFromSequence(); | |
| 443 | |
| 444 return thread_execution_state_task_runner_; | |
| 445 } | |
| 446 | |
| 447 scoped_refptr<SingleThreadTaskRunner> | |
| 448 StackSamplingProfiler::SamplingThread::GetTaskRunner( | |
| 449 ThreadExecutionState* out_state) { | |
| 450 AutoLock lock(thread_execution_state_lock_); | |
| 451 if (out_state) | |
| 452 *out_state = thread_execution_state_; | |
| 453 if (thread_execution_state_ == RUNNING) { | |
| 454 // This shouldn't be called from the sampling thread as it's inefficient. | |
| 455 // Use GetTaskRunnerOnSamplingThread() instead. | |
| 456 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId()); | |
| 457 DCHECK(thread_execution_state_task_runner_); | |
| 458 } else { | |
| 459 DCHECK(!thread_execution_state_task_runner_); | |
| 460 } | |
| 461 | |
| 462 return thread_execution_state_task_runner_; | |
| 463 } | |
| 464 | |
| 465 scoped_refptr<SingleThreadTaskRunner> | |
| 466 StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() { | |
| 467 // This should be called only from the sampling thread as it has limited | |
| 468 // accessibility. | |
| 469 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId()); | |
| 470 | |
| 471 return Thread::task_runner(); | |
| 472 } | |
| 473 | |
| 474 void StackSamplingProfiler::SamplingThread::FinishCollection( | |
| 475 CollectionContext* collection) { | |
|
gab
2017/04/05 20:38:42
Add DCHECK_EQ(GetThreadId(), PlatformThread::Curre
bcwhite
2017/04/06 18:40:18
Done.
| |
| 476 // If there is no duration for the final profile (because it was stopped), | |
| 477 // calculate it now. | |
| 478 if (!collection->profiles.empty() && | |
| 479 collection->profiles.back().profile_duration == TimeDelta()) { | |
| 480 collection->profiles.back().profile_duration = | |
| 481 Time::Now() - collection->profile_start_time; | |
| 482 } | |
| 483 | |
| 484 // Extract some information so callback and event-signalling can still be | |
| 485 // done after the collection has been removed from the list of "active" ones. | |
| 486 // This allows the controlling object (and tests using it) to be confident | |
| 487 // that collection is fully finished when those things occur. | |
| 488 const CompletedCallback callback = collection->callback; | |
| 489 CallStackProfiles profiles = std::move(collection->profiles); | |
| 490 WaitableEvent* finished = collection->finished; | |
| 491 | |
| 492 // Remove this collection from the map of known ones. The |collection| | |
| 493 // parameter is invalid after this point. | |
| 494 size_t count = active_collections_.erase(collection->collection_id); | |
| 495 DCHECK_EQ(1U, count); | |
| 496 | |
| 497 // Run the associated callback, passing the collected profiles. | |
| 498 callback.Run(std::move(profiles)); | |
| 499 | |
| 500 // Signal that this collection is finished. | |
| 501 finished->Signal(); | |
| 502 } | |
| 503 | |
| 504 void StackSamplingProfiler::SamplingThread::RecordSample( | |
| 505 CollectionContext* collection) { | |
| 506 DCHECK(collection->native_sampler); | |
| 507 | |
| 508 // If this is the first sample of a burst, a new Profile needs to be created | |
| 509 // and filled. | |
| 510 if (collection->sample == 0) { | |
| 511 collection->profiles.push_back(CallStackProfile()); | |
| 512 CallStackProfile& profile = collection->profiles.back(); | |
| 513 profile.sampling_period = collection->params.sampling_interval; | |
| 514 collection->profile_start_time = Time::Now(); | |
| 515 collection->native_sampler->ProfileRecordingStarting(&profile.modules); | |
| 516 } | |
| 517 | |
| 518 // The currently active profile being captured. | |
| 519 CallStackProfile& profile = collection->profiles.back(); | |
| 520 | |
| 521 // Record a single sample. | |
| 522 profile.samples.push_back(Sample()); | |
| 523 collection->native_sampler->RecordStackSample(&profile.samples.back()); | |
| 524 | |
| 525 // If this is the last sample of a burst, record the total time. | |
| 526 if (collection->sample == collection->params.samples_per_burst - 1) { | |
| 527 profile.profile_duration = Time::Now() - collection->profile_start_time; | |
| 528 collection->native_sampler->ProfileRecordingStopped(); | |
| 529 } | |
| 530 } | |
| 531 | |
| 532 void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() { | |
| 533 if (!active_collections_.empty()) | |
| 534 return; | |
| 535 | |
| 536 int add_events; | |
| 537 { | |
| 538 AutoLock lock(thread_execution_state_lock_); | |
| 539 if (thread_execution_state_disable_idle_shutdown_for_testing_) | |
| 540 return; | |
| 541 add_events = thread_execution_state_add_events_; | |
| 542 } | |
| 543 | |
| 544 GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
| 545 FROM_HERE, | |
| 546 Bind(&SamplingThread::ShutdownTask, Unretained(this), add_events), | |
| 547 TimeDelta::FromSeconds(60)); | |
| 548 } | |
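The "add events" counter here implements a generation-token pattern: the delayed shutdown task carries the count from when it was posted and aborts if the count has since changed. A standalone simulation (not Chromium code), with delayed tasks modeled as a simple queue:

#include <functional>
#include <iostream>
#include <vector>

// Simulates the token check: a delayed shutdown aborts itself if new work
// was added after the task was posted.
struct IdleSampler {
  int add_events = 0;
  std::vector<std::function<void()>> delayed_tasks;

  void Add() { ++add_events; }  // A new collection request arrives.

  void ScheduleShutdownIfIdle() {
    const int token = add_events;  // Capture the count at post time.
    delayed_tasks.push_back([this, token] {
      if (token != add_events) {
        std::cout << "shutdown aborted: new work arrived\n";
        return;
      }
      std::cout << "shutting down\n";
    });
  }
};

int main() {
  IdleSampler sampler;
  sampler.Add();
  sampler.ScheduleShutdownIfIdle();
  sampler.Add();  // An intervening Add() invalidates the pending shutdown.
  for (auto& task : sampler.delayed_tasks)
    task();  // Prints "shutdown aborted: new work arrived".
}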
| 549 | |
| 550 void StackSamplingProfiler::SamplingThread::AddCollectionTask( | |
| 551 std::unique_ptr<CollectionContext> collection) { | |
| 552 const int collection_id = collection->collection_id; | |
| 553 const TimeDelta initial_delay = collection->params.initial_delay; | |
| 554 | |
| 555 active_collections_.insert( | |
| 556 std::make_pair(collection_id, std::move(collection))); | |
| 557 | |
| 558 GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
| 559 FROM_HERE, | |
| 560 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), | |
| 561 collection_id), | |
| 562 initial_delay); | |
| 563 | |
| 564 // Another increment of "add events" serves to invalidate any pending | |
| 565 // shutdown tasks that may have been initiated between the Add() and this | |
| 566 // task running. | |
| 567 { | |
| 568 AutoLock lock(thread_execution_state_lock_); | |
| 569 ++thread_execution_state_add_events_; | |
| 570 } | |
| 571 } | |
| 572 | |
| 573 void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) { | |
| 574 auto found = active_collections_.find(id); | |
| 575 if (found == active_collections_.end()) | |
| 576 return; | |
| 577 | |
| 578 FinishCollection(found->second.get()); | |
| 579 ScheduleShutdownIfIdle(); | |
| 580 } | |
| 581 | |
| 582 void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) { | |
| 583 auto found = active_collections_.find(id); | |
| 584 | |
| 585 // The task won't be found if it has been stopped. | |
| 586 if (found == active_collections_.end()) | |
| 587 return; | |
| 588 | |
| 589 CollectionContext* collection = found->second.get(); | |
| 590 | |
| 591 // Handle first-run with no "next time". | |
| 592 if (collection->next_sample_time == Time()) | |
| 593 collection->next_sample_time = Time::Now(); | |
| 594 | |
| 595 // Do the collection of a single sample. | |
| 596 RecordSample(collection); | |
| 597 | |
| 598 // Update the time of the next sample recording. | |
| 599 if (UpdateNextSampleTime(collection)) { | |
| 600 bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask( | |
| 601 FROM_HERE, | |
| 602 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id), | |
| 603 std::max(collection->next_sample_time - Time::Now(), TimeDelta())); | |
|
gab
2017/04/05 20:38:42
This isn't required I think, negative delays shoul
bcwhite
2017/04/06 18:40:18
There is a DCHECK in incoming_task_queue.cc (line
gab
2017/04/06 19:31:28
Ah ok interesting, probably an artifact but std::m
| |
| 604 DCHECK(success); | |
| 605 } else { | |
| 606 // All capturing has completed so finish the collection. By not re-adding | |
| 607 // it to the task queue, the collection will "expire" (i.e. no further work | |
| 608 // will be done). The |collection| variable will be invalid after this call. | |
| 609 FinishCollection(collection); | |
| 610 ScheduleShutdownIfIdle(); | |
| 611 } | |
| 612 } | |
| 613 | |
| 614 void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) { | |
| 615 // Holding this lock ensures that any attempt to start another job will | |
| 616 // get postponed until thread_execution_state_ is updated, thus eliminating | |
|
gab
2017/04/05 20:38:42
|thread_execution_state_|
bcwhite
2017/04/06 18:40:18
Done.
| |
| 617 // the race in starting a new thread while the previous one is exiting. | |
| 618 AutoLock lock(thread_execution_state_lock_); | |
| 619 | |
| 620 // If the current count of creation requests doesn't match the passed count | |
| 621 // then other tasks have been created since this was posted. Abort shutdown. | |
| 622 if (thread_execution_state_add_events_ != add_events) | |
| 623 return; | |
| 624 | |
| 625 // There can be no new AddCollectionTasks at this point because creating | |
| 626 // those always increments "add events". There may be other requests, like | |
| 627 // Remove, but it's okay to schedule the thread to stop once they've been | |
| 628 // executed (i.e. "soon"). | |
| 629 DCHECK(active_collections_.empty()); | |
| 630 StopSoon(); | |
| 631 | |
| 632 // StopSoon will have set the owning sequence (again) so it must be detached | |
| 633 // (again) in order for Stop/Start to be called (again) should more work | |
| 634 // come in. Holding the |thread_execution_state_lock_| ensures the necessary | |
| 635 // happens-after with regard to this detach and future Thread API calls. | |
| 636 DetachFromSequence(); | |
| 637 | |
| 638 // Set the thread_state variable so the thread will be restarted when new | |
| 639 // work comes in. Remove the thread_execution_state_task_runner_ to avoid | |
|
gab
2017/04/05 20:38:42
|thread_execution_state_task_runner_ | and maybe e
bcwhite
2017/04/06 18:40:18
Done.
| |
| 640 // confusion. | |
| 641 thread_execution_state_ = EXITING; | |
| 642 thread_execution_state_task_runner_ = nullptr; | |
| 643 } | |
| 644 | |
| 645 bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime( | |
| 646 CollectionContext* collection) { | |
| 647 // This will keep a consistent average interval between samples but will | |
| 648 // result in a constant series of acquisitions, thus nearly locking out the | |
| 649 // target thread, if the interval is smaller than the time it takes to | |
| 650 // actually acquire the sample. Anything sampling that quickly is going | |
| 651 // to be a problem anyway so don't worry about it. | |
| 652 if (++collection->sample < collection->params.samples_per_burst) { | |
| 653 collection->next_sample_time += collection->params.sampling_interval; | |
| 654 return true; | |
| 655 } | |
| 656 | |
| 657 if (++collection->burst < collection->params.bursts) { | |
| 658 collection->sample = 0; | |
| 659 collection->next_sample_time += collection->params.burst_interval; | |
| 660 return true; | |
| 661 } | |
| 662 | |
| 663 return false; | |
| 664 } | |
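As a worked illustration of this scheduling logic, the following standalone simulation (hypothetical parameter values, not from the CL) traces the sample times it produces:

#include <cstdio>

// Mirrors UpdateNextSampleTime's counter logic, with times in milliseconds.
int main() {
  const int bursts = 2;
  const int samples_per_burst = 3;
  const int sampling_interval = 100;  // ms between samples within a burst.
  const int burst_interval = 10000;   // ms between bursts.

  int burst = 0, sample = 0, next_sample_time = 0;
  for (;;) {
    printf("burst %d, sample %d at t=%d ms\n", burst, sample,
           next_sample_time);
    if (++sample < samples_per_burst) {
      next_sample_time += sampling_interval;
    } else if (++burst < bursts) {
      sample = 0;
      next_sample_time += burst_interval;
    } else {
      break;  // No next sample; the collection would be finished here.
    }
  }
  // Output: samples at t = 0, 100, 200, 10200, 10300, 10400 ms.
}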
| 665 | |
| 666 void StackSamplingProfiler::SamplingThread::CleanUp() { | |
| 667 // There should be no collections remaining when the thread stops. | |
| 668 DCHECK(active_collections_.empty()); | |
| 669 | |
| 670 // Let the parent clean up. | |
| 671 Thread::CleanUp(); | |
| 672 } | |
| 673 | |
| 674 // StackSamplingProfiler ------------------------------------------------------ | |
| 675 | |
| 676 // static | |
| 677 void StackSamplingProfiler::TestAPI::Reset() { | |
| 678 SamplingThread::TestAPI::Reset(); | |
| 679 ResetAnnotations(); | |
| 680 } | |
| 681 | |
| 682 // static | |
| 683 void StackSamplingProfiler::TestAPI::ResetAnnotations() { | |
| 684 subtle::NoBarrier_Store(&process_milestones_, 0u); | |
| 685 } | |
| 686 | |
| 687 // static | |
| 688 bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() { | |
| 689 return SamplingThread::GetInstance()->IsRunning(); | |
| 690 } | |
| 691 | |
| 692 // static | |
| 693 void StackSamplingProfiler::TestAPI::DisableIdleShutdown() { | |
| 694 SamplingThread::TestAPI::DisableIdleShutdown(); | |
| 695 } | |
| 696 | |
| 697 // static | |
| 698 void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown( | |
| 699 bool simulate_intervening_start) { | |
| 700 SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start); | |
| 701 } | |
| 702 | |
| 703 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; | |
| 704 | |
| 705 StackSamplingProfiler::StackSamplingProfiler( | |
| 165 const SamplingParams& params, | 706 const SamplingParams& params, |
| 166 const CompletedCallback& completed_callback) | 707 const CompletedCallback& callback, |
| 167 : native_sampler_(std::move(native_sampler)), | 708 NativeStackSamplerTestDelegate* test_delegate) |
| 168 params_(params), | 709 : StackSamplingProfiler(base::PlatformThread::CurrentId(), |
| 169 stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC, | 710 params, |
| 170 WaitableEvent::InitialState::NOT_SIGNALED), | 711 callback, |
| 171 completed_callback_(completed_callback) {} | 712 test_delegate) {} |
| 172 | |
| 173 StackSamplingProfiler::SamplingThread::~SamplingThread() {} | |
| 174 | |
| 175 void StackSamplingProfiler::SamplingThread::ThreadMain() { | |
| 176 PlatformThread::SetName("Chrome_SamplingProfilerThread"); | |
| 177 | |
| 178 // For now, just ignore any requests to profile while another profiler is | |
| 179 // working. | |
| 180 if (!concurrent_profiling_lock.Get().Try()) | |
| 181 return; | |
| 182 | |
| 183 CallStackProfiles profiles; | |
| 184 CollectProfiles(&profiles); | |
| 185 concurrent_profiling_lock.Get().Release(); | |
| 186 completed_callback_.Run(std::move(profiles)); | |
| 187 } | |
| 188 | |
| 189 // Depending on how long the sampling takes and the length of the sampling | |
| 190 // interval, a burst of samples could take arbitrarily longer than | |
| 191 // samples_per_burst * sampling_interval. In this case, we (somewhat | |
| 192 // arbitrarily) honor the number of samples requested rather than strictly | |
| 193 // adhering to the sampling intervals. Once we have established users for the | |
| 194 // StackSamplingProfiler and the collected data to judge, we may go the other | |
| 195 // way or make this behavior configurable. | |
| 196 void StackSamplingProfiler::SamplingThread::CollectProfile( | |
| 197 CallStackProfile* profile, | |
| 198 TimeDelta* elapsed_time, | |
| 199 bool* was_stopped) { | |
| 200 ElapsedTimer profile_timer; | |
| 201 native_sampler_->ProfileRecordingStarting(&profile->modules); | |
| 202 profile->sampling_period = params_.sampling_interval; | |
| 203 *was_stopped = false; | |
| 204 TimeDelta previous_elapsed_sample_time; | |
| 205 for (int i = 0; i < params_.samples_per_burst; ++i) { | |
| 206 if (i != 0) { | |
| 207 // Always wait, even if for 0 seconds, so we can observe a signal on | |
| 208 // stop_event_. | |
| 209 if (stop_event_.TimedWait( | |
| 210 std::max(params_.sampling_interval - previous_elapsed_sample_time, | |
| 211 TimeDelta()))) { | |
| 212 *was_stopped = true; | |
| 213 break; | |
| 214 } | |
| 215 } | |
| 216 ElapsedTimer sample_timer; | |
| 217 profile->samples.push_back(Sample()); | |
| 218 native_sampler_->RecordStackSample(&profile->samples.back()); | |
| 219 previous_elapsed_sample_time = sample_timer.Elapsed(); | |
| 220 } | |
| 221 | |
| 222 *elapsed_time = profile_timer.Elapsed(); | |
| 223 profile->profile_duration = *elapsed_time; | |
| 224 native_sampler_->ProfileRecordingStopped(); | |
| 225 } | |
| 226 | |
| 227 // In an analogous manner to CollectProfile() and samples exceeding the expected | |
| 228 // total sampling time, bursts may also exceed the burst_interval. We adopt the | |
| 229 // same wait-and-see approach here. | |
| 230 void StackSamplingProfiler::SamplingThread::CollectProfiles( | |
| 231 CallStackProfiles* profiles) { | |
| 232 if (stop_event_.TimedWait(params_.initial_delay)) | |
| 233 return; | |
| 234 | |
| 235 TimeDelta previous_elapsed_profile_time; | |
| 236 for (int i = 0; i < params_.bursts; ++i) { | |
| 237 if (i != 0) { | |
| 238 // Always wait, even if for 0 seconds, so we can observe a signal on | |
| 239 // stop_event_. | |
| 240 if (stop_event_.TimedWait( | |
| 241 std::max(params_.burst_interval - previous_elapsed_profile_time, | |
| 242 TimeDelta()))) | |
| 243 return; | |
| 244 } | |
| 245 | |
| 246 CallStackProfile profile; | |
| 247 bool was_stopped = false; | |
| 248 CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped); | |
| 249 if (!profile.samples.empty()) | |
| 250 profiles->push_back(std::move(profile)); | |
| 251 | |
| 252 if (was_stopped) | |
| 253 return; | |
| 254 } | |
| 255 } | |
| 256 | |
| 257 void StackSamplingProfiler::SamplingThread::Stop() { | |
| 258 stop_event_.Signal(); | |
| 259 } | |
| 260 | |
| 261 // StackSamplingProfiler ------------------------------------------------------ | |
| 262 | |
| 263 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0; | |
| 264 | |
| 265 StackSamplingProfiler::StackSamplingProfiler( | |
| 266 PlatformThreadId thread_id, | |
| 267 const SamplingParams& params, | |
| 268 const CompletedCallback& callback) | |
| 269 : StackSamplingProfiler(thread_id, params, callback, nullptr) {} | |
| 270 | 713 |
| 271 StackSamplingProfiler::StackSamplingProfiler( | 714 StackSamplingProfiler::StackSamplingProfiler( |
| 272 PlatformThreadId thread_id, | 715 PlatformThreadId thread_id, |
| 273 const SamplingParams& params, | 716 const SamplingParams& params, |
| 274 const CompletedCallback& callback, | 717 const CompletedCallback& callback, |
| 275 NativeStackSamplerTestDelegate* test_delegate) | 718 NativeStackSamplerTestDelegate* test_delegate) |
| 276 : thread_id_(thread_id), params_(params), completed_callback_(callback), | 719 : thread_id_(thread_id), |
| 277 test_delegate_(test_delegate) { | 720 params_(params), |
| 278 } | 721 completed_callback_(callback), |
| 722 // The event starts "signaled" so code knows it's safe to start thread. | |
| 723 profiling_inactive_(WaitableEvent::ResetPolicy::MANUAL, | |
| 724 WaitableEvent::InitialState::SIGNALED), | |
| 725 collection_id_(NULL_COLLECTION_ID), | |
| 726 test_delegate_(test_delegate) {} | |
| 279 | 727 |
| 280 StackSamplingProfiler::~StackSamplingProfiler() { | 728 StackSamplingProfiler::~StackSamplingProfiler() { |
| 729 // Stop is immediate but asynchronous. There is a non-zero probability that | |
| 730 // one more sample will be taken after this call returns. | |
| 281 Stop(); | 731 Stop(); |
| 282 if (!sampling_thread_handle_.is_null()) | 732 |
| 283 PlatformThread::Join(sampling_thread_handle_); | 733 // The behavior of sampling a thread that has exited is undefined and could |
| 284 } | 734 // cause Bad Things(tm) to occur. The safety model provided by this class is |
| 285 | 735 // that an instance of this object is expected to live at least as long as |
| 286 // static | 736 // the thread it is sampling. However, because the sampling is performed |
| 287 void StackSamplingProfiler::StartAndRunAsync( | 737 // asynchronously by the SamplingThread, there is no way to guarantee this |
| 288 PlatformThreadId thread_id, | 738 // is true without waiting for it to signal that it has finished. |
| 289 const SamplingParams& params, | 739 // |
| 290 const CompletedCallback& callback) { | 740 // The wait time should, at most, be only as long as it takes to collect one |
| 291 CHECK(ThreadTaskRunnerHandle::Get()); | 741 // sample (~200us) or none at all if sampling has already completed. |
| 292 AsyncRunner::Run(thread_id, params, callback); | 742 ThreadRestrictions::ScopedAllowWait allow_wait; |
| 743 profiling_inactive_.Wait(); | |
| 293 } | 744 } |
| 294 | 745 |
| 295 void StackSamplingProfiler::Start() { | 746 void StackSamplingProfiler::Start() { |
| 296 if (completed_callback_.is_null()) | 747 if (completed_callback_.is_null()) |
| 297 return; | 748 return; |
| 298 | 749 |
| 299 std::unique_ptr<NativeStackSampler> native_sampler = | 750 std::unique_ptr<NativeStackSampler> native_sampler = |
| 300 NativeStackSampler::Create(thread_id_, &RecordAnnotations, | 751 NativeStackSampler::Create(thread_id_, &RecordAnnotations, |
| 301 test_delegate_); | 752 test_delegate_); |
| 753 | |
| 302 if (!native_sampler) | 754 if (!native_sampler) |
| 303 return; | 755 return; |
| 304 | 756 |
| 305 sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_, | 757 // Wait for profiling to be "inactive", then reset it for the upcoming run. |
| 306 completed_callback_)); | 758 profiling_inactive_.Wait(); |
| 307 if (!PlatformThread::Create(0, sampling_thread_.get(), | 759 profiling_inactive_.Reset(); |
|
gab
2017/04/05 20:38:42
Use a ResetPolicy::AUTOMATIC WaitableEvent?
bcwhite
2017/04/06 18:40:18
There are other Wait calls on this that don't rese
gab
2017/04/06 19:31:28
The only other call I see is in the destructor (at
| |
| 308 &sampling_thread_handle_)) | 760 |
| 309 sampling_thread_.reset(); | 761 DCHECK_EQ(NULL_COLLECTION_ID, collection_id_); |
| 762 collection_id_ = SamplingThread::GetInstance()->Add( | |
| 763 MakeUnique<SamplingThread::CollectionContext>( | |
| 764 thread_id_, params_, completed_callback_, &profiling_inactive_, | |
| 765 std::move(native_sampler))); | |
| 766 DCHECK_NE(NULL_COLLECTION_ID, collection_id_); | |
| 310 } | 767 } |
| 311 | 768 |
| 312 void StackSamplingProfiler::Stop() { | 769 void StackSamplingProfiler::Stop() { |
| 313 if (sampling_thread_) | 770 SamplingThread::GetInstance()->Remove(collection_id_); |
| 314 sampling_thread_->Stop(); | 771 collection_id_ = NULL_COLLECTION_ID; |
| 315 } | 772 } |
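A minimal usage sketch of the resulting API. The parameter values are hypothetical; the SamplingParams field names and the four-argument constructor are taken from this file, with a null test delegate passed explicitly:

// Hypothetical caller, assuming the types shown in this CL.
void OnProfileCompleted(StackSamplingProfiler::CallStackProfiles profiles) {
  // Consume the collected call-stack profiles.
}

void ProfileCurrentThread() {
  StackSamplingProfiler::SamplingParams params;
  params.initial_delay = TimeDelta::FromMilliseconds(0);
  params.bursts = 1;
  params.samples_per_burst = 300;
  params.sampling_interval = TimeDelta::FromMilliseconds(100);

  StackSamplingProfiler profiler(PlatformThread::CurrentId(), params,
                                 Bind(&OnProfileCompleted),
                                 nullptr /* test_delegate */);
  profiler.Start();
  // ... let the burst run to completion, or call profiler.Stop() to end it
  // early; the destructor waits until the collection is fully inactive.
}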
| 316 | 773 |
| 317 // static | 774 // static |
| 318 void StackSamplingProfiler::SetProcessMilestone(int milestone) { | 775 void StackSamplingProfiler::SetProcessMilestone(int milestone) { |
| 319 DCHECK_LE(0, milestone); | 776 DCHECK_LE(0, milestone); |
| 320 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); | 777 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); |
| 321 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); | 778 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); |
| 322 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); | 779 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); |
| 323 } | 780 } |
| 324 | 781 |
| 325 // static | 782 // static |
| 326 void StackSamplingProfiler::ResetAnnotationsForTesting() { | |
| 327 subtle::NoBarrier_Store(&process_milestones_, 0u); | |
| 328 } | |
| 329 | |
| 330 // static | |
| 331 void StackSamplingProfiler::RecordAnnotations(Sample* sample) { | 783 void StackSamplingProfiler::RecordAnnotations(Sample* sample) { |
| 332 // The code inside this method must not do anything that could acquire a | 784 // The code inside this method must not do anything that could acquire a |
| 333 // mutex, including allocating memory (which includes LOG messages) because | 785 // mutex, including allocating memory (which includes LOG messages) because |
| 334 // that mutex could be held by a stopped thread, thus resulting in deadlock. | 786 // that mutex could be held by a stopped thread, thus resulting in deadlock. |
| 335 sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_); | 787 sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_); |
| 336 } | 788 } |
| 337 | 789 |
| 338 // StackSamplingProfiler::Frame global functions ------------------------------ | 790 // StackSamplingProfiler::Frame global functions ------------------------------ |
| 339 | 791 |
| 340 bool operator==(const StackSamplingProfiler::Module& a, | 792 bool operator==(const StackSamplingProfiler::Module& a, |
| (...skipping 29 matching lines...) | |
| 370 } | 822 } |
| 371 | 823 |
| 372 bool operator<(const StackSamplingProfiler::Frame &a, | 824 bool operator<(const StackSamplingProfiler::Frame &a, |
| 373 const StackSamplingProfiler::Frame &b) { | 825 const StackSamplingProfiler::Frame &b) { |
| 374 return (a.module_index < b.module_index) || | 826 return (a.module_index < b.module_index) || |
| 375 (a.module_index == b.module_index && | 827 (a.module_index == b.module_index && |
| 376 a.instruction_pointer < b.instruction_pointer); | 828 a.instruction_pointer < b.instruction_pointer); |
| 377 } | 829 } |
| 378 | 830 |
| 379 } // namespace base | 831 } // namespace base |