| Index: base/profiler/stack_sampling_profiler.cc
|
| diff --git a/base/profiler/stack_sampling_profiler.cc b/base/profiler/stack_sampling_profiler.cc
|
| index fa98bed116bb9b759ab610e5e0cd7ec77abc44a9..0263a9f81af95b8d605e09c685d9fd21e9489bcb 100644
|
| --- a/base/profiler/stack_sampling_profiler.cc
|
| +++ b/base/profiler/stack_sampling_profiler.cc
|
| @@ -5,16 +5,23 @@
|
| #include "base/profiler/stack_sampling_profiler.h"
|
|
|
| #include <algorithm>
|
| +#include <map>
|
| #include <utility>
|
|
|
| +#include "base/atomic_sequence_num.h"
|
| +#include "base/atomicops.h"
|
| #include "base/bind.h"
|
| #include "base/bind_helpers.h"
|
| #include "base/callback.h"
|
| #include "base/lazy_instance.h"
|
| #include "base/location.h"
|
| #include "base/macros.h"
|
| +#include "base/memory/ptr_util.h"
|
| +#include "base/memory/singleton.h"
|
| #include "base/profiler/native_stack_sampler.h"
|
| #include "base/synchronization/lock.h"
|
| +#include "base/threading/thread.h"
|
| +#include "base/threading/thread_restrictions.h"
|
| #include "base/threading/thread_task_runner_handle.h"
|
| #include "base/timer/elapsed_timer.h"
|
|
|
| @@ -22,65 +29,9 @@ namespace base {
|
|
|
| namespace {
|
|
|
| -// Used to ensure only one profiler is running at a time.
|
| -LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
|
| -
|
| -// AsyncRunner ----------------------------------------------------------------
|
| -
|
| -// Helper class to allow a profiler to be run completely asynchronously from the
|
| -// initiator, without being concerned with the profiler's lifetime.
|
| -class AsyncRunner {
|
| - public:
|
| - // Sets up a profiler and arranges for it to be deleted on its completed
|
| - // callback.
|
| - static void Run(PlatformThreadId thread_id,
|
| - const StackSamplingProfiler::SamplingParams& params,
|
| - const StackSamplingProfiler::CompletedCallback& callback);
|
| -
|
| - private:
|
| - AsyncRunner();
|
| -
|
| - // Runs the callback and deletes the AsyncRunner instance. |profiles| is not
|
| - // const& because it must be passed with std::move.
|
| - static void RunCallbackAndDeleteInstance(
|
| - std::unique_ptr<AsyncRunner> object_to_be_deleted,
|
| - const StackSamplingProfiler::CompletedCallback& callback,
|
| - scoped_refptr<SingleThreadTaskRunner> task_runner,
|
| - StackSamplingProfiler::CallStackProfiles profiles);
|
| -
|
| - std::unique_ptr<StackSamplingProfiler> profiler_;
|
| -
|
| - DISALLOW_COPY_AND_ASSIGN(AsyncRunner);
|
| -};
|
| -
|
| -// static
|
| -void AsyncRunner::Run(
|
| - PlatformThreadId thread_id,
|
| - const StackSamplingProfiler::SamplingParams& params,
|
| - const StackSamplingProfiler::CompletedCallback &callback) {
|
| - std::unique_ptr<AsyncRunner> runner(new AsyncRunner);
|
| - AsyncRunner* temp_ptr = runner.get();
|
| - temp_ptr->profiler_.reset(
|
| - new StackSamplingProfiler(thread_id, params,
|
| - Bind(&AsyncRunner::RunCallbackAndDeleteInstance,
|
| - Passed(&runner), callback,
|
| - ThreadTaskRunnerHandle::Get())));
|
| - // The callback won't be called until after Start(), so temp_ptr will still
|
| - // be valid here.
|
| - temp_ptr->profiler_->Start();
|
| -}
|
| -
|
| -AsyncRunner::AsyncRunner() {}
|
| -
|
| -void AsyncRunner::RunCallbackAndDeleteInstance(
|
| - std::unique_ptr<AsyncRunner> object_to_be_deleted,
|
| - const StackSamplingProfiler::CompletedCallback& callback,
|
| - scoped_refptr<SingleThreadTaskRunner> task_runner,
|
| - StackSamplingProfiler::CallStackProfiles profiles) {
|
| - callback.Run(std::move(profiles));
|
| - // Delete the instance on the original calling thread.
|
| - task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
|
| -}
|
| +// This value is used when there is no collection in progress and thus no ID
|
| +// with which to refer to the active collection on the SamplingThread.
|
| +const int NULL_COLLECTION_ID = -1;
|
|
|
| void ChangeAtomicFlags(subtle::Atomic32* flags,
|
| subtle::Atomic32 set,
|
| @@ -160,136 +111,647 @@ StackSamplingProfiler::CallStackProfile::CallStackProfile(
|
|
|
| // StackSamplingProfiler::SamplingThread --------------------------------------
|
|
|
| -StackSamplingProfiler::SamplingThread::SamplingThread(
|
| - std::unique_ptr<NativeStackSampler> native_sampler,
|
| - const SamplingParams& params,
|
| - const CompletedCallback& completed_callback)
|
| - : native_sampler_(std::move(native_sampler)),
|
| - params_(params),
|
| - stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
|
| - WaitableEvent::InitialState::NOT_SIGNALED),
|
| - completed_callback_(completed_callback) {}
|
| +class StackSamplingProfiler::SamplingThread : public Thread {
|
| + public:
|
| + class TestAPI {
|
| + public:
|
| + // Reset the existing sampler. This will unfortunately create the object
|
| + // unnecessarily if it doesn't already exist but there's no way around that.
|
| + static void Reset();
|
| +
|
| + // Disables inherent idle-shutdown behavior.
|
| + static void DisableIdleShutdown();
|
| +
|
| +    // Begins an idle shutdown as if the idle-timer had expired and waits for
|
| + // it to execute. Since the timer would have only been started at a time
|
| + // when the sampling thread actually was idle, this must be called only
|
| +    // when it is known that there are no active collections. If
|
| + // |simulate_intervening_add| is true then, when executed, the shutdown
|
| + // task will believe that a new collection has been added since it was
|
| + // posted.
|
| + static void ShutdownAssumingIdle(bool simulate_intervening_add);
|
| +
|
| + private:
|
| +    // Calls the sampling thread's ShutdownTask and then signals an event.
|
| + static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
|
| + int add_events,
|
| + WaitableEvent* event);
|
| + };
|
| +
|
| + struct CollectionContext {
|
| + CollectionContext(PlatformThreadId target,
|
| + const SamplingParams& params,
|
| + const CompletedCallback& callback,
|
| + WaitableEvent* finished,
|
| + std::unique_ptr<NativeStackSampler> sampler)
|
| + : collection_id(next_collection_id_.GetNext()),
|
| + target(target),
|
| + params(params),
|
| + callback(callback),
|
| + finished(finished),
|
| + native_sampler(std::move(sampler)) {}
|
| + ~CollectionContext() {}
|
| +
|
| + // An identifier for this collection, used to uniquely identify it to
|
| + // outside interests.
|
| + const int collection_id;
|
| +
|
| +    const PlatformThreadId target; // ID of the thread being sampled.
|
| + const SamplingParams params; // Information about how to sample.
|
| + const CompletedCallback callback; // Callback made when sampling complete.
|
| + WaitableEvent* const finished; // Signaled when all sampling complete.
|
| +
|
| + // Platform-specific module that does the actual sampling.
|
| + std::unique_ptr<NativeStackSampler> native_sampler;
|
| +
|
| + // The absolute time for the next sample.
|
| + Time next_sample_time;
|
| +
|
| + // The time that a profile was started, for calculating the total duration.
|
| + Time profile_start_time;
|
| +
|
| + // Counters that indicate the current position along the acquisition.
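|
| +    // For example (illustrative numbers), with params.bursts = 2 and
|
| +    // params.samples_per_burst = 300, |sample| runs 0..299 within each burst
|
| +    // and |burst| runs 0..1; both are advanced by UpdateNextSampleTime().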
|
| + int burst = 0;
|
| + int sample = 0;
|
| +
|
| + // The collected stack samples. The active profile is always at the back().
|
| + CallStackProfiles profiles;
|
| +
|
| + private:
|
| + static StaticAtomicSequenceNumber next_collection_id_;
|
| + };
|
| +
|
| + // Gets the single instance of this class.
|
| + static SamplingThread* GetInstance();
|
| +
|
| + // Adds a new CollectionContext to the thread. This can be called externally
|
| + // from any thread. This returns an ID that can later be used to stop
|
| + // the sampling.
|
| + int Add(std::unique_ptr<CollectionContext> collection);
|
| +
|
| + // Removes an active collection based on its ID, forcing it to run its
|
| + // callback if any data has been collected. This can be called externally
|
| + // from any thread.
|
| + void Remove(int id);
|
|
|
| -StackSamplingProfiler::SamplingThread::~SamplingThread() {}
|
| + private:
|
| + friend class TestAPI;
|
| + friend struct DefaultSingletonTraits<SamplingThread>;
|
| +
|
| + // The different states in which the sampling-thread can be.
|
| + enum ThreadExecutionState {
|
| + // The thread is not running because it has never been started. It will be
|
| + // started when a sampling request is received.
|
| + NOT_STARTED,
|
| +
|
| + // The thread is running and processing tasks. This is the state when any
|
| + // sampling requests are active and during the "idle" period afterward
|
| + // before the thread is stopped.
|
| + RUNNING,
|
| +
|
| + // Once all sampling requests have finished and the "idle" period has
|
| + // expired, the thread will be set to this state and its shutdown
|
| + // initiated. A call to Stop() must be made to ensure the previous thread
|
| + // has completely exited before calling Start() and moving back to the
|
| + // RUNNING state.
|
| + EXITING,
|
| + };
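|
| +  // Illustrative lifecycle (editor's summary of the transitions below):
|
| +  // NOT_STARTED -> RUNNING on the first Add(), which calls Start();
|
| +  // RUNNING -> EXITING when ShutdownTask() runs after the idle period;
|
| +  // EXITING -> RUNNING when a later Add() calls Stop() and then Start()
|
| +  // (see GetOrCreateTaskRunnerForAdd()).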
|
| +
|
| + SamplingThread();
|
| + ~SamplingThread() override;
|
| +
|
| + // Get task runner that is usable from the outside.
|
| + scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
|
| + scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
|
| + ThreadExecutionState* out_state);
|
| +
|
| + // Get task runner that is usable from the sampling thread itself.
|
| + scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
|
| +
|
| + // Finishes a collection and reports collected data via callback.
|
| + void FinishCollection(CollectionContext* collection);
|
| +
|
| + // Records a single sample of a collection.
|
| + void RecordSample(CollectionContext* collection);
|
| +
|
| + // Check if the sampling thread is idle and begin a shutdown if it is.
|
| + void ScheduleShutdownIfIdle();
|
| +
|
| + // These methods are tasks that get posted to the internal message queue.
|
| + void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
|
| + void RemoveCollectionTask(int id);
|
| + void PerformCollectionTask(int id);
|
| + void ShutdownTask(int add_events);
|
| +
|
| + // Updates the |next_sample_time| time based on configured parameters.
|
| + // Returns true if there is a next sample or false if sampling is complete.
|
| + bool UpdateNextSampleTime(CollectionContext* collection);
|
| +
|
| + // Thread:
|
| + void CleanUp() override;
|
| +
|
| + // A map of IDs to collection contexts. Because this class is a singleton
|
| + // that is never destroyed, context objects will never be destructed except
|
| + // by explicit action. Thus, it's acceptable to pass unretained pointers
|
| + // to these objects when posting tasks.
|
| + std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
|
| +
|
| + // State maintained about the current execution (or non-execution) of
|
| + // the thread. This state must always be accessed while holding the
|
| + // lock. A copy of the task-runner is maintained here for use by any
|
| + // calling thread; this is necessary because Thread's accessor for it is
|
| + // not itself thread-safe. The lock is also used to order calls to the
|
| + // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
|
| + // multiple threads may make those calls.
|
| + Lock thread_execution_state_lock_; // Protects all thread_execution_state_*
|
| + ThreadExecutionState thread_execution_state_ = NOT_STARTED;
|
| + scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_;
|
| + bool thread_execution_state_disable_idle_shutdown_for_testing_ = false;
|
| +
|
| + // A counter that notes adds of new collection requests. It is incremented
|
| + // when changes occur so that delayed shutdown tasks are able to detect if
|
| +  // something new has happened while they were waiting. Like all "execution_state"
|
| + // vars, this must be accessed while holding |thread_execution_state_lock_|.
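|
| +  // For example, if the count is 3 when a shutdown task is posted and an
|
| +  // Add() arrives before that task runs, the count becomes 4 and
|
| +  // ShutdownTask() aborts because the counts no longer match.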
|
| + int thread_execution_state_add_events_ = 0;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(SamplingThread);
|
| +};
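|
| +
|
| +// Note (editor's cross-reference): callers never use SamplingThread directly;
|
| +// StackSamplingProfiler::Start() below hands a CollectionContext to
|
| +// GetInstance()->Add(), and Stop() cancels it via GetInstance()->Remove().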
|
|
|
| -void StackSamplingProfiler::SamplingThread::ThreadMain() {
|
| - PlatformThread::SetName("Chrome_SamplingProfilerThread");
|
| +// static
|
| +void StackSamplingProfiler::SamplingThread::TestAPI::Reset() {
|
| + SamplingThread* sampler = SamplingThread::GetInstance();
|
| +
|
| + ThreadExecutionState state;
|
| + {
|
| + AutoLock lock(sampler->thread_execution_state_lock_);
|
| + state = sampler->thread_execution_state_;
|
| + DCHECK(sampler->active_collections_.empty());
|
| + }
|
| +
|
| +  // Stop the thread and wait for it to exit. This has to be done by
|
| + // the thread itself because it has taken ownership of its own lifetime.
|
| + if (state == RUNNING) {
|
| + ShutdownAssumingIdle(false);
|
| + state = EXITING;
|
| + }
|
| + // Make sure thread is cleaned up since state will be reset to NOT_STARTED.
|
| + if (state == EXITING)
|
| + sampler->Stop();
|
| +
|
| + // Reset internal variables to the just-initialized state.
|
| + {
|
| + AutoLock lock(sampler->thread_execution_state_lock_);
|
| + sampler->thread_execution_state_ = NOT_STARTED;
|
| + sampler->thread_execution_state_task_runner_ = nullptr;
|
| + sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
|
| + sampler->thread_execution_state_add_events_ = 0;
|
| + }
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() {
|
| + SamplingThread* sampler = SamplingThread::GetInstance();
|
| +
|
| + {
|
| + AutoLock lock(sampler->thread_execution_state_lock_);
|
| + sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
|
| + }
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownAssumingIdle(
|
| + bool simulate_intervening_add) {
|
| + SamplingThread* sampler = SamplingThread::GetInstance();
|
| +
|
| + ThreadExecutionState state;
|
| + scoped_refptr<SingleThreadTaskRunner> task_runner =
|
| + sampler->GetTaskRunner(&state);
|
| + DCHECK_EQ(RUNNING, state);
|
| + DCHECK(task_runner);
|
| +
|
| + int add_events;
|
| + {
|
| + AutoLock lock(sampler->thread_execution_state_lock_);
|
| + add_events = sampler->thread_execution_state_add_events_;
|
| + if (simulate_intervening_add)
|
| + ++sampler->thread_execution_state_add_events_;
|
| + }
|
|
|
| - // For now, just ignore any requests to profile while another profiler is
|
| - // working.
|
| - if (!concurrent_profiling_lock.Get().Try())
|
| + WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
|
| + WaitableEvent::InitialState::NOT_SIGNALED);
|
| + // PostTaskAndReply won't work because thread and associated message-loop may
|
| + // be shut down.
|
| + task_runner->PostTask(FROM_HERE,
|
| + Bind(&ShutdownTaskAndSignalEvent, Unretained(sampler),
|
| + add_events, Unretained(&executed)));
|
| + executed.Wait();
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
|
| + SamplingThread* sampler,
|
| + int add_events,
|
| + WaitableEvent* event) {
|
| + sampler->ShutdownTask(add_events);
|
| + event->Signal();
|
| +}
|
| +
|
| +StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread::
|
| + CollectionContext::next_collection_id_;
|
| +
|
| +StackSamplingProfiler::SamplingThread::SamplingThread()
|
| + : Thread("StackSamplingProfiler") {}
|
| +
|
| +StackSamplingProfiler::SamplingThread::~SamplingThread() = default;
|
| +
|
| +StackSamplingProfiler::SamplingThread*
|
| +StackSamplingProfiler::SamplingThread::GetInstance() {
|
| + return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
|
| +}
|
| +
|
| +int StackSamplingProfiler::SamplingThread::Add(
|
| + std::unique_ptr<CollectionContext> collection) {
|
| + // This is not to be run on the sampling thread.
|
| +
|
| + int id = collection->collection_id;
|
| + scoped_refptr<SingleThreadTaskRunner> task_runner =
|
| + GetOrCreateTaskRunnerForAdd();
|
| +
|
| + task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::AddCollectionTask,
|
| + Unretained(this), Passed(&collection)));
|
| +
|
| + return id;
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::Remove(int id) {
|
| + // This is not to be run on the sampling thread.
|
| +
|
| + ThreadExecutionState state;
|
| + scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
|
| + if (state != RUNNING)
|
| return;
|
| + DCHECK(task_runner);
|
| +
|
| + // This can fail if the thread were to exit between acquisition of the task
|
| + // runner above and the call below. In that case, however, everything has
|
| + // stopped so there's no need to try to stop it.
|
| + task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask,
|
| + Unretained(this), id));
|
| +}
|
| +
|
| +scoped_refptr<SingleThreadTaskRunner>
|
| +StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
|
| + AutoLock lock(thread_execution_state_lock_);
|
| +
|
| +  // The increment of the "add events" count is why this method must only be
|
| +  // called from Add().
|
| + ++thread_execution_state_add_events_;
|
| +
|
| + if (thread_execution_state_ == RUNNING) {
|
| + DCHECK(thread_execution_state_task_runner_);
|
| + // This shouldn't be called from the sampling thread as it's inefficient.
|
| + // Use GetTaskRunnerOnSamplingThread() instead.
|
| + DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
|
| + return thread_execution_state_task_runner_;
|
| + }
|
| +
|
| + if (thread_execution_state_ == EXITING) {
|
| + // The previous instance has only been partially cleaned up. It is necessary
|
| + // to call Stop() before Start().
|
| + Stop();
|
| + }
|
| +
|
| +  // The thread is not running. Start it and get its associated task runner.
|
| +  // That task runner has to be saved for future use because, although it can
|
| +  // be used from any thread, for thread-safety reasons it can be acquired via
|
| +  // task_runner() only on the created (sampling) thread or on the thread that
|
| +  // created it (i.e. this thread). SamplingThread works around this by gating
|
| +  // access to the saved copy with |thread_execution_state_lock_|.
|
| + Start();
|
| + thread_execution_state_ = RUNNING;
|
| + thread_execution_state_task_runner_ = Thread::task_runner();
|
| +
|
| + // Detach the sampling thread from the "sequence" (i.e. thread) that
|
| + // started it so that it can be self-managed or stopped by another thread.
|
| + DetachFromSequence();
|
| +
|
| + return thread_execution_state_task_runner_;
|
| +}
|
| +
|
| +scoped_refptr<SingleThreadTaskRunner>
|
| +StackSamplingProfiler::SamplingThread::GetTaskRunner(
|
| + ThreadExecutionState* out_state) {
|
| + AutoLock lock(thread_execution_state_lock_);
|
| + if (out_state)
|
| + *out_state = thread_execution_state_;
|
| + if (thread_execution_state_ == RUNNING) {
|
| + // This shouldn't be called from the sampling thread as it's inefficient.
|
| + // Use GetTaskRunnerOnSamplingThread() instead.
|
| + DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
|
| + DCHECK(thread_execution_state_task_runner_);
|
| + } else {
|
| + DCHECK(!thread_execution_state_task_runner_);
|
| + }
|
| +
|
| + return thread_execution_state_task_runner_;
|
| +}
|
| +
|
| +scoped_refptr<SingleThreadTaskRunner>
|
| +StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
|
| + // This should be called only from the sampling thread as it has limited
|
| + // accessibility.
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + return Thread::task_runner();
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::FinishCollection(
|
| + CollectionContext* collection) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + // If there is no duration for the final profile (because it was stopped),
|
| + // calculate it now.
|
| + if (!collection->profiles.empty() &&
|
| + collection->profiles.back().profile_duration == TimeDelta()) {
|
| + collection->profiles.back().profile_duration =
|
| + Time::Now() - collection->profile_start_time;
|
| + }
|
| +
|
| + // Extract some information so callback and event-signalling can still be
|
| + // done after the collection has been removed from the list of "active" ones.
|
| +  // This allows the controlling object (and tests using it) to be confident
|
| +  // that the collection is fully finished when those things occur.
|
| + const CompletedCallback callback = collection->callback;
|
| + CallStackProfiles profiles = std::move(collection->profiles);
|
| + WaitableEvent* finished = collection->finished;
|
| +
|
| + // Remove this collection from the map of known ones. The |collection|
|
| + // parameter is invalid after this point.
|
| + size_t count = active_collections_.erase(collection->collection_id);
|
| + DCHECK_EQ(1U, count);
|
| +
|
| + // Run the associated callback, passing the collected profiles.
|
| + callback.Run(std::move(profiles));
|
|
|
| - CallStackProfiles profiles;
|
| - CollectProfiles(&profiles);
|
| - concurrent_profiling_lock.Get().Release();
|
| - completed_callback_.Run(std::move(profiles));
|
| -}
|
| -
|
| -// Depending on how long the sampling takes and the length of the sampling
|
| -// interval, a burst of samples could take arbitrarily longer than
|
| -// samples_per_burst * sampling_interval. In this case, we (somewhat
|
| -// arbitrarily) honor the number of samples requested rather than strictly
|
| -// adhering to the sampling intervals. Once we have established users for the
|
| -// StackSamplingProfiler and the collected data to judge, we may go the other
|
| -// way or make this behavior configurable.
|
| -void StackSamplingProfiler::SamplingThread::CollectProfile(
|
| - CallStackProfile* profile,
|
| - TimeDelta* elapsed_time,
|
| - bool* was_stopped) {
|
| - ElapsedTimer profile_timer;
|
| - native_sampler_->ProfileRecordingStarting(&profile->modules);
|
| - profile->sampling_period = params_.sampling_interval;
|
| - *was_stopped = false;
|
| - TimeDelta previous_elapsed_sample_time;
|
| - for (int i = 0; i < params_.samples_per_burst; ++i) {
|
| - if (i != 0) {
|
| - // Always wait, even if for 0 seconds, so we can observe a signal on
|
| - // stop_event_.
|
| - if (stop_event_.TimedWait(
|
| - std::max(params_.sampling_interval - previous_elapsed_sample_time,
|
| - TimeDelta()))) {
|
| - *was_stopped = true;
|
| - break;
|
| - }
|
| - }
|
| - ElapsedTimer sample_timer;
|
| - profile->samples.push_back(Sample());
|
| - native_sampler_->RecordStackSample(&profile->samples.back());
|
| - previous_elapsed_sample_time = sample_timer.Elapsed();
|
| + // Signal that this collection is finished.
|
| + finished->Signal();
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::RecordSample(
|
| + CollectionContext* collection) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| + DCHECK(collection->native_sampler);
|
| +
|
| + // If this is the first sample of a burst, a new Profile needs to be created
|
| + // and filled.
|
| + if (collection->sample == 0) {
|
| + collection->profiles.push_back(CallStackProfile());
|
| + CallStackProfile& profile = collection->profiles.back();
|
| + profile.sampling_period = collection->params.sampling_interval;
|
| + collection->profile_start_time = Time::Now();
|
| + collection->native_sampler->ProfileRecordingStarting(&profile.modules);
|
| }
|
|
|
| - *elapsed_time = profile_timer.Elapsed();
|
| - profile->profile_duration = *elapsed_time;
|
| - native_sampler_->ProfileRecordingStopped();
|
| + // The currently active profile being captured.
|
| + CallStackProfile& profile = collection->profiles.back();
|
| +
|
| + // Record a single sample.
|
| + profile.samples.push_back(Sample());
|
| + collection->native_sampler->RecordStackSample(&profile.samples.back());
|
| +
|
| + // If this is the last sample of a burst, record the total time.
|
| + if (collection->sample == collection->params.samples_per_burst - 1) {
|
| + profile.profile_duration = Time::Now() - collection->profile_start_time;
|
| + collection->native_sampler->ProfileRecordingStopped();
|
| + }
|
| }
|
|
|
| -// In an analogous manner to CollectProfile() and samples exceeding the expected
|
| -// total sampling time, bursts may also exceed the burst_interval. We adopt the
|
| -// same wait-and-see approach here.
|
| -void StackSamplingProfiler::SamplingThread::CollectProfiles(
|
| - CallStackProfiles* profiles) {
|
| - if (stop_event_.TimedWait(params_.initial_delay))
|
| +void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + if (!active_collections_.empty())
|
| return;
|
|
|
| - TimeDelta previous_elapsed_profile_time;
|
| - for (int i = 0; i < params_.bursts; ++i) {
|
| - if (i != 0) {
|
| - // Always wait, even if for 0 seconds, so we can observe a signal on
|
| - // stop_event_.
|
| - if (stop_event_.TimedWait(
|
| - std::max(params_.burst_interval - previous_elapsed_profile_time,
|
| - TimeDelta())))
|
| - return;
|
| - }
|
| -
|
| - CallStackProfile profile;
|
| - bool was_stopped = false;
|
| - CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
|
| - if (!profile.samples.empty())
|
| - profiles->push_back(std::move(profile));
|
| -
|
| - if (was_stopped)
|
| + int add_events;
|
| + {
|
| + AutoLock lock(thread_execution_state_lock_);
|
| + if (thread_execution_state_disable_idle_shutdown_for_testing_)
|
| return;
|
| + add_events = thread_execution_state_add_events_;
|
| }
|
| +
|
| + GetTaskRunnerOnSamplingThread()->PostDelayedTask(
|
| + FROM_HERE,
|
| + Bind(&SamplingThread::ShutdownTask, Unretained(this), add_events),
|
| + TimeDelta::FromSeconds(60));
|
| }
|
|
|
| -void StackSamplingProfiler::SamplingThread::Stop() {
|
| - stop_event_.Signal();
|
| +void StackSamplingProfiler::SamplingThread::AddCollectionTask(
|
| + std::unique_ptr<CollectionContext> collection) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + const int collection_id = collection->collection_id;
|
| + const TimeDelta initial_delay = collection->params.initial_delay;
|
| +
|
| + active_collections_.insert(
|
| + std::make_pair(collection_id, std::move(collection)));
|
| +
|
| + GetTaskRunnerOnSamplingThread()->PostDelayedTask(
|
| + FROM_HERE,
|
| + Bind(&SamplingThread::PerformCollectionTask, Unretained(this),
|
| + collection_id),
|
| + initial_delay);
|
| +
|
| + // Another increment of "add events" serves to invalidate any pending
|
| + // shutdown tasks that may have been initiated between the Add() and this
|
| + // task running.
|
| + {
|
| + AutoLock lock(thread_execution_state_lock_);
|
| + ++thread_execution_state_add_events_;
|
| + }
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + auto found = active_collections_.find(id);
|
| + if (found == active_collections_.end())
|
| + return;
|
| +
|
| + FinishCollection(found->second.get());
|
| + ScheduleShutdownIfIdle();
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + auto found = active_collections_.find(id);
|
| +
|
| +  // The collection won't be found if it has already been removed (stopped).
|
| + if (found == active_collections_.end())
|
| + return;
|
| +
|
| + CollectionContext* collection = found->second.get();
|
| +
|
| + // Handle first-run with no "next time".
|
| + if (collection->next_sample_time == Time())
|
| + collection->next_sample_time = Time::Now();
|
| +
|
| + // Do the collection of a single sample.
|
| + RecordSample(collection);
|
| +
|
| + // Update the time of the next sample recording.
|
| + if (UpdateNextSampleTime(collection)) {
|
| + bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
|
| + FROM_HERE,
|
| + Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id),
|
| + std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
|
| + DCHECK(success);
|
| + } else {
|
| + // All capturing has completed so finish the collection. By not re-adding
|
| + // it to the task queue, the collection will "expire" (i.e. no further work
|
| + // will be done). The |collection| variable will be invalid after this call.
|
| + FinishCollection(collection);
|
| + ScheduleShutdownIfIdle();
|
| + }
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + // Holding this lock ensures that any attempt to start another job will
|
| + // get postponed until |thread_execution_state_| is updated, thus eliminating
|
| + // the race in starting a new thread while the previous one is exiting.
|
| + AutoLock lock(thread_execution_state_lock_);
|
| +
|
| + // If the current count of creation requests doesn't match the passed count
|
| + // then other tasks have been created since this was posted. Abort shutdown.
|
| + if (thread_execution_state_add_events_ != add_events)
|
| + return;
|
| +
|
| + // There can be no new AddCollectionTasks at this point because creating
|
| + // those always increments "add events". There may be other requests, like
|
| + // Remove, but it's okay to schedule the thread to stop once they've been
|
| + // executed (i.e. "soon").
|
| + DCHECK(active_collections_.empty());
|
| + StopSoon();
|
| +
|
| + // StopSoon will have set the owning sequence (again) so it must be detached
|
| + // (again) in order for Stop/Start to be called (again) should more work
|
| + // come in. Holding the |thread_execution_state_lock_| ensures the necessary
|
| + // happens-after with regard to this detach and future Thread API calls.
|
| + DetachFromSequence();
|
| +
|
| +  // Set |thread_execution_state_| so the thread will be restarted when new
|
| +  // work comes in. Clear |thread_execution_state_task_runner_| to avoid
|
| + // confusion.
|
| + thread_execution_state_ = EXITING;
|
| + thread_execution_state_task_runner_ = nullptr;
|
| +}
|
| +
|
| +bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
|
| + CollectionContext* collection) {
|
| + // This will keep a consistent average interval between samples but will
|
| +  // result in a constant series of acquisitions, thus nearly locking out the
|
| + // target thread, if the interval is smaller than the time it takes to
|
| + // actually acquire the sample. Anything sampling that quickly is going
|
| + // to be a problem anyway so don't worry about it.
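|
| +  // For example, with a 100ms sampling_interval, a single sample that takes
|
| +  // 150ms to acquire leaves |next_sample_time| in the past; the next
|
| +  // PerformCollectionTask() is posted with zero delay (the std::max() clamp
|
| +  // there) and later samples fall back onto the original 100ms schedule,
|
| +  // because |next_sample_time| advances by a fixed interval each time.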
|
| + if (++collection->sample < collection->params.samples_per_burst) {
|
| + collection->next_sample_time += collection->params.sampling_interval;
|
| + return true;
|
| + }
|
| +
|
| + if (++collection->burst < collection->params.bursts) {
|
| + collection->sample = 0;
|
| + collection->next_sample_time += collection->params.burst_interval;
|
| + return true;
|
| + }
|
| +
|
| + return false;
|
| +}
|
| +
|
| +void StackSamplingProfiler::SamplingThread::CleanUp() {
|
| + DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
|
| +
|
| + // There should be no collections remaining when the thread stops.
|
| + DCHECK(active_collections_.empty());
|
| +
|
| + // Let the parent clean up.
|
| + Thread::CleanUp();
|
| }
|
|
|
| // StackSamplingProfiler ------------------------------------------------------
|
|
|
| +// static
|
| +void StackSamplingProfiler::TestAPI::Reset() {
|
| + SamplingThread::TestAPI::Reset();
|
| + ResetAnnotations();
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::TestAPI::ResetAnnotations() {
|
| + subtle::NoBarrier_Store(&process_milestones_, 0u);
|
| +}
|
| +
|
| +// static
|
| +bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() {
|
| + return SamplingThread::GetInstance()->IsRunning();
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::TestAPI::DisableIdleShutdown() {
|
| + SamplingThread::TestAPI::DisableIdleShutdown();
|
| +}
|
| +
|
| +// static
|
| +void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
|
| + bool simulate_intervening_start) {
|
| + SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
|
| +}
|
| +
|
| subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
|
|
|
| StackSamplingProfiler::StackSamplingProfiler(
|
| - PlatformThreadId thread_id,
|
| const SamplingParams& params,
|
| - const CompletedCallback& callback)
|
| - : StackSamplingProfiler(thread_id, params, callback, nullptr) {}
|
| + const CompletedCallback& callback,
|
| + NativeStackSamplerTestDelegate* test_delegate)
|
| + : StackSamplingProfiler(base::PlatformThread::CurrentId(),
|
| + params,
|
| + callback,
|
| + test_delegate) {}
|
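| +// Illustrative usage (editor's sketch, not part of this change; the actual
|
| +// declarations live in the accompanying header). OnProfilesCollected is a
|
| +// hypothetical CompletedCallback target:
|
| +//   StackSamplingProfiler profiler(params, Bind(&OnProfilesCollected),
|
| +//                                  nullptr /* test_delegate */);
|
| +//   profiler.Start();  // sampling of the calling thread begins asynchronously
|
| +//   profiler.Stop();   // or rely on the destructor, which also waits on
|
| +//                      // |profiling_inactive_|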
|
|
| StackSamplingProfiler::StackSamplingProfiler(
|
| PlatformThreadId thread_id,
|
| const SamplingParams& params,
|
| const CompletedCallback& callback,
|
| NativeStackSamplerTestDelegate* test_delegate)
|
| - : thread_id_(thread_id), params_(params), completed_callback_(callback),
|
| - test_delegate_(test_delegate) {
|
| -}
|
| + : thread_id_(thread_id),
|
| + params_(params),
|
| + completed_callback_(callback),
|
| +      // The event starts "signaled" so code knows it's safe to start the
|
| +      // thread, and "manual" so that it can be waited upon in multiple places.
|
| + profiling_inactive_(WaitableEvent::ResetPolicy::MANUAL,
|
| + WaitableEvent::InitialState::SIGNALED),
|
| + collection_id_(NULL_COLLECTION_ID),
|
| + test_delegate_(test_delegate) {}
|
|
|
| StackSamplingProfiler::~StackSamplingProfiler() {
|
| + // Stop returns immediately but the shutdown runs asynchronously. There is a
|
| + // non-zero probability that one more sample will be taken after this call
|
| + // returns.
|
| Stop();
|
| - if (!sampling_thread_handle_.is_null())
|
| - PlatformThread::Join(sampling_thread_handle_);
|
| -}
|
|
|
| -// static
|
| -void StackSamplingProfiler::StartAndRunAsync(
|
| - PlatformThreadId thread_id,
|
| - const SamplingParams& params,
|
| - const CompletedCallback& callback) {
|
| - CHECK(ThreadTaskRunnerHandle::Get());
|
| - AsyncRunner::Run(thread_id, params, callback);
|
| + // The behavior of sampling a thread that has exited is undefined and could
|
| + // cause Bad Things(tm) to occur. The safety model provided by this class is
|
| + // that an instance of this object is expected to live at least as long as
|
| + // the thread it is sampling. However, because the sampling is performed
|
| + // asynchronously by the SamplingThread, there is no way to guarantee this
|
| + // is true without waiting for it to signal that it has finished.
|
| + //
|
| + // The wait time should, at most, be only as long as it takes to collect one
|
| + // sample (~200us) or none at all if sampling has already completed.
|
| + ThreadRestrictions::ScopedAllowWait allow_wait;
|
| + profiling_inactive_.Wait();
|
| }
|
|
|
| void StackSamplingProfiler::Start() {
|
| @@ -299,19 +761,25 @@ void StackSamplingProfiler::Start() {
|
| std::unique_ptr<NativeStackSampler> native_sampler =
|
| NativeStackSampler::Create(thread_id_, &RecordAnnotations,
|
| test_delegate_);
|
| +
|
| if (!native_sampler)
|
| return;
|
|
|
| - sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_,
|
| - completed_callback_));
|
| - if (!PlatformThread::Create(0, sampling_thread_.get(),
|
| - &sampling_thread_handle_))
|
| - sampling_thread_.reset();
|
| + // Wait for profiling to be "inactive", then reset it for the upcoming run.
|
| + profiling_inactive_.Wait();
|
| + profiling_inactive_.Reset();
|
| +
|
| + DCHECK_EQ(NULL_COLLECTION_ID, collection_id_);
|
| + collection_id_ = SamplingThread::GetInstance()->Add(
|
| + MakeUnique<SamplingThread::CollectionContext>(
|
| + thread_id_, params_, completed_callback_, &profiling_inactive_,
|
| + std::move(native_sampler)));
|
| + DCHECK_NE(NULL_COLLECTION_ID, collection_id_);
|
| }
|
|
|
| void StackSamplingProfiler::Stop() {
|
| - if (sampling_thread_)
|
| - sampling_thread_->Stop();
|
| + SamplingThread::GetInstance()->Remove(collection_id_);
|
| + collection_id_ = NULL_COLLECTION_ID;
|
| }
|
|
|
| // static
|
| @@ -323,11 +791,6 @@ void StackSamplingProfiler::SetProcessMilestone(int milestone) {
|
| }
|
|
|
| // static
|
| -void StackSamplingProfiler::ResetAnnotationsForTesting() {
|
| - subtle::NoBarrier_Store(&process_milestones_, 0u);
|
| -}
|
| -
|
| -// static
|
| void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
|
| // The code inside this method must not do anything that could acquire a
|
| // mutex, including allocating memory (which includes LOG messages) because
|
|
|