| Index: base/debug/activity_tracker.cc |
| diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..26d878a9505edb05b08622850951aa6fa06542fd |
| --- /dev/null |
| +++ b/base/debug/activity_tracker.cc |
| @@ -0,0 +1,424 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/debug/activity_tracker.h" |
| + |
| +#include <atomic> |
| + |
| +#include "base/files/memory_mapped_file.h" |
| +#include "base/logging.h" |
| +#include "base/memory/ptr_util.h" |
| +#include "base/metrics/field_trial.h" |
| +#include "base/pending_task.h" |
| +#include "base/stl_util.h" |
| + |
| +namespace base { |
| +namespace debug { |
| + |
| +namespace { |
| + |
| +// A number that identifies memory as having been initialized. |
|
manzagop (departed)
2016/05/20 18:19:29
nit: typos
bcwhite
2016/05/20 19:19:18
Done. (I have done zero proof-reading so far.)
|
| +const uint64_t kHeaderCookie = 0x98476A390137E67A + 1; // v1 |
|
manzagop (departed)
2016/05/20 18:19:29
Comment on where this value comes from?
bcwhite
2016/05/20 19:19:18
Done.
|
| + |
| +// The minimum depth a stack should support. |
| +const int kMinStackDepth = 2; |
| + |
| +// Type identifiers used when storing in persistent memory so they can be |
| +// identified during extraction; the first 4 bytes of the SHA1 of the name |
| + are used as a unique integer. A "version number" is added to the base |
| +// so that, if the structure of that object changes, stored older versions |
| +// will be safely ignored. |
| +enum : uint32_t { |
| + kTypeIdActivityTracker = 0x5D7381AF + 1, // SHA1(ActivityTracker) v1 |
| + kTypeIdActivityTrackerFree = 0x3F0272FB, // SHA1(ActivityTrackerFree) |
| +}; |
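For illustration, the derivation the enum comment describes can be sketched with base/sha1.h. The helper name TypeIdForName and the byte order of the copy are assumptions, not part of this CL:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include "base/sha1.h"

    // Hypothetical helper: the first 4 bytes of SHA1(name) as an integer,
    // plus a version offset, matching the scheme described above. The byte
    // order of the memcpy is an assumption.
    uint32_t TypeIdForName(const std::string& name, uint32_t version) {
      const std::string digest = base::SHA1HashString(name);  // 20 bytes
      uint32_t id;
      memcpy(&id, digest.data(), sizeof(id));
      return id + version;
    }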
| + |
| +} // namespace |
| + |
| +const char kActivityTrackingFeatureName[] = "ActivityTracking"; |
| + |
| +void SetupGlobalActivityTrackerFieldTrial() { |
| + const size_t kMemorySize = 1 << 20; // 1 MiB |
|
manzagop (departed)
2016/05/20 18:19:30
Can you comment on how you chose this number? (k t
bcwhite
2016/05/20 19:19:19
Acknowledged.
|
| + const int kStackDepth = 3; |
| + const uint64_t kAllocatorId = 0; |
| + const char kAllocatorName[] = "ActivityTracker"; |
| + |
| + const std::string group_name = |
| + FieldTrialList::FindFullName(kActivityTrackingFeatureName); |
| + if (group_name.empty() || group_name == "Disabled") |
| + return; |
| + |
| + if (group_name == "InMemory") { |
|
manzagop (departed)
2016/05/20 18:19:29
nit: braces when multiline.
bcwhite
2016/05/20 19:19:18
Done.
|
| + GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, kAllocatorId, |
| + kAllocatorName, kStackDepth); |
| + } else { |
| + NOTREACHED() << group_name; |
| + } |
|
manzagop (departed)
2016/05/20 18:19:30
Isn't it better to silently disable, and the perso
bcwhite
2016/05/20 19:19:19
That's what'll happen in a release build.
|
| +} |
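A hedged usage sketch: to exercise the "InMemory" path in a test, the trial group can be registered with the standard FieldTrialList API before calling the setup function. The nullptr entropy provider is just the simplest way to get a live FieldTrialList for the sketch:

    // Sketch only: force the "InMemory" group so the tracker is created.
    base::FieldTrialList field_trial_list(nullptr);
    base::FieldTrialList::CreateFieldTrial(
        base::debug::kActivityTrackingFeatureName, "InMemory");
    base::debug::SetupGlobalActivityTrackerFieldTrial();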
| + |
| + |
| +struct ThreadActivityTracker::Header { |
| + // This unique number indicates a valid initialization of the memory. |
| + uint64_t cookie; |
| + |
| + // The thread-id to which this data belongs. This identifier is not |
| + // guaranteed to mean anything, just to be unique among all active |
| + // trackers. |
| + uint64_t thread_id; |
| + |
| + // The start-time and start-ticks when the data was created. Each activity |
| + // record has a |time_ticks| value that can be converted to a "wall time" |
| + // with these two values. |
| + int64_t start_time; |
| + int64_t start_ticks; |
| + |
| + // The number of Activity slots in the data. |
| + uint32_t slots; |
| + |
| + // The current depth of the stack. This may be greater than the number of |
| + // slots. If the depth exceeds the number of slots, the newest entries |
| + // won't be recorded. |
| + std::atomic<uint32_t> depth; |
|
manzagop (departed)
2016/05/20 18:19:30
Is atomic<POD> considered POD? Can there be versio
bcwhite
2016/05/20 19:19:19
Yes. No. It's exactly the underlying type but wi
|
| + |
| + // A memory location used to indicate if changes have been made to the stack |
| + // that would invalidate an in-progress read of its contents. The active |
| + // tracker will zero the value whenever something gets popped from the |
| + // stack. A monitoring tracker can write a non-zero value here, copy the |
| + // stack contents, and read the value to know, if it is still non-zero, that |
| + // the contents didn't change while being copied. |
| + std::atomic<int> unchanged; |
|
manzagop (departed)
2016/05/20 18:19:30
int vs bool?
bcwhite
2016/05/20 19:19:18
I chose to use a natural word size so there's no n
|
| +}; |
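Because Header is overlaid directly on raw (possibly persistent) memory, it must stay standard-layout and its atomics must not change size or representation relative to the plain integer types, which is the substance of the atomic&lt;POD&gt; question above. A compile-time check along these lines (a sketch, not in this CL, and assuming it sits somewhere Header is accessible) would document the constraint:

    #include <atomic>
    #include <cstdint>
    #include <type_traits>

    // Sketch: document, at compile time, the layout assumptions relied on
    // when mapping Header onto raw memory.
    static_assert(
        std::is_standard_layout<ThreadActivityTracker::Header>::value,
        "Header is overlaid on raw memory");
    static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
                  "atomic<uint32_t> must match its underlying type");
    static_assert(sizeof(std::atomic<int>) == sizeof(int),
                  "atomic<int> must match its underlying type");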
| + |
| +ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
| + : header_(static_cast<Header*>(base)), |
| + stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) + |
| + sizeof(Header))), |
| + slots_((size - sizeof(Header)) / sizeof(StackEntry)) { |
|
manzagop (departed)
2016/05/20 18:19:29
DCHECK(base)?
bcwhite
2016/05/20 19:19:18
Done.
|
| + DCHECK(base); |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + |
| + // Ensure there is enough space for the header and at least a few records. |
| + DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(StackEntry), size); |
| + |
| + // Ensure that the |slots_| calculation didn't overflow. |
| + DCHECK_GE(std::numeric_limits<uint32_t>::max(), |
| + (size - sizeof(Header)) / sizeof(StackEntry)); |
| + |
| + // Provided memory should either be completely initialized or all zeros. |
| + if (header_->cookie == 0) { |
| + // This is a new file. Double-check other fields and then initialize. |
| + DCHECK_EQ(0U, header_->thread_id); |
| + DCHECK_EQ(0, header_->start_time); |
| + DCHECK_EQ(0, header_->start_ticks); |
| + DCHECK_EQ(0U, header_->slots); |
| + DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed)); |
| + DCHECK_EQ(0, header_->unchanged.load(std::memory_order_relaxed)); |
| + DCHECK_EQ(0, stack_[0].time_ticks); |
| + DCHECK_EQ(0, stack_[0].source_address); |
| + DCHECK_EQ(0, stack_[0].method_address); |
| + DCHECK_EQ(0U, stack_[0].sequence_id); |
| + |
| + header_->cookie = kHeaderCookie; |
| + header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId()); |
| + header_->start_time = base::Time::Now().ToInternalValue(); |
| + header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); |
| + header_->slots = slots_; |
| + valid_ = true; |
| + } else { |
| + // This is a file with existing data. Perform basic consistency checks. |
| + if (header_->cookie != kHeaderCookie || |
| + header_->slots != slots_ || |
| + header_->start_time > base::Time::Now().ToInternalValue() || |
| + stack_[0].time_ticks == 0) |
| + return; |
| + valid_ = true; |
| + } |
| +} |
| + |
| +ThreadActivityTracker::~ThreadActivityTracker() {} |
| + |
| +void ThreadActivityTracker::RecordStart(const void* source, |
| + ActivityType activity, |
| + intptr_t method, |
| + uint64_t sequence) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + |
| + // Get the current depth of the stack. No access to other memory guarded |
| + // by this variable is done here so a "relaxed" load is acceptable. |
| + uint32_t depth = header_->depth.load(std::memory_order_relaxed); |
| + |
| + // Handle the case where the stack depth has exceeded the storage capacity. |
| + // Extra entries will be lost leaving only the base of the stack. |
| + if (depth >= slots_) { |
| + // Since no other memory is being modified, a "relaxed" store is acceptable. |
| + header_->depth.store(depth + 1, std::memory_order_relaxed); |
| + return; |
| + } |
| + |
| + // Get a pointer to the next entry and load it. No atomicity is required |
| + // here because the memory is known only to this thread. It will be made |
| + // known to other threads once the depth is incremented. |
| + StackEntry* entry = &stack_[depth]; |
| + entry->time_ticks = base::TimeTicks::Now().ToInternalValue(); |
| + entry->activity_type = activity; |
| + entry->source_address = reinterpret_cast<intptr_t>(source); |
| + entry->method_address = method; |
| + entry->sequence_id = sequence; |
| + |
| + // Save the incremented depth. Because this guards |entry| memory filled |
| + // above that may be read by another thread once the recorded depth changes, |
| + // a "release" store is required. |
| + header_->depth.store(depth + 1, std::memory_order_release); |
| +} |
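The release store here pairs with the acquire load in CopyStack below. Stripped of the surrounding bookkeeping, the publish pattern looks like this (hypothetical, simplified names):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> depth{0};
    uint64_t slots[16];

    void Push(uint64_t value) {  // writer thread
      uint32_t d = depth.load(std::memory_order_relaxed);
      slots[d] = value;  // plain write; not yet visible to readers
      // "release" publishes the write above to any thread that later
      // "acquires" the new depth.
      depth.store(d + 1, std::memory_order_release);
    }

    uint64_t PeekTop() {  // reader thread
      uint32_t d = depth.load(std::memory_order_acquire);
      return d ? slots[d - 1] : 0;  // guaranteed to see the slot write
    }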
| + |
| +void ThreadActivityTracker::RecordFinish(const void* source) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + |
| + // Do an atomic decrement of the depth. No changes to stack entries guarded |
| + // by this variable are made here so a "relaxed" operation is acceptable. |
| + // |depth| will receive the value before it was modified. |
| + uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed); |
| + |
| + // Validate that everything is running correctly. |
| + DCHECK_LT(0U, depth); |
| + if (depth <= slots_) { |
| + DCHECK_EQ(reinterpret_cast<intptr_t>(source), |
| + stack_[depth - 1].source_address); |
| + } |
| + |
| + // The stack has shrunk, meaning that some other thread trying to copy the |
| + // contents for reporting purposes could get bad data. That thread would |
| + // have written a non-zero value into |unchanged|; clearing it here will |
| + // let that thread detect that something did change. It doesn't matter |
| + // when this is done relative to the atomic |depth| operation above so a |
| + // "relaxed" access is acceptable. |
| + header_->unchanged.store(0, std::memory_order_relaxed); |
| +} |
| + |
| +uint32_t ThreadActivityTracker::CopyStack(std::vector<StackEntry>* stack) { |
| + // It's possible for the data to change while reading it. Make several |
|
manzagop (departed)
2016/05/20 18:19:30
Nit: add that (only) some of these changes invalidate the read.
bcwhite
2016/05/20 19:19:18
Done.
|
| + // attempts but don't try forever. |
| + const int kMaxAttempts = 10; |
| + uint32_t depth; |
| + |
| + // Start with an empty return stack. |
| + stack->clear(); |
| + |
| + // Stop here if the data isn't valid. |
| + if (!valid_) |
|
manzagop (departed)
2016/05/20 18:19:30
The return value is ambiguous wrt the function success.
bcwhite
2016/05/20 19:19:18
Caller can always check is_valid() first. Returni
|
| + return 0; |
| + |
| + for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
| + // Write a non-zero value to |unchanged| so it's possible to detect at |
| + // the end that nothing has changed since copying the data began. |
| + header_->unchanged.store(1, std::memory_order_relaxed); |
| + |
| + // Fetching the current depth also "acquires" the contents of the stack. |
| + depth = header_->depth.load(std::memory_order_acquire); |
| + if (depth == 0) |
| + return 0; |
| + |
| + // Copy the existing contents. Memcpy is used for speed. |
| + uint32_t count = std::min(depth, slots_); |
| + stack->resize(count); |
| + memcpy(&(*stack)[0], stack_, count * sizeof(StackEntry)); |
| + |
| + // Check to make sure everything was unchanged during the copy. |
|
manzagop (departed)
2016/05/20 18:19:29
Is there a constraint there may be at most 1 reader?
bcwhite
2016/05/20 19:19:18
Good point. Multiple readers would be possible if
|
| + if (header_->unchanged.load(std::memory_order_relaxed)) |
| + return depth; |
| + } |
| + |
| + // If all attempts failed, just return the depth with no content. |
| + stack->clear(); |
| + return depth; |
| +} |
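A hedged usage sketch from the monitoring side (names as defined in this file; the tracker pointer is assumed to come from wherever the monitor obtained it):

    // Sketch: sample another thread's stack. A non-zero depth with an
    // empty snapshot means every one of the kMaxAttempts copies raced
    // with a concurrent pop.
    std::vector<ThreadActivityTracker::StackEntry> snapshot;
    uint32_t depth = tracker->CopyStack(&snapshot);
    if (depth != 0 && snapshot.empty()) {
      // All copy attempts were invalidated; try again later.
    }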
| + |
| +// static |
| +size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| + return static_cast<size_t>(stack_depth) * sizeof(StackEntry) + sizeof(Header); |
| +} |
| + |
| + |
| +GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr; |
| + |
| +GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( |
| + PersistentMemoryAllocator::Reference mem_reference, |
| + void* base, |
| + size_t size) |
| + : ThreadActivityTracker(base, size), |
| + mem_reference_(mem_reference), |
| + mem_base_(base) {} |
| + |
| +GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() { |
| + // The global |g_tracker_| must point to the owner of this class since all |
| + // objects of this type must be destructed before |g_tracker_| can be changed. |
| + DCHECK(g_tracker_); |
| + g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_); |
| +} |
| + |
| +// static |
| +void GlobalActivityTracker::CreateWithAllocator( |
| + std::unique_ptr<PersistentMemoryAllocator> allocator, |
| + int stack_depth) { |
| + // There's no need to do anything with the result. It is self-managing. |
| + new GlobalActivityTracker(std::move(allocator), stack_depth); |
| +} |
| + |
| +// static |
| +void GlobalActivityTracker::CreateWithLocalMemory(size_t size, |
| + uint64_t id, |
| + StringPiece name, |
| + int stack_depth) { |
| + CreateWithAllocator( |
| + WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), |
| + stack_depth); |
| +} |
| + |
| +// static |
| +void GlobalActivityTracker::CreateWithFile(const FilePath& file_path, |
| + size_t size, |
| + uint64_t id, |
| + StringPiece name, |
| + int stack_depth) { |
| + // Create the file, overwriting anything that was there previously, and set |
| + // the length. This will create a space that is zero-filled, a requirement |
| + // for operation. |
| + File file(file_path, |
| + File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE); |
| + file.SetLength(size); |
| + |
| + // Map the file into memory and make it globally available. |
| + std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile()); |
| + mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE); |
| + CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator( |
| + std::move(mapped_file), id, name)), |
| + stack_depth); |
| +} |
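A hedged usage sketch (the path and sizes are illustrative, not from this CL):

    // Sketch: persist activity data across runs in a file.
    base::FilePath path = user_data_dir.AppendASCII("ActivityTracker.dat");
    base::debug::GlobalActivityTracker::CreateWithFile(
        path, 1 << 20 /* size: 1 MiB */, 0 /* id */, "ActivityTracker",
        4 /* stack_depth */);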
| + |
| +ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { |
| + DCHECK(!this_thread_tracker_.Get()); |
| + |
| + // The lock must be acquired to access the STL data structures. |
| + AutoLock auto_lock(lock_); |
| + |
| + PersistentMemoryAllocator::Reference mem_reference; |
| + void* mem_base; |
| + if (!available_memories_.empty()) { |
| + // There is a memory block that was previously released (and zero'd) so |
| + // just re-use that rather than allocating a new one. |
| + mem_reference = available_memories_.back(); |
| + available_memories_.pop_back(); |
| + mem_base = allocator_->GetAsObject<char>(mem_reference, |
| + kTypeIdActivityTrackerFree); |
| + DCHECK(mem_base); |
| + DCHECK_LT(stack_memory_, allocator_->GetAllocSize(mem_reference)); |
|
manzagop (departed)
2016/05/20 20:24:21
Is this not EQ? Or LE?
bcwhite
2016/05/20 20:41:18
The space gets rounded up for alignment reasons so
|
| + allocator_->SetType(mem_reference, kTypeIdActivityTracker); |
| + } else { |
| + // Allocate a block of memory from the persistent segment. |
| + mem_reference = allocator_->Allocate(stack_memory_, kTypeIdActivityTracker); |
| + if (mem_reference) { |
| + // Success. Convert the reference to an actual memory address. |
| + mem_base = |
| + allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); |
| + } else { |
| + // Failure. This should never happen. |
| + NOTREACHED(); |
| + // But if it does, handle it gracefully by allocating the required |
| + // memory from the heap. |
| + mem_base = new char[stack_memory_]; |
| + memset(mem_base, 0, stack_memory_); |
| + } |
| + } |
| + |
| + // Create a tracker with the acquired memory and set it as the tracker |
| + // for this particular thread in thread-local-storage. |
| + ManagedActivityTracker* tracker = |
| + new ManagedActivityTracker(mem_reference, mem_base, stack_memory_); |
| + DCHECK(tracker->is_valid()); |
| + thread_trackers_.insert(tracker); |
| + this_thread_tracker_.Set(tracker); |
| + |
| + return tracker; |
| +} |
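A hedged sketch of the expected call site, e.g. at the top of a worker thread's main function. The Get() accessor for |g_tracker_| is assumed to exist in the header; it is not shown in this file:

    // Sketch: a newly started thread registers itself for tracking.
    base::debug::GlobalActivityTracker* global =
        base::debug::GlobalActivityTracker::Get();  // assumed accessor
    if (global)
      global->CreateTrackerForCurrentThread();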
| + |
| +void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
| + ThreadActivityTracker* tracker = |
| + reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
| + if (tracker) { |
| + this_thread_tracker_.Free(); |
| + delete tracker; |
| + } |
| +} |
| + |
| +GlobalActivityTracker::GlobalActivityTracker( |
| + std::unique_ptr<PersistentMemoryAllocator> allocator, |
| + int stack_depth) |
| + : allocator_(std::move(allocator)), |
| + stack_memory_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), |
| + this_thread_tracker_(&OnTLSDestroy) { |
| + // Ensure the passed memory is valid and empty (iterator finds nothing). |
| + uint32_t type; |
| + DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
| + |
| + // Ensure that there is no other global object and then make this one such. |
| + DCHECK(!g_tracker_); |
| + g_tracker_ = this; |
| + |
| + // Create a tracker for this thread since it is known. |
| + CreateTrackerForCurrentThread(); |
| +} |
| + |
| +GlobalActivityTracker::~GlobalActivityTracker() { |
| + DCHECK_EQ(g_tracker_, this); |
| + DCHECK_EQ(0U, thread_trackers_.size()); |
| + g_tracker_ = nullptr; |
| +} |
| + |
| +void GlobalActivityTracker::ReturnTrackerMemory( |
| + ManagedActivityTracker* tracker, |
| + PersistentMemoryAllocator::Reference mem_reference, |
| + void* mem_base) { |
| + // Zero the memory so that it is ready for use if needed again later. It's |
| + // better to clear the memory now, when a thread is exiting, than to do it |
| + // when it is first needed by a thread doing actual work. |
| + memset(mem_base, 0, stack_memory_); |
| + |
| + // Access to STL structures requires a lock because this could get called |
| + // from any thread. |
| + AutoLock auto_lock(lock_); |
| + |
| + // Remove the destructed tracker from the set of known ones. |
| + DCHECK(ContainsKey(thread_trackers_, tracker)); |
| + thread_trackers_.erase(tracker); |
| + |
| + // Deal with the memory that was used by the tracker. |
| + if (mem_reference) { |
| + // The memory was within the persistent memory allocator. Change its type |
| + // so that iteration won't find it. |
| + allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree); |
| + // There is no way to free memory from a persistent allocator so instead |
| + // keep it on the internal list of available memory blocks. |
| + DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference)); |
| + available_memories_.push_back(mem_reference); |
| + } else { |
| + // The memory was allocated from the process heap. This shouldn't happen |
| + // because the persistent memory segment should be big enough for all |
| + // thread stacks but it's better to support falling back to allocation |
| + // from the heap rather than crash. Everything will work as normal but |
| + // the data won't be persisted. |
| + delete[] reinterpret_cast<char*>(mem_base); |
| + } |
| +} |
| + |
| +// static |
| +void GlobalActivityTracker::OnTLSDestroy(void* value) { |
| + delete reinterpret_cast<ManagedActivityTracker*>(value); |
| +} |
| + |
| +ScopedTaskActivity::ScopedTaskActivity(const PendingTask& task) |
| + : GlobalActivityTracker::ScopedThreadActivity( |
| + task.posted_from.program_counter(), |
| + ThreadActivityTracker::ACT_TASK, |
| + 0, |
| + task.sequence_num) {} |
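A hedged usage sketch: the constructor records the task's start, and the destructor, via the ScopedThreadActivity base class (declared in the header, not shown here, and assumed to call RecordFinish), records completion when the scope exits:

    // Sketch: bracket a task's execution with an activity record.
    {
      base::debug::ScopedTaskActivity activity(pending_task);
      pending_task.task.Run();  // illustrative; a crash in here leaves
    }                           // the activity record on the stack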
| + |
| +} // namespace debug |
| +} // namespace base |