Chromium Code Reviews| Index: base/debug/activity_tracker.cc |
| diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc |
| index 59b63526488ffaa780c364e602027b1ac89fcc68..46390c2cc5fc4f07de58727dd220caab0f575c05 100644 |
| --- a/base/debug/activity_tracker.cc |
| +++ b/base/debug/activity_tracker.cc |
| @@ -12,6 +12,7 @@ |
| #include "base/files/memory_mapped_file.h" |
| #include "base/logging.h" |
| #include "base/memory/ptr_util.h" |
| +#include "base/memory/ptr_util.h" |
| #include "base/metrics/field_trial.h" |
| #include "base/metrics/histogram_macros.h" |
| #include "base/pending_task.h" |
| @@ -38,7 +39,7 @@ const int kMinStackDepth = 2; |
| // The amount of memory set aside for holding arbitrary user data (key/value |
| // pairs) globally or associated with ActivityData entries. |
| const size_t kUserDataSize = 1024; // bytes |
| -const size_t kGlobalDataSize = 1024; // bytes |
| +const size_t kGlobalDataSize = 4096; // bytes |
| const size_t kMaxUserDataNameLength = |
| static_cast<size_t>(std::numeric_limits<uint8_t>::max()); |
| @@ -194,15 +195,81 @@ void Activity::FillFrom(Activity* activity, |
| #endif |
| } |
| -ActivitySnapshot::ActivitySnapshot() {} |
| -ActivitySnapshot::~ActivitySnapshot() {} |
| +ActivityUserData::TypedValue::TypedValue() {} |
| +ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default; |
| +ActivityUserData::TypedValue::~TypedValue() {} |
| + |
| +StringPiece ActivityUserData::TypedValue::Get() const { |
| + DCHECK_EQ(RAW_VALUE, type); |
| + return long_value; |
| +} |
| + |
| +StringPiece ActivityUserData::TypedValue::GetReference() const { |
| + DCHECK_EQ(RAW_VALUE_REFERENCE, type); |
| + return ref_value; |
| +} |
| + |
| +StringPiece ActivityUserData::TypedValue::GetString() const { |
| + DCHECK_EQ(STRING_VALUE, type); |
| + return long_value; |
| +} |
| + |
| +StringPiece ActivityUserData::TypedValue::GetStringReference() const { |
| + DCHECK_EQ(STRING_VALUE_REFERENCE, type); |
| + return ref_value; |
| +} |
| + |
| +bool ActivityUserData::TypedValue::GetBool() const { |
| + DCHECK_EQ(BOOL_VALUE, type); |
| + return short_value != 0; |
| +} |
| + |
| +char ActivityUserData::TypedValue::GetChar() const { |
| + DCHECK_EQ(CHAR_VALUE, type); |
| + return static_cast<char>(short_value); |
| +} |
| + |
| +int64_t ActivityUserData::TypedValue::GetInt() const { |
| + DCHECK_EQ(SIGNED_VALUE, type); |
| + return static_cast<int64_t>(short_value); |
| +} |
| + |
| +uint64_t ActivityUserData::TypedValue::GetUint() const { |
| + DCHECK_EQ(UNSIGNED_VALUE, type); |
| + return static_cast<uint64_t>(short_value); |
| +} |
| ActivityUserData::ValueInfo::ValueInfo() {} |
| ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| ActivityUserData::ValueInfo::~ValueInfo() {} |
| +std::atomic<uint32_t> ActivityUserData::next_id_; |
|
manzagop (departed)
2016/11/29 22:42:58
Did you mean to set a value?
bcwhite
2016/12/01 16:29:39
Adding "=1" caused a compile error on some platforms.
|
| + |
| ActivityUserData::ActivityUserData(void* memory, size_t size) |
| - : memory_(static_cast<char*>(memory)), available_(size) {} |
| + : memory_(reinterpret_cast<char*>(memory)), |
| + available_(size), |
| + id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { |
| + // It's possible that no user data is being stored. |
| + if (!memory_) |
| + return; |
| + |
| + DCHECK_LT(kMemoryAlignment, available_); |
| + if (id_->load(std::memory_order_relaxed) == 0) { |
| + // Generate a new ID and store it in the first 32-bit word of memory_. |
| + // |id_| must be non-zero for non-sink instances. |
| + uint32_t id; |
| + while ((id = next_id_.fetch_add(1, std::memory_order_relaxed)) == 0) |
| + ; |
| + id_->store(id, std::memory_order_relaxed); |
| + DCHECK_NE(0U, id_->load(std::memory_order_relaxed)); |
| + } |
| + memory_ += kMemoryAlignment; |
| + available_ -= kMemoryAlignment; |
| + |
| + // If there is already data present, load that. This allows the same class |
| + // to be used for analysis through snapshots. |
| + ImportExistingData(); |
|
manzagop (departed)
2016/11/29 22:42:58
Worth avoiding the import when the id was initially zero (i.e. the memory is freshly created)?
bcwhite
2016/12/01 16:29:39
It would require making a copy of the initial ID value.
|
| +} |
| ActivityUserData::~ActivityUserData() {} |
| @@ -239,18 +306,22 @@ void ActivityUserData::Set(StringPiece name, |
| sizeof(Header); |
| size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); |
| - // The "basic size" is the minimum size of the record. It's possible that |
| - // lengthy values will get truncated but there must be at least some bytes |
| - // available. |
| - size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment; |
| - if (basic_size > available_) |
| - return; // No space to store even the smallest value. |
| - |
| - // The "full size" is the size for storing the entire value, truncated |
| - // to the amount of available memory. |
| + // The "full size" is the size for storing the entire value. |
| size_t full_size = |
| std::min(sizeof(Header) + name_extent + value_extent, available_); |
| + |
| + // If the value is actually a single byte, see if it can be stuffed at the |
| + // end of the name extent rather than wasting kMemoryAlignment bytes. |
| + if (size == 1 && name_extent > name_size) { |
| + full_size = sizeof(Header) + name_extent; |
|
manzagop (departed)
2016/11/29 22:42:58
std::min(sizeof(Header) + name_extent, available_)
bcwhite
2016/12/01 16:29:39
Done, but with a separate check placed above.
|
| + --name_extent; |
| + } |
| + |
| + // Truncate the stored size to the amount of available memory. Stop now if |
| + // there's not any room for even part of the value. |
| size = std::min(full_size - sizeof(Header) - name_extent, size); |
|
manzagop (departed)
2016/11/29 22:42:58
I guess full size could be equal to available_, which could make the computed size zero.
bcwhite
2016/12/01 16:29:39
Done as part of the above.
|
| + if (size == 0) |
| + return; |
| // Allocate a chunk of memory. |
| Header* header = reinterpret_cast<Header*>(memory_); |
| @@ -303,6 +374,91 @@ void ActivityUserData::SetReference(StringPiece name, |
| Set(name, type, &rec, sizeof(rec)); |
| } |
| +void ActivityUserData::ImportExistingData() const { |
| + while (available_ > 0) { |
|
manzagop (departed)
2016/11/29 22:42:58
0 -> sizeof(Header)?
bcwhite
2016/12/01 16:29:39
Done.
|
| + Header* header = reinterpret_cast<Header*>(memory_); |
| + ValueType type = |
| + static_cast<ValueType>(header->type.load(std::memory_order_acquire)); |
| + if (type == END_OF_VALUES) |
| + return; |
| + if (header->record_size > available_) |
| + NOTREACHED(); // return; |
| + |
| + size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, |
| + kMemoryAlignment); |
| + if (header->record_size == value_offset && |
| + header->value_size.load(std::memory_order_relaxed) == 1) { |
| + value_offset -= 1; |
| + } |
| + if (value_offset + header->value_size > header->record_size) |
| + NOTREACHED(); // return; |
| + |
| + ValueInfo info; |
| + info.name = StringPiece(memory_ + sizeof(Header), header->name_size); |
| + info.type = type; |
| + info.memory = memory_ + value_offset; |
| + info.size_ptr = &header->value_size; |
| + info.extent = header->record_size - value_offset; |
| + |
| + StringPiece key(info.name); |
| + values_.insert(std::make_pair(key, std::move(info))); |
|
manzagop (departed)
2016/11/29 22:42:58
Do you want to support successive calls of ImportExistingData?
bcwhite
2016/12/01 16:29:39
It's called as part of every snapshot operation in case new data has been added.
manzagop (departed)
2016/12/02 22:13:32
Hm my comment was totally off! You scan from |memory_|/|available_|, which already account for previously imported data.
bcwhite
2016/12/08 21:30:56
Ah.  The |memory_| pointer always points to the end of the data imported so far.
|
| + |
| + memory_ += header->record_size; |
| + available_ -= header->record_size; |
| + } |
| +} |
| + |
| +bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { |
| + DCHECK(output_snapshot); |
| + DCHECK(output_snapshot->empty()); |
| + |
| + // Find any new data that may have been added by an active instance of this |
| + // class that is adding records. |
| + ImportExistingData(); |
| + |
| + for (const auto& entry : values_) { |
| + TypedValue value; |
| + value.type = entry.second.type; |
| + DCHECK_GE(entry.second.extent, |
| + entry.second.size_ptr->load(std::memory_order_relaxed)); |
| + |
| + switch (entry.second.type) { |
| + case RAW_VALUE: |
| + case STRING_VALUE: |
| + value.long_value = |
| + std::string(reinterpret_cast<char*>(entry.second.memory), |
| + entry.second.size_ptr->load(std::memory_order_relaxed)); |
| + break; |
| + case RAW_VALUE_REFERENCE: |
| + case STRING_VALUE_REFERENCE: { |
| + ReferenceRecord* ref = |
| + reinterpret_cast<ReferenceRecord*>(entry.second.memory); |
| + value.ref_value = StringPiece( |
| + reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), |
| + static_cast<size_t>(ref->size)); |
| + } break; |
| + case BOOL_VALUE: |
| + case CHAR_VALUE: |
| + value.short_value = *reinterpret_cast<char*>(entry.second.memory); |
| + break; |
| + case SIGNED_VALUE: |
| + case UNSIGNED_VALUE: |
| + value.short_value = *reinterpret_cast<uint64_t*>(entry.second.memory); |
| + break; |
| + case END_OF_VALUES: // Included for completeness purposes. |
| + NOTREACHED(); |
| + } |
| + auto inserted = output_snapshot->insert( |
| + std::make_pair(entry.second.name.as_string(), std::move(value))); |
| + DCHECK(inserted.second); // True if inserted, false if existed. |
| + } |
| + |
| + return true; |
| +} |
| + |
| +ActivitySnapshot::ActivitySnapshot() {} |
| +ActivitySnapshot::~ActivitySnapshot() {} |
| + |
| // This information is kept for every thread that is tracked. It is filled |
| // the very first time the thread is seen. All fields must be of exact sizes |
| // so there is no issue moving between 32 and 64-bit builds. |
| @@ -382,16 +538,6 @@ void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData( |
| tracker_->ChangeActivity(activity_id_, type, data); |
| } |
| -ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() { |
| - if (!user_data_) { |
| - if (tracker_) |
| - user_data_ = tracker_->GetUserData(activity_id_); |
| - else |
| - user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); |
| - } |
| - return *user_data_; |
| -} |
| - |
| ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
| : header_(static_cast<Header*>(base)), |
| stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + |
| @@ -547,11 +693,6 @@ void ThreadActivityTracker::PopActivity(ActivityId id) { |
| DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || |
| thread_checker_.CalledOnValidThread()); |
| - // Check if there was any user-data memory. It isn't free'd until later |
| - // because the call to release it can push something on the stack. |
| - PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data; |
| - stack_[depth].user_data = 0; |
| - |
| // The stack has shrunk meaning that some other thread trying to copy the |
| // contents for reporting purposes could get bad data. That thread would |
| // have written a non-zero value into |stack_unchanged|; clearing it here |
| @@ -559,27 +700,52 @@ void ThreadActivityTracker::PopActivity(ActivityId id) { |
| // happen after the atomic |depth| operation above so a "release" store |
| // is required. |
| header_->stack_unchanged.store(0, std::memory_order_release); |
| - |
| - // Release resources located above. All stack processing is done so it's |
| - // safe if some outside code does another push. |
| - if (user_data) |
| - GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data); |
| } |
| std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( |
| - ActivityId id) { |
| + ActivityId id, |
| + ActivityTrackerMemoryAllocator* allocator) { |
| // User-data is only stored for activities actually held in the stack. |
| if (id < stack_slots_) { |
| + // Don't allow user data for lock acquisition as recursion may occur. |
| + if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { |
| + NOTREACHED(); |
| + return MakeUnique<ActivityUserData>(nullptr, 0); |
| + } |
| + |
| + // Get (or reuse) a block of memory and create a real UserData object |
| + // on it. |
| + PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); |
| void* memory = |
| - GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data); |
| - if (memory) |
| - return MakeUnique<ActivityUserData>(memory, kUserDataSize); |
| + allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny); |
| + if (memory) { |
| + std::unique_ptr<ActivityUserData> user_data = |
| + MakeUnique<ActivityUserData>(memory, kUserDataSize); |
| + stack_[id].user_data_ref = ref; |
| + stack_[id].user_data_id = user_data->id(); |
| + return user_data; |
| + } |
| } |
| // Return a dummy object that will still accept (but ignore) Set() calls. |
| return MakeUnique<ActivityUserData>(nullptr, 0); |
| } |
| +bool ThreadActivityTracker::HasUserData(ActivityId id) { |
| + // User-data is only stored for activities actually held in the stack. |
| + return (id < stack_slots_ && stack_[id].user_data_ref); |
| +} |
| + |
| +void ThreadActivityTracker::ReleaseUserData( |
| + ActivityId id, |
| + ActivityTrackerMemoryAllocator* allocator) { |
| + // User-data is only stored for activities actually held in the stack. |
| + if (id < stack_slots_ && stack_[id].user_data_ref) { |
| + allocator->ReleaseObjectReference(stack_[id].user_data_ref); |
| + stack_[id].user_data_ref = 0; |
| + } |
| +} |
| + |
| bool ThreadActivityTracker::IsValid() const { |
| if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || |
| header_->process_id.load(std::memory_order_relaxed) == 0 || |
| @@ -594,7 +760,8 @@ bool ThreadActivityTracker::IsValid() const { |
| return valid_; |
| } |
| -bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const { |
| +bool ThreadActivityTracker::CreateSnapshot( |
| + ActivitySnapshot* output_snapshot) const { |
| DCHECK(output_snapshot); |
| // There is no "called on valid thread" check for this method as it can be |
| @@ -710,6 +877,41 @@ size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr; |
| +GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity( |
| + const void* program_counter, |
| + const void* origin, |
| + Activity::Type type, |
| + const ActivityData& data, |
| + bool lock_allowed) |
| + : ThreadActivityTracker::ScopedActivity( |
| + GetOrCreateTracker(lock_allowed), |
| + program_counter, |
| + origin, |
| + type, |
| + data) {} |
| + |
| +GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() { |
| + if (tracker_ && tracker_->HasUserData(activity_id_)) { |
| + GlobalActivityTracker* global = GlobalActivityTracker::Get(); |
| + AutoLock lock(global->user_data_allocator_lock_); |
| + tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_); |
| + } |
| +} |
| + |
| +ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { |
| + if (!user_data_) { |
| + if (tracker_) { |
| + GlobalActivityTracker* global = GlobalActivityTracker::Get(); |
| + AutoLock lock(global->user_data_allocator_lock_); |
| + user_data_ = |
| + tracker_->GetUserData(activity_id_, &global->user_data_allocator_); |
| + } else { |
| + user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); |
| + } |
| + } |
| + return *user_data_; |
| +} |
| + |
| GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( |
| PersistentMemoryAllocator::Reference mem_reference, |
| void* base, |
| @@ -835,29 +1037,6 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
| delete tracker; |
| } |
| -void* GlobalActivityTracker::GetUserDataMemory( |
| - PersistentMemoryAllocator::Reference* reference) { |
| - if (!*reference) { |
| - base::AutoLock autolock(user_data_allocator_lock_); |
| - *reference = user_data_allocator_.GetObjectReference(); |
| - if (!*reference) |
| - return nullptr; |
| - } |
| - |
| - void* memory = allocator_->GetAsArray<char>( |
| - *reference, kTypeIdUserDataRecord, PersistentMemoryAllocator::kSizeAny); |
| - DCHECK(memory); |
| - return memory; |
| -} |
| - |
| -void GlobalActivityTracker::ReleaseUserDataMemory( |
| - PersistentMemoryAllocator::Reference* reference) { |
| - DCHECK(*reference); |
| - base::AutoLock autolock(user_data_allocator_lock_); |
| - user_data_allocator_.ReleaseObjectReference(*reference); |
| - *reference = PersistentMemoryAllocator::kReferenceNull; |
| -} |
| - |
| GlobalActivityTracker::GlobalActivityTracker( |
| std::unique_ptr<PersistentMemoryAllocator> allocator, |
| int stack_depth) |
| @@ -890,6 +1069,10 @@ GlobalActivityTracker::GlobalActivityTracker( |
| // Ensure that there is no other global object and then make this one such. |
| DCHECK(!g_tracker_); |
| g_tracker_ = this; |
| + |
| + // The global user-data record must be iterable in order to be found by an |
| + // analyzer. |
| + // allocator_->MakeIterable(...); |
|
manzagop (departed)
2016/11/29 22:42:58
Uncomment?
bcwhite
2016/12/01 16:29:39
Done. I had to wait for a separate CL to land.
|
| } |
| GlobalActivityTracker::~GlobalActivityTracker() { |