Index: base/debug/activity_tracker.cc
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index efdf8c89ee6b3b241c532bf1a17e1e5132b0e9bc..4388c17665f99eac2c72d0adcb5e49a215bfe5ba 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -42,6 +42,7 @@ const int kMinStackDepth = 2;
 // The amount of memory set aside for holding arbitrary user data (key/value
 // pairs) globally or associated with ActivityData entries.
 const size_t kUserDataSize = 1 << 10;     // 1 KiB
+const size_t kProcessDataSize = 4 << 10;  // 4 KiB
 const size_t kGlobalDataSize = 16 << 10;  // 16 KiB
 const size_t kMaxUserDataNameLength =
     static_cast<size_t>(std::numeric_limits<uint8_t>::max());
@@ -64,6 +65,25 @@ union ThreadRef {
 #endif
 };
 
+// Finds and reuses a specific allocation or creates a new one.
+PersistentMemoryAllocator::Reference AllocateFrom(
+    PersistentMemoryAllocator* allocator,
+    uint32_t from_type,
+    size_t size,
+    uint32_t to_type) {
+  PersistentMemoryAllocator::Iterator iter(allocator);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(from_type)) != 0) {
+    DCHECK_LE(size, allocator->GetAllocSize(ref));
+    // This can fail if another thread has just taken it. It is assumed that
+    // the memory is cleared during the "free" operation.
+    if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
+      return ref;
+  }
+
+  return allocator->Allocate(size, to_type);
+}
+
 // Determines the previous aligned index.
 size_t RoundDownToAlignment(size_t index, size_t alignment) {
   return index & (0 - alignment);
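
The pattern above never truly frees persistent memory: a record is "freed" by flipping its type word to a designated free type, and AllocateFrom() later reclaims it by atomically flipping the type back via ChangeType(), which is a compare-and-swap, so only one thread can win a given record. A minimal caller-side sketch (the type ID and size here are hypothetical, not from this patch; the bitwise-complement free type follows the convention used later in RecordProcessExitImpl()):

    constexpr uint32_t kTypeIdFooRecord = 0x00F00F00;  // hypothetical
    constexpr uint32_t kTypeIdFooRecordFree = ~kTypeIdFooRecord;

    PersistentMemoryAllocator::Reference AcquireFooRecord(
        PersistentMemoryAllocator* allocator) {
      // Prefer a record released by an exited process; otherwise carve a
      // new one out of the persistent segment.
      return AllocateFrom(allocator, kTypeIdFooRecordFree,
                          /*size=*/256, kTypeIdFooRecord);
    }
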
@@ -246,32 +266,43 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const {
   return ref_value_;
 }
 
+// These are required because std::atomic is (currently) not a POD type and
+// thus clang requires explicit out-of-line constructors and destructors even
+// when they do nothing.
 ActivityUserData::ValueInfo::ValueInfo() {}
 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
 ActivityUserData::ValueInfo::~ValueInfo() {}
+ActivityUserData::MemoryHeader::MemoryHeader() {}
+ActivityUserData::MemoryHeader::~MemoryHeader() {}
+ActivityUserData::FieldHeader::FieldHeader() {}
+ActivityUserData::FieldHeader::~FieldHeader() {}
 
 StaticAtomicSequenceNumber ActivityUserData::next_id_;
 
 ActivityUserData::ActivityUserData(void* memory, size_t size)
     : memory_(reinterpret_cast<char*>(memory)),
       available_(RoundDownToAlignment(size, kMemoryAlignment)),
-      id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+      header_(reinterpret_cast<MemoryHeader*>(memory)) {
   // It's possible that no user data is being stored.
   if (!memory_)
     return;
 
-  DCHECK_LT(kMemoryAlignment, available_);
-  if (id_->load(std::memory_order_relaxed) == 0) {
-    // Generate a new ID and store it in the first 32-bit word of memory_.
-    // |id_| must be non-zero for non-sink instances.
+  static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
+  DCHECK_LT(sizeof(MemoryHeader), available_);
+  if (header_->data_id.load(std::memory_order_acquire) == 0) {
+    // Store the current process ID so analysis can determine which process
+    // generated the data. This is done first so that the release-store of
+    // |data_id| below publishes it.
+    header_->process_id = GetCurrentProcId();
+
+    // Generate a new ID and store it in the header.
+    // |data_id| must be non-zero for non-sink instances.
     uint32_t id;
     while ((id = next_id_.GetNext()) == 0)
       ;
-    id_->store(id, std::memory_order_relaxed);
-    DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
+    header_->data_id.store(id, std::memory_order_release);
   }
-  memory_ += kMemoryAlignment;
-  available_ -= kMemoryAlignment;
+  memory_ += sizeof(MemoryHeader);
+  available_ -= sizeof(MemoryHeader);
 
   // If there is already data present, load that. This allows the same class
   // to be used for analysis through snapshots.
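
The constructor relies on release/acquire ordering: |process_id| is written first, then |data_id| is published with a release store, so any reader that observes a non-zero |data_id| via an acquire load is guaranteed to also see the process ID written before it. The same idiom reduced to standard C++ (a sketch, not code from this patch):

    #include <atomic>
    #include <cstdint>

    struct Header {
      std::atomic<uint32_t> data_id{0};  // zero means "not yet published"
      int64_t process_id = 0;            // plain field, guarded by data_id
    };

    void Publish(Header* h, int64_t pid, uint32_t id) {
      h->process_id = pid;                              // written first
      h->data_id.store(id, std::memory_order_release);  // publishes the above
    }

    bool Consume(const Header* h, int64_t* out_pid) {
      if (h->data_id.load(std::memory_order_acquire) == 0)
        return false;            // not yet published
      *out_pid = h->process_id;  // safe: happens-after the release store
      return true;
    }
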
@@ -280,6 +311,81 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
 
 ActivityUserData::~ActivityUserData() {}
 
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+  DCHECK(output_snapshot);
+  DCHECK(output_snapshot->empty());
+
+  // Find any new data that may have been added by an active instance of this
+  // class that is adding records.
+  ImportExistingData();
+
+  for (const auto& entry : values_) {
+    TypedValue value;
+    value.type_ = entry.second.type;
+    DCHECK_GE(entry.second.extent,
+              entry.second.size_ptr->load(std::memory_order_relaxed));
+
+    switch (entry.second.type) {
+      case RAW_VALUE:
+      case STRING_VALUE:
+        value.long_value_ =
+            std::string(reinterpret_cast<char*>(entry.second.memory),
+                        entry.second.size_ptr->load(std::memory_order_relaxed));
+        break;
+      case RAW_VALUE_REFERENCE:
+      case STRING_VALUE_REFERENCE: {
+        ReferenceRecord* ref =
+            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+        value.ref_value_ = StringPiece(
+            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+            static_cast<size_t>(ref->size));
+      } break;
+      case BOOL_VALUE:
+      case CHAR_VALUE:
+        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+        break;
+      case SIGNED_VALUE:
+      case UNSIGNED_VALUE:
+        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+        break;
+      case END_OF_VALUES:  // Included for completeness purposes.
+        NOTREACHED();
+    }
+    auto inserted = output_snapshot->insert(
+        std::make_pair(entry.second.name.as_string(), std::move(value)));
+    DCHECK(inserted.second);  // True if inserted, false if existed.
+  }
+
+  return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() {
+  // The |memory_| pointer advances as elements are written but the |header_|
+  // value is always at the start of the block so just return that.
+  return header_;
+}
+
+void ActivityUserData::SetOwningProcessIdForTesting(ProcessId id,
+                                                    int64_t stamp) {
+  if (!header_)
+    return;
+  header_->process_id = id;
+  header_->create_stamp = stamp;
+}
+
+// static
+bool ActivityUserData::OwningProcessId(const void* memory,
+                                       ProcessId* out_id,
+                                       int64_t* out_stamp) {
+  const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
+  if (header->data_id.load(std::memory_order_acquire) == 0)
+    return false;
+
+  *out_id = static_cast<ProcessId>(header->process_id);
+  *out_stamp = header->create_stamp;
+  return true;
+}
+
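
OwningProcessId() is the reader side of that handshake and is what lets an analyzer attribute a raw memory block to a process. A sketch of analyzer-side use, assuming |memory| points at a user-data record in a mapped segment, |user_data| is an ActivityUserData wrapping that same block, and Snapshot is the name-to-TypedValue map declared in activity_tracker.h:

    ProcessId pid;
    int64_t create_stamp;
    if (ActivityUserData::OwningProcessId(memory, &pid, &create_stamp)) {
      // |pid| created this record at |create_stamp|, a
      // Time::ToInternalValue() timestamp.
    }

    ActivityUserData::Snapshot snapshot;
    if (user_data->CreateSnapshot(&snapshot)) {
      for (const auto& entry : snapshot) {
        // entry.first is the key; entry.second is its TypedValue.
      }
    }
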
 void ActivityUserData::Set(StringPiece name,
                            ValueType type,
                            const void* memory,
@@ -308,13 +414,13 @@ void ActivityUserData::Set(StringPiece name,
   // following field will be aligned properly.
   size_t name_size = name.length();
   size_t name_extent =
-      RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
-      sizeof(Header);
+      RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
+      sizeof(FieldHeader);
   size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
 
   // The "base size" is the size of the header and (padded) string key. Stop
   // now if there's not room enough for even this.
-  size_t base_size = sizeof(Header) + name_extent;
+  size_t base_size = sizeof(FieldHeader) + name_extent;
   if (base_size > available_)
     return;
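
A worked example of this layout math, assuming for illustration that sizeof(FieldHeader) == 8 and kMemoryAlignment == 8 (the real values live in this file and its header): for the key "url" (name_size == 3),

    name_extent  = RoundUpToAlignment(8 + 3, 8) - 8 = 16 - 8 = 8
    base_size    = 8 + 8 = 16
    value_extent = RoundUpToAlignment(size, 8)

so the 3-byte key occupies 8 bytes (3 used, 5 padding), which keeps the value that follows it aligned.
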
@@ -336,7 +442,7 @@
       return;
 
   // Allocate a chunk of memory.
-  Header* header = reinterpret_cast<Header*>(memory_);
+  FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
   memory_ += full_size;
   available_ -= full_size;
 
@@ -346,9 +452,9 @@
   DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
   header->name_size = static_cast<uint8_t>(name_size);
   header->record_size = full_size;
-  char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+  char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
   void* value_memory =
-      reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+      reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
   memcpy(name_memory, name.data(), name_size);
   header->type.store(type, std::memory_order_release);
 
@@ -362,7 +468,7 @@
   info->name = persistent_name;
   info->memory = value_memory;
   info->size_ptr = &header->value_size;
-  info->extent = full_size - sizeof(Header) - name_extent;
+  info->extent = full_size - sizeof(FieldHeader) - name_extent;
   info->type = type;
 }
 
@@ -387,8 +493,8 @@
 }
 
 void ActivityUserData::ImportExistingData() const {
-  while (available_ > sizeof(Header)) {
-    Header* header = reinterpret_cast<Header*>(memory_);
+  while (available_ > sizeof(FieldHeader)) {
+    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
     ValueType type =
         static_cast<ValueType>(header->type.load(std::memory_order_acquire));
     if (type == END_OF_VALUES)
@@ -396,8 +502,8 @@ void ActivityUserData::ImportExistingData() const {
     if (header->record_size > available_)
       return;
 
-    size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
-                                             kMemoryAlignment);
+    size_t value_offset = RoundUpToAlignment(
+        sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
     if (header->record_size == value_offset &&
         header->value_size.load(std::memory_order_relaxed) == 1) {
       value_offset -= 1;
@@ -406,7 +512,7 @@ void ActivityUserData::ImportExistingData() const {
       return;
 
     ValueInfo info;
-    info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+    info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
     info.type = type;
     info.memory = memory_ + value_offset;
     info.size_ptr = &header->value_size;
@@ -420,60 +526,6 @@ void ActivityUserData::ImportExistingData() const {
   }
 }
 
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-  DCHECK(output_snapshot->empty());
-
-  // Find any new data that may have been added by an active instance of this
-  // class that is adding records.
-  ImportExistingData();
-
-  for (const auto& entry : values_) {
-    TypedValue value;
-    value.type_ = entry.second.type;
-    DCHECK_GE(entry.second.extent,
-              entry.second.size_ptr->load(std::memory_order_relaxed));
-
-    switch (entry.second.type) {
-      case RAW_VALUE:
-      case STRING_VALUE:
-        value.long_value_ =
-            std::string(reinterpret_cast<char*>(entry.second.memory),
-                        entry.second.size_ptr->load(std::memory_order_relaxed));
-        break;
-      case RAW_VALUE_REFERENCE:
-      case STRING_VALUE_REFERENCE: {
-        ReferenceRecord* ref =
-            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
-        value.ref_value_ = StringPiece(
-            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
-            static_cast<size_t>(ref->size));
-      } break;
-      case BOOL_VALUE:
-      case CHAR_VALUE:
-        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
-        break;
-      case SIGNED_VALUE:
-      case UNSIGNED_VALUE:
-        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
-        break;
-      case END_OF_VALUES:  // Included for completeness purposes.
-        NOTREACHED();
-    }
-    auto inserted = output_snapshot->insert(
-        std::make_pair(entry.second.name.as_string(), std::move(value)));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-  }
-
-  return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() {
-  // The |memory_| pointer advances as elements are written but the |id_|
-  // value is always at the start of the block so just return that.
-  return id_;
-}
-
 // This information is kept for every thread that is tracked. It is filled
 // the very first time the thread is seen. All fields must be of exact sizes
 // so there is no issue moving between 32 and 64-bit builds.
@@ -890,6 +942,30 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
   return false;
 }
 
+const void* ThreadActivityTracker::GetBaseAddress() {
+  return header_;
+}
+
+void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId id,
+                                                         int64_t stamp) {
+  header_->process_id.store(id, std::memory_order_relaxed);
+  header_->start_time = stamp;
+}
+
+// static
+bool ThreadActivityTracker::OwningProcessId(const void* memory,
+                                            ProcessId* out_id,
+                                            int64_t* out_stamp) {
+  const Header* header = reinterpret_cast<const Header*>(memory);
+  if (header->cookie.load(std::memory_order_acquire) != kHeaderCookie)
+    return false;
+
+  *out_id = static_cast<ProcessId>(
+      header->process_id.load(std::memory_order_relaxed));
+  *out_stamp = header->start_time;
+  return true;
+}
+
 // static
 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
   return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
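
Callers size a tracker's block from the desired stack depth before allocating it. A sketch (the depth and resulting byte counts are illustrative only; they depend on sizeof(Activity) and sizeof(Header) for the build):

    // e.g. with sizeof(Activity) == 96 and sizeof(Header) == 80, a 48-frame
    // stack needs 48 * 96 + 80 = 4688 bytes.
    const size_t stack_memory = ThreadActivityTracker::SizeForStackDepth(48);
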
@@ -1184,6 +1260,81 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
     delete tracker;
 }
 
+void GlobalActivityTracker::SetBackgroundTaskRunner(
+    const scoped_refptr<TaskRunner>& runner) {
+  AutoLock lock(global_tracker_lock_);
+  background_task_runner_ = runner;
+}
+
+void GlobalActivityTracker::RecordProcessLaunch(ProcessId process_id) {}
+
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
+                                              int exit_code) {
+  DCHECK_NE(GetCurrentProcId(), process_id);
+
+  int64_t now_stamp = Time::Now().ToInternalValue();
+
+  // The persistent allocator is thread-safe so run the iteration and
+  // adjustments on a worker thread if one was provided.
+  {
+    AutoLock lock(global_tracker_lock_);
+    if (background_task_runner_ &&
+        !background_task_runner_->RunsTasksOnCurrentThread()) {
+      background_task_runner_->PostTask(
+          FROM_HERE, Bind(&GlobalActivityTracker::RecordProcessExitImpl,
+                          Unretained(this), process_id, exit_code, now_stamp));
+      return;
+    }
+  }
+
+  RecordProcessExitImpl(process_id, exit_code, now_stamp);
+}
+
+void GlobalActivityTracker::RecordProcessExitImpl(ProcessId process_id,
+                                                  int exit_code,
+                                                  int64_t exit_stamp) {
+  // The process may not have exited cleanly so it's necessary to go through
+  // all the data structures it may have allocated in the persistent memory
+  // segment and mark them as "released". This will allow them to be reused
+  // later on. Memory is cleared here, rather than when it's needed, so as to
+  // limit the impact at that critical time.
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  PersistentMemoryAllocator::Reference ref;
+  uint32_t type;
+  while ((ref = iter.GetNext(&type)) != 0) {
+    const void* memory = allocator_->GetAsArray<char>(
+        ref, type, PersistentMemoryAllocator::kSizeAny);
+    ProcessId found_id;
+    int64_t create_stamp;
+
+    switch (type) {
+      case kTypeIdActivityTracker:
+        if (ThreadActivityTracker::OwningProcessId(memory, &found_id,
+                                                   &create_stamp)) {
+          break;
+        }
+        continue;
+
+      case kTypeIdUserDataRecord:
+      case kTypeIdProcessDataRecord:
+        if (ActivityUserData::OwningProcessId(memory, &found_id,
+                                              &create_stamp)) {
+          break;
+        }
+        continue;
+
+      default:
+        continue;
+    }
+
+    // Only change the type to be "free" if the process ID matches and the
+    // creation time is before the exit time (so PID re-use doesn't cause
+    // the erasure of something that is in-use).
+    if (found_id == process_id && create_stamp < exit_stamp)
+      allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
+  }
+}
+
 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
   // Allocate at least one extra byte so the string is NUL terminated. All
   // memory returned by the allocator is guaranteed to be zeroed.
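
The split between RecordProcessExit() and RecordProcessExitImpl() exists so the allocator sweep can run off the caller's thread when a runner has been provided. A sketch of the intended wiring (GetBackgroundRunner() is a hypothetical helper standing in for whatever TaskRunner the embedder uses):

    scoped_refptr<TaskRunner> runner = GetBackgroundRunner();  // hypothetical
    GlobalActivityTracker* tracker = GlobalActivityTracker::Get();
    tracker->SetBackgroundTaskRunner(runner);

    // Later, when a child process is reaped:
    tracker->RecordProcessExit(child_pid, exit_code);
    // Off-runner callers get the sweep posted to |runner|; on-runner (or
    // runner-less) callers fall through to RecordProcessExitImpl() inline.
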
@@ -1247,12 +1398,20 @@ GlobalActivityTracker::GlobalActivityTracker(
                            kTypeIdUserDataRecordFree,
                            kUserDataSize,
                            kCachedUserDataMemories,
-                           /*make_iterable=*/false),
+                           /*make_iterable=*/true),
+      process_data_(allocator_->GetAsArray<char>(
+                        AllocateFrom(allocator_.get(),
+                                     kTypeIdProcessDataRecordFree,
+                                     kProcessDataSize,
+                                     kTypeIdProcessDataRecord),
+                        kTypeIdProcessDataRecord,
+                        kProcessDataSize),
+                    kProcessDataSize),
       global_data_(
           allocator_->GetAsArray<char>(
               allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
               kTypeIdGlobalDataRecord,
-              PersistentMemoryAllocator::kSizeAny),
+              kGlobalDataSize),
           kGlobalDataSize) {
   // Ensure the passed memory is valid and empty (iterator finds nothing).
   uint32_t type;
@@ -1262,7 +1421,9 @@
   DCHECK(!g_tracker_);
   subtle::NoBarrier_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
 
-  // The global records must be iterable in order to be found by an analyzer.
+  // The data records must be iterable in order to be found by an analyzer.
+  allocator_->MakeIterable(allocator_->GetAsReference(
+      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
   allocator_->MakeIterable(allocator_->GetAsReference(
       global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
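
Making the records iterable is what lets an analyzer process discover them without knowing their references in advance. A sketch of the analyzer side, mirroring the loop in RecordProcessExitImpl() (|allocator| is assumed to be a PersistentMemoryAllocator mapped read-only over the same segment):

    PersistentMemoryAllocator::Iterator iter(allocator);
    PersistentMemoryAllocator::Reference ref;
    uint32_t type;
    while ((ref = iter.GetNext(&type)) != 0) {
      if (type != kTypeIdProcessDataRecord)
        continue;
      const char* data = allocator->GetAsArray<char>(
          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
      // Wrap |data| in an ActivityUserData to snapshot its key/value pairs.
    }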