Index: base/debug/activity_tracker.cc |
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc |
index 09946eed7296a2088f152b30ab46b513ea6dcf05..c591556d3aa2537328ca571f11ecfee759878908 100644 |
--- a/base/debug/activity_tracker.cc |
+++ b/base/debug/activity_tracker.cc |
@@ -30,18 +30,13 @@ namespace debug { |
namespace { |
-// A number that identifies the memory as having been initialized. It's |
-// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). |
-// A version number is added on so that major structure changes won't try to |
-// read an older version (since the cookie won't match). |
-const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 |
- |
// The minimum depth a stack should support. |
const int kMinStackDepth = 2; |
// The amount of memory set aside for holding arbitrary user data (key/value |
// pairs) globally or associated with ActivityData entries. |
const size_t kUserDataSize = 1 << 10; // 1 KiB |
+const size_t kProcessDataSize = 4 << 10; // 4 KiB |
const size_t kGlobalDataSize = 16 << 10; // 16 KiB |
const size_t kMaxUserDataNameLength = |
static_cast<size_t>(std::numeric_limits<uint8_t>::max()); |
@@ -49,6 +44,10 @@ const size_t kMaxUserDataNameLength = |
// A constant used to indicate that module information is changing. |
const uint32_t kModuleInformationChanging = 0x80000000; |
+// An atomically incrementing number, used to check for recreations of objects |
+// in the same memory space. |
+StaticAtomicSequenceNumber g_next_id; |
+ |
union ThreadRef { |
int64_t as_id; |
#if defined(OS_WIN) |
@@ -64,6 +63,33 @@ union ThreadRef { |
#endif |
}; |
+// Get the next non-zero identifier. It is only unique within a process. |
+uint32_t GetNextDataId() { |
+ uint32_t id; |
+ while ((id = g_next_id.GetNext()) == 0) |
+ ; |
+ return id; |
+} |
+ |
+// Finds and reuses a specific allocation or creates a new one. |
+PersistentMemoryAllocator::Reference AllocateFrom( |
+ PersistentMemoryAllocator* allocator, |
+ uint32_t from_type, |
+ size_t size, |
+ uint32_t to_type) { |
+ PersistentMemoryAllocator::Iterator iter(allocator); |
+ PersistentMemoryAllocator::Reference ref; |
+ while ((ref = iter.GetNextOfType(from_type)) != 0) { |
+ DCHECK_LE(size, allocator->GetAllocSize(ref)); |
+  // This can fail if another thread has just taken it. It is assumed that |
+ // the memory is cleared during the "free" operation. |
+ if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false)) |
+ return ref; |
+ } |
+ |
+ return allocator->Allocate(size, to_type); |
+} |
+ |
// Determines the previous aligned index. |
size_t RoundDownToAlignment(size_t index, size_t alignment) { |
return index & (0 - alignment); |
@@ -76,6 +102,35 @@ size_t RoundUpToAlignment(size_t index, size_t alignment) { |
} // namespace |
+ProcessInfo::ProcessInfo() {} |
+ProcessInfo::~ProcessInfo() {} |
+ |
+void ProcessInfo::Release_Initialize() { |
+ uint32_t old_id = data_id.load(std::memory_order_acquire); |
+ DCHECK_EQ(0U, old_id); |
+ process_id = GetCurrentProcId(); |
+ create_stamp = Time::Now().ToInternalValue(); |
+ data_id.store(GetNextDataId(), std::memory_order_release); |
+} |
+ |
+void ProcessInfo::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) { |
+ process_id = pid; |
+ create_stamp = stamp; |
manzagop (departed)
2017/02/22 20:44:15
Do you need/want to set data_id so that ProcessInf
bcwhite
2017/02/22 22:13:02
All three already have valid, non-zero values. Th
manzagop (departed)
2017/02/24 15:56:35
Is that because calling Release_Initialize is a pr
bcwhite
2017/03/06 16:33:51
Done.
|
+} |
+ |
+// static |
+bool ProcessInfo::OwningProcessId(const void* memory, |
+ ProcessId* out_id, |
+ int64_t* out_stamp) { |
+ const ProcessInfo* info = reinterpret_cast<const ProcessInfo*>(memory); |
+ uint32_t id = info->data_id.load(std::memory_order_acquire); |
+ if (id == 0) |
+ return false; |
+ |
+ *out_id = static_cast<ProcessId>(info->process_id); |
+ *out_stamp = info->create_stamp; |
+ return id == info->data_id.load(std::memory_order_seq_cst); |
manzagop (departed)
2017/02/22 20:44:15
Can you say more about this operation?
IIUC there
bcwhite
2017/02/22 22:13:02
It could since memset doesn't define the order in
manzagop (departed)
2017/02/24 15:56:35
I'm totally going to forget about this! :) Could y
bcwhite
2017/03/06 16:33:51
It's already out for review:
https://codereview.ch
|
+} |
// It doesn't matter what is contained in this (though it will be all zeros) |
// as only the address of it is important. |
@@ -246,32 +301,31 @@ StringPiece ActivityUserData::TypedValue::GetStringReference() const { |
return ref_value_; |
} |
+// These are required because std::atomic is (currently) not a POD type and |
+// thus clang requires explicit out-of-line constructors and destructors even |
+// when they do nothing. |
ActivityUserData::ValueInfo::ValueInfo() {} |
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
ActivityUserData::ValueInfo::~ValueInfo() {} |
- |
-StaticAtomicSequenceNumber ActivityUserData::next_id_; |
+ActivityUserData::MemoryHeader::MemoryHeader() {} |
+ActivityUserData::MemoryHeader::~MemoryHeader() {} |
+ActivityUserData::FieldHeader::FieldHeader() {} |
+ActivityUserData::FieldHeader::~FieldHeader() {} |
ActivityUserData::ActivityUserData(void* memory, size_t size) |
: memory_(reinterpret_cast<char*>(memory)), |
available_(RoundDownToAlignment(size, kMemoryAlignment)), |
- id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { |
+ header_(reinterpret_cast<MemoryHeader*>(memory)) { |
// It's possible that no user data is being stored. |
if (!memory_) |
return; |
- DCHECK_LT(kMemoryAlignment, available_); |
- if (id_->load(std::memory_order_relaxed) == 0) { |
- // Generate a new ID and store it in the first 32-bit word of memory_. |
- // |id_| must be non-zero for non-sink instances. |
- uint32_t id; |
- while ((id = next_id_.GetNext()) == 0) |
- ; |
- id_->store(id, std::memory_order_relaxed); |
- DCHECK_NE(0U, id_->load(std::memory_order_relaxed)); |
- } |
- memory_ += kMemoryAlignment; |
- available_ -= kMemoryAlignment; |
+ static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); |
+ DCHECK_LT(sizeof(MemoryHeader), available_); |
+ if (header_->process_info.data_id.load(std::memory_order_acquire) == 0) |
+ header_->process_info.Release_Initialize(); |
+ memory_ += sizeof(MemoryHeader); |
+ available_ -= sizeof(MemoryHeader); |
// If there is already data present, load that. This allows the same class |
// to be used for analysis through snapshots. |
@@ -280,6 +334,75 @@ ActivityUserData::ActivityUserData(void* memory, size_t size) |
ActivityUserData::~ActivityUserData() {} |
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { |
+ DCHECK(output_snapshot); |
+ DCHECK(output_snapshot->empty()); |
+ |
+ // Find any new data that may have been added by an active instance of this |
+ // class that is adding records. |
+ ImportExistingData(); |
+ |
+ for (const auto& entry : values_) { |
+ TypedValue value; |
+ value.type_ = entry.second.type; |
+ DCHECK_GE(entry.second.extent, |
+ entry.second.size_ptr->load(std::memory_order_relaxed)); |
+ |
+ switch (entry.second.type) { |
+ case RAW_VALUE: |
+ case STRING_VALUE: |
+ value.long_value_ = |
+ std::string(reinterpret_cast<char*>(entry.second.memory), |
+ entry.second.size_ptr->load(std::memory_order_relaxed)); |
+ break; |
+ case RAW_VALUE_REFERENCE: |
+ case STRING_VALUE_REFERENCE: { |
+ ReferenceRecord* ref = |
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory); |
+ value.ref_value_ = StringPiece( |
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), |
+ static_cast<size_t>(ref->size)); |
+ } break; |
+ case BOOL_VALUE: |
+ case CHAR_VALUE: |
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); |
+ break; |
+ case SIGNED_VALUE: |
+ case UNSIGNED_VALUE: |
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); |
+ break; |
+ case END_OF_VALUES: // Included for completeness purposes. |
+ NOTREACHED(); |
+ } |
+ auto inserted = output_snapshot->insert( |
+ std::make_pair(entry.second.name.as_string(), std::move(value))); |
+ DCHECK(inserted.second); // True if inserted, false if existed. |
+ } |
+ |
+ return true; |
+} |
+ |
+const void* ActivityUserData::GetBaseAddress() { |
+ // The |memory_| pointer advances as elements are written but the |header_| |
+ // value is always at the start of the block so just return that. |
+ return header_; |
+} |
+ |
+void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid, |
+ int64_t stamp) { |
+ if (!header_) |
+ return; |
+ header_->process_info.SetOwningProcessIdForTesting(pid, stamp); |
+} |
+ |
+// static |
+bool ActivityUserData::OwningProcessId(const void* memory, |
+ ProcessId* out_id, |
+ int64_t* out_stamp) { |
+ const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory); |
+ return ProcessInfo::OwningProcessId(&header->process_info, out_id, out_stamp); |
+} |
+ |
void ActivityUserData::Set(StringPiece name, |
ValueType type, |
const void* memory, |
@@ -308,13 +431,13 @@ void ActivityUserData::Set(StringPiece name, |
// following field will be aligned properly. |
size_t name_size = name.length(); |
size_t name_extent = |
- RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - |
- sizeof(Header); |
+ RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) - |
+ sizeof(FieldHeader); |
size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); |
// The "base size" is the size of the header and (padded) string key. Stop |
// now if there's not room enough for even this. |
- size_t base_size = sizeof(Header) + name_extent; |
+ size_t base_size = sizeof(FieldHeader) + name_extent; |
if (base_size > available_) |
return; |
@@ -336,7 +459,7 @@ void ActivityUserData::Set(StringPiece name, |
return; |
// Allocate a chunk of memory. |
- Header* header = reinterpret_cast<Header*>(memory_); |
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); |
memory_ += full_size; |
available_ -= full_size; |
@@ -346,9 +469,9 @@ void ActivityUserData::Set(StringPiece name, |
DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); |
header->name_size = static_cast<uint8_t>(name_size); |
header->record_size = full_size; |
- char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); |
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader); |
void* value_memory = |
- reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; |
+ reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent; |
memcpy(name_memory, name.data(), name_size); |
header->type.store(type, std::memory_order_release); |
@@ -362,7 +485,7 @@ void ActivityUserData::Set(StringPiece name, |
info->name = persistent_name; |
info->memory = value_memory; |
info->size_ptr = &header->value_size; |
- info->extent = full_size - sizeof(Header) - name_extent; |
+ info->extent = full_size - sizeof(FieldHeader) - name_extent; |
info->type = type; |
} |
@@ -387,8 +510,8 @@ void ActivityUserData::SetReference(StringPiece name, |
} |
void ActivityUserData::ImportExistingData() const { |
- while (available_ > sizeof(Header)) { |
- Header* header = reinterpret_cast<Header*>(memory_); |
+ while (available_ > sizeof(FieldHeader)) { |
+ FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); |
ValueType type = |
static_cast<ValueType>(header->type.load(std::memory_order_acquire)); |
if (type == END_OF_VALUES) |
@@ -396,8 +519,8 @@ void ActivityUserData::ImportExistingData() const { |
if (header->record_size > available_) |
return; |
- size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, |
- kMemoryAlignment); |
+ size_t value_offset = RoundUpToAlignment( |
+ sizeof(FieldHeader) + header->name_size, kMemoryAlignment); |
if (header->record_size == value_offset && |
header->value_size.load(std::memory_order_relaxed) == 1) { |
value_offset -= 1; |
@@ -406,7 +529,7 @@ void ActivityUserData::ImportExistingData() const { |
return; |
ValueInfo info; |
- info.name = StringPiece(memory_ + sizeof(Header), header->name_size); |
+ info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size); |
info.type = type; |
info.memory = memory_ + value_offset; |
info.size_ptr = &header->value_size; |
@@ -420,60 +543,6 @@ void ActivityUserData::ImportExistingData() const { |
} |
} |
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { |
- DCHECK(output_snapshot); |
- DCHECK(output_snapshot->empty()); |
- |
- // Find any new data that may have been added by an active instance of this |
- // class that is adding records. |
- ImportExistingData(); |
- |
- for (const auto& entry : values_) { |
- TypedValue value; |
- value.type_ = entry.second.type; |
- DCHECK_GE(entry.second.extent, |
- entry.second.size_ptr->load(std::memory_order_relaxed)); |
- |
- switch (entry.second.type) { |
- case RAW_VALUE: |
- case STRING_VALUE: |
- value.long_value_ = |
- std::string(reinterpret_cast<char*>(entry.second.memory), |
- entry.second.size_ptr->load(std::memory_order_relaxed)); |
- break; |
- case RAW_VALUE_REFERENCE: |
- case STRING_VALUE_REFERENCE: { |
- ReferenceRecord* ref = |
- reinterpret_cast<ReferenceRecord*>(entry.second.memory); |
- value.ref_value_ = StringPiece( |
- reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), |
- static_cast<size_t>(ref->size)); |
- } break; |
- case BOOL_VALUE: |
- case CHAR_VALUE: |
- value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); |
- break; |
- case SIGNED_VALUE: |
- case UNSIGNED_VALUE: |
- value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); |
- break; |
- case END_OF_VALUES: // Included for completeness purposes. |
- NOTREACHED(); |
- } |
- auto inserted = output_snapshot->insert( |
- std::make_pair(entry.second.name.as_string(), std::move(value))); |
- DCHECK(inserted.second); // True if inserted, false if existed. |
- } |
- |
- return true; |
-} |
- |
-const void* ActivityUserData::GetBaseAddress() { |
- // The |memory_| pointer advances as elements are written but the |id_| |
- // value is always at the start of the block so just return that. |
- return id_; |
-} |
- |
// This information is kept for every thread that is tracked. It is filled |
// the very first time the thread is seen. All fields must be of exact sizes |
// so there is no issue moving between 32 and 64-bit builds. |
@@ -483,27 +552,15 @@ struct ThreadActivityTracker::Header { |
GlobalActivityTracker::kTypeIdActivityTracker; |
// Expected size for 32/64-bit check. |
- static constexpr size_t kExpectedInstanceSize = 80; |
- |
- // This unique number indicates a valid initialization of the memory. |
- std::atomic<uint32_t> cookie; |
+ static constexpr size_t kExpectedInstanceSize = |
+ ProcessInfo::kExpectedInstanceSize + 72; |
- // The number of Activity slots (spaces that can hold an Activity) that |
- // immediately follow this structure in memory. |
- uint32_t stack_slots; |
+ // This information uniquely identifies a process. |
+ ProcessInfo process_info; |
- // The process-id and thread-id (thread_ref.as_id) to which this data belongs. |
- // These identifiers are not guaranteed to mean anything but are unique, in |
- // combination, among all active trackers. It would be nice to always have |
- // the process_id be a 64-bit value but the necessity of having it atomic |
- // (for the memory barriers it provides) limits it to the natural word size |
- // of the machine. |
-#ifdef ARCH_CPU_64_BITS |
- std::atomic<int64_t> process_id; |
-#else |
- std::atomic<int32_t> process_id; |
- int32_t process_id_padding; |
-#endif |
+ // The thread-id (thread_ref.as_id) to which this data belongs. This number |
+ // is not guaranteed to mean anything but combined with the process-id from |
+ // ProcessInfo is unique among all active trackers. |
ThreadRef thread_ref; |
// The start-time and start-ticks when the data was created. Each activity |
@@ -512,6 +569,13 @@ struct ThreadActivityTracker::Header { |
int64_t start_time; |
int64_t start_ticks; |
+ // The number of Activity slots (spaces that can hold an Activity) that |
+ // immediately follow this structure in memory. |
+ uint32_t stack_slots; |
+ |
+ // Some padding to keep everything 64-bit aligned. |
+ uint32_t padding; |
+ |
// The current depth of the stack. This may be greater than the number of |
// slots. If the depth exceeds the number of slots, the newest entries |
// won't be recorded. |
@@ -594,9 +658,10 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
"ActivityData.data is not 64-bit aligned"); |
// Provided memory should either be completely initialized or all zeros. |
- if (header_->cookie.load(std::memory_order_relaxed) == 0) { |
+ if (header_->process_info.data_id.load(std::memory_order_relaxed) == 0) { |
// This is a new file. Double-check other fields and then initialize. |
- DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); |
+ DCHECK_EQ(0, header_->process_info.process_id); |
+ DCHECK_EQ(0, header_->process_info.create_stamp); |
DCHECK_EQ(0, header_->thread_ref.as_id); |
DCHECK_EQ(0, header_->start_time); |
DCHECK_EQ(0, header_->start_ticks); |
@@ -614,7 +679,6 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
header_->thread_ref.as_handle = |
PlatformThread::CurrentHandle().platform_handle(); |
#endif |
- header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed); |
header_->start_time = base::Time::Now().ToInternalValue(); |
header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); |
@@ -624,7 +688,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) |
// This is done last so as to guarantee that everything above is "released" |
// by the time this value gets written. |
- header_->cookie.store(kHeaderCookie, std::memory_order_release); |
+ header_->process_info.Release_Initialize(); |
valid_ = true; |
DCHECK(IsValid()); |
@@ -769,11 +833,9 @@ void ThreadActivityTracker::ReleaseUserData( |
} |
bool ThreadActivityTracker::IsValid() const { |
- if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || |
- header_->process_id.load(std::memory_order_relaxed) == 0 || |
- header_->thread_ref.as_id == 0 || |
- header_->start_time == 0 || |
- header_->start_ticks == 0 || |
+ if (header_->process_info.data_id.load(std::memory_order_acquire) == 0 || |
+ header_->process_info.process_id == 0 || header_->thread_ref.as_id == 0 || |
+ header_->start_time == 0 || header_->start_ticks == 0 || |
header_->stack_slots != stack_slots_ || |
header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { |
return false; |
@@ -804,12 +866,12 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { |
output_snapshot->activity_stack.reserve(stack_slots_); |
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
- // Remember the process and thread IDs to ensure they aren't replaced |
- // during the snapshot operation. Use "acquire" to ensure that all the |
- // non-atomic fields of the structure are valid (at least at the current |
- // moment in time). |
- const int64_t starting_process_id = |
- header_->process_id.load(std::memory_order_acquire); |
+ // Remember the data IDs to ensure nothing is replaced during the snapshot |
+ // operation. Use "acquire" so that all the non-atomic fields of the |
+ // structure are valid (at least at the current moment in time). |
+ const uint32_t starting_id = |
+ header_->process_info.data_id.load(std::memory_order_acquire); |
+ const int64_t starting_process_id = header_->process_info.process_id; |
const int64_t starting_thread_id = header_->thread_ref.as_id; |
// Write a non-zero value to |stack_unchanged| so it's possible to detect |
@@ -850,8 +912,7 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { |
output_snapshot->thread_name = |
std::string(header_->thread_name, sizeof(header_->thread_name) - 1); |
output_snapshot->thread_id = header_->thread_ref.as_id; |
- output_snapshot->process_id = |
- header_->process_id.load(std::memory_order_seq_cst); |
+ output_snapshot->process_id = header_->process_info.process_id; |
// All characters of the thread-name buffer were copied so as to not break |
// if the trailing NUL were missing. Now limit the length if the actual |
@@ -859,9 +920,11 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { |
output_snapshot->thread_name.resize( |
strlen(output_snapshot->thread_name.c_str())); |
- // If the process or thread ID has changed then the tracker has exited and |
- // the memory reused by a new one. Try again. |
- if (output_snapshot->process_id != starting_process_id || |
+ // If the data ID has changed then the tracker has exited and the memory |
+ // reused by a new one. Try again. |
+ if (header_->process_info.data_id.load(std::memory_order_seq_cst) != |
+ starting_id || |
+ output_snapshot->process_id != starting_process_id || |
output_snapshot->thread_id != starting_thread_id) { |
continue; |
} |
@@ -890,6 +953,23 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { |
return false; |
} |
+const void* ThreadActivityTracker::GetBaseAddress() { |
+ return header_; |
+} |
+ |
+void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid, |
+ int64_t stamp) { |
+ header_->process_info.SetOwningProcessIdForTesting(pid, stamp); |
+} |
+ |
+// static |
+bool ThreadActivityTracker::OwningProcessId(const void* memory, |
+ ProcessId* out_id, |
+ int64_t* out_stamp) { |
+ const Header* header = reinterpret_cast<const Header*>(memory); |
+ return ProcessInfo::OwningProcessId(&header->process_info, out_id, out_stamp); |
+} |
+ |
// static |
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); |
@@ -977,6 +1057,9 @@ bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom( |
pickle_size = pickler.size(); |
changes.store(0, std::memory_order_relaxed); |
+ // Initialize the process info. |
+ process_info.Release_Initialize(); |
+ |
// Now set those fields that can change. |
return UpdateFrom(info); |
} |
@@ -1051,15 +1134,16 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { |
return *user_data_; |
} |
-GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) |
+GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, |
+ size_t size) |
: ActivityUserData(memory, size) {} |
-GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} |
+GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} |
-void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, |
- ValueType type, |
- const void* memory, |
- size_t size) { |
+void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name, |
+ ValueType type, |
+ const void* memory, |
+ size_t size) { |
AutoLock lock(data_lock_); |
ActivityUserData::Set(name, type, memory, size); |
} |
@@ -1184,6 +1268,83 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
delete tracker; |
} |
+void GlobalActivityTracker::SetBackgroundTaskRunner( |
+ const scoped_refptr<TaskRunner>& runner) { |
+ AutoLock lock(global_tracker_lock_); |
+ background_task_runner_ = runner; |
+} |
+ |
+void GlobalActivityTracker::RecordProcessLaunch(ProcessId process_id) { |
+ base::AutoLock lock(global_tracker_lock_); |
+ DCHECK(!base::ContainsKey(known_processes_, process_id)); |
manzagop (departed)
2017/02/22 20:44:15
This is possible due to pid recycling. The map sho
bcwhite
2017/02/22 22:13:02
It's possible only if there was no corresponding R
|
+ known_processes_.insert(process_id); |
+} |
+ |
+void GlobalActivityTracker::RecordProcessExit(ProcessId process_id, |
+ int exit_code) { |
+ DCHECK_NE(GetCurrentProcId(), process_id); |
+ |
+ scoped_refptr<TaskRunner> task_runner; |
+ { |
+ base::AutoLock lock(global_tracker_lock_); |
+ task_runner = background_task_runner_; |
+ auto found = known_processes_.find(process_id); |
+ if (found != known_processes_.end()) |
+ known_processes_.erase(found); |
+ else |
+ DLOG(ERROR) << "Recording exit of unknown process #" << process_id; |
+ } |
+ |
+ int64_t now_stamp = Time::Now().ToInternalValue(); |
+ |
+ // The persistent allocator is thread-safe so run the iteration and |
+ // adjustments on a worker thread if one was provided. |
+ if (task_runner && !task_runner->RunsTasksOnCurrentThread()) { |
+ task_runner->PostTask(FROM_HERE, |
+ Bind(&GlobalActivityTracker::CleanupAfterProcess, |
+ Unretained(this), process_id, now_stamp)); |
+ return; |
+ } |
+ |
+ CleanupAfterProcess(process_id, now_stamp); |
+} |
+ |
+void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id, |
+ int64_t exit_stamp) { |
+  // The process may not have exited cleanly so it's necessary to go through |
+ // all the data structures it may have allocated in the persistent memory |
+ // segment and mark them as "released". This will allow them to be reused |
+ // later on. Memory is cleared here, rather than when it's needed, so as to |
+ // limit the impact at that critical time. |
+ PersistentMemoryAllocator::Iterator iter(allocator_.get()); |
+ PersistentMemoryAllocator::Reference ref; |
+ uint32_t type; |
+ while ((ref = iter.GetNext(&type)) != 0) { |
+ const void* memory = allocator_->GetAsArray<char>( |
+ ref, type, PersistentMemoryAllocator::kSizeAny); |
+ ProcessId found_id; |
+ int64_t create_stamp; |
+ |
+ switch (type) { |
+ case kTypeIdActivityTracker: |
+ case kTypeIdUserDataRecord: |
+ case kTypeIdProcessDataRecord: |
+ case ModuleInfoRecord::kPersistentTypeId: |
+ // By convention, the ProcessInfo structure is always the first |
+ // field of the structure so there's no need to handle all the |
+ // cases separately. |
+ if (ProcessInfo::OwningProcessId(memory, &found_id, &create_stamp)) { |
+ // Only change the type to be "free" if the process ID matches and |
+ // the creation time is before the exit time (so PID re-use doesn't |
+ // cause the erasure of something that is in-use). |
+ if (found_id == process_id && create_stamp < exit_stamp) |
+ allocator_->ChangeType(ref, ~type, type, /*clear=*/true); |
+ } |
+ break; |
+ } |
+ } |
+} |
+ |
void GlobalActivityTracker::RecordLogMessage(StringPiece message) { |
// Allocate at least one extra byte so the string is NUL terminated. All |
// memory returned by the allocator is guaranteed to be zeroed. |
@@ -1247,12 +1408,20 @@ GlobalActivityTracker::GlobalActivityTracker( |
kTypeIdUserDataRecordFree, |
kUserDataSize, |
kCachedUserDataMemories, |
- /*make_iterable=*/false), |
+ /*make_iterable=*/true), |
+ process_data_(allocator_->GetAsArray<char>( |
+ AllocateFrom(allocator_.get(), |
+ kTypeIdProcessDataRecordFree, |
+ kProcessDataSize, |
+ kTypeIdProcessDataRecord), |
+ kTypeIdProcessDataRecord, |
+ kProcessDataSize), |
+ kProcessDataSize), |
global_data_( |
allocator_->GetAsArray<char>( |
allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), |
kTypeIdGlobalDataRecord, |
- PersistentMemoryAllocator::kSizeAny), |
+ kGlobalDataSize), |
kGlobalDataSize) { |
// Ensure the passed memory is valid and empty (iterator finds nothing). |
uint32_t type; |
@@ -1262,7 +1431,9 @@ GlobalActivityTracker::GlobalActivityTracker( |
DCHECK(!g_tracker_); |
subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); |
- // The global records must be iterable in order to be found by an analyzer. |
+ // The data records must be iterable in order to be found by an analyzer. |
+ allocator_->MakeIterable(allocator_->GetAsReference( |
+ process_data_.GetBaseAddress(), kTypeIdProcessDataRecord)); |
allocator_->MakeIterable(allocator_->GetAsReference( |
global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); |