Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| 11 #include "base/atomic_sequence_num.h" | 11 #include "base/atomic_sequence_num.h" |
| 12 #include "base/debug/stack_trace.h" | 12 #include "base/debug/stack_trace.h" |
| 13 #include "base/files/file.h" | 13 #include "base/files/file.h" |
| 14 #include "base/files/file_path.h" | 14 #include "base/files/file_path.h" |
| 15 #include "base/files/memory_mapped_file.h" | 15 #include "base/files/memory_mapped_file.h" |
| 16 #include "base/logging.h" | 16 #include "base/logging.h" |
| 17 #include "base/memory/ptr_util.h" | 17 #include "base/memory/ptr_util.h" |
| 18 #include "base/metrics/field_trial.h" | 18 #include "base/metrics/field_trial.h" |
| 19 #include "base/metrics/histogram_macros.h" | 19 #include "base/metrics/histogram_macros.h" |
| 20 #include "base/pending_task.h" | 20 #include "base/pending_task.h" |
| 21 #include "base/pickle.h" | 21 #include "base/pickle.h" |
| 22 #include "base/process/process.h" | 22 #include "base/process/process.h" |
| 23 #include "base/process/process_handle.h" | 23 #include "base/process/process_handle.h" |
| 24 #include "base/stl_util.h" | 24 #include "base/stl_util.h" |
| 25 #include "base/strings/string_util.h" | 25 #include "base/strings/string_util.h" |
| 26 #include "base/strings/utf_string_conversions.h" | |
| 26 #include "base/threading/platform_thread.h" | 27 #include "base/threading/platform_thread.h" |
| 27 | 28 |
| 28 namespace base { | 29 namespace base { |
| 29 namespace debug { | 30 namespace debug { |
| 30 | 31 |
| 31 namespace { | 32 namespace { |
| 32 | 33 |
| 33 // A number that identifies the memory as having been initialized. It's | |
| 34 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). | |
| 35 // A version number is added on so that major structure changes won't try to | |
| 36 // read an older version (since the cookie won't match). | |
| 37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 | |
| 38 | |
| 39 // The minimum depth a stack should support. | 34 // The minimum depth a stack should support. |
| 40 const int kMinStackDepth = 2; | 35 const int kMinStackDepth = 2; |
| 41 | 36 |
| 42 // The amount of memory set aside for holding arbitrary user data (key/value | 37 // The amount of memory set aside for holding arbitrary user data (key/value |
| 43 // pairs) globally or associated with ActivityData entries. | 38 // pairs) globally or associated with ActivityData entries. |
| 44 const size_t kUserDataSize = 1 << 10; // 1 KiB | 39 const size_t kUserDataSize = 1 << 10; // 1 KiB |
| 40 const size_t kProcessDataSize = 4 << 10; // 4 KiB | |
| 45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB | 41 const size_t kGlobalDataSize = 16 << 10; // 16 KiB |
| 46 const size_t kMaxUserDataNameLength = | 42 const size_t kMaxUserDataNameLength = |
| 47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); | 43 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); |
| 48 | 44 |
| 49 // A constant used to indicate that module information is changing. | 45 // A constant used to indicate that module information is changing. |
| 50 const uint32_t kModuleInformationChanging = 0x80000000; | 46 const uint32_t kModuleInformationChanging = 0x80000000; |
| 51 | 47 |
| 48 // The key used to record process information. | |
| 49 const char kProcessPhaseDataKey[] = "process-phase"; | |
| 50 | |
| 51 // An atomically incrementing number, used to check for recreations of objects | |
| 52 // in the same memory space. | |
| 53 StaticAtomicSequenceNumber g_next_id; | |
| 54 | |
| 52 union ThreadRef { | 55 union ThreadRef { |
| 53 int64_t as_id; | 56 int64_t as_id; |
| 54 #if defined(OS_WIN) | 57 #if defined(OS_WIN) |
| 55 // On Windows, the handle itself is often a pseudo-handle with a common | 58 // On Windows, the handle itself is often a pseudo-handle with a common |
| 56 // value meaning "this thread" and so the thread-id is used. The former | 59 // value meaning "this thread" and so the thread-id is used. The former |
| 57 // can be converted to a thread-id with a system call. | 60 // can be converted to a thread-id with a system call. |
| 58 PlatformThreadId as_tid; | 61 PlatformThreadId as_tid; |
| 59 #elif defined(OS_POSIX) | 62 #elif defined(OS_POSIX) |
| 60 // On Posix, the handle is always a unique identifier so no conversion | 63 // On Posix, the handle is always a unique identifier so no conversion |
| 61 // needs to be done. However, its value is officially opaque so there | 64 // needs to be done. However, its value is officially opaque so there |
| 62 // is no one correct way to convert it to a numerical identifier. | 65 // is no one correct way to convert it to a numerical identifier. |
| 63 PlatformThreadHandle::Handle as_handle; | 66 PlatformThreadHandle::Handle as_handle; |
| 64 #endif | 67 #endif |
| 65 }; | 68 }; |
| 66 | 69 |
| 70 // Get the next non-zero identifier. It is only unique within a process. | |
| 71 uint32_t GetNextDataId() { | |
| 72 uint32_t id; | |
| 73 while ((id = g_next_id.GetNext()) == 0) | |
| 74 ; | |
| 75 return id; | |
| 76 } | |
| 77 | |
| 78 // Finds and reuses a specific allocation or creates a new one. | |
| 79 PersistentMemoryAllocator::Reference AllocateFrom( | |
| 80 PersistentMemoryAllocator* allocator, | |
| 81 uint32_t from_type, | |
| 82 size_t size, | |
| 83 uint32_t to_type) { | |
| 84 PersistentMemoryAllocator::Iterator iter(allocator); | |
| 85 PersistentMemoryAllocator::Reference ref; | |
| 86 while ((ref = iter.GetNextOfType(from_type)) != 0) { | |
| 87 DCHECK_LE(size, allocator->GetAllocSize(ref)); | |
| 88 // This can fail if another thread has just taken it. It is assumed that | |
| 89 // the memory is cleared during the "free" operation. | |
| 90 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false)) | |
| 91 return ref; | |
| 92 } | |
| 93 | |
| 94 return allocator->Allocate(size, to_type); | |
| 95 } | |
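
`AllocateFrom` implements a simple free-list on top of the persistent allocator: a "freed" block is parked under `from_type`, and a later caller reclaims it by atomically flipping the type back. A minimal usage sketch, with made-up type IDs (real callers use the `GlobalActivityTracker::kTypeId*` constants):

```cpp
// Hypothetical type IDs for illustration only.
constexpr uint32_t kTypeIdThing = 0x2001;      // An in-use object.
constexpr uint32_t kTypeIdThingFree = 0x3001;  // Parked on the free-list.

// Acquire: reuse a previously freed block if one exists, else allocate.
PersistentMemoryAllocator::Reference ref =
    AllocateFrom(allocator, kTypeIdThingFree, /*size=*/64, kTypeIdThing);

// Release: flip the type back, clearing the contents so the next
// AllocateFrom() call finds the zeroed memory it expects.
if (ref)
  allocator->ChangeType(ref, kTypeIdThingFree, kTypeIdThing, /*clear=*/true);
```
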
| 96 | |
| 67 // Determines the previous aligned index. | 97 // Determines the previous aligned index. |
| 68 size_t RoundDownToAlignment(size_t index, size_t alignment) { | 98 size_t RoundDownToAlignment(size_t index, size_t alignment) { |
| 69 return index & (0 - alignment); | 99 return index & (0 - alignment); |
| 70 } | 100 } |
| 71 | 101 |
| 72 // Determines the next aligned index. | 102 // Determines the next aligned index. |
| 73 size_t RoundUpToAlignment(size_t index, size_t alignment) { | 103 size_t RoundUpToAlignment(size_t index, size_t alignment) { |
| 74 return (index + (alignment - 1)) & (0 - alignment); | 104 return (index + (alignment - 1)) & (0 - alignment); |
| 75 } | 105 } |
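
Both helpers rely on `0 - alignment` wrapping around to the mask `~(alignment - 1)` in unsigned arithmetic, which only works when `alignment` is a power of two. A standalone constexpr sketch of the same arithmetic (illustrative copies, not the functions above):

```cpp
#include <cstddef>

constexpr size_t RoundDown(size_t index, size_t alignment) {
  return index & (0 - alignment);  // 0 - 8 wraps to ...11111000 for size_t.
}
constexpr size_t RoundUp(size_t index, size_t alignment) {
  return (index + (alignment - 1)) & (0 - alignment);
}

static_assert(RoundDown(13, 8) == 8, "rounds down to the prior multiple");
static_assert(RoundUp(13, 8) == 16, "rounds up to the next multiple");
static_assert(RoundUp(16, 8) == 16, "aligned values are unchanged");
```
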
| 76 | 106 |
| 77 } // namespace | 107 } // namespace |
| 78 | 108 |
| 109 OwningProcess::OwningProcess() {} | |
| 110 OwningProcess::~OwningProcess() {} | |
| 111 | |
| 112 void OwningProcess::Release_Initialize() { | |
| 113 uint32_t old_id = data_id.load(std::memory_order_acquire); | |
| 114 DCHECK_EQ(0U, old_id); | |
| 115 process_id = GetCurrentProcId(); | |
| 116 create_stamp = Time::Now().ToInternalValue(); | |
| 117 data_id.store(GetNextDataId(), std::memory_order_release); | |
| 118 } | |
| 119 | |
| 120 void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) { | |
| 121 process_id = pid; | |
| 122 create_stamp = stamp; | |
| 123 } | |
| 124 | |
| 125 // static | |
| 126 bool OwningProcess::GetOwningProcessId(const void* memory, | |
| 127 ProcessId* out_id, | |
| 128 int64_t* out_stamp) { | |
| 129 const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory); | |
| 130 uint32_t id = info->data_id.load(std::memory_order_acquire); | |
| 131 if (id == 0) | |
| 132 return false; | |
| 133 | |
| 134 *out_id = static_cast<ProcessId>(info->process_id); | |
| 135 *out_stamp = info->create_stamp; | |
| 136 return id == info->data_id.load(std::memory_order_seq_cst); | |
| 137 } | |
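
`data_id` doubles as an "initialized" flag and a generation counter: it is written last (release) during initialization and re-read after copying the non-atomic fields, so a reader can detect that the memory was re-initialized underneath it. A condensed sketch of the pattern with a hypothetical record type (not the real `OwningProcess` API):

```cpp
#include <atomic>
#include <cstdint>

struct Record {
  std::atomic<uint32_t> data_id;  // 0 means "never initialized".
  int64_t payload;                // Non-atomic; guarded by data_id.
};

bool ReadStable(const Record* r, int64_t* out) {
  uint32_t id = r->data_id.load(std::memory_order_acquire);
  if (id == 0)
    return false;     // Not yet initialized.
  *out = r->payload;  // Copy the non-atomic field(s).
  // If the id is unchanged, the copy wasn't torn by a re-initialization.
  return id == r->data_id.load(std::memory_order_seq_cst);
}
```
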
| 79 | 138 |
| 80 // It doesn't matter what is contained in this (though it will be all zeros) | 139 // It doesn't matter what is contained in this (though it will be all zeros) |
| 81 // as only the address of it is important. | 140 // as only the address of it is important. |
| 82 const ActivityData kNullActivityData = {}; | 141 const ActivityData kNullActivityData = {}; |
| 83 | 142 |
| 84 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { | 143 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { |
| 85 ThreadRef thread_ref; | 144 ThreadRef thread_ref; |
| 86 thread_ref.as_id = 0; // Zero the union in case other is smaller. | 145 thread_ref.as_id = 0; // Zero the union in case other is smaller. |
| 87 #if defined(OS_WIN) | 146 #if defined(OS_WIN) |
| 88 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); | 147 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); |
| (...skipping 150 matching lines...) | |
| 239 StringPiece ActivityUserData::TypedValue::GetReference() const { | 298 StringPiece ActivityUserData::TypedValue::GetReference() const { |
| 240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); | 299 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); |
| 241 return ref_value_; | 300 return ref_value_; |
| 242 } | 301 } |
| 243 | 302 |
| 244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { | 303 StringPiece ActivityUserData::TypedValue::GetStringReference() const { |
| 245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); | 304 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); |
| 246 return ref_value_; | 305 return ref_value_; |
| 247 } | 306 } |
| 248 | 307 |
| 308 // These are required because std::atomic is (currently) not a POD type and | |
| 309 // thus clang requires explicit out-of-line constructors and destructors even | |
| 310 // when they do nothing. | |
| 249 ActivityUserData::ValueInfo::ValueInfo() {} | 311 ActivityUserData::ValueInfo::ValueInfo() {} |
| 250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; | 312 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| 251 ActivityUserData::ValueInfo::~ValueInfo() {} | 313 ActivityUserData::ValueInfo::~ValueInfo() {} |
| 252 | 314 ActivityUserData::MemoryHeader::MemoryHeader() {} |
| 253 StaticAtomicSequenceNumber ActivityUserData::next_id_; | 315 ActivityUserData::MemoryHeader::~MemoryHeader() {} |
| 316 ActivityUserData::FieldHeader::FieldHeader() {} | |
| 317 ActivityUserData::FieldHeader::~FieldHeader() {} | |
| 254 | 318 |
| 255 ActivityUserData::ActivityUserData(void* memory, size_t size) | 319 ActivityUserData::ActivityUserData(void* memory, size_t size) |
| 256 : memory_(reinterpret_cast<char*>(memory)), | 320 : memory_(reinterpret_cast<char*>(memory)), |
| 257 available_(RoundDownToAlignment(size, kMemoryAlignment)), | 321 available_(RoundDownToAlignment(size, kMemoryAlignment)), |
| 258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { | 322 header_(reinterpret_cast<MemoryHeader*>(memory)) { |
| 259 // It's possible that no user data is being stored. | 323 // It's possible that no user data is being stored. |
| 260 if (!memory_) | 324 if (!memory_) |
| 261 return; | 325 return; |
| 262 | 326 |
| 263 DCHECK_LT(kMemoryAlignment, available_); | 327 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); |
| 264 if (id_->load(std::memory_order_relaxed) == 0) { | 328 DCHECK_LT(sizeof(MemoryHeader), available_); |
| 265 // Generate a new ID and store it in the first 32-bit word of memory_. | 329 if (header_->owner.data_id.load(std::memory_order_acquire) == 0) |
| 266 // |id_| must be non-zero for non-sink instances. | 330 header_->owner.Release_Initialize(); |
| 267 uint32_t id; | 331 memory_ += sizeof(MemoryHeader); |
| 268 while ((id = next_id_.GetNext()) == 0) | 332 available_ -= sizeof(MemoryHeader); |
| 269 ; | |
| 270 id_->store(id, std::memory_order_relaxed); | |
| 271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed)); | |
| 272 } | |
| 273 memory_ += kMemoryAlignment; | |
| 274 available_ -= kMemoryAlignment; | |
| 275 | 333 |
| 276 // If there is already data present, load that. This allows the same class | 334 // If there is already data present, load that. This allows the same class |
| 277 // to be used for analysis through snapshots. | 335 // to be used for analysis through snapshots. |
| 278 ImportExistingData(); | 336 ImportExistingData(); |
| 279 } | 337 } |
| 280 | 338 |
| 281 ActivityUserData::~ActivityUserData() {} | 339 ActivityUserData::~ActivityUserData() {} |
| 282 | 340 |
| 341 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { | |
| 342 DCHECK(output_snapshot); | |
| 343 DCHECK(output_snapshot->empty()); | |
| 344 | |
| 345 // Find any new data that may have been added by an active instance of this | |
| 346 // class that is adding records. | |
| 347 ImportExistingData(); | |
| 348 | |
| 349 for (const auto& entry : values_) { | |
| 350 TypedValue value; | |
| 351 value.type_ = entry.second.type; | |
| 352 DCHECK_GE(entry.second.extent, | |
| 353 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 354 | |
| 355 switch (entry.second.type) { | |
| 356 case RAW_VALUE: | |
| 357 case STRING_VALUE: | |
| 358 value.long_value_ = | |
| 359 std::string(reinterpret_cast<char*>(entry.second.memory), | |
| 360 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 361 break; | |
| 362 case RAW_VALUE_REFERENCE: | |
| 363 case STRING_VALUE_REFERENCE: { | |
| 364 ReferenceRecord* ref = | |
| 365 reinterpret_cast<ReferenceRecord*>(entry.second.memory); | |
| 366 value.ref_value_ = StringPiece( | |
| 367 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), | |
| 368 static_cast<size_t>(ref->size)); | |
| 369 } break; | |
| 370 case BOOL_VALUE: | |
| 371 case CHAR_VALUE: | |
| 372 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); | |
| 373 break; | |
| 374 case SIGNED_VALUE: | |
| 375 case UNSIGNED_VALUE: | |
| 376 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); | |
| 377 break; | |
| 378 case END_OF_VALUES: // Included for completeness purposes. | |
| 379 NOTREACHED(); | |
| 380 } | |
| 381 auto inserted = output_snapshot->insert( | |
| 382 std::make_pair(entry.second.name.as_string(), std::move(value))); | |
| 383 DCHECK(inserted.second); // True if inserted, false if existed. | |
| 384 } | |
| 385 | |
| 386 return true; | |
| 387 } | |
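
For context, a hypothetical read-back of such a snapshot might look like the following; the key name and the accessor usage are assumptions based on the `TypedValue` getters shown earlier, not code from this CL:

```cpp
// Assumes |user_data| is an ActivityUserData that has had values Set().
ActivityUserData::Snapshot snapshot;
if (user_data.CreateSnapshot(&snapshot)) {
  auto found = snapshot.find("some-key");  // Hypothetical key.
  if (found != snapshot.end() &&
      found->second.type() == ActivityUserData::STRING_VALUE) {
    StringPiece value = found->second.GetString();  // Assumed accessor.
  }
}
```
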
| 388 | |
| 389 const void* ActivityUserData::GetBaseAddress() { | |
| 390 // The |memory_| pointer advances as elements are written but the |header_| | |
| 391 // value is always at the start of the block so just return that. | |
| 392 return header_; | |
| 393 } | |
| 394 | |
| 395 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid, | |
| 396 int64_t stamp) { | |
| 397 if (!header_) | |
| 398 return; | |
| 399 header_->owner.SetOwningProcessIdForTesting(pid, stamp); | |
| 400 } | |
| 401 | |
| 402 // static | |
| 403 bool ActivityUserData::GetOwningProcessId(const void* memory, | |
| 404 ProcessId* out_id, | |
| 405 int64_t* out_stamp) { | |
| 406 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory); | |
| 407 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); | |
| 408 } | |
| 409 | |
| 283 void ActivityUserData::Set(StringPiece name, | 410 void ActivityUserData::Set(StringPiece name, |
| 284 ValueType type, | 411 ValueType type, |
| 285 const void* memory, | 412 const void* memory, |
| 286 size_t size) { | 413 size_t size) { |
| 287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); | 414 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); |
| 288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), | 415 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), |
| 289 size); | 416 size); |
| 290 | 417 |
| 291 // It's possible that no user data is being stored. | 418 // It's possible that no user data is being stored. |
| 292 if (!memory_) | 419 if (!memory_) |
| 293 return; | 420 return; |
| 294 | 421 |
| 295 // The storage of a name is limited so use that limit during lookup. | 422 // The storage of a name is limited so use that limit during lookup. |
| 296 if (name.length() > kMaxUserDataNameLength) | 423 if (name.length() > kMaxUserDataNameLength) |
| 297 name.set(name.data(), kMaxUserDataNameLength); | 424 name.set(name.data(), kMaxUserDataNameLength); |
| 298 | 425 |
| 299 ValueInfo* info; | 426 ValueInfo* info; |
| 300 auto existing = values_.find(name); | 427 auto existing = values_.find(name); |
| 301 if (existing != values_.end()) { | 428 if (existing != values_.end()) { |
| 302 info = &existing->second; | 429 info = &existing->second; |
| 303 } else { | 430 } else { |
| 304 // The name size is limited to what can be held in a single byte but | 431 // The name size is limited to what can be held in a single byte but |
| 305 // because there are no alignment constraints on strings, it's set tight | 432 // because there are no alignment constraints on strings, it's set tight |
| 306 // against the header. Its extent (the reserved space, even if it's not | 433 // against the header. Its extent (the reserved space, even if it's not |
| 307 // all used) is calculated so that, when pressed against the header, the | 434 // all used) is calculated so that, when pressed against the header, the |
| 308 // following field will be aligned properly. | 435 // following field will be aligned properly. |
| 309 size_t name_size = name.length(); | 436 size_t name_size = name.length(); |
| 310 size_t name_extent = | 437 size_t name_extent = |
| 311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - | 438 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) - |
| 312 sizeof(Header); | 439 sizeof(FieldHeader); |
| 313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); | 440 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); |
| 314 | 441 |
| 315 // The "base size" is the size of the header and (padded) string key. Stop | 442 // The "base size" is the size of the header and (padded) string key. Stop |
| 316 // now if there's not room enough for even this. | 443 // now if there's not room enough for even this. |
| 317 size_t base_size = sizeof(Header) + name_extent; | 444 size_t base_size = sizeof(FieldHeader) + name_extent; |
| 318 if (base_size > available_) | 445 if (base_size > available_) |
| 319 return; | 446 return; |
| 320 | 447 |
| 321 // The "full size" is the size for storing the entire value. | 448 // The "full size" is the size for storing the entire value. |
| 322 size_t full_size = std::min(base_size + value_extent, available_); | 449 size_t full_size = std::min(base_size + value_extent, available_); |
| 323 | 450 |
| 324 // If the value is actually a single byte, see if it can be stuffed at the | 451 // If the value is actually a single byte, see if it can be stuffed at the |
| 325 // end of the name extent rather than wasting kMemoryAlignment bytes. | 452 // end of the name extent rather than wasting kMemoryAlignment bytes. |
| 326 if (size == 1 && name_extent > name_size) { | 453 if (size == 1 && name_extent > name_size) { |
| 327 full_size = base_size; | 454 full_size = base_size; |
| 328 --name_extent; | 455 --name_extent; |
| 329 --base_size; | 456 --base_size; |
| 330 } | 457 } |
| 331 | 458 |
| 332 // Truncate the stored size to the amount of available memory. Stop now if | 459 // Truncate the stored size to the amount of available memory. Stop now if |
| 333 // there's not any room for even part of the value. | 460 // there's not any room for even part of the value. |
| 334 size = std::min(full_size - base_size, size); | 461 size = std::min(full_size - base_size, size); |
| 335 if (size == 0) | 462 if (size == 0) |
| 336 return; | 463 return; |
| 337 | 464 |
| 338 // Allocate a chunk of memory. | 465 // Allocate a chunk of memory. |
| 339 Header* header = reinterpret_cast<Header*>(memory_); | 466 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); |
| 340 memory_ += full_size; | 467 memory_ += full_size; |
| 341 available_ -= full_size; | 468 available_ -= full_size; |
| 342 | 469 |
| 343 // Datafill the header and name records. Memory must be zeroed. The |type| | 470 // Datafill the header and name records. Memory must be zeroed. The |type| |
| 344 // is written last, atomically, to release all the other values. | 471 // is written last, atomically, to release all the other values. |
| 345 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); | 472 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); |
| 346 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); | 473 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); |
| 347 header->name_size = static_cast<uint8_t>(name_size); | 474 header->name_size = static_cast<uint8_t>(name_size); |
| 348 header->record_size = full_size; | 475 header->record_size = full_size; |
| 349 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); | 476 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader); |
| 350 void* value_memory = | 477 void* value_memory = |
| 351 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; | 478 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent; |
| 352 memcpy(name_memory, name.data(), name_size); | 479 memcpy(name_memory, name.data(), name_size); |
| 353 header->type.store(type, std::memory_order_release); | 480 header->type.store(type, std::memory_order_release); |
| 354 | 481 |
| 355 // Create an entry in |values_| so that this field can be found and changed | 482 // Create an entry in |values_| so that this field can be found and changed |
| 356 // later on without having to allocate new entries. | 483 // later on without having to allocate new entries. |
| 357 StringPiece persistent_name(name_memory, name_size); | 484 StringPiece persistent_name(name_memory, name_size); |
| 358 auto inserted = | 485 auto inserted = |
| 359 values_.insert(std::make_pair(persistent_name, ValueInfo())); | 486 values_.insert(std::make_pair(persistent_name, ValueInfo())); |
| 360 DCHECK(inserted.second); // True if inserted, false if existed. | 487 DCHECK(inserted.second); // True if inserted, false if existed. |
| 361 info = &inserted.first->second; | 488 info = &inserted.first->second; |
| 362 info->name = persistent_name; | 489 info->name = persistent_name; |
| 363 info->memory = value_memory; | 490 info->memory = value_memory; |
| 364 info->size_ptr = &header->value_size; | 491 info->size_ptr = &header->value_size; |
| 365 info->extent = full_size - sizeof(Header) - name_extent; | 492 info->extent = full_size - sizeof(FieldHeader) - name_extent; |
| 366 info->type = type; | 493 info->type = type; |
| 367 } | 494 } |
| 368 | 495 |
| 369 // Copy the value data to storage. The |size| is written last, atomically, to | 496 // Copy the value data to storage. The |size| is written last, atomically, to |
| 370 // release the copied data. Until then, a parallel reader will just ignore | 497 // release the copied data. Until then, a parallel reader will just ignore |
| 371 // records with a zero size. | 498 // records with a zero size. |
| 372 DCHECK_EQ(type, info->type); | 499 DCHECK_EQ(type, info->type); |
| 373 size = std::min(size, info->extent); | 500 size = std::min(size, info->extent); |
| 374 info->size_ptr->store(0, std::memory_order_seq_cst); | 501 info->size_ptr->store(0, std::memory_order_seq_cst); |
| 375 memcpy(info->memory, memory, size); | 502 memcpy(info->memory, memory, size); |
| 376 info->size_ptr->store(size, std::memory_order_release); | 503 info->size_ptr->store(size, std::memory_order_release); |
| 377 } | 504 } |
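
Each record is laid out as `[FieldHeader][name][padding][value]`, with the name's extent padded so the value lands on an aligned boundary. A worked sketch of the extent arithmetic, assuming `kMemoryAlignment` is 8 and an 8-byte `FieldHeader` (illustrative sizes, not taken from the header file):

```cpp
#include <cstddef>

constexpr size_t kAlign = 8;       // Assumed kMemoryAlignment.
constexpr size_t kHeaderSize = 8;  // Assumed sizeof(FieldHeader).

constexpr size_t RoundUp(size_t n, size_t a) {
  return (n + (a - 1)) & (0 - a);
}
constexpr size_t NameExtent(size_t name_size) {
  return RoundUp(kHeaderSize + name_size, kAlign) - kHeaderSize;
}

static_assert(NameExtent(4) == 8, "4-byte name pads out to 8");
static_assert(NameExtent(8) == 8, "8-byte name needs no padding");
static_assert(NameExtent(9) == 16, "9-byte name pads out to 16");
// A 1-byte value can be tucked into the last padding byte of the name
// extent, which is the size == 1 special case in Set() above.
```
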
| 378 | 505 |
| 379 void ActivityUserData::SetReference(StringPiece name, | 506 void ActivityUserData::SetReference(StringPiece name, |
| 380 ValueType type, | 507 ValueType type, |
| 381 const void* memory, | 508 const void* memory, |
| 382 size_t size) { | 509 size_t size) { |
| 383 ReferenceRecord rec; | 510 ReferenceRecord rec; |
| 384 rec.address = reinterpret_cast<uintptr_t>(memory); | 511 rec.address = reinterpret_cast<uintptr_t>(memory); |
| 385 rec.size = size; | 512 rec.size = size; |
| 386 Set(name, type, &rec, sizeof(rec)); | 513 Set(name, type, &rec, sizeof(rec)); |
| 387 } | 514 } |
| 388 | 515 |
| 389 void ActivityUserData::ImportExistingData() const { | 516 void ActivityUserData::ImportExistingData() const { |
| 390 while (available_ > sizeof(Header)) { | 517 while (available_ > sizeof(FieldHeader)) { |
| 391 Header* header = reinterpret_cast<Header*>(memory_); | 518 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); |
| 392 ValueType type = | 519 ValueType type = |
| 393 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); | 520 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); |
| 394 if (type == END_OF_VALUES) | 521 if (type == END_OF_VALUES) |
| 395 return; | 522 return; |
| 396 if (header->record_size > available_) | 523 if (header->record_size > available_) |
| 397 return; | 524 return; |
| 398 | 525 |
| 399 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, | 526 size_t value_offset = RoundUpToAlignment( |
| 400 kMemoryAlignment); | 527 sizeof(FieldHeader) + header->name_size, kMemoryAlignment); |
| 401 if (header->record_size == value_offset && | 528 if (header->record_size == value_offset && |
| 402 header->value_size.load(std::memory_order_relaxed) == 1) { | 529 header->value_size.load(std::memory_order_relaxed) == 1) { |
| 403 value_offset -= 1; | 530 value_offset -= 1; |
| 404 } | 531 } |
| 405 if (value_offset + header->value_size > header->record_size) | 532 if (value_offset + header->value_size > header->record_size) |
| 406 return; | 533 return; |
| 407 | 534 |
| 408 ValueInfo info; | 535 ValueInfo info; |
| 409 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); | 536 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size); |
| 410 info.type = type; | 537 info.type = type; |
| 411 info.memory = memory_ + value_offset; | 538 info.memory = memory_ + value_offset; |
| 412 info.size_ptr = &header->value_size; | 539 info.size_ptr = &header->value_size; |
| 413 info.extent = header->record_size - value_offset; | 540 info.extent = header->record_size - value_offset; |
| 414 | 541 |
| 415 StringPiece key(info.name); | 542 StringPiece key(info.name); |
| 416 values_.insert(std::make_pair(key, std::move(info))); | 543 values_.insert(std::make_pair(key, std::move(info))); |
| 417 | 544 |
| 418 memory_ += header->record_size; | 545 memory_ += header->record_size; |
| 419 available_ -= header->record_size; | 546 available_ -= header->record_size; |
| 420 } | 547 } |
| 421 } | 548 } |
| 422 | 549 |
| 423 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { | |
| 424 DCHECK(output_snapshot); | |
| 425 DCHECK(output_snapshot->empty()); | |
| 426 | |
| 427 // Find any new data that may have been added by an active instance of this | |
| 428 // class that is adding records. | |
| 429 ImportExistingData(); | |
| 430 | |
| 431 for (const auto& entry : values_) { | |
| 432 TypedValue value; | |
| 433 value.type_ = entry.second.type; | |
| 434 DCHECK_GE(entry.second.extent, | |
| 435 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 436 | |
| 437 switch (entry.second.type) { | |
| 438 case RAW_VALUE: | |
| 439 case STRING_VALUE: | |
| 440 value.long_value_ = | |
| 441 std::string(reinterpret_cast<char*>(entry.second.memory), | |
| 442 entry.second.size_ptr->load(std::memory_order_relaxed)); | |
| 443 break; | |
| 444 case RAW_VALUE_REFERENCE: | |
| 445 case STRING_VALUE_REFERENCE: { | |
| 446 ReferenceRecord* ref = | |
| 447 reinterpret_cast<ReferenceRecord*>(entry.second.memory); | |
| 448 value.ref_value_ = StringPiece( | |
| 449 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)), | |
| 450 static_cast<size_t>(ref->size)); | |
| 451 } break; | |
| 452 case BOOL_VALUE: | |
| 453 case CHAR_VALUE: | |
| 454 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory); | |
| 455 break; | |
| 456 case SIGNED_VALUE: | |
| 457 case UNSIGNED_VALUE: | |
| 458 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory); | |
| 459 break; | |
| 460 case END_OF_VALUES: // Included for completeness purposes. | |
| 461 NOTREACHED(); | |
| 462 } | |
| 463 auto inserted = output_snapshot->insert( | |
| 464 std::make_pair(entry.second.name.as_string(), std::move(value))); | |
| 465 DCHECK(inserted.second); // True if inserted, false if existed. | |
| 466 } | |
| 467 | |
| 468 return true; | |
| 469 } | |
| 470 | |
| 471 const void* ActivityUserData::GetBaseAddress() { | |
| 472 // The |memory_| pointer advances as elements are written but the |id_| | |
| 473 // value is always at the start of the block so just return that. | |
| 474 return id_; | |
| 475 } | |
| 476 | |
| 477 // This information is kept for every thread that is tracked. It is filled | 550 // This information is kept for every thread that is tracked. It is filled |
| 478 // the very first time the thread is seen. All fields must be of exact sizes | 551 // the very first time the thread is seen. All fields must be of exact sizes |
| 479 // so there is no issue moving between 32 and 64-bit builds. | 552 // so there is no issue moving between 32 and 64-bit builds. |
| 480 struct ThreadActivityTracker::Header { | 553 struct ThreadActivityTracker::Header { |
| 481 // Defined in .h for analyzer access. Increment this if structure changes! | 554 // Defined in .h for analyzer access. Increment this if structure changes! |
| 482 static constexpr uint32_t kPersistentTypeId = | 555 static constexpr uint32_t kPersistentTypeId = |
| 483 GlobalActivityTracker::kTypeIdActivityTracker; | 556 GlobalActivityTracker::kTypeIdActivityTracker; |
| 484 | 557 |
| 485 // Expected size for 32/64-bit check. | 558 // Expected size for 32/64-bit check. |
| 486 static constexpr size_t kExpectedInstanceSize = 80; | 559 static constexpr size_t kExpectedInstanceSize = |
| 560 OwningProcess::kExpectedInstanceSize + 72; | |
| 487 | 561 |
| 488 // This unique number indicates a valid initialization of the memory. | 562 // This information uniquely identifies a process. |
| 489 std::atomic<uint32_t> cookie; | 563 OwningProcess owner; |
| 490 | 564 |
| 491 // The number of Activity slots (spaces that can hold an Activity) that | 565 // The thread-id (thread_ref.as_id) to which this data belongs. This number |
| 492 // immediately follow this structure in memory. | 566 // is not guaranteed to mean anything but combined with the process-id from |
| 493 uint32_t stack_slots; | 567 // OwningProcess is unique among all active trackers. |
| 494 | |
| 495 // The process-id and thread-id (thread_ref.as_id) to which this data belongs. | |
| 496 // These identifiers are not guaranteed to mean anything but are unique, in | |
| 497 // combination, among all active trackers. It would be nice to always have | |
| 498 // the process_id be a 64-bit value but the necessity of having it atomic | |
| 499 // (for the memory barriers it provides) limits it to the natural word size | |
| 500 // of the machine. | |
| 501 #ifdef ARCH_CPU_64_BITS | |
| 502 std::atomic<int64_t> process_id; | |
| 503 #else | |
| 504 std::atomic<int32_t> process_id; | |
| 505 int32_t process_id_padding; | |
| 506 #endif | |
| 507 ThreadRef thread_ref; | 568 ThreadRef thread_ref; |
| 508 | 569 |
| 509 // The start-time and start-ticks when the data was created. Each activity | 570 // The start-time and start-ticks when the data was created. Each activity |
| 510 // record has a |time_internal| value that can be converted to a "wall time" | 571 // record has a |time_internal| value that can be converted to a "wall time" |
| 511 // with these two values. | 572 // with these two values. |
| 512 int64_t start_time; | 573 int64_t start_time; |
| 513 int64_t start_ticks; | 574 int64_t start_ticks; |
| 514 | 575 |
| 576 // The number of Activity slots (spaces that can hold an Activity) that | |
| 577 // immediately follow this structure in memory. | |
| 578 uint32_t stack_slots; | |
| 579 | |
| 580 // Some padding to keep everything 64-bit aligned. | |
| 581 uint32_t padding; | |
| 582 | |
| 515 // The current depth of the stack. This may be greater than the number of | 583 // The current depth of the stack. This may be greater than the number of |
| 516 // slots. If the depth exceeds the number of slots, the newest entries | 584 // slots. If the depth exceeds the number of slots, the newest entries |
| 517 // won't be recorded. | 585 // won't be recorded. |
| 518 std::atomic<uint32_t> current_depth; | 586 std::atomic<uint32_t> current_depth; |
| 519 | 587 |
| 520 // A memory location used to indicate if changes have been made to the stack | 588 // A memory location used to indicate if changes have been made to the stack |
| 521 // that would invalidate an in-progress read of its contents. The active | 589 // that would invalidate an in-progress read of its contents. The active |
| 522 // tracker will zero the value whenever something gets popped from the | 590 // tracker will zero the value whenever something gets popped from the |
| 523 // stack. A monitoring tracker can write a non-zero value here, copy the | 591 // stack. A monitoring tracker can write a non-zero value here, copy the |
| 524 // stack contents, and read the value to know, if it is still non-zero, that | 592 // stack contents, and read the value to know, if it is still non-zero, that |
| (...skipping 62 matching lines...) | |
| 587 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), | 655 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), |
| 588 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); | 656 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); |
| 589 | 657 |
| 590 // Ensure that the alignment of Activity.data is properly aligned to a | 658 // Ensure that the alignment of Activity.data is properly aligned to a |
| 591 // 64-bit boundary so there are no interoperability-issues across cpu | 659 // 64-bit boundary so there are no interoperability-issues across cpu |
| 592 // architectures. | 660 // architectures. |
| 593 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, | 661 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, |
| 594 "ActivityData.data is not 64-bit aligned"); | 662 "ActivityData.data is not 64-bit aligned"); |
| 595 | 663 |
| 596 // Provided memory should either be completely initialized or all zeros. | 664 // Provided memory should either be completely initialized or all zeros. |
| 597 if (header_->cookie.load(std::memory_order_relaxed) == 0) { | 665 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { |
| 598 // This is a new file. Double-check other fields and then initialize. | 666 // This is a new file. Double-check other fields and then initialize. |
| 599 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); | 667 DCHECK_EQ(0, header_->owner.process_id); |
| 668 DCHECK_EQ(0, header_->owner.create_stamp); | |
| 600 DCHECK_EQ(0, header_->thread_ref.as_id); | 669 DCHECK_EQ(0, header_->thread_ref.as_id); |
| 601 DCHECK_EQ(0, header_->start_time); | 670 DCHECK_EQ(0, header_->start_time); |
| 602 DCHECK_EQ(0, header_->start_ticks); | 671 DCHECK_EQ(0, header_->start_ticks); |
| 603 DCHECK_EQ(0U, header_->stack_slots); | 672 DCHECK_EQ(0U, header_->stack_slots); |
| 604 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); | 673 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); |
| 605 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); | 674 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); |
| 606 DCHECK_EQ(0, stack_[0].time_internal); | 675 DCHECK_EQ(0, stack_[0].time_internal); |
| 607 DCHECK_EQ(0U, stack_[0].origin_address); | 676 DCHECK_EQ(0U, stack_[0].origin_address); |
| 608 DCHECK_EQ(0U, stack_[0].call_stack[0]); | 677 DCHECK_EQ(0U, stack_[0].call_stack[0]); |
| 609 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); | 678 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); |
| 610 | 679 |
| 611 #if defined(OS_WIN) | 680 #if defined(OS_WIN) |
| 612 header_->thread_ref.as_tid = PlatformThread::CurrentId(); | 681 header_->thread_ref.as_tid = PlatformThread::CurrentId(); |
| 613 #elif defined(OS_POSIX) | 682 #elif defined(OS_POSIX) |
| 614 header_->thread_ref.as_handle = | 683 header_->thread_ref.as_handle = |
| 615 PlatformThread::CurrentHandle().platform_handle(); | 684 PlatformThread::CurrentHandle().platform_handle(); |
| 616 #endif | 685 #endif |
| 617 header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed); | |
| 618 | 686 |
| 619 header_->start_time = base::Time::Now().ToInternalValue(); | 687 header_->start_time = base::Time::Now().ToInternalValue(); |
| 620 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); | 688 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); |
| 621 header_->stack_slots = stack_slots_; | 689 header_->stack_slots = stack_slots_; |
| 622 strlcpy(header_->thread_name, PlatformThread::GetName(), | 690 strlcpy(header_->thread_name, PlatformThread::GetName(), |
| 623 sizeof(header_->thread_name)); | 691 sizeof(header_->thread_name)); |
| 624 | 692 |
| 625 // This is done last so as to guarantee that everything above is "released" | 693 // This is done last so as to guarantee that everything above is "released" |
| 626 // by the time this value gets written. | 694 // by the time this value gets written. |
| 627 header_->cookie.store(kHeaderCookie, std::memory_order_release); | 695 header_->owner.Release_Initialize(); |
| 628 | 696 |
| 629 valid_ = true; | 697 valid_ = true; |
| 630 DCHECK(IsValid()); | 698 DCHECK(IsValid()); |
| 631 } else { | 699 } else { |
| 632 // This is a file with existing data. Perform basic consistency checks. | 700 // This is a file with existing data. Perform basic consistency checks. |
| 633 valid_ = true; | 701 valid_ = true; |
| 634 valid_ = IsValid(); | 702 valid_ = IsValid(); |
| 635 } | 703 } |
| 636 } | 704 } |
| 637 | 705 |
| (...skipping 124 matching lines...) | |
| 762 ActivityId id, | 830 ActivityId id, |
| 763 ActivityTrackerMemoryAllocator* allocator) { | 831 ActivityTrackerMemoryAllocator* allocator) { |
| 764 // User-data is only stored for activities actually held in the stack. | 832 // User-data is only stored for activities actually held in the stack. |
| 765 if (id < stack_slots_ && stack_[id].user_data_ref) { | 833 if (id < stack_slots_ && stack_[id].user_data_ref) { |
| 766 allocator->ReleaseObjectReference(stack_[id].user_data_ref); | 834 allocator->ReleaseObjectReference(stack_[id].user_data_ref); |
| 767 stack_[id].user_data_ref = 0; | 835 stack_[id].user_data_ref = 0; |
| 768 } | 836 } |
| 769 } | 837 } |
| 770 | 838 |
| 771 bool ThreadActivityTracker::IsValid() const { | 839 bool ThreadActivityTracker::IsValid() const { |
| 772 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || | 840 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || |
| 773 header_->process_id.load(std::memory_order_relaxed) == 0 || | 841 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || |
| 774 header_->thread_ref.as_id == 0 || | 842 header_->start_time == 0 || header_->start_ticks == 0 || |
| 775 header_->start_time == 0 || | |
| 776 header_->start_ticks == 0 || | |
| 777 header_->stack_slots != stack_slots_ || | 843 header_->stack_slots != stack_slots_ || |
| 778 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { | 844 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { |
| 779 return false; | 845 return false; |
| 780 } | 846 } |
| 781 | 847 |
| 782 return valid_; | 848 return valid_; |
| 783 } | 849 } |
| 784 | 850 |
| 785 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { | 851 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { |
| 786 DCHECK(output_snapshot); | 852 DCHECK(output_snapshot); |
| (...skipping 10 matching lines...) | |
| 797 // Stop here if the data isn't valid. | 863 // Stop here if the data isn't valid. |
| 798 if (!IsValid()) | 864 if (!IsValid()) |
| 799 return false; | 865 return false; |
| 800 | 866 |
| 801 // Allocate the maximum size for the stack so it doesn't have to be done | 867 // Allocate the maximum size for the stack so it doesn't have to be done |
| 802 // during the time-sensitive snapshot operation. It is shrunk once the | 868 // during the time-sensitive snapshot operation. It is shrunk once the |
| 803 // actual size is known. | 869 // actual size is known. |
| 804 output_snapshot->activity_stack.reserve(stack_slots_); | 870 output_snapshot->activity_stack.reserve(stack_slots_); |
| 805 | 871 |
| 806 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { | 872 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
| 807 // Remember the process and thread IDs to ensure they aren't replaced | 873 // Remember the data IDs to ensure nothing is replaced during the snapshot |
| 808 // during the snapshot operation. Use "acquire" to ensure that all the | 874 // operation. Use "acquire" so that all the non-atomic fields of the |
| 809 // non-atomic fields of the structure are valid (at least at the current | 875 // structure are valid (at least at the current moment in time). |
| 810 // moment in time). | 876 const uint32_t starting_id = |
| 811 const int64_t starting_process_id = | 877 header_->owner.data_id.load(std::memory_order_acquire); |
| 812 header_->process_id.load(std::memory_order_acquire); | 878 const int64_t starting_process_id = header_->owner.process_id; |
| 813 const int64_t starting_thread_id = header_->thread_ref.as_id; | 879 const int64_t starting_thread_id = header_->thread_ref.as_id; |
| 814 | 880 |
| 815 // Write a non-zero value to |stack_unchanged| so it's possible to detect | 881 // Write a non-zero value to |stack_unchanged| so it's possible to detect |
| 816 // at the end that nothing has changed since copying the data began. A | 882 // at the end that nothing has changed since copying the data began. A |
| 817 // "cst" operation is required to ensure it occurs before everything else. | 883 // "cst" operation is required to ensure it occurs before everything else. |
| 818 // Using "cst" memory ordering is relatively expensive but this is only | 884 // Using "cst" memory ordering is relatively expensive but this is only |
| 819 // done during analysis so doesn't directly affect the worker threads. | 885 // done during analysis so doesn't directly affect the worker threads. |
| 820 header_->stack_unchanged.store(1, std::memory_order_seq_cst); | 886 header_->stack_unchanged.store(1, std::memory_order_seq_cst); |
| 821 | 887 |
| 822 // Fetching the current depth also "acquires" the contents of the stack. | 888 // Fetching the current depth also "acquires" the contents of the stack. |
| 823 depth = header_->current_depth.load(std::memory_order_acquire); | 889 depth = header_->current_depth.load(std::memory_order_acquire); |
| 824 uint32_t count = std::min(depth, stack_slots_); | 890 uint32_t count = std::min(depth, stack_slots_); |
| 825 output_snapshot->activity_stack.resize(count); | 891 output_snapshot->activity_stack.resize(count); |
| 826 if (count > 0) { | 892 if (count > 0) { |
| 827 // Copy the existing contents. Memcpy is used for speed. | 893 // Copy the existing contents. Memcpy is used for speed. |
| 828 memcpy(&output_snapshot->activity_stack[0], stack_, | 894 memcpy(&output_snapshot->activity_stack[0], stack_, |
| 829 count * sizeof(Activity)); | 895 count * sizeof(Activity)); |
| 830 } | 896 } |
| 831 | 897 |
| 832 // Retry if something changed during the copy. A "cst" operation ensures | 898 // Retry if something changed during the copy. A "cst" operation ensures |
| 833 // it must happen after all the above operations. | 899 // it must happen after all the above operations. |
| 834 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) | 900 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) |
| 835 continue; | 901 continue; |
| 836 | 902 |
| 837 // Stack copied. Record its full depth. | 903 // Stack copied. Record its full depth. |
| 838 output_snapshot->activity_stack_depth = depth; | 904 output_snapshot->activity_stack_depth = depth; |
| 839 | 905 |
| 840 // TODO(bcwhite): Snapshot other things here. | 906 // TODO(bcwhite): Snapshot other things here. |
| 841 | 907 |
| 842 // Get the general thread information. Loading of "process_id" is guaranteed | 908 // Get the general thread information. |
| 843 // to be last so that it's possible to detect below if any content has | |
| 844 // changed while reading it. It's technically possible for a thread to end, | |
| 845 // have its data cleared, a new thread get created with the same IDs, and | |
| 846 // it perform an action which starts tracking all in the time since the | |
| 847 // ID reads above but the chance is so unlikely that it's not worth the | |
| 848 // effort and complexity of protecting against it (perhaps with an | |
| 849 // "unchanged" field like is done for the stack). | |
| 850 output_snapshot->thread_name = | 909 output_snapshot->thread_name = |
| 851 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); | 910 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); |
| 852 output_snapshot->thread_id = header_->thread_ref.as_id; | 911 output_snapshot->thread_id = header_->thread_ref.as_id; |
| 853 output_snapshot->process_id = | 912 output_snapshot->process_id = header_->owner.process_id; |
| 854 header_->process_id.load(std::memory_order_seq_cst); | |
| 855 | 913 |
| 856 // All characters of the thread-name buffer were copied so as to not break | 914 // All characters of the thread-name buffer were copied so as to not break |
| 857 // if the trailing NUL were missing. Now limit the length if the actual | 915 // if the trailing NUL were missing. Now limit the length if the actual |
| 858 // name is shorter. | 916 // name is shorter. |
| 859 output_snapshot->thread_name.resize( | 917 output_snapshot->thread_name.resize( |
| 860 strlen(output_snapshot->thread_name.c_str())); | 918 strlen(output_snapshot->thread_name.c_str())); |
| 861 | 919 |
| 862 // If the process or thread ID has changed then the tracker has exited and | 920 // If the data ID has changed then the tracker has exited and the memory |
| 863 // the memory reused by a new one. Try again. | 921 // reused by a new one. Try again. |
| 864 if (output_snapshot->process_id != starting_process_id || | 922 if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id || |
| 923 output_snapshot->process_id != starting_process_id || | |
| 865 output_snapshot->thread_id != starting_thread_id) { | 924 output_snapshot->thread_id != starting_thread_id) { |
| 866 continue; | 925 continue; |
| 867 } | 926 } |
| 868 | 927 |
| 869 // Only successful if the data is still valid once everything is done since | 928 // Only successful if the data is still valid once everything is done since |
| 870 // it's possible for the thread to end somewhere in the middle and all its | 929 // it's possible for the thread to end somewhere in the middle and all its |
| 871 // values become garbage. | 930 // values become garbage. |
| 872 if (!IsValid()) | 931 if (!IsValid()) |
| 873 return false; | 932 return false; |
| 874 | 933 |
| 875 // Change all the timestamps in the activities from "ticks" to "wall" time. | 934 // Change all the timestamps in the activities from "ticks" to "wall" time. |
| 876 const Time start_time = Time::FromInternalValue(header_->start_time); | 935 const Time start_time = Time::FromInternalValue(header_->start_time); |
| 877 const int64_t start_ticks = header_->start_ticks; | 936 const int64_t start_ticks = header_->start_ticks; |
| 878 for (Activity& activity : output_snapshot->activity_stack) { | 937 for (Activity& activity : output_snapshot->activity_stack) { |
| 879 activity.time_internal = | 938 activity.time_internal = |
| 880 (start_time + | 939 (start_time + |
| 881 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) | 940 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) |
| 882 .ToInternalValue(); | 941 .ToInternalValue(); |
| 883 } | 942 } |
| 884 | 943 |
| 885 // Success! | 944 // Success! |
| 886 return true; | 945 return true; |
| 887 } | 946 } |
| 888 | 947 |
| 889 // Too many attempts. | 948 // Too many attempts. |
| 890 return false; | 949 return false; |
| 891 } | 950 } |
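
The loop above is essentially a sequence-lock read: the writer zeroes `stack_unchanged` whenever it pops an activity, while the reader sets it to 1 before copying and re-checks it afterwards. A condensed sketch of the protocol with hypothetical types (not the real tracker structures):

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <vector>

struct SharedStack {
  std::atomic<uint32_t> unchanged;  // Writer zeroes this on every pop.
  std::atomic<uint32_t> depth;
  int entries[100];
};

bool SnapshotStack(SharedStack* s, std::vector<int>* out) {
  for (int attempt = 0; attempt < 10; ++attempt) {
    s->unchanged.store(1, std::memory_order_seq_cst);
    uint32_t count =
        std::min(s->depth.load(std::memory_order_acquire), uint32_t{100});
    out->assign(s->entries, s->entries + count);
    if (s->unchanged.load(std::memory_order_seq_cst))
      return true;  // Nothing was popped while copying; snapshot is coherent.
  }
  return false;  // Too much churn; the caller can try again later.
}
```
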
| 892 | 951 |
| 952 const void* ThreadActivityTracker::GetBaseAddress() { | |
| 953 return header_; | |
| 954 } | |
| 955 | |
| 956 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid, | |
| 957 int64_t stamp) { | |
| 958 header_->owner.SetOwningProcessIdForTesting(pid, stamp); | |
| 959 } | |
| 960 | |
| 961 // static | |
| 962 bool ThreadActivityTracker::GetOwningProcessId(const void* memory, | |
| 963 ProcessId* out_id, | |
| 964 int64_t* out_stamp) { | |
| 965 const Header* header = reinterpret_cast<const Header*>(memory); | |
| 966 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); | |
| 967 } | |
| 968 | |
| 893 // static | 969 // static |
| 894 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | 970 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| 895 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); | 971 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); |
| 896 } | 972 } |
| 897 | 973 |
| 898 // The instantiation of the GlobalActivityTracker object. | 974 // The instantiation of the GlobalActivityTracker object. |
| 899 // The object held here will obviously not be destructed at process exit | 975 // The object held here will obviously not be destructed at process exit |
| 900 // but that's best since PersistentMemoryAllocator objects (that underlie | 976 // but that's best since PersistentMemoryAllocator objects (that underlie |
| 901 // GlobalActivityTracker objects) are explicitly forbidden from doing anything | 977 // GlobalActivityTracker objects) are explicitly forbidden from doing anything |
| 902 // essential at exit anyway due to the fact that they depend on data managed | 978 // essential at exit anyway due to the fact that they depend on data managed |
| (...skipping 67 matching lines...) | |
| 970 // These fields never change and are done before the record is made | 1046 // These fields never change and are done before the record is made |
| 971 // iterable so no thread protection is necessary. | 1047 // iterable so no thread protection is necessary. |
| 972 size = info.size; | 1048 size = info.size; |
| 973 timestamp = info.timestamp; | 1049 timestamp = info.timestamp; |
| 974 age = info.age; | 1050 age = info.age; |
| 975 memcpy(identifier, info.identifier, sizeof(identifier)); | 1051 memcpy(identifier, info.identifier, sizeof(identifier)); |
| 976 memcpy(pickle, pickler.data(), pickler.size()); | 1052 memcpy(pickle, pickler.data(), pickler.size()); |
| 977 pickle_size = pickler.size(); | 1053 pickle_size = pickler.size(); |
| 978 changes.store(0, std::memory_order_relaxed); | 1054 changes.store(0, std::memory_order_relaxed); |
| 979 | 1055 |
| 1056 // Initialize the owner info. | |
| 1057 owner.Release_Initialize(); | |
| 1058 | |
| 980 // Now set those fields that can change. | 1059 // Now set those fields that can change. |
| 981 return UpdateFrom(info); | 1060 return UpdateFrom(info); |
| 982 } | 1061 } |
| 983 | 1062 |
| 984 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( | 1063 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( |
| 985 const GlobalActivityTracker::ModuleInfo& info) { | 1064 const GlobalActivityTracker::ModuleInfo& info) { |
| 986 // Updates can occur after the record is made visible so make changes atomic. | 1065 // Updates can occur after the record is made visible so make changes atomic. |
| 987 // A "strong" exchange ensures no false failures. | 1066 // A "strong" exchange ensures no false failures. |
| 988 uint32_t old_changes = changes.load(std::memory_order_relaxed); | 1067 uint32_t old_changes = changes.load(std::memory_order_relaxed); |
| 989 uint32_t new_changes = old_changes | kModuleInformationChanging; | 1068 uint32_t new_changes = old_changes | kModuleInformationChanging; |
| (...skipping 54 matching lines...) | |
| 1044 AutoLock lock(global->user_data_allocator_lock_); | 1123 AutoLock lock(global->user_data_allocator_lock_); |
| 1045 user_data_ = | 1124 user_data_ = |
| 1046 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); | 1125 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); |
| 1047 } else { | 1126 } else { |
| 1048 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); | 1127 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); |
| 1049 } | 1128 } |
| 1050 } | 1129 } |
| 1051 return *user_data_; | 1130 return *user_data_; |
| 1052 } | 1131 } |
| 1053 | 1132 |
| 1054 GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) | 1133 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, |
| 1134 size_t size) | |
| 1055 : ActivityUserData(memory, size) {} | 1135 : ActivityUserData(memory, size) {} |
| 1056 | 1136 |
| 1057 GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} | 1137 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} |
| 1058 | 1138 |
| 1059 void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, | 1139 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name, |
| 1060 ValueType type, | 1140 ValueType type, |
| 1061 const void* memory, | 1141 const void* memory, |
| 1062 size_t size) { | 1142 size_t size) { |
| 1063 AutoLock lock(data_lock_); | 1143 AutoLock lock(data_lock_); |
| 1064 ActivityUserData::Set(name, type, memory, size); | 1144 ActivityUserData::Set(name, type, memory, size); |
| 1065 } | 1145 } |
| 1066 | 1146 |
| 1067 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( | 1147 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( |
| 1068 PersistentMemoryAllocator::Reference mem_reference, | 1148 PersistentMemoryAllocator::Reference mem_reference, |
| 1069 void* base, | 1149 void* base, |
| 1070 size_t size) | 1150 size_t size) |
| 1071 : ThreadActivityTracker(base, size), | 1151 : ThreadActivityTracker(base, size), |
| 1072 mem_reference_(mem_reference), | 1152 mem_reference_(mem_reference), |
| (...skipping 104 matching lines...) | |
| 1177 return tracker; | 1257 return tracker; |
| 1178 } | 1258 } |
| 1179 | 1259 |
| 1180 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | 1260 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
| 1181 ThreadActivityTracker* tracker = | 1261 ThreadActivityTracker* tracker = |
| 1182 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | 1262 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
| 1183 if (tracker) | 1263 if (tracker) |
| 1184 delete tracker; | 1264 delete tracker; |
| 1185 } | 1265 } |
| 1186 | 1266 |
| 1267 void GlobalActivityTracker::SetBackgroundTaskRunner( | |
| 1268 const scoped_refptr<TaskRunner>& runner) { | |
| 1269 AutoLock lock(global_tracker_lock_); | |
| 1270 background_task_runner_ = runner; | |
| 1271 } | |
| 1272 | |
| 1273 void GlobalActivityTracker::SetProcessExitCallback( | |
| 1274 ProcessExitCallback callback) { | |
| 1275 AutoLock lock(global_tracker_lock_); | |
| 1276 process_exit_callback_ = callback; | |
| 1277 } | |
| 1278 | |
| 1279 void GlobalActivityTracker::RecordProcessLaunch( | |
| 1280 ProcessId process_id, | |
| 1281 const FilePath::StringType& cmd) { | |
| 1282 DCHECK_NE(GetCurrentProcId(), process_id); | |
| 1283 | |
| 1284 base::AutoLock lock(global_tracker_lock_); | |
| 1285 if (base::ContainsKey(known_processes_, process_id)) { | |
| 1286 NOTREACHED() << "Process #" << process_id | |
| 1287 << " was previously recorded as \"launched\"" | |
| 1288 << " with no corresponding exit."; | |
|
manzagop (departed)
2017/02/24 15:56:35
Add UMA metric so we get visibility?
bcwhite
2017/03/06 16:33:51
Okay, but I'll do it as a separate CL so as to not
manzagop (departed)
2017/03/06 21:48:41
nit: put a TODO
| |
| 1289 known_processes_.erase(process_id); | |
| 1290 } | |
| 1291 | |
| 1292 #if defined(OS_WIN) | |
| 1293 known_processes_.insert(std::make_pair(process_id, UTF16ToUTF8(cmd))); | |
| 1294 #else | |
| 1295 known_processes_.insert(std::make_pair(process_id, std::move(cmd))); | |
|
manzagop (departed)
2017/02/24 15:56:35
'cmd' is const so I think std::move has no effect.
bcwhite
2017/03/06 16:33:52
Done.
| |
| 1296 #endif | |
| 1297 } | |
| 1298 | |
| 1299 void GlobalActivityTracker::RecordProcessLaunch( | |
| 1300 ProcessId process_id, | |
| 1301 const FilePath::StringType& exe, | |
| 1302 const FilePath::StringType& args) { | |
| 1303 if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) { | |
| 1304 RecordProcessLaunch(process_id, | |
| 1305 FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe + | |
| 1306 FILE_PATH_LITERAL("\" ") + args); | |
| 1307 } else { | |
| 1308 RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args); | |
| 1309 } | |
| 1310 } | |
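
A couple of hypothetical calls showing what this overload records; the paths and PIDs below are made up. The executable path is quoted only when it actually contains a space, so the stored command line stays unambiguous when read back.

```cpp
#include "base/debug/activity_tracker.h"
#include "base/files/file_path.h"
#include "base/process/process_handle.h"

void RecordLaunches(base::debug::GlobalActivityTracker* tracker,
                    base::ProcessId pid_a,  // hypothetical PIDs
                    base::ProcessId pid_b) {
  if (!tracker)
    return;

  // No space in the executable path: stored as-is, e.g.
  //   C:\chrome\chrome.exe --type=renderer
  tracker->RecordProcessLaunch(pid_a,
                               FILE_PATH_LITERAL("C:\\chrome\\chrome.exe"),
                               FILE_PATH_LITERAL("--type=renderer"));

  // Space in the executable path: quoted so exe and arguments stay separable:
  //   "C:\Program Files\chrome.exe" --type=gpu
  tracker->RecordProcessLaunch(
      pid_b, FILE_PATH_LITERAL("C:\\Program Files\\chrome.exe"),
      FILE_PATH_LITERAL("--type=gpu"));
}
```
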
| 1311 | |
| 1312 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id, | |
| 1313 int exit_code) { | |
| 1314 DCHECK_NE(GetCurrentProcId(), process_id); | |
| 1315 | |
| 1316 scoped_refptr<TaskRunner> task_runner; | |
| 1317 std::string command_line; | |
| 1318 { | |
| 1319 base::AutoLock lock(global_tracker_lock_); | |
| 1320 task_runner = background_task_runner_; | |
| 1321 auto found = known_processes_.find(process_id); | |
| 1322 if (found != known_processes_.end()) { | |
| 1323 command_line = std::move(found->second); | |
| 1324 known_processes_.erase(found); | |
| 1325 } else { | |
| 1326 DLOG(ERROR) << "Recording exit of unknown process #" << process_id; | |
| 1327 } | |
| 1328 } | |
| 1329 | |
| 1330 // Use the current time to differentiate the process that just exited | |
| 1331 // from any that might be created in the future with the same ID. | |
| 1332 int64_t now_stamp = Time::Now().ToInternalValue(); | |
| 1333 | |
| 1334 // The persistent allocator is thread-safe so run the iteration and | |
| 1335 // adjustments on a worker thread if one was provided. | |
| 1336 if (task_runner && !task_runner->RunsTasksOnCurrentThread()) { | |
| 1337 task_runner->PostTask( | |
| 1338 FROM_HERE, | |
| 1339 Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this), | |
| 1340 process_id, now_stamp, exit_code, Passed(&command_line))); | |
| 1341 return; | |
| 1342 } | |
| 1343 | |
| 1344 CleanupAfterProcess(process_id, now_stamp, exit_code, | |
| 1345 std::move(command_line)); | |
| 1346 } | |
| 1347 | |
| 1348 void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) { | |
| 1349 process_data().SetInt(kProcessPhaseDataKey, phase); | |
| 1350 } | |
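
Because a phase is stored as a plain integer under `kProcessPhaseDataKey`, reading it back from a snapshot is symmetric with the lookup done in `CleanupAfterProcess()` below. A short reader-side sketch; the class-level scoping of `kProcessPhaseDataKey` and the `ProcessPhase` enum is assumed here, matching their unqualified use inside the member functions in this file.

```cpp
#include "base/debug/activity_tracker.h"

using base::debug::ActivityUserData;
using base::debug::GlobalActivityTracker;

// Sketch: given a snapshot of a process's user data, recover the last phase
// recorded via SetProcessPhase(). Mirrors the lookup in CleanupAfterProcess().
GlobalActivityTracker::ProcessPhase GetRecordedPhase(
    const ActivityUserData::Snapshot& snapshot) {
  auto found = snapshot.find(GlobalActivityTracker::kProcessPhaseDataKey);
  if (found == snapshot.end())
    return GlobalActivityTracker::PROCESS_PHASE_UNKNOWN;
  return static_cast<GlobalActivityTracker::ProcessPhase>(
      found->second.GetInt());
}
```
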
| 1351 | |
| 1352 void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id, | |
| 1353 int64_t exit_stamp, | |
| 1354 int exit_code, | |
| 1355 std::string&& command_line) { | |
| 1356 // The process may not have exited cleanly so it's necessary to go through | |
| 1357 // all the data structures it may have allocated in the persistent memory | |
| 1358 // segment and mark them as "released". This will allow them to be reused | |
| 1359 // later on. | |
| 1360 | |
| 1361 PersistentMemoryAllocator::Iterator iter(allocator_.get()); | |
| 1362 PersistentMemoryAllocator::Reference ref; | |
| 1363 | |
| 1364 ProcessExitCallback process_exit_callback; | |
| 1365 { | |
| 1366 AutoLock lock(global_tracker_lock_); | |
| 1367 process_exit_callback = process_exit_callback_; | |
| 1368 } | |
| 1369 if (process_exit_callback) { | |
| 1370 // Find the process's user-data record so the process phase can be passed | |
| 1371 // to the callback. | |
| 1372 ActivityUserData::Snapshot process_data_snapshot; | |
| 1373 while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) { | |
| 1374 const void* memory = allocator_->GetAsArray<char>( | |
| 1375 ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny); | |
| 1376 ProcessId found_id; | |
| 1377 int64_t create_stamp; | |
| 1378 if (ActivityUserData::GetOwningProcessId(memory, &found_id, | |
| 1379 &create_stamp)) { | |
| 1380 if (found_id == process_id && create_stamp < exit_stamp) { | |
| 1381 const ActivityUserData process_data(const_cast<void*>(memory), | |
| 1382 allocator_->GetAllocSize(ref)); | |
| 1383 process_data.CreateSnapshot(&process_data_snapshot); | |
| 1384 break; // No need to look for any others. | |
| 1385 } | |
| 1386 } | |
| 1387 } | |
| 1388 iter.Reset(); // So it starts anew when used below. | |
| 1389 | |
| 1390 // Record the process's phase at exit so the callback doesn't need to | |
| 1391 // go searching based on a private key value. | |
| 1393 ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN; | |
| 1394 auto phase = process_data_snapshot.find(kProcessPhaseDataKey); | |
| 1395 if (phase != process_data_snapshot.end()) | |
| 1396 exit_phase = static_cast<ProcessPhase>(phase->second.GetInt()); | |
| 1397 | |
| 1398 // Perform the callback. | |
| 1399 process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase, | |
| 1400 std::move(command_line), | |
| 1401 std::move(process_data_snapshot)); | |
| 1402 } | |
| 1403 | |
| 1404 // Find all allocations associated with the exited process and free them. | |
| 1405 uint32_t type; | |
| 1406 while ((ref = iter.GetNext(&type)) != 0) { | |
| 1407 switch (type) { | |
| 1408 case kTypeIdActivityTracker: | |
| 1409 case kTypeIdUserDataRecord: | |
| 1410 case kTypeIdProcessDataRecord: | |
| 1411 case ModuleInfoRecord::kPersistentTypeId: { | |
| 1412 const void* memory = allocator_->GetAsArray<char>( | |
| 1413 ref, type, PersistentMemoryAllocator::kSizeAny); | |
| 1414 ProcessId found_id; | |
| 1415 int64_t create_stamp; | |
| 1416 | |
| 1417 // By convention, the OwningProcess structure is always the first | |
| 1418 // field of the structure so there's no need to handle all the | |
| 1419 // cases separately. | |
| 1420 if (OwningProcess::GetOwningProcessId(memory, &found_id, | |
| 1421 &create_stamp)) { | |
| 1422 // Only change the type to be "free" if the process ID matches and | |
| 1423 // the creation time is before the exit time (so PID re-use doesn't | |
| 1424 // cause the erasure of something that is in-use). Memory is cleared | |
| 1425 // here, rather than when it's needed, so as to limit the impact at | |
| 1426 // that critical time. | |
| 1427 if (found_id == process_id && create_stamp < exit_stamp) | |
| 1428 allocator_->ChangeType(ref, ~type, type, /*clear=*/true); | |
| 1429 } | |
| 1430 } break; | |
| 1431 } | |
| 1432 } | |
| 1433 } | |
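
A sketch of the layout convention the cleanup loop relies on. The struct and field names below are illustrative, not the real definitions; the point is that every process-owned record type begins with the same `OwningProcess` header, so one `GetOwningProcessId(memory, ...)` call works on all of them without per-type handling.

```cpp
#include <stdint.h>

// Stand-in for base::debug::OwningProcess (illustrative fields only).
struct OwningProcessHeader {
  int64_t process_id;
  int64_t create_stamp;
};

// Any record type handled in the switch above starts with the header.
struct SomeTrackedRecord {
  OwningProcessHeader owner;  // Must be first: shared prefix across types.
  uint32_t payload_size;
  char payload[1];
};

// Because of the shared prefix, one cast serves every record type.
// Validity checks are omitted for brevity.
bool GetOwner(const void* memory, int64_t* pid, int64_t* stamp) {
  const auto* owner = static_cast<const OwningProcessHeader*>(memory);
  *pid = owner->process_id;
  *stamp = owner->create_stamp;
  return true;
}
```
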
| 1434 | |
| 1187 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { | 1435 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { |
| 1188 // Allocate at least one extra byte so the string is NUL terminated. All | 1436 // Allocate at least one extra byte so the string is NUL terminated. All |
| 1189 // memory returned by the allocator is guaranteed to be zeroed. | 1437 // memory returned by the allocator is guaranteed to be zeroed. |
| 1190 PersistentMemoryAllocator::Reference ref = | 1438 PersistentMemoryAllocator::Reference ref = |
| 1191 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); | 1439 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); |
| 1192 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, | 1440 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, |
| 1193 message.size() + 1); | 1441 message.size() + 1); |
| 1194 if (memory) { | 1442 if (memory) { |
| 1195 memcpy(memory, message.data(), message.size()); | 1443 memcpy(memory, message.data(), message.size()); |
| 1196 allocator_->MakeIterable(ref); | 1444 allocator_->MakeIterable(ref); |
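
Since every allocation is zero-filled and one byte larger than the message, each stored message is a valid NUL-terminated C string. A sketch of how an analyzer might read them back; access to the `kTypeIdGlobalLogMessage` constant is assumed for illustration, so it is passed in as a parameter here.

```cpp
#include <stdint.h>
#include <stdio.h>

#include "base/metrics/persistent_memory_allocator.h"

// Sketch: iterate the persistent segment and print every recorded log line.
void DumpLogMessages(const base::PersistentMemoryAllocator* allocator,
                     uint32_t log_message_type_id) {
  base::PersistentMemoryAllocator::Iterator iter(allocator);
  base::PersistentMemoryAllocator::Reference ref;
  while ((ref = iter.GetNextOfType(log_message_type_id)) != 0) {
    // The stored block is guaranteed NUL-terminated (see above).
    const char* message = allocator->GetAsArray<char>(
        ref, log_message_type_id, base::PersistentMemoryAllocator::kSizeAny);
    if (message)
      printf("log: %s\n", message);
  }
}
```
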
| (...skipping 43 matching lines...) | |
| 1240 kTypeIdActivityTracker, | 1488 kTypeIdActivityTracker, |
| 1241 kTypeIdActivityTrackerFree, | 1489 kTypeIdActivityTrackerFree, |
| 1242 stack_memory_size_, | 1490 stack_memory_size_, |
| 1243 kCachedThreadMemories, | 1491 kCachedThreadMemories, |
| 1244 /*make_iterable=*/true), | 1492 /*make_iterable=*/true), |
| 1245 user_data_allocator_(allocator_.get(), | 1493 user_data_allocator_(allocator_.get(), |
| 1246 kTypeIdUserDataRecord, | 1494 kTypeIdUserDataRecord, |
| 1247 kTypeIdUserDataRecordFree, | 1495 kTypeIdUserDataRecordFree, |
| 1248 kUserDataSize, | 1496 kUserDataSize, |
| 1249 kCachedUserDataMemories, | 1497 kCachedUserDataMemories, |
| 1250 /*make_iterable=*/false), | 1498 /*make_iterable=*/true), |
| 1499 process_data_(allocator_->GetAsArray<char>( | |
| 1500 AllocateFrom(allocator_.get(), | |
| 1501 kTypeIdProcessDataRecordFree, | |
| 1502 kProcessDataSize, | |
| 1503 kTypeIdProcessDataRecord), | |
| 1504 kTypeIdProcessDataRecord, | |
| 1505 kProcessDataSize), | |
| 1506 kProcessDataSize), | |
| 1251 global_data_( | 1507 global_data_( |
| 1252 allocator_->GetAsArray<char>( | 1508 allocator_->GetAsArray<char>( |
| 1253 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), | 1509 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), |
| 1254 kTypeIdGlobalDataRecord, | 1510 kTypeIdGlobalDataRecord, |
| 1255 PersistentMemoryAllocator::kSizeAny), | 1511 kGlobalDataSize), |
| 1256 kGlobalDataSize) { | 1512 kGlobalDataSize) { |
| 1257 // Ensure the passed memory is valid and empty (iterator finds nothing). | 1513 // Ensure the passed memory is valid and empty (iterator finds nothing). |
| 1258 uint32_t type; | 1514 uint32_t type; |
| 1259 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | 1515 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
| 1260 | 1516 |
| 1261 // Ensure that there is no other global object and then make this one such. | 1517 // Ensure that there is no other global object and then make this one such. |
| 1262 DCHECK(!g_tracker_); | 1518 DCHECK(!g_tracker_); |
| 1263 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); | 1519 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); |
| 1264 | 1520 |
| 1265 // The global records must be iterable in order to be found by an analyzer. | 1521 // The data records must be iterable in order to be found by an analyzer. |
| 1522 allocator_->MakeIterable(allocator_->GetAsReference( | |
| 1523 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord)); | |
| 1266 allocator_->MakeIterable(allocator_->GetAsReference( | 1524 allocator_->MakeIterable(allocator_->GetAsReference( |
| 1267 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); | 1525 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); |
| 1268 | 1526 |
| 1527 // Note that this process has launched. | |
| 1528 SetProcessPhase(PROCESS_LAUNCHED); | |
| 1529 | |
| 1269 // Fetch and record all activated field trials. | 1530 // Fetch and record all activated field trials. |
| 1270 FieldTrial::ActiveGroups active_groups; | 1531 FieldTrial::ActiveGroups active_groups; |
| 1271 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); | 1532 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); |
| 1272 for (auto& group : active_groups) | 1533 for (auto& group : active_groups) |
| 1273 RecordFieldTrial(group.trial_name, group.group_name); | 1534 RecordFieldTrial(group.trial_name, group.group_name); |
| 1274 } | 1535 } |
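
One caveat worth noting: the loop above only captures trials already active when the tracker is constructed. A hypothetical caller that activates a trial later would record it itself; the trial and group names below are illustrative.

```cpp
#include "base/debug/activity_tracker.h"

// Hypothetical helper: record a trial that activated after tracking started.
void RecordLateFieldTrial() {
  base::debug::GlobalActivityTracker* tracker =
      base::debug::GlobalActivityTracker::Get();
  if (tracker)
    tracker->RecordFieldTrial("MyLateTrial", "Enabled");
}
```
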
| 1275 | 1536 |
| 1276 GlobalActivityTracker::~GlobalActivityTracker() { | 1537 GlobalActivityTracker::~GlobalActivityTracker() { |
| 1277 DCHECK_EQ(Get(), this); | 1538 DCHECK_EQ(Get(), this); |
| 1278 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 1539 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
| (...skipping 99 matching lines...) | |
| 1378 : GlobalActivityTracker::ScopedThreadActivity( | 1639 : GlobalActivityTracker::ScopedThreadActivity( |
| 1379 program_counter, | 1640 program_counter, |
| 1380 nullptr, | 1641 nullptr, |
| 1381 Activity::ACT_PROCESS_WAIT, | 1642 Activity::ACT_PROCESS_WAIT, |
| 1382 ActivityData::ForProcess(process->Pid()), | 1643 ActivityData::ForProcess(process->Pid()), |
| 1383 /*lock_allowed=*/true) {} | 1644 /*lock_allowed=*/true) {} |
| 1384 #endif | 1645 #endif |
| 1385 | 1646 |
| 1386 } // namespace debug | 1647 } // namespace debug |
| 1387 } // namespace base | 1648 } // namespace base |