Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2680123003: Multi-Process Tracking Support (Closed)
Patch Set: addressed review comments by manzagop | Created 3 years, 9 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/debug/stack_trace.h" 12 #include "base/debug/stack_trace.h"
13 #include "base/files/file.h" 13 #include "base/files/file.h"
14 #include "base/files/file_path.h" 14 #include "base/files/file_path.h"
15 #include "base/files/memory_mapped_file.h" 15 #include "base/files/memory_mapped_file.h"
16 #include "base/logging.h" 16 #include "base/logging.h"
17 #include "base/memory/ptr_util.h" 17 #include "base/memory/ptr_util.h"
18 #include "base/metrics/field_trial.h" 18 #include "base/metrics/field_trial.h"
19 #include "base/metrics/histogram_macros.h" 19 #include "base/metrics/histogram_macros.h"
20 #include "base/pending_task.h" 20 #include "base/pending_task.h"
21 #include "base/pickle.h" 21 #include "base/pickle.h"
22 #include "base/process/process.h" 22 #include "base/process/process.h"
23 #include "base/process/process_handle.h" 23 #include "base/process/process_handle.h"
24 #include "base/stl_util.h" 24 #include "base/stl_util.h"
25 #include "base/strings/string_util.h" 25 #include "base/strings/string_util.h"
26 #include "base/strings/utf_string_conversions.h"
26 #include "base/threading/platform_thread.h" 27 #include "base/threading/platform_thread.h"
27 28
28 namespace base { 29 namespace base {
29 namespace debug { 30 namespace debug {
30 31
31 namespace { 32 namespace {
32 33
33 // A number that identifies the memory as having been initialized. It's
34 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
35 // A version number is added on so that major structure changes won't try to
36 // read an older version (since the cookie won't match).
37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
38
39 // The minimum depth a stack should support. 34 // The minimum depth a stack should support.
40 const int kMinStackDepth = 2; 35 const int kMinStackDepth = 2;
41 36
42 // The amount of memory set aside for holding arbitrary user data (key/value 37 // The amount of memory set aside for holding arbitrary user data (key/value
43 // pairs) globally or associated with ActivityData entries. 38 // pairs) globally or associated with ActivityData entries.
44 const size_t kUserDataSize = 1 << 10; // 1 KiB 39 const size_t kUserDataSize = 1 << 10; // 1 KiB
40 const size_t kProcessDataSize = 4 << 10; // 4 KiB
45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB 41 const size_t kGlobalDataSize = 16 << 10; // 16 KiB
46 const size_t kMaxUserDataNameLength = 42 const size_t kMaxUserDataNameLength =
47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); 43 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
48 44
49 // A constant used to indicate that module information is changing. 45 // A constant used to indicate that module information is changing.
50 const uint32_t kModuleInformationChanging = 0x80000000; 46 const uint32_t kModuleInformationChanging = 0x80000000;
51 47
48 // The key used to record process information.
49 const char kProcessPhaseDataKey[] = "process-phase";
50
51 // An atomically incrementing number, used to check for recreations of objects
52 // in the same memory space.
53 StaticAtomicSequenceNumber g_next_id;
54
52 union ThreadRef { 55 union ThreadRef {
53 int64_t as_id; 56 int64_t as_id;
54 #if defined(OS_WIN) 57 #if defined(OS_WIN)
55 // On Windows, the handle itself is often a pseudo-handle with a common 58 // On Windows, the handle itself is often a pseudo-handle with a common
56 // value meaning "this thread" and so the thread-id is used. The former 59 // value meaning "this thread" and so the thread-id is used. The former
57 // can be converted to a thread-id with a system call. 60 // can be converted to a thread-id with a system call.
58 PlatformThreadId as_tid; 61 PlatformThreadId as_tid;
59 #elif defined(OS_POSIX) 62 #elif defined(OS_POSIX)
60 // On Posix, the handle is always a unique identifier so no conversion 63 // On Posix, the handle is always a unique identifier so no conversion
61 // needs to be done. However, its value is officially opaque so there 64 // needs to be done. However, its value is officially opaque so there
62 // is no one correct way to convert it to a numerical identifier. 65 // is no one correct way to convert it to a numerical identifier.
63 PlatformThreadHandle::Handle as_handle; 66 PlatformThreadHandle::Handle as_handle;
64 #endif 67 #endif
65 }; 68 };
66 69
70 // Get the next non-zero identifier. It is only unique within a process.
71 uint32_t GetNextDataId() {
72 uint32_t id;
73 while ((id = g_next_id.GetNext()) == 0)
74 ;
75 return id;
76 }
77
78 // Finds and reuses a specific allocation or creates a new one.
79 PersistentMemoryAllocator::Reference AllocateFrom(
80 PersistentMemoryAllocator* allocator,
81 uint32_t from_type,
82 size_t size,
83 uint32_t to_type) {
84 PersistentMemoryAllocator::Iterator iter(allocator);
85 PersistentMemoryAllocator::Reference ref;
86 while ((ref = iter.GetNextOfType(from_type)) != 0) {
87 DCHECK_LE(size, allocator->GetAllocSize(ref));
88 // This can fail if another thread has just taken it. It is assumed that
manzagop (departed) 2017/03/07 22:08:57 typo: isassumed
bcwhite 2017/03/09 14:07:54 Done.
89 // the memory is cleared during the "free" operation.
90 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
91 return ref;
92 }
93
94 return allocator->Allocate(size, to_type);
95 }
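
A usage sketch for the reuse-or-allocate helper above; the type IDs and record size are hypothetical placeholders, not values from this patch:

// Hypothetical type IDs for illustration only.
constexpr uint32_t kTypeIdMyRecordFree = 0x0F0F0F0F;
constexpr uint32_t kTypeIdMyRecord = 0xF0F0F0F0;

PersistentMemoryAllocator::Reference AcquireMyRecord(
    PersistentMemoryAllocator* allocator) {
  // Either flips a previously freed 64-byte block back to "in use" or,
  // if no free block of that type exists, allocates a fresh one.
  return AllocateFrom(allocator, kTypeIdMyRecordFree, /*size=*/64,
                      kTypeIdMyRecord);
}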
96
67 // Determines the previous aligned index. 97 // Determines the previous aligned index.
68 size_t RoundDownToAlignment(size_t index, size_t alignment) { 98 size_t RoundDownToAlignment(size_t index, size_t alignment) {
69 return index & (0 - alignment); 99 return index & (0 - alignment);
70 } 100 }
71 101
72 // Determines the next aligned index. 102 // Determines the next aligned index.
73 size_t RoundUpToAlignment(size_t index, size_t alignment) { 103 size_t RoundUpToAlignment(size_t index, size_t alignment) {
74 return (index + (alignment - 1)) & (0 - alignment); 104 return (index + (alignment - 1)) & (0 - alignment);
75 } 105 }
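
Since every alignment used here is a power of two, (0 - alignment) is the unsigned-arithmetic spelling of ~(alignment - 1), so the AND clears the low bits. A standalone sketch of the arithmetic, runnable on its own:

#include <cassert>
#include <cstddef>

int main() {
  const size_t alignment = 8;
  // Round down: mask off the low bits.
  assert((13 & (0 - alignment)) == 8);
  // Round up: bump to the next boundary first, then mask.
  assert(((13 + (alignment - 1)) & (0 - alignment)) == 16);
  // Already-aligned values are unchanged.
  assert(((16 + (alignment - 1)) & (0 - alignment)) == 16);
  return 0;
}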
76 106
77 } // namespace 107 } // namespace
78 108
109 OwningProcess::OwningProcess() {}
110 OwningProcess::~OwningProcess() {}
111
112 void OwningProcess::Release_Initialize() {
113 uint32_t old_id = data_id.load(std::memory_order_acquire);
114 DCHECK_EQ(0U, old_id);
115 process_id = GetCurrentProcId();
116 create_stamp = Time::Now().ToInternalValue();
117 data_id.store(GetNextDataId(), std::memory_order_release);
118 }
119
120 void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
121 DCHECK_NE(0U, data_id);
122 process_id = pid;
123 create_stamp = stamp;
124 }
125
126 // static
127 bool OwningProcess::GetOwningProcessId(const void* memory,
128 ProcessId* out_id,
129 int64_t* out_stamp) {
130 const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
131 uint32_t id = info->data_id.load(std::memory_order_acquire);
132 if (id == 0)
133 return false;
134
135 *out_id = static_cast<ProcessId>(info->process_id);
136 *out_stamp = info->create_stamp;
137 return id == info->data_id.load(std::memory_order_seq_cst);
138 }
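
The acquire-load, copy, then seq_cst re-load above is a lock-free validity check: if data_id was zero or changed between the two loads, the record was uninitialized or recycled mid-read and the copied fields cannot be trusted. A minimal sketch of the same idiom with hypothetical names:

#include <atomic>
#include <cstdint>

// A record guarded by a generation id that is published last (release)
// by the writer, mirroring OwningProcess::Release_Initialize().
struct Record {
  std::atomic<uint32_t> id{0};
  int64_t payload = 0;
};

// Returns true only if |payload| was copied while |id| stayed stable.
bool ReadStable(const Record& r, int64_t* out) {
  uint32_t before = r.id.load(std::memory_order_acquire);
  if (before == 0)
    return false;  // Never initialized.
  *out = r.payload;  // Plain read of the guarded field.
  return before == r.id.load(std::memory_order_seq_cst);
}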
79 139
80 // It doesn't matter what is contained in this (though it will be all zeros) 140 // It doesn't matter what is contained in this (though it will be all zeros)
81 // as only the address of it is important. 141 // as only the address of it is important.
82 const ActivityData kNullActivityData = {}; 142 const ActivityData kNullActivityData = {};
83 143
84 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 144 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
85 ThreadRef thread_ref; 145 ThreadRef thread_ref;
86 thread_ref.as_id = 0; // Zero the union in case other is smaller. 146 thread_ref.as_id = 0; // Zero the union in case other is smaller.
87 #if defined(OS_WIN) 147 #if defined(OS_WIN)
88 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 148 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
(...skipping 150 matching lines...)
239 StringPiece ActivityUserData::TypedValue::GetReference() const { 299 StringPiece ActivityUserData::TypedValue::GetReference() const {
240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); 300 DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
241 return ref_value_; 301 return ref_value_;
242 } 302 }
243 303
244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { 304 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); 305 DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
246 return ref_value_; 306 return ref_value_;
247 } 307 }
248 308
309 // These are required because std::atomic is (currently) not a POD type and
310 // thus clang requires explicit out-of-line constructors and destructors even
311 // when they do nothing.
249 ActivityUserData::ValueInfo::ValueInfo() {} 312 ActivityUserData::ValueInfo::ValueInfo() {}
250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 313 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
251 ActivityUserData::ValueInfo::~ValueInfo() {} 314 ActivityUserData::ValueInfo::~ValueInfo() {}
252 315 ActivityUserData::MemoryHeader::MemoryHeader() {}
253 StaticAtomicSequenceNumber ActivityUserData::next_id_; 316 ActivityUserData::MemoryHeader::~MemoryHeader() {}
317 ActivityUserData::FieldHeader::FieldHeader() {}
318 ActivityUserData::FieldHeader::~FieldHeader() {}
254 319
255 ActivityUserData::ActivityUserData(void* memory, size_t size) 320 ActivityUserData::ActivityUserData(void* memory, size_t size)
256 : memory_(reinterpret_cast<char*>(memory)), 321 : memory_(reinterpret_cast<char*>(memory)),
257 available_(RoundDownToAlignment(size, kMemoryAlignment)), 322 available_(RoundDownToAlignment(size, kMemoryAlignment)),
258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { 323 header_(reinterpret_cast<MemoryHeader*>(memory)) {
259 // It's possible that no user data is being stored. 324 // It's possible that no user data is being stored.
260 if (!memory_) 325 if (!memory_)
261 return; 326 return;
262 327
263 DCHECK_LT(kMemoryAlignment, available_); 328 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
264 if (id_->load(std::memory_order_relaxed) == 0) { 329 DCHECK_LT(sizeof(MemoryHeader), available_);
265 // Generate a new ID and store it in the first 32-bit word of memory_. 330 if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
266 // |id_| must be non-zero for non-sink instances. 331 header_->owner.Release_Initialize();
267 uint32_t id; 332 memory_ += sizeof(MemoryHeader);
268 while ((id = next_id_.GetNext()) == 0) 333 available_ -= sizeof(MemoryHeader);
269 ;
270 id_->store(id, std::memory_order_relaxed);
271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
272 }
273 memory_ += kMemoryAlignment;
274 available_ -= kMemoryAlignment;
275 334
276 // If there is already data present, load that. This allows the same class 335 // If there is already data present, load that. This allows the same class
277 // to be used for analysis through snapshots. 336 // to be used for analysis through snapshots.
278 ImportExistingData(); 337 ImportExistingData();
279 } 338 }
280 339
281 ActivityUserData::~ActivityUserData() {} 340 ActivityUserData::~ActivityUserData() {}
282 341
342 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
343 DCHECK(output_snapshot);
344 DCHECK(output_snapshot->empty());
345
346 // Find any new data that may have been added by an active instance of this
347 // class that is adding records.
348 ImportExistingData();
349
350 for (const auto& entry : values_) {
351 TypedValue value;
352 value.type_ = entry.second.type;
353 DCHECK_GE(entry.second.extent,
354 entry.second.size_ptr->load(std::memory_order_relaxed));
355
356 switch (entry.second.type) {
357 case RAW_VALUE:
358 case STRING_VALUE:
359 value.long_value_ =
360 std::string(reinterpret_cast<char*>(entry.second.memory),
361 entry.second.size_ptr->load(std::memory_order_relaxed));
362 break;
363 case RAW_VALUE_REFERENCE:
364 case STRING_VALUE_REFERENCE: {
365 ReferenceRecord* ref =
366 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
367 value.ref_value_ = StringPiece(
368 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
369 static_cast<size_t>(ref->size));
370 } break;
371 case BOOL_VALUE:
372 case CHAR_VALUE:
373 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
374 break;
375 case SIGNED_VALUE:
376 case UNSIGNED_VALUE:
377 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
378 break;
379 case END_OF_VALUES: // Included for completeness purposes.
380 NOTREACHED();
381 }
382 auto inserted = output_snapshot->insert(
383 std::make_pair(entry.second.name.as_string(), std::move(value)));
384 DCHECK(inserted.second); // True if inserted, false if existed.
385 }
386
387 return true;
388 }
389
390 const void* ActivityUserData::GetBaseAddress() {
391 // The |memory_| pointer advances as elements are written but the |header_|
392 // value is always at the start of the block so just return that.
393 return header_;
394 }
395
396 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
397 int64_t stamp) {
398 if (!header_)
399 return;
400 header_->owner.SetOwningProcessIdForTesting(pid, stamp);
401 }
402
403 // static
404 bool ActivityUserData::GetOwningProcessId(const void* memory,
405 ProcessId* out_id,
406 int64_t* out_stamp) {
407 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
408 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
409 }
410
283 void ActivityUserData::Set(StringPiece name, 411 void ActivityUserData::Set(StringPiece name,
284 ValueType type, 412 ValueType type,
285 const void* memory, 413 const void* memory,
286 size_t size) { 414 size_t size) {
287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); 415 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), 416 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
289 size); 417 size);
290 418
291 // It's possible that no user data is being stored. 419 // It's possible that no user data is being stored.
292 if (!memory_) 420 if (!memory_)
293 return; 421 return;
294 422
295 // The storage of a name is limited so use that limit during lookup. 423 // The storage of a name is limited so use that limit during lookup.
296 if (name.length() > kMaxUserDataNameLength) 424 if (name.length() > kMaxUserDataNameLength)
297 name.set(name.data(), kMaxUserDataNameLength); 425 name.set(name.data(), kMaxUserDataNameLength);
298 426
299 ValueInfo* info; 427 ValueInfo* info;
300 auto existing = values_.find(name); 428 auto existing = values_.find(name);
301 if (existing != values_.end()) { 429 if (existing != values_.end()) {
302 info = &existing->second; 430 info = &existing->second;
303 } else { 431 } else {
304 // The name size is limited to what can be held in a single byte but 432 // The name size is limited to what can be held in a single byte but
305 // because there are no alignment constraints on strings, it's set tight 433 // because there are no alignment constraints on strings, it's set tight
306 // against the header. Its extent (the reserved space, even if it's not 434 // against the header. Its extent (the reserved space, even if it's not
307 // all used) is calculated so that, when pressed against the header, the 435 // all used) is calculated so that, when pressed against the header, the
308 // following field will be aligned properly. 436 // following field will be aligned properly.
309 size_t name_size = name.length(); 437 size_t name_size = name.length();
310 size_t name_extent = 438 size_t name_extent =
311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - 439 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
312 sizeof(Header); 440 sizeof(FieldHeader);
313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); 441 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
314 442
315 // The "base size" is the size of the header and (padded) string key. Stop 443 // The "base size" is the size of the header and (padded) string key. Stop
316 // now if there's not room enough for even this. 444 // now if there's not room enough for even this.
317 size_t base_size = sizeof(Header) + name_extent; 445 size_t base_size = sizeof(FieldHeader) + name_extent;
318 if (base_size > available_) 446 if (base_size > available_)
319 return; 447 return;
320 448
321 // The "full size" is the size for storing the entire value. 449 // The "full size" is the size for storing the entire value.
322 size_t full_size = std::min(base_size + value_extent, available_); 450 size_t full_size = std::min(base_size + value_extent, available_);
323 451
324 // If the value is actually a single byte, see if it can be stuffed at the 452 // If the value is actually a single byte, see if it can be stuffed at the
325 // end of the name extent rather than wasting kMemoryAlignment bytes. 453 // end of the name extent rather than wasting kMemoryAlignment bytes.
326 if (size == 1 && name_extent > name_size) { 454 if (size == 1 && name_extent > name_size) {
327 full_size = base_size; 455 full_size = base_size;
328 --name_extent; 456 --name_extent;
329 --base_size; 457 --base_size;
330 } 458 }
331 459
332 // Truncate the stored size to the amount of available memory. Stop now if 460 // Truncate the stored size to the amount of available memory. Stop now if
333 // there's not any room for even part of the value. 461 // there's not any room for even part of the value.
334 size = std::min(full_size - base_size, size); 462 size = std::min(full_size - base_size, size);
335 if (size == 0) 463 if (size == 0)
336 return; 464 return;
337 465
338 // Allocate a chunk of memory. 466 // Allocate a chunk of memory.
339 Header* header = reinterpret_cast<Header*>(memory_); 467 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
340 memory_ += full_size; 468 memory_ += full_size;
341 available_ -= full_size; 469 available_ -= full_size;
342 470
343 // Datafill the header and name records. Memory must be zeroed. The |type| 471 // Datafill the header and name records. Memory must be zeroed. The |type|
344 // is written last, atomically, to release all the other values. 472 // is written last, atomically, to release all the other values.
345 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); 473 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
346 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); 474 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
347 header->name_size = static_cast<uint8_t>(name_size); 475 header->name_size = static_cast<uint8_t>(name_size);
348 header->record_size = full_size; 476 header->record_size = full_size;
349 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); 477 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
350 void* value_memory = 478 void* value_memory =
351 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; 479 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
352 memcpy(name_memory, name.data(), name_size); 480 memcpy(name_memory, name.data(), name_size);
353 header->type.store(type, std::memory_order_release); 481 header->type.store(type, std::memory_order_release);
354 482
355 // Create an entry in |values_| so that this field can be found and changed 483 // Create an entry in |values_| so that this field can be found and changed
356 // later on without having to allocate new entries. 484 // later on without having to allocate new entries.
357 StringPiece persistent_name(name_memory, name_size); 485 StringPiece persistent_name(name_memory, name_size);
358 auto inserted = 486 auto inserted =
359 values_.insert(std::make_pair(persistent_name, ValueInfo())); 487 values_.insert(std::make_pair(persistent_name, ValueInfo()));
360 DCHECK(inserted.second); // True if inserted, false if existed. 488 DCHECK(inserted.second); // True if inserted, false if existed.
361 info = &inserted.first->second; 489 info = &inserted.first->second;
362 info->name = persistent_name; 490 info->name = persistent_name;
363 info->memory = value_memory; 491 info->memory = value_memory;
364 info->size_ptr = &header->value_size; 492 info->size_ptr = &header->value_size;
365 info->extent = full_size - sizeof(Header) - name_extent; 493 info->extent = full_size - sizeof(FieldHeader) - name_extent;
366 info->type = type; 494 info->type = type;
367 } 495 }
368 496
369 // Copy the value data to storage. The |size| is written last, atomically, to 497 // Copy the value data to storage. The |size| is written last, atomically, to
370 // release the copied data. Until then, a parallel reader will just ignore 498 // release the copied data. Until then, a parallel reader will just ignore
371 // records with a zero size. 499 // records with a zero size.
372 DCHECK_EQ(type, info->type); 500 DCHECK_EQ(type, info->type);
373 size = std::min(size, info->extent); 501 size = std::min(size, info->extent);
374 info->size_ptr->store(0, std::memory_order_seq_cst); 502 info->size_ptr->store(0, std::memory_order_seq_cst);
375 memcpy(info->memory, memory, size); 503 memcpy(info->memory, memory, size);
376 info->size_ptr->store(size, std::memory_order_release); 504 info->size_ptr->store(size, std::memory_order_release);
377 } 505 }
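
The zero-store, memcpy, release-store sequence at the end of Set() is what lets a concurrent reader trust any non-zero size it acquires: the data bytes are guaranteed visible before the size that covers them. A reduced sketch of the writer/reader pair, using a hypothetical buffer rather than this class's layout:

#include <atomic>
#include <cstdint>
#include <cstring>

// Writer: retract the record (size = 0), overwrite the bytes, then
// republish with a release-store so the new bytes happen-before any
// reader that acquires the new size.
void Publish(std::atomic<uint16_t>* size, char* dest,
             const char* src, uint16_t len) {
  size->store(0, std::memory_order_seq_cst);
  memcpy(dest, src, len);
  size->store(len, std::memory_order_release);
}

// Reader: a zero size means "in flux"; skip rather than block.
bool Read(const std::atomic<uint16_t>& size, const char* src,
          char* dest, uint16_t capacity) {
  uint16_t len = size.load(std::memory_order_acquire);
  if (len == 0 || len > capacity)
    return false;
  memcpy(dest, src, len);
  return true;
}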
378 506
379 void ActivityUserData::SetReference(StringPiece name, 507 void ActivityUserData::SetReference(StringPiece name,
380 ValueType type, 508 ValueType type,
381 const void* memory, 509 const void* memory,
382 size_t size) { 510 size_t size) {
383 ReferenceRecord rec; 511 ReferenceRecord rec;
384 rec.address = reinterpret_cast<uintptr_t>(memory); 512 rec.address = reinterpret_cast<uintptr_t>(memory);
385 rec.size = size; 513 rec.size = size;
386 Set(name, type, &rec, sizeof(rec)); 514 Set(name, type, &rec, sizeof(rec));
387 } 515 }
388 516
389 void ActivityUserData::ImportExistingData() const { 517 void ActivityUserData::ImportExistingData() const {
390 while (available_ > sizeof(Header)) { 518 while (available_ > sizeof(FieldHeader)) {
391 Header* header = reinterpret_cast<Header*>(memory_); 519 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
392 ValueType type = 520 ValueType type =
393 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 521 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
394 if (type == END_OF_VALUES) 522 if (type == END_OF_VALUES)
395 return; 523 return;
396 if (header->record_size > available_) 524 if (header->record_size > available_)
397 return; 525 return;
398 526
399 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, 527 size_t value_offset = RoundUpToAlignment(
400 kMemoryAlignment); 528 sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
401 if (header->record_size == value_offset && 529 if (header->record_size == value_offset &&
402 header->value_size.load(std::memory_order_relaxed) == 1) { 530 header->value_size.load(std::memory_order_relaxed) == 1) {
403 value_offset -= 1; 531 value_offset -= 1;
404 } 532 }
405 if (value_offset + header->value_size > header->record_size) 533 if (value_offset + header->value_size > header->record_size)
406 return; 534 return;
407 535
408 ValueInfo info; 536 ValueInfo info;
409 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); 537 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
410 info.type = type; 538 info.type = type;
411 info.memory = memory_ + value_offset; 539 info.memory = memory_ + value_offset;
412 info.size_ptr = &header->value_size; 540 info.size_ptr = &header->value_size;
413 info.extent = header->record_size - value_offset; 541 info.extent = header->record_size - value_offset;
414 542
415 StringPiece key(info.name); 543 StringPiece key(info.name);
416 values_.insert(std::make_pair(key, std::move(info))); 544 values_.insert(std::make_pair(key, std::move(info)));
417 545
418 memory_ += header->record_size; 546 memory_ += header->record_size;
419 available_ -= header->record_size; 547 available_ -= header->record_size;
420 } 548 }
421 } 549 }
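
Each record walked above is laid out as a FieldHeader, the unpadded name pressed directly against it, then the value starting at the next alignment boundary. A sketch of the offset math, assuming (for illustration only) an 8-byte FieldHeader and an 8-byte kMemoryAlignment:

#include <cstddef>

constexpr size_t kAssumedHeaderSize = 8;  // Placeholder, not the real size.
constexpr size_t kAssumedAlignment = 8;

constexpr size_t RoundUp(size_t n, size_t a) {
  return (n + (a - 1)) & (0 - a);
}

// For a 5-character name: bytes [0,8) header, [8,13) name, [13,16)
// padding, value from 16 on, matching ImportExistingData()'s value_offset.
static_assert(RoundUp(kAssumedHeaderSize + 5, kAssumedAlignment) == 16,
              "value offset");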
422 550
423 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
424 DCHECK(output_snapshot);
425 DCHECK(output_snapshot->empty());
426
427 // Find any new data that may have been added by an active instance of this
428 // class that is adding records.
429 ImportExistingData();
430
431 for (const auto& entry : values_) {
432 TypedValue value;
433 value.type_ = entry.second.type;
434 DCHECK_GE(entry.second.extent,
435 entry.second.size_ptr->load(std::memory_order_relaxed));
436
437 switch (entry.second.type) {
438 case RAW_VALUE:
439 case STRING_VALUE:
440 value.long_value_ =
441 std::string(reinterpret_cast<char*>(entry.second.memory),
442 entry.second.size_ptr->load(std::memory_order_relaxed));
443 break;
444 case RAW_VALUE_REFERENCE:
445 case STRING_VALUE_REFERENCE: {
446 ReferenceRecord* ref =
447 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
448 value.ref_value_ = StringPiece(
449 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
450 static_cast<size_t>(ref->size));
451 } break;
452 case BOOL_VALUE:
453 case CHAR_VALUE:
454 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
455 break;
456 case SIGNED_VALUE:
457 case UNSIGNED_VALUE:
458 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
459 break;
460 case END_OF_VALUES: // Included for completeness purposes.
461 NOTREACHED();
462 }
463 auto inserted = output_snapshot->insert(
464 std::make_pair(entry.second.name.as_string(), std::move(value)));
465 DCHECK(inserted.second); // True if inserted, false if existed.
466 }
467
468 return true;
469 }
470
471 const void* ActivityUserData::GetBaseAddress() {
472 // The |memory_| pointer advances as elements are written but the |id_|
473 // value is always at the start of the block so just return that.
474 return id_;
475 }
476
477 // This information is kept for every thread that is tracked. It is filled 551 // This information is kept for every thread that is tracked. It is filled
478 // the very first time the thread is seen. All fields must be of exact sizes 552 // the very first time the thread is seen. All fields must be of exact sizes
479 // so there is no issue moving between 32 and 64-bit builds. 553 // so there is no issue moving between 32 and 64-bit builds.
480 struct ThreadActivityTracker::Header { 554 struct ThreadActivityTracker::Header {
481 // Defined in .h for analyzer access. Increment this if structure changes! 555 // Defined in .h for analyzer access. Increment this if structure changes!
482 static constexpr uint32_t kPersistentTypeId = 556 static constexpr uint32_t kPersistentTypeId =
483 GlobalActivityTracker::kTypeIdActivityTracker; 557 GlobalActivityTracker::kTypeIdActivityTracker;
484 558
485 // Expected size for 32/64-bit check. 559 // Expected size for 32/64-bit check.
486 static constexpr size_t kExpectedInstanceSize = 80; 560 static constexpr size_t kExpectedInstanceSize =
561 OwningProcess::kExpectedInstanceSize + 72;
487 562
488 // This unique number indicates a valid initialization of the memory. 563 // This information uniquely identifies a process.
489 std::atomic<uint32_t> cookie; 564 OwningProcess owner;
490 565
491 // The number of Activity slots (spaces that can hold an Activity) that 566 // The thread-id (thread_ref.as_id) to which this data belongs. This number
492 // immediately follow this structure in memory. 567 // is not guaranteed to mean anything but combined with the process-id from
493 uint32_t stack_slots; 568 // OwningProcess is unique among all active trackers.
494
495 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
496 // These identifiers are not guaranteed to mean anything but are unique, in
497 // combination, among all active trackers. It would be nice to always have
498 // the process_id be a 64-bit value but the necessity of having it atomic
499 // (for the memory barriers it provides) limits it to the natural word size
500 // of the machine.
501 #ifdef ARCH_CPU_64_BITS
502 std::atomic<int64_t> process_id;
503 #else
504 std::atomic<int32_t> process_id;
505 int32_t process_id_padding;
506 #endif
507 ThreadRef thread_ref; 569 ThreadRef thread_ref;
508 570
509 // The start-time and start-ticks when the data was created. Each activity 571 // The start-time and start-ticks when the data was created. Each activity
510 // record has a |time_internal| value that can be converted to a "wall time" 572 // record has a |time_internal| value that can be converted to a "wall time"
511 // with these two values. 573 // with these two values.
512 int64_t start_time; 574 int64_t start_time;
513 int64_t start_ticks; 575 int64_t start_ticks;
514 576
577 // The number of Activity slots (spaces that can hold an Activity) that
578 // immediately follow this structure in memory.
579 uint32_t stack_slots;
580
581 // Some padding to keep everything 64-bit aligned.
582 uint32_t padding;
583
515 // The current depth of the stack. This may be greater than the number of 584 // The current depth of the stack. This may be greater than the number of
516 // slots. If the depth exceeds the number of slots, the newest entries 585 // slots. If the depth exceeds the number of slots, the newest entries
517 // won't be recorded. 586 // won't be recorded.
518 std::atomic<uint32_t> current_depth; 587 std::atomic<uint32_t> current_depth;
519 588
520 // A memory location used to indicate if changes have been made to the stack 589 // A memory location used to indicate if changes have been made to the stack
521 // that would invalidate an in-progress read of its contents. The active 590 // that would invalidate an in-progress read of its contents. The active
522 // tracker will zero the value whenever something gets popped from the 591 // tracker will zero the value whenever something gets popped from the
523 // stack. A monitoring tracker can write a non-zero value here, copy the 592 // stack. A monitoring tracker can write a non-zero value here, copy the
524 // stack contents, and read the value to know, if it is still non-zero, that 593 // stack contents, and read the value to know, if it is still non-zero, that
(...skipping 62 matching lines...)
587 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), 656 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
588 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); 657 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
589 658
590 // Ensure that the alignment of Activity.data is properly aligned to a 659 // Ensure that the alignment of Activity.data is properly aligned to a
591 // 64-bit boundary so there are no interoperability-issues across cpu 660 // 64-bit boundary so there are no interoperability-issues across cpu
592 // architectures. 661 // architectures.
593 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, 662 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
594 "ActivityData.data is not 64-bit aligned"); 663 "ActivityData.data is not 64-bit aligned");
595 664
596 // Provided memory should either be completely initialized or all zeros. 665 // Provided memory should either be completely initialized or all zeros.
597 if (header_->cookie.load(std::memory_order_relaxed) == 0) { 666 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
598 // This is a new file. Double-check other fields and then initialize. 667 // This is a new file. Double-check other fields and then initialize.
599 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); 668 DCHECK_EQ(0, header_->owner.process_id);
669 DCHECK_EQ(0, header_->owner.create_stamp);
600 DCHECK_EQ(0, header_->thread_ref.as_id); 670 DCHECK_EQ(0, header_->thread_ref.as_id);
601 DCHECK_EQ(0, header_->start_time); 671 DCHECK_EQ(0, header_->start_time);
602 DCHECK_EQ(0, header_->start_ticks); 672 DCHECK_EQ(0, header_->start_ticks);
603 DCHECK_EQ(0U, header_->stack_slots); 673 DCHECK_EQ(0U, header_->stack_slots);
604 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); 674 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
605 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); 675 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
606 DCHECK_EQ(0, stack_[0].time_internal); 676 DCHECK_EQ(0, stack_[0].time_internal);
607 DCHECK_EQ(0U, stack_[0].origin_address); 677 DCHECK_EQ(0U, stack_[0].origin_address);
608 DCHECK_EQ(0U, stack_[0].call_stack[0]); 678 DCHECK_EQ(0U, stack_[0].call_stack[0]);
609 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); 679 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
610 680
611 #if defined(OS_WIN) 681 #if defined(OS_WIN)
612 header_->thread_ref.as_tid = PlatformThread::CurrentId(); 682 header_->thread_ref.as_tid = PlatformThread::CurrentId();
613 #elif defined(OS_POSIX) 683 #elif defined(OS_POSIX)
614 header_->thread_ref.as_handle = 684 header_->thread_ref.as_handle =
615 PlatformThread::CurrentHandle().platform_handle(); 685 PlatformThread::CurrentHandle().platform_handle();
616 #endif 686 #endif
617 header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
618 687
619 header_->start_time = base::Time::Now().ToInternalValue(); 688 header_->start_time = base::Time::Now().ToInternalValue();
620 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); 689 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
621 header_->stack_slots = stack_slots_; 690 header_->stack_slots = stack_slots_;
622 strlcpy(header_->thread_name, PlatformThread::GetName(), 691 strlcpy(header_->thread_name, PlatformThread::GetName(),
623 sizeof(header_->thread_name)); 692 sizeof(header_->thread_name));
624 693
625 // This is done last so as to guarantee that everything above is "released" 694 // This is done last so as to guarantee that everything above is "released"
626 // by the time this value gets written. 695 // by the time this value gets written.
627 header_->cookie.store(kHeaderCookie, std::memory_order_release); 696 header_->owner.Release_Initialize();
628 697
629 valid_ = true; 698 valid_ = true;
630 DCHECK(IsValid()); 699 DCHECK(IsValid());
631 } else { 700 } else {
632 // This is a file with existing data. Perform basic consistency checks. 701 // This is a file with existing data. Perform basic consistency checks.
633 valid_ = true; 702 valid_ = true;
634 valid_ = IsValid(); 703 valid_ = IsValid();
635 } 704 }
636 } 705 }
637 706
(...skipping 124 matching lines...)
762 ActivityId id, 831 ActivityId id,
763 ActivityTrackerMemoryAllocator* allocator) { 832 ActivityTrackerMemoryAllocator* allocator) {
764 // User-data is only stored for activities actually held in the stack. 833 // User-data is only stored for activities actually held in the stack.
765 if (id < stack_slots_ && stack_[id].user_data_ref) { 834 if (id < stack_slots_ && stack_[id].user_data_ref) {
766 allocator->ReleaseObjectReference(stack_[id].user_data_ref); 835 allocator->ReleaseObjectReference(stack_[id].user_data_ref);
767 stack_[id].user_data_ref = 0; 836 stack_[id].user_data_ref = 0;
768 } 837 }
769 } 838 }
770 839
771 bool ThreadActivityTracker::IsValid() const { 840 bool ThreadActivityTracker::IsValid() const {
772 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 841 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
773 header_->process_id.load(std::memory_order_relaxed) == 0 || 842 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
774 header_->thread_ref.as_id == 0 || 843 header_->start_time == 0 || header_->start_ticks == 0 ||
775 header_->start_time == 0 ||
776 header_->start_ticks == 0 ||
777 header_->stack_slots != stack_slots_ || 844 header_->stack_slots != stack_slots_ ||
778 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 845 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
779 return false; 846 return false;
780 } 847 }
781 848
782 return valid_; 849 return valid_;
783 } 850 }
784 851
785 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { 852 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
786 DCHECK(output_snapshot); 853 DCHECK(output_snapshot);
(...skipping 10 matching lines...)
797 // Stop here if the data isn't valid. 864 // Stop here if the data isn't valid.
798 if (!IsValid()) 865 if (!IsValid())
799 return false; 866 return false;
800 867
801 // Allocate the maximum size for the stack so it doesn't have to be done 868 // Allocate the maximum size for the stack so it doesn't have to be done
802 // during the time-sensitive snapshot operation. It is shrunk once the 869 // during the time-sensitive snapshot operation. It is shrunk once the
803 // actual size is known. 870 // actual size is known.
804 output_snapshot->activity_stack.reserve(stack_slots_); 871 output_snapshot->activity_stack.reserve(stack_slots_);
805 872
806 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { 873 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
807 // Remember the process and thread IDs to ensure they aren't replaced 874 // Remember the data IDs to ensure nothing is replaced during the snapshot
808 // during the snapshot operation. Use "acquire" to ensure that all the 875 // operation. Use "acquire" so that all the non-atomic fields of the
809 // non-atomic fields of the structure are valid (at least at the current 876 // structure are valid (at least at the current moment in time).
810 // moment in time). 877 const uint32_t starting_id =
811 const int64_t starting_process_id = 878 header_->owner.data_id.load(std::memory_order_acquire);
812 header_->process_id.load(std::memory_order_acquire); 879 const int64_t starting_process_id = header_->owner.process_id;
813 const int64_t starting_thread_id = header_->thread_ref.as_id; 880 const int64_t starting_thread_id = header_->thread_ref.as_id;
814 881
815 // Write a non-zero value to |stack_unchanged| so it's possible to detect 882 // Write a non-zero value to |stack_unchanged| so it's possible to detect
816 // at the end that nothing has changed since copying the data began. A 883 // at the end that nothing has changed since copying the data began. A
817 // "cst" operation is required to ensure it occurs before everything else. 884 // "cst" operation is required to ensure it occurs before everything else.
818 // Using "cst" memory ordering is relatively expensive but this is only 885 // Using "cst" memory ordering is relatively expensive but this is only
819 // done during analysis so doesn't directly affect the worker threads. 886 // done during analysis so doesn't directly affect the worker threads.
820 header_->stack_unchanged.store(1, std::memory_order_seq_cst); 887 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
821 888
822 // Fetching the current depth also "acquires" the contents of the stack. 889 // Fetching the current depth also "acquires" the contents of the stack.
823 depth = header_->current_depth.load(std::memory_order_acquire); 890 depth = header_->current_depth.load(std::memory_order_acquire);
824 uint32_t count = std::min(depth, stack_slots_); 891 uint32_t count = std::min(depth, stack_slots_);
825 output_snapshot->activity_stack.resize(count); 892 output_snapshot->activity_stack.resize(count);
826 if (count > 0) { 893 if (count > 0) {
827 // Copy the existing contents. Memcpy is used for speed. 894 // Copy the existing contents. Memcpy is used for speed.
828 memcpy(&output_snapshot->activity_stack[0], stack_, 895 memcpy(&output_snapshot->activity_stack[0], stack_,
829 count * sizeof(Activity)); 896 count * sizeof(Activity));
830 } 897 }
831 898
832 // Retry if something changed during the copy. A "cst" operation ensures 899 // Retry if something changed during the copy. A "cst" operation ensures
833 // it must happen after all the above operations. 900 // it must happen after all the above operations.
834 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) 901 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
835 continue; 902 continue;
836 903
837 // Stack copied. Record its full depth. 904 // Stack copied. Record its full depth.
838 output_snapshot->activity_stack_depth = depth; 905 output_snapshot->activity_stack_depth = depth;
839 906
840 // TODO(bcwhite): Snapshot other things here. 907 // TODO(bcwhite): Snapshot other things here.
841 908
842 // Get the general thread information. Loading of "process_id" is guaranteed 909 // Get the general thread information.
843 // to be last so that it's possible to detect below if any content has
844 // changed while reading it. It's technically possible for a thread to end,
845 // have its data cleared, a new thread get created with the same IDs, and
846 // it perform an action which starts tracking all in the time since the
847 // ID reads above but the chance is so unlikely that it's not worth the
848 // effort and complexity of protecting against it (perhaps with an
849 // "unchanged" field like is done for the stack).
850 output_snapshot->thread_name = 910 output_snapshot->thread_name =
851 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); 911 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
852 output_snapshot->thread_id = header_->thread_ref.as_id; 912 output_snapshot->thread_id = header_->thread_ref.as_id;
853 output_snapshot->process_id = 913 output_snapshot->process_id = header_->owner.process_id;
854 header_->process_id.load(std::memory_order_seq_cst);
855 914
856 // All characters of the thread-name buffer were copied so as to not break 915 // All characters of the thread-name buffer were copied so as to not break
857 // if the trailing NUL were missing. Now limit the length if the actual 916 // if the trailing NUL were missing. Now limit the length if the actual
858 // name is shorter. 917 // name is shorter.
859 output_snapshot->thread_name.resize( 918 output_snapshot->thread_name.resize(
860 strlen(output_snapshot->thread_name.c_str())); 919 strlen(output_snapshot->thread_name.c_str()));
861 920
862 // If the process or thread ID has changed then the tracker has exited and 921 // If the data ID has changed then the tracker has exited and the memory
863 // the memory reused by a new one. Try again. 922 // reused by a new one. Try again.
864 if (output_snapshot->process_id != starting_process_id || 923 if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
924 output_snapshot->process_id != starting_process_id ||
865 output_snapshot->thread_id != starting_thread_id) { 925 output_snapshot->thread_id != starting_thread_id) {
866 continue; 926 continue;
867 } 927 }
868 928
869 // Only successful if the data is still valid once everything is done since 929 // Only successful if the data is still valid once everything is done since
870 // it's possible for the thread to end somewhere in the middle and all its 930 // it's possible for the thread to end somewhere in the middle and all its
871 // values become garbage. 931 // values become garbage.
872 if (!IsValid()) 932 if (!IsValid())
873 return false; 933 return false;
874 934
875 // Change all the timestamps in the activities from "ticks" to "wall" time. 935 // Change all the timestamps in the activities from "ticks" to "wall" time.
876 const Time start_time = Time::FromInternalValue(header_->start_time); 936 const Time start_time = Time::FromInternalValue(header_->start_time);
877 const int64_t start_ticks = header_->start_ticks; 937 const int64_t start_ticks = header_->start_ticks;
878 for (Activity& activity : output_snapshot->activity_stack) { 938 for (Activity& activity : output_snapshot->activity_stack) {
879 activity.time_internal = 939 activity.time_internal =
880 (start_time + 940 (start_time +
881 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) 941 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
882 .ToInternalValue(); 942 .ToInternalValue();
883 } 943 }
884 944
885 // Success! 945 // Success!
886 return true; 946 return true;
887 } 947 }
888 948
889 // Too many attempts. 949 // Too many attempts.
890 return false; 950 return false;
891 } 951 }
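
The retry loop above is essentially a sequence-lock read: set a flag, copy everything, then confirm no writer cleared the flag (and no identity changed) during the copy. A compact sketch of that read protocol with hypothetical names; the writer side would zero |unchanged| on every mutation:

#include <atomic>
#include <cstring>

struct Shared {
  std::atomic<uint32_t> unchanged{0};  // Zeroed by the writer on mutation.
  char data[64] = {};
};

// Copies |s.data| into |out|, retrying if a writer races the copy.
bool SnapshotData(Shared& s, char (&out)[64], int max_attempts = 16) {
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    s.unchanged.store(1, std::memory_order_seq_cst);
    memcpy(out, s.data, sizeof(out));
    if (s.unchanged.load(std::memory_order_seq_cst))
      return true;  // Nothing was popped or recycled mid-copy.
  }
  return false;  // Contended for too long.
}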
892 952
953 const void* ThreadActivityTracker::GetBaseAddress() {
954 return header_;
955 }
956
957 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
958 int64_t stamp) {
959 header_->owner.SetOwningProcessIdForTesting(pid, stamp);
960 }
961
962 // static
963 bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
964 ProcessId* out_id,
965 int64_t* out_stamp) {
966 const Header* header = reinterpret_cast<const Header*>(memory);
967 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
968 }
969
893 // static 970 // static
894 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 971 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
895 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 972 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
896 } 973 }
897 974
898 // The instantiation of the GlobalActivityTracker object. 975 // The instantiation of the GlobalActivityTracker object.
899 // The object held here will obviously not be destructed at process exit 976 // The object held here will obviously not be destructed at process exit
900 // but that's best since PersistentMemoryAllocator objects (that underlie 977 // but that's best since PersistentMemoryAllocator objects (that underlie
901 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 978 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
902 // essential at exit anyway due to the fact that they depend on data managed 979 // essential at exit anyway due to the fact that they depend on data managed
(...skipping 67 matching lines...)
970 // These fields never change and are done before the record is made 1047 // These fields never change and are done before the record is made
971 // iterable so no thread protection is necessary. 1048 // iterable so no thread protection is necessary.
972 size = info.size; 1049 size = info.size;
973 timestamp = info.timestamp; 1050 timestamp = info.timestamp;
974 age = info.age; 1051 age = info.age;
975 memcpy(identifier, info.identifier, sizeof(identifier)); 1052 memcpy(identifier, info.identifier, sizeof(identifier));
976 memcpy(pickle, pickler.data(), pickler.size()); 1053 memcpy(pickle, pickler.data(), pickler.size());
977 pickle_size = pickler.size(); 1054 pickle_size = pickler.size();
978 changes.store(0, std::memory_order_relaxed); 1055 changes.store(0, std::memory_order_relaxed);
979 1056
1057 // Initialize the owner info.
1058 owner.Release_Initialize();
1059
980 // Now set those fields that can change. 1060 // Now set those fields that can change.
981 return UpdateFrom(info); 1061 return UpdateFrom(info);
982 } 1062 }
983 1063
984 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( 1064 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
985 const GlobalActivityTracker::ModuleInfo& info) { 1065 const GlobalActivityTracker::ModuleInfo& info) {
986 // Updates can occur after the record is made visible so make changes atomic. 1066 // Updates can occur after the record is made visible so make changes atomic.
987 // A "strong" exchange ensures no false failures. 1067 // A "strong" exchange ensures no false failures.
988 uint32_t old_changes = changes.load(std::memory_order_relaxed); 1068 uint32_t old_changes = changes.load(std::memory_order_relaxed);
989 uint32_t new_changes = old_changes | kModuleInformationChanging; 1069 uint32_t new_changes = old_changes | kModuleInformationChanging;
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
1044 AutoLock lock(global->user_data_allocator_lock_); 1124 AutoLock lock(global->user_data_allocator_lock_);
1045 user_data_ = 1125 user_data_ =
1046 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); 1126 tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1047 } else { 1127 } else {
1048 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); 1128 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
1049 } 1129 }
1050 } 1130 }
1051 return *user_data_; 1131 return *user_data_;
1052 } 1132 }
1053 1133
1054 GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) 1134 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1135 size_t size)
1055 : ActivityUserData(memory, size) {} 1136 : ActivityUserData(memory, size) {}
1056 1137
1057 GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} 1138 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
1058 1139
1059 void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, 1140 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
1060 ValueType type, 1141 ValueType type,
1061 const void* memory, 1142 const void* memory,
1062 size_t size) { 1143 size_t size) {
1063 AutoLock lock(data_lock_); 1144 AutoLock lock(data_lock_);
1064 ActivityUserData::Set(name, type, memory, size); 1145 ActivityUserData::Set(name, type, memory, size);
1065 } 1146 }
1066 1147
1067 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( 1148 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
1068 PersistentMemoryAllocator::Reference mem_reference, 1149 PersistentMemoryAllocator::Reference mem_reference,
1069 void* base, 1150 void* base,
1070 size_t size) 1151 size_t size)
1071 : ThreadActivityTracker(base, size), 1152 : ThreadActivityTracker(base, size),
1072 mem_reference_(mem_reference), 1153 mem_reference_(mem_reference),
(...skipping 104 matching lines...)
1177 return tracker; 1258 return tracker;
1178 } 1259 }
1179 1260
1180 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 1261 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1181 ThreadActivityTracker* tracker = 1262 ThreadActivityTracker* tracker =
1182 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 1263 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1183 if (tracker) 1264 if (tracker)
1184 delete tracker; 1265 delete tracker;
1185 } 1266 }
1186 1267
1268 void GlobalActivityTracker::SetBackgroundTaskRunner(
1269 const scoped_refptr<TaskRunner>& runner) {
1270 AutoLock lock(global_tracker_lock_);
1271 background_task_runner_ = runner;
1272 }
1273
1274 void GlobalActivityTracker::SetProcessExitCallback(
1275 ProcessExitCallback callback) {
1276 AutoLock lock(global_tracker_lock_);
1277 process_exit_callback_ = callback;
1278 }
1279
1280 void GlobalActivityTracker::RecordProcessLaunch(
1281 ProcessId process_id,
1282 const FilePath::StringType& cmd) {
1283 DCHECK_NE(GetCurrentProcId(), process_id);
1284
1285 base::AutoLock lock(global_tracker_lock_);
1286 if (base::ContainsKey(known_processes_, process_id)) {
1287 // TODO(bcwhite): Measure this in UMA.
1288 NOTREACHED() << "Process #" << process_id
1289 << " was previously recorded as \"launched\""
1290 << " with no corresponding exit.";
1291 known_processes_.erase(process_id);
1292 }
1293
1294 #if defined(OS_WIN)
1295 known_processes_.insert(std::make_pair(process_id, UTF16ToUTF8(cmd)));
1296 #else
1297 known_processes_.insert(std::make_pair(process_id, cmd));
1298 #endif
1299 }
1300
1301 void GlobalActivityTracker::RecordProcessLaunch(
1302 ProcessId process_id,
1303 const FilePath::StringType& exe,
1304 const FilePath::StringType& args) {
1305 if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
1306 RecordProcessLaunch(process_id,
1307 FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
1308 FILE_PATH_LITERAL("\" ") + args);
1309 } else {
1310 RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
1311 }
1312 }
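
A hypothetical call site for the two-argument overload above (|child_pid| and the paths are assumed, not from this patch): an executable path containing a space is quoted before the arguments are appended.

// Records the launch as: "C:\Program Files\app.exe" --type=renderer
void OnChildLaunched(ProcessId child_pid) {
  GlobalActivityTracker::Get()->RecordProcessLaunch(
      child_pid, FILE_PATH_LITERAL("C:\\Program Files\\app.exe"),
      FILE_PATH_LITERAL("--type=renderer"));
}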
1313
1314 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1315 int exit_code) {
1316 DCHECK_NE(GetCurrentProcId(), process_id);
1317
1318 scoped_refptr<TaskRunner> task_runner;
1319 std::string command_line;
1320 {
1321 base::AutoLock lock(global_tracker_lock_);
1322 task_runner = background_task_runner_;
1323 auto found = known_processes_.find(process_id);
1324 if (found != known_processes_.end()) {
1325 command_line = std::move(found->second);
1326 known_processes_.erase(found);
1327 } else {
1328 DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
1329 }
1330 }
1331
1332 // Use the current time to differentiate the process that just exited
1333 // from any that might be created in the future with the same ID.
1334 int64_t now_stamp = Time::Now().ToInternalValue();
1335
1336 // The persistent allocator is thread-safe so run the iteration and
1337 // adjustments on a worker thread if one was provided.
1338 if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
1339 task_runner->PostTask(
1340 FROM_HERE,
1341 Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
1342 process_id, now_stamp, exit_code, Passed(&command_line)));
1343 return;
1344 }
1345
1346 CleanupAfterProcess(process_id, now_stamp, exit_code,
1347 std::move(command_line));
1348 }
1349
1350 void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
1351 process_data().SetInt(kProcessPhaseDataKey, phase);
1352 }
1353
1354 void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
1355 int64_t exit_stamp,
1356 int exit_code,
1357 std::string&& command_line) {
1358 // The process may not have exited cleanly so it's necessary to go through
1359 // all the data structures it may have allocated in the persistent memory
1360 // segment and mark them as "released". This will allow them to be reused
1361 // later on.
1362
1363 PersistentMemoryAllocator::Iterator iter(allocator_.get());
1364 PersistentMemoryAllocator::Reference ref;
1365
1366 ProcessExitCallback process_exit_callback;
1367 {
1368 AutoLock lock(global_tracker_lock_);
1369 process_exit_callback = process_exit_callback_;
1370 }
1371 if (process_exit_callback) {
1372 // Find the process's user-data record so the process phase can be passed
1373 // to the callback.
1374 ActivityUserData::Snapshot process_data_snapshot;
1375 while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
1376 const void* memory = allocator_->GetAsArray<char>(
1377 ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
1378 ProcessId found_id;
1379 int64_t create_stamp;
1380 if (ActivityUserData::GetOwningProcessId(memory, &found_id,
1381 &create_stamp)) {
1382 if (found_id == process_id && create_stamp < exit_stamp) {
1383 const ActivityUserData process_data(const_cast<void*>(memory),
1384 allocator_->GetAllocSize(ref));
1385 process_data.CreateSnapshot(&process_data_snapshot);
1386 break; // No need to look for any others.
1387 }
1388 }
1389 }
1390 iter.Reset(); // So it starts anew when used below.
1391
1392 // Record the process's phase at exit so the callback doesn't need to
1393 // go searching based on a private key value.
manzagop (departed) 2017/03/07 22:08:57 nit: formatting
bcwhite 2017/03/09 14:07:54 Done.
1395 ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
1396 auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
1397 if (phase != process_data_snapshot.end())
1398 exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
1399
1400 // Perform the callback.
1401 process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
1402 std::move(command_line),
1403 std::move(process_data_snapshot));
1404 }
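// Any registered callback has now been notified with the exited process's
// phase, command line, and user-data snapshot; the loop below then marks
// that process's persistent allocations as free so they can be reused.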
1405
1406 // Find all allocations associated with the exited process and free them.
1407 uint32_t type;
1408 while ((ref = iter.GetNext(&type)) != 0) {
1409 switch (type) {
1410 case kTypeIdActivityTracker:
1411 case kTypeIdUserDataRecord:
1412 case kTypeIdProcessDataRecord:
1413 case ModuleInfoRecord::kPersistentTypeId: {
1414 const void* memory = allocator_->GetAsArray<char>(
1415 ref, type, PersistentMemoryAllocator::kSizeAny);
1416 ProcessId found_id;
1417 int64_t create_stamp;
1418
1419 // By convention, the OwningProcess structure is always the first
1420 // field of each of these structures, so there's no need to handle
1421 // the various record types separately.
1422 if (OwningProcess::GetOwningProcessId(memory, &found_id,
1423 &create_stamp)) {
1424 // Only change the type to be "free" if the process ID matches and
1425 // the creation time is before the exit time (so PID re-use doesn't
1426 // cause the erasure of something that is in-use). Memory is cleared
1427 // here, rather than when it's needed, so as to limit the impact at
1428 // that critical time.
1429 if (found_id == process_id && create_stamp < exit_stamp)
1430 allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1431 }
1432 } break;
1433 }
1434 }
1435 }
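For the callback branch above to do anything, an embedder must have registered a ProcessExitCallback beforehand. A hedged sketch, assuming a setter such as SetProcessExitCallback() is exposed in activity_tracker.h; the handler name is illustrative and its parameter list is inferred from the Run() call above, so the real typedef may differ:

// Hypothetical handler for exits of tracked processes.
void OnTrackedProcessExit(ProcessId process_id,
                          int64_t exit_stamp,
                          int exit_code,
                          GlobalActivityTracker::ProcessPhase exit_phase,
                          std::string&& command_line,
                          ActivityUserData::Snapshot&& process_data) {
  // Forward to stability or crash reporting as appropriate.
}

// Registration at startup, assuming a SetProcessExitCallback() setter.
GlobalActivityTracker::Get()->SetProcessExitCallback(
    Bind(&OnTrackedProcessExit));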
1436
1437 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1438 // Allocate at least one extra byte so the string is NUL terminated. All
1439 // memory returned by the allocator is guaranteed to be zeroed.
1440 PersistentMemoryAllocator::Reference ref =
1441 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1442 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1443 message.size() + 1);
1444 if (memory) {
1445 memcpy(memory, message.data(), message.size());
1446 allocator_->MakeIterable(ref);
(...skipping 43 matching lines...)
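RecordLogMessage(), begun above (its remainder falls in the elided span), stores a copy of the message in the persistent segment so it survives a crash of this process. A hypothetical call site; the message text is illustrative:

GlobalActivityTracker* tracker = GlobalActivityTracker::Get();
if (tracker)
  tracker->RecordLogMessage("Lost connection to GPU process");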
1490 kTypeIdActivityTracker,
1491 kTypeIdActivityTrackerFree,
1492 stack_memory_size_,
1493 kCachedThreadMemories,
1494 /*make_iterable=*/true),
1495 user_data_allocator_(allocator_.get(),
1496 kTypeIdUserDataRecord,
1497 kTypeIdUserDataRecordFree,
1498 kUserDataSize,
1499 kCachedUserDataMemories,
1500 /*make_iterable=*/true),
1501 process_data_(allocator_->GetAsArray<char>(
1502 AllocateFrom(allocator_.get(),
1503 kTypeIdProcessDataRecordFree,
1504 kProcessDataSize,
1505 kTypeIdProcessDataRecord),
1506 kTypeIdProcessDataRecord,
1507 kProcessDataSize),
1508 kProcessDataSize),
1509 global_data_(
1510 allocator_->GetAsArray<char>(
1511 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
1512 kTypeIdGlobalDataRecord,
1513 kGlobalDataSize),
1514 kGlobalDataSize) {
1515 // Ensure the passed memory is valid and empty (iterator finds nothing).
1516 uint32_t type;
1517 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
1518
1519 // Ensure that there is no other global object and then make this one such.
1520 DCHECK(!g_tracker_);
1521 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1522
1523 // The data records must be iterable in order to be found by an analyzer.
1524 allocator_->MakeIterable(allocator_->GetAsReference(
1525 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1526 allocator_->MakeIterable(allocator_->GetAsReference(
1527 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
1528
1529 // Note that this process has launched.
1530 SetProcessPhase(PROCESS_LAUNCHED);
1531
1532 // Fetch and record all activated field trials.
1533 FieldTrial::ActiveGroups active_groups;
1534 FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1535 for (auto& group : active_groups)
1536 RecordFieldTrial(group.trial_name, group.group_name);
1537 }
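Once constructed, the tracker's per-process record is immediately usable: arbitrary key/value pairs can be attached and are visible to analyzers alongside the PROCESS_LAUNCHED phase set above. A small sketch; the keys and values are illustrative, and SetString is assumed to be available on ActivityUserData alongside the SetInt used earlier:

ActivityUserData& data = GlobalActivityTracker::Get()->process_data();
data.SetString("channel", "canary");  // Hypothetical key/value pair.
data.SetInt("launch-count", 1);       // Hypothetical key/value pair.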
1538
1539 GlobalActivityTracker::~GlobalActivityTracker() {
1540 DCHECK_EQ(Get(), this);
1541 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
(...skipping 99 matching lines...)
1641 : GlobalActivityTracker::ScopedThreadActivity(
1642 program_counter,
1643 nullptr,
1644 Activity::ACT_PROCESS_WAIT,
1645 ActivityData::ForProcess(process->Pid()),
1646 /*lock_allowed=*/true) {}
1647 #endif
1648
1649 }  // namespace debug
1650 }  // namespace base