Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(213)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2680123003: Multi-Process Tracking Support (Closed)
Patch Set: move tracking from target_process to sandbox_win Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/debug/stack_trace.h" 12 #include "base/debug/stack_trace.h"
13 #include "base/files/file.h" 13 #include "base/files/file.h"
14 #include "base/files/file_path.h" 14 #include "base/files/file_path.h"
15 #include "base/files/memory_mapped_file.h" 15 #include "base/files/memory_mapped_file.h"
16 #include "base/logging.h" 16 #include "base/logging.h"
17 #include "base/memory/ptr_util.h" 17 #include "base/memory/ptr_util.h"
18 #include "base/metrics/field_trial.h" 18 #include "base/metrics/field_trial.h"
19 #include "base/metrics/histogram_macros.h" 19 #include "base/metrics/histogram_macros.h"
20 #include "base/pending_task.h" 20 #include "base/pending_task.h"
21 #include "base/pickle.h" 21 #include "base/pickle.h"
22 #include "base/process/process.h" 22 #include "base/process/process.h"
23 #include "base/process/process_handle.h" 23 #include "base/process/process_handle.h"
24 #include "base/stl_util.h" 24 #include "base/stl_util.h"
25 #include "base/strings/string_util.h" 25 #include "base/strings/string_util.h"
26 #include "base/strings/utf_string_conversions.h"
26 #include "base/threading/platform_thread.h" 27 #include "base/threading/platform_thread.h"
27 28
28 namespace base { 29 namespace base {
29 namespace debug { 30 namespace debug {
30 31
31 namespace { 32 namespace {
32 33
33 // A number that identifies the memory as having been initialized. It's
34 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
35 // A version number is added on so that major structure changes won't try to
36 // read an older version (since the cookie won't match).
37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
38
39 // The minimum depth a stack should support. 34 // The minimum depth a stack should support.
40 const int kMinStackDepth = 2; 35 const int kMinStackDepth = 2;
41 36
42 // The amount of memory set aside for holding arbitrary user data (key/value 37 // The amount of memory set aside for holding arbitrary user data (key/value
43 // pairs) globally or associated with ActivityData entries. 38 // pairs) globally or associated with ActivityData entries.
44 const size_t kUserDataSize = 1 << 10; // 1 KiB 39 const size_t kUserDataSize = 1 << 10; // 1 KiB
40 const size_t kProcessDataSize = 4 << 10; // 4 KiB
45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB 41 const size_t kGlobalDataSize = 16 << 10; // 16 KiB
46 const size_t kMaxUserDataNameLength = 42 const size_t kMaxUserDataNameLength =
47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); 43 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
48 44
49 // A constant used to indicate that module information is changing. 45 // A constant used to indicate that module information is changing.
50 const uint32_t kModuleInformationChanging = 0x80000000; 46 const uint32_t kModuleInformationChanging = 0x80000000;
51 47
48 // The key used to record process information.
49 const char kProcessPhaseDataKey[] = "process-phase";
50
51 // An atomically incrementing number, used to check for recreations of objects
52 // in the same memory space.
53 StaticAtomicSequenceNumber g_next_id;
54
52 union ThreadRef { 55 union ThreadRef {
53 int64_t as_id; 56 int64_t as_id;
54 #if defined(OS_WIN) 57 #if defined(OS_WIN)
55 // On Windows, the handle itself is often a pseudo-handle with a common 58 // On Windows, the handle itself is often a pseudo-handle with a common
56 // value meaning "this thread" and so the thread-id is used. The former 59 // value meaning "this thread" and so the thread-id is used. The former
57 // can be converted to a thread-id with a system call. 60 // can be converted to a thread-id with a system call.
58 PlatformThreadId as_tid; 61 PlatformThreadId as_tid;
59 #elif defined(OS_POSIX) 62 #elif defined(OS_POSIX)
60 // On Posix, the handle is always a unique identifier so no conversion 63 // On Posix, the handle is always a unique identifier so no conversion
61 // needs to be done. However, its value is officially opaque so there 64 // needs to be done. However, its value is officially opaque so there
62 // is no one correct way to convert it to a numerical identifier. 65 // is no one correct way to convert it to a numerical identifier.
63 PlatformThreadHandle::Handle as_handle; 66 PlatformThreadHandle::Handle as_handle;
64 #endif 67 #endif
65 }; 68 };
66 69
70 // Get the next non-zero identifier. It is only unique within a process.
71 uint32_t GetNextDataId() {
72 uint32_t id;
73 while ((id = g_next_id.GetNext()) == 0)
74 ;
75 return id;
76 }
77
78 // Finds and reuses a specific allocation or creates a new one.
79 PersistentMemoryAllocator::Reference AllocateFrom(
80 PersistentMemoryAllocator* allocator,
81 uint32_t from_type,
82 size_t size,
83 uint32_t to_type) {
84 PersistentMemoryAllocator::Iterator iter(allocator);
85 PersistentMemoryAllocator::Reference ref;
86 while ((ref = iter.GetNextOfType(from_type)) != 0) {
87 DCHECK_LE(size, allocator->GetAllocSize(ref));
88 // This can fail if another thread has just taken it. It is assumed that
89 // the memory is cleared during the "free" operation.
90 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
91 return ref;
92 }
93
94 return allocator->Allocate(size, to_type);
95 }
96
67 // Determines the previous aligned index. 97 // Determines the previous aligned index.
68 size_t RoundDownToAlignment(size_t index, size_t alignment) { 98 size_t RoundDownToAlignment(size_t index, size_t alignment) {
69 return index & (0 - alignment); 99 return index & (0 - alignment);
70 } 100 }
71 101
72 // Determines the next aligned index. 102 // Determines the next aligned index.
73 size_t RoundUpToAlignment(size_t index, size_t alignment) { 103 size_t RoundUpToAlignment(size_t index, size_t alignment) {
74 return (index + (alignment - 1)) & (0 - alignment); 104 return (index + (alignment - 1)) & (0 - alignment);
75 } 105 }
76 106
77 } // namespace 107 } // namespace
78 108
109 OwningProcess::OwningProcess() {}
110 OwningProcess::~OwningProcess() {}
111
112 void OwningProcess::Release_Initialize() {
113 uint32_t old_id = data_id.load(std::memory_order_acquire);
114 DCHECK_EQ(0U, old_id);
115 process_id = GetCurrentProcId();
116 create_stamp = Time::Now().ToInternalValue();
117 data_id.store(GetNextDataId(), std::memory_order_release);
118 }
119
120 void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
121 DCHECK_NE(0U, data_id);
122 process_id = pid;
123 create_stamp = stamp;
124 }
125
126 // static
127 bool OwningProcess::GetOwningProcessId(const void* memory,
128 ProcessId* out_id,
129 int64_t* out_stamp) {
130 const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
131 uint32_t id = info->data_id.load(std::memory_order_acquire);
132 if (id == 0)
133 return false;
134
135 *out_id = static_cast<ProcessId>(info->process_id);
136 *out_stamp = info->create_stamp;
137 return id == info->data_id.load(std::memory_order_seq_cst);
138 }
79 139
80 // It doesn't matter what is contained in this (though it will be all zeros) 140 // It doesn't matter what is contained in this (though it will be all zeros)
81 // as only the address of it is important. 141 // as only the address of it is important.
82 const ActivityData kNullActivityData = {}; 142 const ActivityData kNullActivityData = {};
83 143
84 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 144 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
85 ThreadRef thread_ref; 145 ThreadRef thread_ref;
86 thread_ref.as_id = 0; // Zero the union in case other is smaller. 146 thread_ref.as_id = 0; // Zero the union in case other is smaller.
87 #if defined(OS_WIN) 147 #if defined(OS_WIN)
88 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 148 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
239 StringPiece ActivityUserData::TypedValue::GetReference() const { 299 StringPiece ActivityUserData::TypedValue::GetReference() const {
240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); 300 DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
241 return ref_value_; 301 return ref_value_;
242 } 302 }
243 303
244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { 304 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); 305 DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
246 return ref_value_; 306 return ref_value_;
247 } 307 }
248 308
309 // These are required because std::atomic is (currently) not a POD type and
310 // thus clang requires explicit out-of-line constructors and destructors even
311 // when they do nothing.
249 ActivityUserData::ValueInfo::ValueInfo() {} 312 ActivityUserData::ValueInfo::ValueInfo() {}
250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 313 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
251 ActivityUserData::ValueInfo::~ValueInfo() {} 314 ActivityUserData::ValueInfo::~ValueInfo() {}
252 315 ActivityUserData::MemoryHeader::MemoryHeader() {}
253 StaticAtomicSequenceNumber ActivityUserData::next_id_; 316 ActivityUserData::MemoryHeader::~MemoryHeader() {}
317 ActivityUserData::FieldHeader::FieldHeader() {}
318 ActivityUserData::FieldHeader::~FieldHeader() {}
254 319
255 ActivityUserData::ActivityUserData(void* memory, size_t size) 320 ActivityUserData::ActivityUserData(void* memory, size_t size)
256 : memory_(reinterpret_cast<char*>(memory)), 321 : memory_(reinterpret_cast<char*>(memory)),
257 available_(RoundDownToAlignment(size, kMemoryAlignment)), 322 available_(RoundDownToAlignment(size, kMemoryAlignment)),
258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { 323 header_(reinterpret_cast<MemoryHeader*>(memory)) {
259 // It's possible that no user data is being stored. 324 // It's possible that no user data is being stored.
260 if (!memory_) 325 if (!memory_)
261 return; 326 return;
262 327
263 DCHECK_LT(kMemoryAlignment, available_); 328 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
264 if (id_->load(std::memory_order_relaxed) == 0) { 329 DCHECK_LT(sizeof(MemoryHeader), available_);
265 // Generate a new ID and store it in the first 32-bit word of memory_. 330 if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
266 // |id_| must be non-zero for non-sink instances. 331 header_->owner.Release_Initialize();
267 uint32_t id; 332 memory_ += sizeof(MemoryHeader);
268 while ((id = next_id_.GetNext()) == 0) 333 available_ -= sizeof(MemoryHeader);
269 ;
270 id_->store(id, std::memory_order_relaxed);
271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
272 }
273 memory_ += kMemoryAlignment;
274 available_ -= kMemoryAlignment;
275 334
276 // If there is already data present, load that. This allows the same class 335 // If there is already data present, load that. This allows the same class
277 // to be used for analysis through snapshots. 336 // to be used for analysis through snapshots.
278 ImportExistingData(); 337 ImportExistingData();
279 } 338 }
280 339
281 ActivityUserData::~ActivityUserData() {} 340 ActivityUserData::~ActivityUserData() {}
282 341
342 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
343 DCHECK(output_snapshot);
344 DCHECK(output_snapshot->empty());
345
346 // Find any new data that may have been added by an active instance of this
347 // class that is adding records.
348 ImportExistingData();
349
350 for (const auto& entry : values_) {
351 TypedValue value;
352 value.type_ = entry.second.type;
353 DCHECK_GE(entry.second.extent,
354 entry.second.size_ptr->load(std::memory_order_relaxed));
355
356 switch (entry.second.type) {
357 case RAW_VALUE:
358 case STRING_VALUE:
359 value.long_value_ =
360 std::string(reinterpret_cast<char*>(entry.second.memory),
361 entry.second.size_ptr->load(std::memory_order_relaxed));
362 break;
363 case RAW_VALUE_REFERENCE:
364 case STRING_VALUE_REFERENCE: {
365 ReferenceRecord* ref =
366 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
367 value.ref_value_ = StringPiece(
368 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
369 static_cast<size_t>(ref->size));
370 } break;
371 case BOOL_VALUE:
372 case CHAR_VALUE:
373 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
374 break;
375 case SIGNED_VALUE:
376 case UNSIGNED_VALUE:
377 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
378 break;
379 case END_OF_VALUES: // Included for completeness purposes.
380 NOTREACHED();
381 }
382 auto inserted = output_snapshot->insert(
383 std::make_pair(entry.second.name.as_string(), std::move(value)));
384 DCHECK(inserted.second); // True if inserted, false if existed.
385 }
386
387 return true;
388 }
389
390 const void* ActivityUserData::GetBaseAddress() const {
391 // The |memory_| pointer advances as elements are written but the |header_|
392 // value is always at the start of the block so just return that.
393 return header_;
394 }
395
396 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
397 int64_t stamp) {
398 if (!header_)
399 return;
400 header_->owner.SetOwningProcessIdForTesting(pid, stamp);
401 }
402
403 // static
404 bool ActivityUserData::GetOwningProcessId(const void* memory,
405 ProcessId* out_id,
406 int64_t* out_stamp) {
407 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
408 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
409 }
410
283 void ActivityUserData::Set(StringPiece name, 411 void ActivityUserData::Set(StringPiece name,
284 ValueType type, 412 ValueType type,
285 const void* memory, 413 const void* memory,
286 size_t size) { 414 size_t size) {
287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); 415 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), 416 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
289 size); 417 size);
290 418
291 // It's possible that no user data is being stored. 419 // It's possible that no user data is being stored.
292 if (!memory_) 420 if (!memory_)
293 return; 421 return;
294 422
295 // The storage of a name is limited so use that limit during lookup. 423 // The storage of a name is limited so use that limit during lookup.
296 if (name.length() > kMaxUserDataNameLength) 424 if (name.length() > kMaxUserDataNameLength)
297 name.set(name.data(), kMaxUserDataNameLength); 425 name.set(name.data(), kMaxUserDataNameLength);
298 426
299 ValueInfo* info; 427 ValueInfo* info;
300 auto existing = values_.find(name); 428 auto existing = values_.find(name);
301 if (existing != values_.end()) { 429 if (existing != values_.end()) {
302 info = &existing->second; 430 info = &existing->second;
303 } else { 431 } else {
304 // The name size is limited to what can be held in a single byte but 432 // The name size is limited to what can be held in a single byte but
305 // because there are no alignment constraints on strings, it's set tight 433 // because there are no alignment constraints on strings, it's set tight
306 // against the header. Its extent (the reserved space, even if it's not 434 // against the header. Its extent (the reserved space, even if it's not
307 // all used) is calculated so that, when pressed against the header, the 435 // all used) is calculated so that, when pressed against the header, the
308 // following field will be aligned properly. 436 // following field will be aligned properly.
309 size_t name_size = name.length(); 437 size_t name_size = name.length();
310 size_t name_extent = 438 size_t name_extent =
311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - 439 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
312 sizeof(Header); 440 sizeof(FieldHeader);
313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); 441 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
314 442
315 // The "base size" is the size of the header and (padded) string key. Stop 443 // The "base size" is the size of the header and (padded) string key. Stop
316 // now if there's not room enough for even this. 444 // now if there's not room enough for even this.
317 size_t base_size = sizeof(Header) + name_extent; 445 size_t base_size = sizeof(FieldHeader) + name_extent;
318 if (base_size > available_) 446 if (base_size > available_)
319 return; 447 return;
320 448
321 // The "full size" is the size for storing the entire value. 449 // The "full size" is the size for storing the entire value.
322 size_t full_size = std::min(base_size + value_extent, available_); 450 size_t full_size = std::min(base_size + value_extent, available_);
323 451
324 // If the value is actually a single byte, see if it can be stuffed at the 452 // If the value is actually a single byte, see if it can be stuffed at the
325 // end of the name extent rather than wasting kMemoryAlignment bytes. 453 // end of the name extent rather than wasting kMemoryAlignment bytes.
326 if (size == 1 && name_extent > name_size) { 454 if (size == 1 && name_extent > name_size) {
327 full_size = base_size; 455 full_size = base_size;
328 --name_extent; 456 --name_extent;
329 --base_size; 457 --base_size;
330 } 458 }
331 459
332 // Truncate the stored size to the amount of available memory. Stop now if 460 // Truncate the stored size to the amount of available memory. Stop now if
333 // there's not any room for even part of the value. 461 // there's not any room for even part of the value.
334 if (size != 0) { 462 if (size != 0) {
335 size = std::min(full_size - base_size, size); 463 size = std::min(full_size - base_size, size);
336 if (size == 0) 464 if (size == 0)
337 return; 465 return;
338 } 466 }
339 467
340 // Allocate a chunk of memory. 468 // Allocate a chunk of memory.
341 Header* header = reinterpret_cast<Header*>(memory_); 469 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
342 memory_ += full_size; 470 memory_ += full_size;
343 available_ -= full_size; 471 available_ -= full_size;
344 472
345 // Datafill the header and name records. Memory must be zeroed. The |type| 473 // Datafill the header and name records. Memory must be zeroed. The |type|
346 // is written last, atomically, to release all the other values. 474 // is written last, atomically, to release all the other values.
347 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); 475 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
348 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); 476 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
349 header->name_size = static_cast<uint8_t>(name_size); 477 header->name_size = static_cast<uint8_t>(name_size);
350 header->record_size = full_size; 478 header->record_size = full_size;
351 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); 479 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
352 void* value_memory = 480 void* value_memory =
353 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; 481 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
354 memcpy(name_memory, name.data(), name_size); 482 memcpy(name_memory, name.data(), name_size);
355 header->type.store(type, std::memory_order_release); 483 header->type.store(type, std::memory_order_release);
356 484
357 // Create an entry in |values_| so that this field can be found and changed 485 // Create an entry in |values_| so that this field can be found and changed
358 // later on without having to allocate new entries. 486 // later on without having to allocate new entries.
359 StringPiece persistent_name(name_memory, name_size); 487 StringPiece persistent_name(name_memory, name_size);
360 auto inserted = 488 auto inserted =
361 values_.insert(std::make_pair(persistent_name, ValueInfo())); 489 values_.insert(std::make_pair(persistent_name, ValueInfo()));
362 DCHECK(inserted.second); // True if inserted, false if existed. 490 DCHECK(inserted.second); // True if inserted, false if existed.
363 info = &inserted.first->second; 491 info = &inserted.first->second;
364 info->name = persistent_name; 492 info->name = persistent_name;
365 info->memory = value_memory; 493 info->memory = value_memory;
366 info->size_ptr = &header->value_size; 494 info->size_ptr = &header->value_size;
367 info->extent = full_size - sizeof(Header) - name_extent; 495 info->extent = full_size - sizeof(FieldHeader) - name_extent;
368 info->type = type; 496 info->type = type;
369 } 497 }
370 498
371 // Copy the value data to storage. The |size| is written last, atomically, to 499 // Copy the value data to storage. The |size| is written last, atomically, to
372 // release the copied data. Until then, a parallel reader will just ignore 500 // release the copied data. Until then, a parallel reader will just ignore
373 // records with a zero size. 501 // records with a zero size.
374 DCHECK_EQ(type, info->type); 502 DCHECK_EQ(type, info->type);
375 size = std::min(size, info->extent); 503 size = std::min(size, info->extent);
376 info->size_ptr->store(0, std::memory_order_seq_cst); 504 info->size_ptr->store(0, std::memory_order_seq_cst);
377 memcpy(info->memory, memory, size); 505 memcpy(info->memory, memory, size);
378 info->size_ptr->store(size, std::memory_order_release); 506 info->size_ptr->store(size, std::memory_order_release);
379 } 507 }
380 508
381 void ActivityUserData::SetReference(StringPiece name, 509 void ActivityUserData::SetReference(StringPiece name,
382 ValueType type, 510 ValueType type,
383 const void* memory, 511 const void* memory,
384 size_t size) { 512 size_t size) {
385 ReferenceRecord rec; 513 ReferenceRecord rec;
386 rec.address = reinterpret_cast<uintptr_t>(memory); 514 rec.address = reinterpret_cast<uintptr_t>(memory);
387 rec.size = size; 515 rec.size = size;
388 Set(name, type, &rec, sizeof(rec)); 516 Set(name, type, &rec, sizeof(rec));
389 } 517 }
390 518
391 void ActivityUserData::ImportExistingData() const { 519 void ActivityUserData::ImportExistingData() const {
392 while (available_ > sizeof(Header)) { 520 while (available_ > sizeof(FieldHeader)) {
393 Header* header = reinterpret_cast<Header*>(memory_); 521 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
394 ValueType type = 522 ValueType type =
395 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 523 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
396 if (type == END_OF_VALUES) 524 if (type == END_OF_VALUES)
397 return; 525 return;
398 if (header->record_size > available_) 526 if (header->record_size > available_)
399 return; 527 return;
400 528
401 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, 529 size_t value_offset = RoundUpToAlignment(
402 kMemoryAlignment); 530 sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
403 if (header->record_size == value_offset && 531 if (header->record_size == value_offset &&
404 header->value_size.load(std::memory_order_relaxed) == 1) { 532 header->value_size.load(std::memory_order_relaxed) == 1) {
405 value_offset -= 1; 533 value_offset -= 1;
406 } 534 }
407 if (value_offset + header->value_size > header->record_size) 535 if (value_offset + header->value_size > header->record_size)
408 return; 536 return;
409 537
410 ValueInfo info; 538 ValueInfo info;
411 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); 539 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
412 info.type = type; 540 info.type = type;
413 info.memory = memory_ + value_offset; 541 info.memory = memory_ + value_offset;
414 info.size_ptr = &header->value_size; 542 info.size_ptr = &header->value_size;
415 info.extent = header->record_size - value_offset; 543 info.extent = header->record_size - value_offset;
416 544
417 StringPiece key(info.name); 545 StringPiece key(info.name);
418 values_.insert(std::make_pair(key, std::move(info))); 546 values_.insert(std::make_pair(key, std::move(info)));
419 547
420 memory_ += header->record_size; 548 memory_ += header->record_size;
421 available_ -= header->record_size; 549 available_ -= header->record_size;
422 } 550 }
423 } 551 }
424 552
425 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
426 DCHECK(output_snapshot);
427 DCHECK(output_snapshot->empty());
428
429 // Find any new data that may have been added by an active instance of this
430 // class that is adding records.
431 ImportExistingData();
432
433 for (const auto& entry : values_) {
434 TypedValue value;
435 value.type_ = entry.second.type;
436 DCHECK_GE(entry.second.extent,
437 entry.second.size_ptr->load(std::memory_order_relaxed));
438
439 switch (entry.second.type) {
440 case RAW_VALUE:
441 case STRING_VALUE:
442 value.long_value_ =
443 std::string(reinterpret_cast<char*>(entry.second.memory),
444 entry.second.size_ptr->load(std::memory_order_relaxed));
445 break;
446 case RAW_VALUE_REFERENCE:
447 case STRING_VALUE_REFERENCE: {
448 ReferenceRecord* ref =
449 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
450 value.ref_value_ = StringPiece(
451 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
452 static_cast<size_t>(ref->size));
453 } break;
454 case BOOL_VALUE:
455 case CHAR_VALUE:
456 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
457 break;
458 case SIGNED_VALUE:
459 case UNSIGNED_VALUE:
460 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
461 break;
462 case END_OF_VALUES: // Included for completeness purposes.
463 NOTREACHED();
464 }
465 auto inserted = output_snapshot->insert(
466 std::make_pair(entry.second.name.as_string(), std::move(value)));
467 DCHECK(inserted.second); // True if inserted, false if existed.
468 }
469
470 return true;
471 }
472
473 const void* ActivityUserData::GetBaseAddress() {
474 // The |memory_| pointer advances as elements are written but the |id_|
475 // value is always at the start of the block so just return that.
476 return id_;
477 }
478
479 // This information is kept for every thread that is tracked. It is filled 553 // This information is kept for every thread that is tracked. It is filled
480 // the very first time the thread is seen. All fields must be of exact sizes 554 // the very first time the thread is seen. All fields must be of exact sizes
481 // so there is no issue moving between 32 and 64-bit builds. 555 // so there is no issue moving between 32 and 64-bit builds.
482 struct ThreadActivityTracker::Header { 556 struct ThreadActivityTracker::Header {
483 // Defined in .h for analyzer access. Increment this if structure changes! 557 // Defined in .h for analyzer access. Increment this if structure changes!
484 static constexpr uint32_t kPersistentTypeId = 558 static constexpr uint32_t kPersistentTypeId =
485 GlobalActivityTracker::kTypeIdActivityTracker; 559 GlobalActivityTracker::kTypeIdActivityTracker;
486 560
487 // Expected size for 32/64-bit check. 561 // Expected size for 32/64-bit check.
488 static constexpr size_t kExpectedInstanceSize = 80; 562 static constexpr size_t kExpectedInstanceSize =
563 OwningProcess::kExpectedInstanceSize + 72;
489 564
490 // This unique number indicates a valid initialization of the memory. 565 // This information uniquely identifies a process.
491 std::atomic<uint32_t> cookie; 566 OwningProcess owner;
492 567
493 // The number of Activity slots (spaces that can hold an Activity) that 568 // The thread-id (thread_ref.as_id) to which this data belongs. This number
494 // immediately follow this structure in memory. 569 // is not guaranteed to mean anything but combined with the process-id from
495 uint32_t stack_slots; 570 // OwningProcess is unique among all active trackers.
496
497 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
498 // These identifiers are not guaranteed to mean anything but are unique, in
499 // combination, among all active trackers. It would be nice to always have
500 // the process_id be a 64-bit value but the necessity of having it atomic
501 // (for the memory barriers it provides) limits it to the natural word size
502 // of the machine.
503 #ifdef ARCH_CPU_64_BITS
504 std::atomic<int64_t> process_id;
505 #else
506 std::atomic<int32_t> process_id;
507 int32_t process_id_padding;
508 #endif
509 ThreadRef thread_ref; 571 ThreadRef thread_ref;
510 572
511 // The start-time and start-ticks when the data was created. Each activity 573 // The start-time and start-ticks when the data was created. Each activity
512 // record has a |time_internal| value that can be converted to a "wall time" 574 // record has a |time_internal| value that can be converted to a "wall time"
513 // with these two values. 575 // with these two values.
514 int64_t start_time; 576 int64_t start_time;
515 int64_t start_ticks; 577 int64_t start_ticks;
516 578
579 // The number of Activity slots (spaces that can hold an Activity) that
580 // immediately follow this structure in memory.
581 uint32_t stack_slots;
582
583 // Some padding to keep everything 64-bit aligned.
584 uint32_t padding;
585
517 // The current depth of the stack. This may be greater than the number of 586 // The current depth of the stack. This may be greater than the number of
518 // slots. If the depth exceeds the number of slots, the newest entries 587 // slots. If the depth exceeds the number of slots, the newest entries
519 // won't be recorded. 588 // won't be recorded.
520 std::atomic<uint32_t> current_depth; 589 std::atomic<uint32_t> current_depth;
521 590
522 // A memory location used to indicate if changes have been made to the stack 591 // A memory location used to indicate if changes have been made to the stack
523 // that would invalidate an in-progress read of its contents. The active 592 // that would invalidate an in-progress read of its contents. The active
524 // tracker will zero the value whenever something gets popped from the 593 // tracker will zero the value whenever something gets popped from the
525 // stack. A monitoring tracker can write a non-zero value here, copy the 594 // stack. A monitoring tracker can write a non-zero value here, copy the
526 // stack contents, and read the value to know, if it is still non-zero, that 595 // stack contents, and read the value to know, if it is still non-zero, that
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
589 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), 658 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
590 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); 659 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
591 660
592 // Ensure that the alignment of Activity.data is properly aligned to a 661 // Ensure that the alignment of Activity.data is properly aligned to a
593 // 64-bit boundary so there are no interoperability-issues across cpu 662 // 64-bit boundary so there are no interoperability-issues across cpu
594 // architectures. 663 // architectures.
595 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, 664 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
596 "ActivityData.data is not 64-bit aligned"); 665 "ActivityData.data is not 64-bit aligned");
597 666
598 // Provided memory should either be completely initialized or all zeros. 667 // Provided memory should either be completely initialized or all zeros.
599 if (header_->cookie.load(std::memory_order_relaxed) == 0) { 668 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
600 // This is a new file. Double-check other fields and then initialize. 669 // This is a new file. Double-check other fields and then initialize.
601 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); 670 DCHECK_EQ(0, header_->owner.process_id);
671 DCHECK_EQ(0, header_->owner.create_stamp);
602 DCHECK_EQ(0, header_->thread_ref.as_id); 672 DCHECK_EQ(0, header_->thread_ref.as_id);
603 DCHECK_EQ(0, header_->start_time); 673 DCHECK_EQ(0, header_->start_time);
604 DCHECK_EQ(0, header_->start_ticks); 674 DCHECK_EQ(0, header_->start_ticks);
605 DCHECK_EQ(0U, header_->stack_slots); 675 DCHECK_EQ(0U, header_->stack_slots);
606 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); 676 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
607 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); 677 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
608 DCHECK_EQ(0, stack_[0].time_internal); 678 DCHECK_EQ(0, stack_[0].time_internal);
609 DCHECK_EQ(0U, stack_[0].origin_address); 679 DCHECK_EQ(0U, stack_[0].origin_address);
610 DCHECK_EQ(0U, stack_[0].call_stack[0]); 680 DCHECK_EQ(0U, stack_[0].call_stack[0]);
611 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); 681 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
612 682
613 #if defined(OS_WIN) 683 #if defined(OS_WIN)
614 header_->thread_ref.as_tid = PlatformThread::CurrentId(); 684 header_->thread_ref.as_tid = PlatformThread::CurrentId();
615 #elif defined(OS_POSIX) 685 #elif defined(OS_POSIX)
616 header_->thread_ref.as_handle = 686 header_->thread_ref.as_handle =
617 PlatformThread::CurrentHandle().platform_handle(); 687 PlatformThread::CurrentHandle().platform_handle();
618 #endif 688 #endif
619 header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
620 689
621 header_->start_time = base::Time::Now().ToInternalValue(); 690 header_->start_time = base::Time::Now().ToInternalValue();
622 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); 691 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
623 header_->stack_slots = stack_slots_; 692 header_->stack_slots = stack_slots_;
624 strlcpy(header_->thread_name, PlatformThread::GetName(), 693 strlcpy(header_->thread_name, PlatformThread::GetName(),
625 sizeof(header_->thread_name)); 694 sizeof(header_->thread_name));
626 695
627 // This is done last so as to guarantee that everything above is "released" 696 // This is done last so as to guarantee that everything above is "released"
628 // by the time this value gets written. 697 // by the time this value gets written.
629 header_->cookie.store(kHeaderCookie, std::memory_order_release); 698 header_->owner.Release_Initialize();
630 699
631 valid_ = true; 700 valid_ = true;
632 DCHECK(IsValid()); 701 DCHECK(IsValid());
633 } else { 702 } else {
634 // This is a file with existing data. Perform basic consistency checks. 703 // This is a file with existing data. Perform basic consistency checks.
635 valid_ = true; 704 valid_ = true;
636 valid_ = IsValid(); 705 valid_ = IsValid();
637 } 706 }
638 } 707 }
639 708
(...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after
764 ActivityId id, 833 ActivityId id,
765 ActivityTrackerMemoryAllocator* allocator) { 834 ActivityTrackerMemoryAllocator* allocator) {
766 // User-data is only stored for activities actually held in the stack. 835 // User-data is only stored for activities actually held in the stack.
767 if (id < stack_slots_ && stack_[id].user_data_ref) { 836 if (id < stack_slots_ && stack_[id].user_data_ref) {
768 allocator->ReleaseObjectReference(stack_[id].user_data_ref); 837 allocator->ReleaseObjectReference(stack_[id].user_data_ref);
769 stack_[id].user_data_ref = 0; 838 stack_[id].user_data_ref = 0;
770 } 839 }
771 } 840 }
772 841
773 bool ThreadActivityTracker::IsValid() const { 842 bool ThreadActivityTracker::IsValid() const {
774 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 843 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
775 header_->process_id.load(std::memory_order_relaxed) == 0 || 844 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
776 header_->thread_ref.as_id == 0 || 845 header_->start_time == 0 || header_->start_ticks == 0 ||
777 header_->start_time == 0 ||
778 header_->start_ticks == 0 ||
779 header_->stack_slots != stack_slots_ || 846 header_->stack_slots != stack_slots_ ||
780 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 847 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
781 return false; 848 return false;
782 } 849 }
783 850
784 return valid_; 851 return valid_;
785 } 852 }
786 853
787 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { 854 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
788 DCHECK(output_snapshot); 855 DCHECK(output_snapshot);
(...skipping 10 matching lines...) Expand all
799 // Stop here if the data isn't valid. 866 // Stop here if the data isn't valid.
800 if (!IsValid()) 867 if (!IsValid())
801 return false; 868 return false;
802 869
803 // Allocate the maximum size for the stack so it doesn't have to be done 870 // Allocate the maximum size for the stack so it doesn't have to be done
804 // during the time-sensitive snapshot operation. It is shrunk once the 871 // during the time-sensitive snapshot operation. It is shrunk once the
805 // actual size is known. 872 // actual size is known.
806 output_snapshot->activity_stack.reserve(stack_slots_); 873 output_snapshot->activity_stack.reserve(stack_slots_);
807 874
808 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { 875 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
809 // Remember the process and thread IDs to ensure they aren't replaced 876 // Remember the data IDs to ensure nothing is replaced during the snapshot
810 // during the snapshot operation. Use "acquire" to ensure that all the 877 // operation. Use "acquire" so that all the non-atomic fields of the
811 // non-atomic fields of the structure are valid (at least at the current 878 // structure are valid (at least at the current moment in time).
812 // moment in time). 879 const uint32_t starting_id =
813 const int64_t starting_process_id = 880 header_->owner.data_id.load(std::memory_order_acquire);
814 header_->process_id.load(std::memory_order_acquire); 881 const int64_t starting_process_id = header_->owner.process_id;
815 const int64_t starting_thread_id = header_->thread_ref.as_id; 882 const int64_t starting_thread_id = header_->thread_ref.as_id;
816 883
817 // Write a non-zero value to |stack_unchanged| so it's possible to detect 884 // Write a non-zero value to |stack_unchanged| so it's possible to detect
818 // at the end that nothing has changed since copying the data began. A 885 // at the end that nothing has changed since copying the data began. A
819 // "cst" operation is required to ensure it occurs before everything else. 886 // "cst" operation is required to ensure it occurs before everything else.
820 // Using "cst" memory ordering is relatively expensive but this is only 887 // Using "cst" memory ordering is relatively expensive but this is only
821 // done during analysis so doesn't directly affect the worker threads. 888 // done during analysis so doesn't directly affect the worker threads.
822 header_->stack_unchanged.store(1, std::memory_order_seq_cst); 889 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
823 890
824 // Fetching the current depth also "acquires" the contents of the stack. 891 // Fetching the current depth also "acquires" the contents of the stack.
825 depth = header_->current_depth.load(std::memory_order_acquire); 892 depth = header_->current_depth.load(std::memory_order_acquire);
826 uint32_t count = std::min(depth, stack_slots_); 893 uint32_t count = std::min(depth, stack_slots_);
827 output_snapshot->activity_stack.resize(count); 894 output_snapshot->activity_stack.resize(count);
828 if (count > 0) { 895 if (count > 0) {
829 // Copy the existing contents. Memcpy is used for speed. 896 // Copy the existing contents. Memcpy is used for speed.
830 memcpy(&output_snapshot->activity_stack[0], stack_, 897 memcpy(&output_snapshot->activity_stack[0], stack_,
831 count * sizeof(Activity)); 898 count * sizeof(Activity));
832 } 899 }
833 900
834 // Retry if something changed during the copy. A "cst" operation ensures 901 // Retry if something changed during the copy. A "cst" operation ensures
835 // it must happen after all the above operations. 902 // it must happen after all the above operations.
836 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) 903 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
837 continue; 904 continue;
838 905
 839 // Stack copied. Record its full depth. 906 // Stack copied. Record its full depth.
840 output_snapshot->activity_stack_depth = depth; 907 output_snapshot->activity_stack_depth = depth;
841 908
842 // TODO(bcwhite): Snapshot other things here. 909 // TODO(bcwhite): Snapshot other things here.
843 910
844 // Get the general thread information. Loading of "process_id" is guaranteed 911 // Get the general thread information.
845 // to be last so that it's possible to detect below if any content has
846 // changed while reading it. It's technically possible for a thread to end,
847 // have its data cleared, a new thread get created with the same IDs, and
848 // it perform an action which starts tracking all in the time since the
849 // ID reads above but the chance is so unlikely that it's not worth the
850 // effort and complexity of protecting against it (perhaps with an
851 // "unchanged" field like is done for the stack).
852 output_snapshot->thread_name = 912 output_snapshot->thread_name =
853 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); 913 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
854 output_snapshot->thread_id = header_->thread_ref.as_id; 914 output_snapshot->thread_id = header_->thread_ref.as_id;
855 output_snapshot->process_id = 915 output_snapshot->process_id = header_->owner.process_id;
856 header_->process_id.load(std::memory_order_seq_cst);
857 916
858 // All characters of the thread-name buffer were copied so as to not break 917 // All characters of the thread-name buffer were copied so as to not break
859 // if the trailing NUL were missing. Now limit the length if the actual 918 // if the trailing NUL were missing. Now limit the length if the actual
860 // name is shorter. 919 // name is shorter.
861 output_snapshot->thread_name.resize( 920 output_snapshot->thread_name.resize(
862 strlen(output_snapshot->thread_name.c_str())); 921 strlen(output_snapshot->thread_name.c_str()));
863 922
864 // If the process or thread ID has changed then the tracker has exited and 923 // If the data ID has changed then the tracker has exited and the memory
865 // the memory reused by a new one. Try again. 924 // reused by a new one. Try again.
866 if (output_snapshot->process_id != starting_process_id || 925 if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
926 output_snapshot->process_id != starting_process_id ||
867 output_snapshot->thread_id != starting_thread_id) { 927 output_snapshot->thread_id != starting_thread_id) {
868 continue; 928 continue;
869 } 929 }
870 930
871 // Only successful if the data is still valid once everything is done since 931 // Only successful if the data is still valid once everything is done since
872 // it's possible for the thread to end somewhere in the middle and all its 932 // it's possible for the thread to end somewhere in the middle and all its
873 // values become garbage. 933 // values become garbage.
874 if (!IsValid()) 934 if (!IsValid())
875 return false; 935 return false;
876 936
877 // Change all the timestamps in the activities from "ticks" to "wall" time. 937 // Change all the timestamps in the activities from "ticks" to "wall" time.
878 const Time start_time = Time::FromInternalValue(header_->start_time); 938 const Time start_time = Time::FromInternalValue(header_->start_time);
879 const int64_t start_ticks = header_->start_ticks; 939 const int64_t start_ticks = header_->start_ticks;
880 for (Activity& activity : output_snapshot->activity_stack) { 940 for (Activity& activity : output_snapshot->activity_stack) {
881 activity.time_internal = 941 activity.time_internal =
882 (start_time + 942 (start_time +
883 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) 943 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
884 .ToInternalValue(); 944 .ToInternalValue();
885 } 945 }
886 946
887 // Success! 947 // Success!
888 return true; 948 return true;
889 } 949 }
890 950
891 // Too many attempts. 951 // Too many attempts.
892 return false; 952 return false;
893 } 953 }
894 954
955 const void* ThreadActivityTracker::GetBaseAddress() {
956 return header_;
957 }
958
959 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
960 int64_t stamp) {
961 header_->owner.SetOwningProcessIdForTesting(pid, stamp);
962 }
963
964 // static
965 bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
966 ProcessId* out_id,
967 int64_t* out_stamp) {
968 const Header* header = reinterpret_cast<const Header*>(memory);
969 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
970 }
971
895 // static 972 // static
896 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 973 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
897 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 974 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
898 } 975 }
899 976
900 // The instantiation of the GlobalActivityTracker object. 977 // The instantiation of the GlobalActivityTracker object.
901 // The object held here will obviously not be destructed at process exit 978 // The object held here will obviously not be destructed at process exit
902 // but that's best since PersistentMemoryAllocator objects (that underlie 979 // but that's best since PersistentMemoryAllocator objects (that underlie
903 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 980 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
904 // essential at exit anyway due to the fact that they depend on data managed 981 // essential at exit anyway due to the fact that they depend on data managed
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
 972 // These fields never change and are done before the record is made 1049 // These fields never change and are done before the record is made
973 // iterable so no thread protection is necessary. 1050 // iterable so no thread protection is necessary.
974 size = info.size; 1051 size = info.size;
975 timestamp = info.timestamp; 1052 timestamp = info.timestamp;
976 age = info.age; 1053 age = info.age;
977 memcpy(identifier, info.identifier, sizeof(identifier)); 1054 memcpy(identifier, info.identifier, sizeof(identifier));
978 memcpy(pickle, pickler.data(), pickler.size()); 1055 memcpy(pickle, pickler.data(), pickler.size());
979 pickle_size = pickler.size(); 1056 pickle_size = pickler.size();
980 changes.store(0, std::memory_order_relaxed); 1057 changes.store(0, std::memory_order_relaxed);
981 1058
1059 // Initialize the owner info.
1060 owner.Release_Initialize();
1061
982 // Now set those fields that can change. 1062 // Now set those fields that can change.
983 return UpdateFrom(info); 1063 return UpdateFrom(info);
984 } 1064 }
985 1065
986 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( 1066 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
987 const GlobalActivityTracker::ModuleInfo& info) { 1067 const GlobalActivityTracker::ModuleInfo& info) {
988 // Updates can occur after the record is made visible so make changes atomic. 1068 // Updates can occur after the record is made visible so make changes atomic.
989 // A "strong" exchange ensures no false failures. 1069 // A "strong" exchange ensures no false failures.
990 uint32_t old_changes = changes.load(std::memory_order_relaxed); 1070 uint32_t old_changes = changes.load(std::memory_order_relaxed);
991 uint32_t new_changes = old_changes | kModuleInformationChanging; 1071 uint32_t new_changes = old_changes | kModuleInformationChanging;
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
1046 AutoLock lock(global->user_data_allocator_lock_); 1126 AutoLock lock(global->user_data_allocator_lock_);
1047 user_data_ = 1127 user_data_ =
1048 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); 1128 tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1049 } else { 1129 } else {
1050 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); 1130 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
1051 } 1131 }
1052 } 1132 }
1053 return *user_data_; 1133 return *user_data_;
1054 } 1134 }
1055 1135
1056 GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) 1136 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1137 size_t size)
1057 : ActivityUserData(memory, size) {} 1138 : ActivityUserData(memory, size) {}
1058 1139
1059 GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} 1140 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
1060 1141
1061 void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, 1142 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
1062 ValueType type, 1143 ValueType type,
1063 const void* memory, 1144 const void* memory,
1064 size_t size) { 1145 size_t size) {
1065 AutoLock lock(data_lock_); 1146 AutoLock lock(data_lock_);
1066 ActivityUserData::Set(name, type, memory, size); 1147 ActivityUserData::Set(name, type, memory, size);
1067 } 1148 }
1068 1149
1069 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( 1150 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
1070 PersistentMemoryAllocator::Reference mem_reference, 1151 PersistentMemoryAllocator::Reference mem_reference,
1071 void* base, 1152 void* base,
1072 size_t size) 1153 size_t size)
1073 : ThreadActivityTracker(base, size), 1154 : ThreadActivityTracker(base, size),
1074 mem_reference_(mem_reference), 1155 mem_reference_(mem_reference),
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
1179 return tracker; 1260 return tracker;
1180 } 1261 }
1181 1262
1182 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 1263 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1183 ThreadActivityTracker* tracker = 1264 ThreadActivityTracker* tracker =
1184 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 1265 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1185 if (tracker) 1266 if (tracker)
1186 delete tracker; 1267 delete tracker;
1187 } 1268 }
1188 1269
1270 void GlobalActivityTracker::SetBackgroundTaskRunner(
1271 const scoped_refptr<TaskRunner>& runner) {
1272 AutoLock lock(global_tracker_lock_);
1273 background_task_runner_ = runner;
1274 }
1275
1276 void GlobalActivityTracker::SetProcessExitCallback(
1277 ProcessExitCallback callback) {
1278 AutoLock lock(global_tracker_lock_);
1279 process_exit_callback_ = callback;
1280 }
1281
1282 void GlobalActivityTracker::RecordProcessLaunch(
1283 ProcessId process_id,
1284 const FilePath::StringType& cmd) {
1285 DCHECK_NE(GetCurrentProcId(), process_id);
1286
1287 base::AutoLock lock(global_tracker_lock_);
1288 if (base::ContainsKey(known_processes_, process_id)) {
1289 // TODO(bcwhite): Measure this in UMA.
1290 NOTREACHED() << "Process #" << process_id
1291 << " was previously recorded as \"launched\""
1292 << " with no corresponding exit.";
1293 known_processes_.erase(process_id);
1294 }
1295
1296 #if defined(OS_WIN)
1297 known_processes_.insert(std::make_pair(process_id, UTF16ToUTF8(cmd)));
1298 #else
1299 known_processes_.insert(std::make_pair(process_id, cmd));
1300 #endif
1301 }
1302
1303 void GlobalActivityTracker::RecordProcessLaunch(
1304 ProcessId process_id,
1305 const FilePath::StringType& exe,
1306 const FilePath::StringType& args) {
1307 if (exe.find(FILE_PATH_LITERAL(" "))) {
1308 RecordProcessLaunch(process_id,
1309 FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
1310 FILE_PATH_LITERAL("\" ") + args);
1311 } else {
1312 RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
1313 }
1314 }
1315
1316 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1317 int exit_code) {
1318 DCHECK_NE(GetCurrentProcId(), process_id);
1319
1320 scoped_refptr<TaskRunner> task_runner;
1321 std::string command_line;
1322 {
1323 base::AutoLock lock(global_tracker_lock_);
1324 task_runner = background_task_runner_;
1325 auto found = known_processes_.find(process_id);
1326 if (found != known_processes_.end()) {
1327 command_line = std::move(found->second);
1328 known_processes_.erase(found);
1329 } else {
1330 DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
1331 }
1332 }
1333
1334 // Use the current time to differentiate the process that just exited
1335 // from any that might be created in the future with the same ID.
1336 int64_t now_stamp = Time::Now().ToInternalValue();
1337
1338 // The persistent allocator is thread-safe so run the iteration and
1339 // adjustments on a worker thread if one was provided.
1340 if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
1341 task_runner->PostTask(
1342 FROM_HERE,
1343 Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
1344 process_id, now_stamp, exit_code, Passed(&command_line)));
1345 return;
1346 }
1347
1348 CleanupAfterProcess(process_id, now_stamp, exit_code,
1349 std::move(command_line));
1350 }
1351
1352 void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
1353 process_data().SetInt(kProcessPhaseDataKey, phase);
1354 }
1355
1356 void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
1357 int64_t exit_stamp,
1358 int exit_code,
1359 std::string&& command_line) {
 1360 // The process may not have exited cleanly so it's necessary to go through
1361 // all the data structures it may have allocated in the persistent memory
1362 // segment and mark them as "released". This will allow them to be reused
1363 // later on.
1364
1365 PersistentMemoryAllocator::Iterator iter(allocator_.get());
1366 PersistentMemoryAllocator::Reference ref;
1367
1368 ProcessExitCallback process_exit_callback;
1369 {
1370 AutoLock lock(global_tracker_lock_);
1371 process_exit_callback = process_exit_callback_;
1372 }
1373 if (process_exit_callback) {
 1374 // Find the process's user-data record so the process phase can be passed
1375 // to the callback.
1376 ActivityUserData::Snapshot process_data_snapshot;
1377 while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
1378 const void* memory = allocator_->GetAsArray<char>(
1379 ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
1380 ProcessId found_id;
1381 int64_t create_stamp;
1382 if (ActivityUserData::GetOwningProcessId(memory, &found_id,
1383 &create_stamp)) {
1384 if (found_id == process_id && create_stamp < exit_stamp) {
1385 const ActivityUserData process_data(const_cast<void*>(memory),
1386 allocator_->GetAllocSize(ref));
1387 process_data.CreateSnapshot(&process_data_snapshot);
1388 break; // No need to look for any others.
1389 }
1390 }
1391 }
1392 iter.Reset(); // So it starts anew when used below.
1393
1394 // Record the process's phase at exit so callback doesn't need to go
1395 // searching based on a private key value.
1396 ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
1397 auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
1398 if (phase != process_data_snapshot.end())
1399 exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
1400
1401 // Perform the callback.
1402 process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
1403 std::move(command_line),
1404 std::move(process_data_snapshot));
1405 }
1406
1407 // Find all allocations associated with the exited process and free them.
1408 uint32_t type;
1409 while ((ref = iter.GetNext(&type)) != 0) {
1410 switch (type) {
1411 case kTypeIdActivityTracker:
1412 case kTypeIdUserDataRecord:
1413 case kTypeIdProcessDataRecord:
1414 case ModuleInfoRecord::kPersistentTypeId: {
1415 const void* memory = allocator_->GetAsArray<char>(
1416 ref, type, PersistentMemoryAllocator::kSizeAny);
1417 ProcessId found_id;
1418 int64_t create_stamp;
1419
1420 // By convention, the OwningProcess structure is always the first
1421 // field of the structure so there's no need to handle all the
1422 // cases separately.
1423 if (OwningProcess::GetOwningProcessId(memory, &found_id,
1424 &create_stamp)) {
1425 // Only change the type to be "free" if the process ID matches and
1426 // the creation time is before the exit time (so PID re-use doesn't
1427 // cause the erasure of something that is in-use). Memory is cleared
1428 // here, rather than when it's needed, so as to limit the impact at
1429 // that critical time.
1430 if (found_id == process_id && create_stamp < exit_stamp)
1431 allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1432 }
1433 } break;
1434 }
1435 }
1436 }
1437
1189 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { 1438 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1190 // Allocate at least one extra byte so the string is NUL terminated. All 1439 // Allocate at least one extra byte so the string is NUL terminated. All
1191 // memory returned by the allocator is guaranteed to be zeroed. 1440 // memory returned by the allocator is guaranteed to be zeroed.
1192 PersistentMemoryAllocator::Reference ref = 1441 PersistentMemoryAllocator::Reference ref =
1193 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); 1442 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1194 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, 1443 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1195 message.size() + 1); 1444 message.size() + 1);
1196 if (memory) { 1445 if (memory) {
1197 memcpy(memory, message.data(), message.size()); 1446 memcpy(memory, message.data(), message.size());
1198 allocator_->MakeIterable(ref); 1447 allocator_->MakeIterable(ref);
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1242 kTypeIdActivityTracker, 1491 kTypeIdActivityTracker,
1243 kTypeIdActivityTrackerFree, 1492 kTypeIdActivityTrackerFree,
1244 stack_memory_size_, 1493 stack_memory_size_,
1245 kCachedThreadMemories, 1494 kCachedThreadMemories,
1246 /*make_iterable=*/true), 1495 /*make_iterable=*/true),
1247 user_data_allocator_(allocator_.get(), 1496 user_data_allocator_(allocator_.get(),
1248 kTypeIdUserDataRecord, 1497 kTypeIdUserDataRecord,
1249 kTypeIdUserDataRecordFree, 1498 kTypeIdUserDataRecordFree,
1250 kUserDataSize, 1499 kUserDataSize,
1251 kCachedUserDataMemories, 1500 kCachedUserDataMemories,
1252 /*make_iterable=*/false), 1501 /*make_iterable=*/true),
1502 process_data_(allocator_->GetAsArray<char>(
1503 AllocateFrom(allocator_.get(),
1504 kTypeIdProcessDataRecordFree,
1505 kProcessDataSize,
1506 kTypeIdProcessDataRecord),
1507 kTypeIdProcessDataRecord,
1508 kProcessDataSize),
1509 kProcessDataSize),
1253 global_data_( 1510 global_data_(
1254 allocator_->GetAsArray<char>( 1511 allocator_->GetAsArray<char>(
1255 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), 1512 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
1256 kTypeIdGlobalDataRecord, 1513 kTypeIdGlobalDataRecord,
1257 PersistentMemoryAllocator::kSizeAny), 1514 kGlobalDataSize),
1258 kGlobalDataSize) { 1515 kGlobalDataSize) {
1259 // Ensure the passed memory is valid and empty (iterator finds nothing). 1516 // Ensure the passed memory is valid and empty (iterator finds nothing).
1260 uint32_t type; 1517 uint32_t type;
1261 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 1518 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
1262 1519
1263 // Ensure that there is no other global object and then make this one such. 1520 // Ensure that there is no other global object and then make this one such.
1264 DCHECK(!g_tracker_); 1521 DCHECK(!g_tracker_);
1265 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); 1522 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1266 1523
1267 // The global records must be iterable in order to be found by an analyzer. 1524 // The data records must be iterable in order to be found by an analyzer.
1525 allocator_->MakeIterable(allocator_->GetAsReference(
1526 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1268 allocator_->MakeIterable(allocator_->GetAsReference( 1527 allocator_->MakeIterable(allocator_->GetAsReference(
1269 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); 1528 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
1270 1529
1530 // Note that this process has launched.
1531 SetProcessPhase(PROCESS_LAUNCHED);
1532
1271 // Fetch and record all activated field trials. 1533 // Fetch and record all activated field trials.
1272 FieldTrial::ActiveGroups active_groups; 1534 FieldTrial::ActiveGroups active_groups;
1273 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); 1535 FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1274 for (auto& group : active_groups) 1536 for (auto& group : active_groups)
1275 RecordFieldTrial(group.trial_name, group.group_name); 1537 RecordFieldTrial(group.trial_name, group.group_name);
1276 } 1538 }
1277 1539
1278 GlobalActivityTracker::~GlobalActivityTracker() { 1540 GlobalActivityTracker::~GlobalActivityTracker() {
1279 DCHECK_EQ(Get(), this); 1541 DCHECK_EQ(Get(), this);
1280 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); 1542 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
1380 : GlobalActivityTracker::ScopedThreadActivity( 1642 : GlobalActivityTracker::ScopedThreadActivity(
1381 program_counter, 1643 program_counter,
1382 nullptr, 1644 nullptr,
1383 Activity::ACT_PROCESS_WAIT, 1645 Activity::ACT_PROCESS_WAIT,
1384 ActivityData::ForProcess(process->Pid()), 1646 ActivityData::ForProcess(process->Pid()),
1385 /*lock_allowed=*/true) {} 1647 /*lock_allowed=*/true) {}
1386 #endif 1648 #endif
1387 1649
1388 } // namespace debug 1650 } // namespace debug
1389 } // namespace base 1651 } // namespace base
OLDNEW
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698