Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2680123003: Multi-Process Tracking Support (Closed)
Patch Set: added defined process-phase recording and process-exit callback support Created 3 years, 10 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 12 matching lines...)
23 #include "base/process/process_handle.h" 23 #include "base/process/process_handle.h"
24 #include "base/stl_util.h" 24 #include "base/stl_util.h"
25 #include "base/strings/string_util.h" 25 #include "base/strings/string_util.h"
26 #include "base/threading/platform_thread.h" 26 #include "base/threading/platform_thread.h"
27 27
28 namespace base { 28 namespace base {
29 namespace debug { 29 namespace debug {
30 30
31 namespace { 31 namespace {
32 32
33 // A number that identifies the memory as having been initialized. It's
34 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
35 // A version number is added on so that major structure changes won't try to
36 // read an older version (since the cookie won't match).
37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
38
39 // The minimum depth a stack should support. 33 // The minimum depth a stack should support.
40 const int kMinStackDepth = 2; 34 const int kMinStackDepth = 2;
41 35
42 // The amount of memory set aside for holding arbitrary user data (key/value 36 // The amount of memory set aside for holding arbitrary user data (key/value
43 // pairs) globally or associated with ActivityData entries. 37 // pairs) globally or associated with ActivityData entries.
44 const size_t kUserDataSize = 1 << 10; // 1 KiB 38 const size_t kUserDataSize = 1 << 10; // 1 KiB
39 const size_t kProcessDataSize = 4 << 10; // 4 KiB
45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB 40 const size_t kGlobalDataSize = 16 << 10; // 16 KiB
46 const size_t kMaxUserDataNameLength = 41 const size_t kMaxUserDataNameLength =
47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); 42 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
48 43
49 // A constant used to indicate that module information is changing. 44 // A constant used to indicate that module information is changing.
50 const uint32_t kModuleInformationChanging = 0x80000000; 45 const uint32_t kModuleInformationChanging = 0x80000000;
51 46
47 // The key used to record process information.
48 const char kProcessPhaseDataKey[] = "process-phase";
49
50 // An atomically incrementing number, used to check for recreations of objects
51 // in the same memory space.
52 StaticAtomicSequenceNumber g_next_id;
53
52 union ThreadRef { 54 union ThreadRef {
53 int64_t as_id; 55 int64_t as_id;
54 #if defined(OS_WIN) 56 #if defined(OS_WIN)
55 // On Windows, the handle itself is often a pseudo-handle with a common 57 // On Windows, the handle itself is often a pseudo-handle with a common
56 // value meaning "this thread" and so the thread-id is used. The former 58 // value meaning "this thread" and so the thread-id is used. The former
57 // can be converted to a thread-id with a system call. 59 // can be converted to a thread-id with a system call.
58 PlatformThreadId as_tid; 60 PlatformThreadId as_tid;
59 #elif defined(OS_POSIX) 61 #elif defined(OS_POSIX)
60 // On Posix, the handle is always a unique identifier so no conversion 62 // On Posix, the handle is always a unique identifier so no conversion
61 // needs to be done. However, its value is officially opaque so there 63 // needs to be done. However, its value is officially opaque so there
62 // is no one correct way to convert it to a numerical identifier. 64 // is no one correct way to convert it to a numerical identifier.
63 PlatformThreadHandle::Handle as_handle; 65 PlatformThreadHandle::Handle as_handle;
64 #endif 66 #endif
65 }; 67 };
66 68
69 // Get the next non-zero identifier. It is only unique within a process.
70 uint32_t GetNextDataId() {
71 uint32_t id;
72 while ((id = g_next_id.GetNext()) == 0)
73 ;
74 return id;
75 }
76
77 // Finds and reuses a specific allocation or creates a new one.
78 PersistentMemoryAllocator::Reference AllocateFrom(
79 PersistentMemoryAllocator* allocator,
80 uint32_t from_type,
81 size_t size,
82 uint32_t to_type) {
83 PersistentMemoryAllocator::Iterator iter(allocator);
84 PersistentMemoryAllocator::Reference ref;
85 while ((ref = iter.GetNextOfType(from_type)) != 0) {
86 DCHECK_LE(size, allocator->GetAllocSize(ref));
87 // This can fail if another thread has just taken it. It is assumed that
88 // the memory is cleared during the "free" operation.
89 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
90 return ref;
91 }
92
93 return allocator->Allocate(size, to_type);
94 }
95
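The claim-by-type-change idea in AllocateFrom() is worth spelling out: a freed block is identified by its type field, and ChangeType() atomically flips that field, so of two threads racing for the same block only one can win. A minimal standalone sketch of the same claim semantics (illustrative names, not the real PersistentMemoryAllocator API):

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kTypeFree = 1;   // block is on the free list
    constexpr uint32_t kTypeInUse = 2;  // block has been claimed

    struct Slot {
      std::atomic<uint32_t> type{kTypeFree};
    };

    // Mirrors ChangeType(ref, to_type, from_type): succeeds for exactly one
    // caller because the compare-and-swap is atomic.
    bool TryClaim(Slot* slot) {
      uint32_t expected = kTypeFree;
      return slot->type.compare_exchange_strong(expected, kTypeInUse);
    }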
67 // Determines the previous aligned index. 96 // Determines the previous aligned index.
68 size_t RoundDownToAlignment(size_t index, size_t alignment) { 97 size_t RoundDownToAlignment(size_t index, size_t alignment) {
69 return index & (0 - alignment); 98 return index & (0 - alignment);
70 } 99 }
71 100
72 // Determines the next aligned index. 101 // Determines the next aligned index.
73 size_t RoundUpToAlignment(size_t index, size_t alignment) { 102 size_t RoundUpToAlignment(size_t index, size_t alignment) {
74 return (index + (alignment - 1)) & (0 - alignment); 103 return (index + (alignment - 1)) & (0 - alignment);
75 } 104 }
76 105
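Both helpers depend on the alignment being a power of two: in size_t arithmetic, 0 - alignment wraps around to the mask with the low bits cleared (for alignment 8 that is ...11111000). A quick standalone check of the behavior (assuming power-of-two alignments, as the callers here guarantee):

    #include <cassert>
    #include <cstddef>

    size_t RoundDownToAlignment(size_t index, size_t alignment) {
      return index & (0 - alignment);  // clear the low bits
    }

    size_t RoundUpToAlignment(size_t index, size_t alignment) {
      return (index + (alignment - 1)) & (0 - alignment);
    }

    int main() {
      assert(RoundDownToAlignment(13, 8) == 8);  // previous multiple of 8
      assert(RoundUpToAlignment(13, 8) == 16);   // next multiple of 8
      assert(RoundUpToAlignment(16, 8) == 16);   // already aligned
      return 0;
    }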
77 } // namespace 106 } // namespace
78 107
108 ProcessInfo::ProcessInfo() {}
109 ProcessInfo::~ProcessInfo() {}
110
111 void ProcessInfo::Release_Initialize() {
112 uint32_t old_id = data_id.load(std::memory_order_acquire);
113 DCHECK_EQ(0U, old_id);
114 process_id = GetCurrentProcId();
115 create_stamp = Time::Now().ToInternalValue();
116 data_id.store(GetNextDataId(), std::memory_order_release);
117 }
118
119 void ProcessInfo::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
120 process_id = pid;
121 create_stamp = stamp;
122 }
123
124 // static
125 bool ProcessInfo::GetOwningProcessId(const void* memory,
126 ProcessId* out_id,
127 int64_t* out_stamp) {
128 const ProcessInfo* info = reinterpret_cast<const ProcessInfo*>(memory);
129 uint32_t id = info->data_id.load(std::memory_order_acquire);
130 if (id == 0)
131 return false;
132
133 *out_id = static_cast<ProcessId>(info->process_id);
134 *out_stamp = info->create_stamp;
135 return id == info->data_id.load(std::memory_order_seq_cst);
136 }
79 137
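GetOwningProcessId() is effectively a one-shot sequence-lock read: data_id is loaded with acquire ordering before the plain fields are read, then loaded again at the end, and a mismatch means the record was freed and re-initialized mid-read, so the copied values must be discarded. A minimal standalone sketch of that validation pattern (field names here are illustrative):

    #include <atomic>
    #include <cstdint>

    struct Record {
      std::atomic<uint32_t> data_id{0};  // 0 means "not yet initialized"
      int64_t owner = 0;                 // plain field published by data_id
    };

    // Copies |owner| only if data_id was non-zero and unchanged across the
    // read, i.e. the record was not recycled while we were looking at it.
    bool ReadOwner(const Record& r, int64_t* out) {
      uint32_t id = r.data_id.load(std::memory_order_acquire);
      if (id == 0)
        return false;
      *out = r.owner;
      return id == r.data_id.load(std::memory_order_seq_cst);
    }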
80 // It doesn't matter what is contained in this (though it will be all zeros) 138 // It doesn't matter what is contained in this (though it will be all zeros)
81 // as only the address of it is important. 139 // as only the address of it is important.
82 const ActivityData kNullActivityData = {}; 140 const ActivityData kNullActivityData = {};
83 141
84 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 142 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
85 ThreadRef thread_ref; 143 ThreadRef thread_ref;
86 thread_ref.as_id = 0; // Zero the union in case other is smaller. 144 thread_ref.as_id = 0; // Zero the union in case other is smaller.
87 #if defined(OS_WIN) 145 #if defined(OS_WIN)
88 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 146 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
(...skipping 150 matching lines...)
239 StringPiece ActivityUserData::TypedValue::GetReference() const { 297 StringPiece ActivityUserData::TypedValue::GetReference() const {
240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); 298 DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
241 return ref_value_; 299 return ref_value_;
242 } 300 }
243 301
244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { 302 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); 303 DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
246 return ref_value_; 304 return ref_value_;
247 } 305 }
248 306
307 // These are required because std::atomic is (currently) not a POD type and
308 // thus clang requires explicit out-of-line constructors and destructors even
309 // when they do nothing.
249 ActivityUserData::ValueInfo::ValueInfo() {} 310 ActivityUserData::ValueInfo::ValueInfo() {}
250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 311 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
251 ActivityUserData::ValueInfo::~ValueInfo() {} 312 ActivityUserData::ValueInfo::~ValueInfo() {}
252 313 ActivityUserData::MemoryHeader::MemoryHeader() {}
253 StaticAtomicSequenceNumber ActivityUserData::next_id_; 314 ActivityUserData::MemoryHeader::~MemoryHeader() {}
315 ActivityUserData::FieldHeader::FieldHeader() {}
316 ActivityUserData::FieldHeader::~FieldHeader() {}
254 317
255 ActivityUserData::ActivityUserData(void* memory, size_t size) 318 ActivityUserData::ActivityUserData(void* memory, size_t size)
256 : memory_(reinterpret_cast<char*>(memory)), 319 : memory_(reinterpret_cast<char*>(memory)),
257 available_(RoundDownToAlignment(size, kMemoryAlignment)), 320 available_(RoundDownToAlignment(size, kMemoryAlignment)),
258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { 321 header_(reinterpret_cast<MemoryHeader*>(memory)) {
259 // It's possible that no user data is being stored. 322 // It's possible that no user data is being stored.
260 if (!memory_) 323 if (!memory_)
261 return; 324 return;
262 325
263 DCHECK_LT(kMemoryAlignment, available_); 326 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
264 if (id_->load(std::memory_order_relaxed) == 0) { 327 DCHECK_LT(sizeof(MemoryHeader), available_);
265 // Generate a new ID and store it in the first 32-bit word of memory_. 328 if (header_->process_info.data_id.load(std::memory_order_acquire) == 0)
266 // |id_| must be non-zero for non-sink instances. 329 header_->process_info.Release_Initialize();
267 uint32_t id; 330 memory_ += sizeof(MemoryHeader);
268 while ((id = next_id_.GetNext()) == 0) 331 available_ -= sizeof(MemoryHeader);
269 ;
270 id_->store(id, std::memory_order_relaxed);
271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
272 }
273 memory_ += kMemoryAlignment;
274 available_ -= kMemoryAlignment;
275 332
276 // If there is already data present, load that. This allows the same class 333 // If there is already data present, load that. This allows the same class
277 // to be used for analysis through snapshots. 334 // to be used for analysis through snapshots.
278 ImportExistingData(); 335 ImportExistingData();
279 } 336 }
280 337
281 ActivityUserData::~ActivityUserData() {} 338 ActivityUserData::~ActivityUserData() {}
282 339
340 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
341 DCHECK(output_snapshot);
342 DCHECK(output_snapshot->empty());
343
344 // Find any new data that may have been added by an active instance of this
345 // class that is adding records.
346 ImportExistingData();
347
348 for (const auto& entry : values_) {
349 TypedValue value;
350 value.type_ = entry.second.type;
351 DCHECK_GE(entry.second.extent,
352 entry.second.size_ptr->load(std::memory_order_relaxed));
353
354 switch (entry.second.type) {
355 case RAW_VALUE:
356 case STRING_VALUE:
357 value.long_value_ =
358 std::string(reinterpret_cast<char*>(entry.second.memory),
359 entry.second.size_ptr->load(std::memory_order_relaxed));
360 break;
361 case RAW_VALUE_REFERENCE:
362 case STRING_VALUE_REFERENCE: {
363 ReferenceRecord* ref =
364 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
365 value.ref_value_ = StringPiece(
366 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
367 static_cast<size_t>(ref->size));
368 } break;
369 case BOOL_VALUE:
370 case CHAR_VALUE:
371 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
372 break;
373 case SIGNED_VALUE:
374 case UNSIGNED_VALUE:
375 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
376 break;
377 case END_OF_VALUES: // Included for completeness purposes.
378 NOTREACHED();
379 }
380 auto inserted = output_snapshot->insert(
381 std::make_pair(entry.second.name.as_string(), std::move(value)));
382 DCHECK(inserted.second); // True if inserted, false if existed.
383 }
384
385 return true;
386 }
387
388 const void* ActivityUserData::GetBaseAddress() {
389 // The |memory_| pointer advances as elements are written but the |header_|
390 // value is always at the start of the block so just return that.
391 return header_;
392 }
393
394 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
395 int64_t stamp) {
396 if (!header_)
397 return;
398 header_->process_info.SetOwningProcessIdForTesting(pid, stamp);
399 }
400
401 // static
402 bool ActivityUserData::GetOwningProcessId(const void* memory,
403 ProcessId* out_id,
404 int64_t* out_stamp) {
405 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
406 return ProcessInfo::GetOwningProcessId(&header->process_info, out_id,
407 out_stamp);
408 }
409
283 void ActivityUserData::Set(StringPiece name, 410 void ActivityUserData::Set(StringPiece name,
284 ValueType type, 411 ValueType type,
285 const void* memory, 412 const void* memory,
286 size_t size) { 413 size_t size) {
287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); 414 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), 415 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
289 size); 416 size);
290 417
291 // It's possible that no user data is being stored. 418 // It's possible that no user data is being stored.
292 if (!memory_) 419 if (!memory_)
293 return; 420 return;
294 421
295 // The storage of a name is limited so use that limit during lookup. 422 // The storage of a name is limited so use that limit during lookup.
296 if (name.length() > kMaxUserDataNameLength) 423 if (name.length() > kMaxUserDataNameLength)
297 name.set(name.data(), kMaxUserDataNameLength); 424 name.set(name.data(), kMaxUserDataNameLength);
298 425
299 ValueInfo* info; 426 ValueInfo* info;
300 auto existing = values_.find(name); 427 auto existing = values_.find(name);
301 if (existing != values_.end()) { 428 if (existing != values_.end()) {
302 info = &existing->second; 429 info = &existing->second;
303 } else { 430 } else {
304 // The name size is limited to what can be held in a single byte but 431 // The name size is limited to what can be held in a single byte but
305 // because there are no alignment constraints on strings, it's set tight 432 // because there are no alignment constraints on strings, it's set tight
306 // against the header. Its extent (the reserved space, even if it's not 433 // against the header. Its extent (the reserved space, even if it's not
307 // all used) is calculated so that, when pressed against the header, the 434 // all used) is calculated so that, when pressed against the header, the
308 // following field will be aligned properly. 435 // following field will be aligned properly.
309 size_t name_size = name.length(); 436 size_t name_size = name.length();
310 size_t name_extent = 437 size_t name_extent =
311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - 438 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
312 sizeof(Header); 439 sizeof(FieldHeader);
313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); 440 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
314 441
315 // The "base size" is the size of the header and (padded) string key. Stop 442 // The "base size" is the size of the header and (padded) string key. Stop
316 // now if there's not room enough for even this. 443 // now if there's not room enough for even this.
317 size_t base_size = sizeof(Header) + name_extent; 444 size_t base_size = sizeof(FieldHeader) + name_extent;
318 if (base_size > available_) 445 if (base_size > available_)
319 return; 446 return;
320 447
321 // The "full size" is the size for storing the entire value. 448 // The "full size" is the size for storing the entire value.
322 size_t full_size = std::min(base_size + value_extent, available_); 449 size_t full_size = std::min(base_size + value_extent, available_);
323 450
324 // If the value is actually a single byte, see if it can be stuffed at the 451 // If the value is actually a single byte, see if it can be stuffed at the
325 // end of the name extent rather than wasting kMemoryAlignment bytes. 452 // end of the name extent rather than wasting kMemoryAlignment bytes.
326 if (size == 1 && name_extent > name_size) { 453 if (size == 1 && name_extent > name_size) {
327 full_size = base_size; 454 full_size = base_size;
328 --name_extent; 455 --name_extent;
329 --base_size; 456 --base_size;
330 } 457 }
331 458
332 // Truncate the stored size to the amount of available memory. Stop now if 459 // Truncate the stored size to the amount of available memory. Stop now if
333 // there's not any room for even part of the value. 460 // there's not any room for even part of the value.
334 size = std::min(full_size - base_size, size); 461 size = std::min(full_size - base_size, size);
335 if (size == 0) 462 if (size == 0)
336 return; 463 return;
337 464
338 // Allocate a chunk of memory. 465 // Allocate a chunk of memory.
339 Header* header = reinterpret_cast<Header*>(memory_); 466 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
340 memory_ += full_size; 467 memory_ += full_size;
341 available_ -= full_size; 468 available_ -= full_size;
342 469
343 // Datafill the header and name records. Memory must be zeroed. The |type| 470 // Datafill the header and name records. Memory must be zeroed. The |type|
344 // is written last, atomically, to release all the other values. 471 // is written last, atomically, to release all the other values.
manzagop (departed) 2017/02/22 20:44:16 Re: the "global" activity data, I don't think this
bcwhite 2017/02/22 22:13:04 Yeah, that's a problem. And, unfortunately, it's
manzagop (departed) 2017/02/24 15:56:35 I'd say let's ignore for now. It conceptually soun
bcwhite 2017/03/06 16:33:51 Acknowledged.
345 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); 472 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
346 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); 473 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
347 header->name_size = static_cast<uint8_t>(name_size); 474 header->name_size = static_cast<uint8_t>(name_size);
348 header->record_size = full_size; 475 header->record_size = full_size;
349 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); 476 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
350 void* value_memory = 477 void* value_memory =
351 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; 478 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
352 memcpy(name_memory, name.data(), name_size); 479 memcpy(name_memory, name.data(), name_size);
353 header->type.store(type, std::memory_order_release); 480 header->type.store(type, std::memory_order_release);
354 481
355 // Create an entry in |values_| so that this field can be found and changed 482 // Create an entry in |values_| so that this field can be found and changed
356 // later on without having to allocate new entries. 483 // later on without having to allocate new entries.
357 StringPiece persistent_name(name_memory, name_size); 484 StringPiece persistent_name(name_memory, name_size);
358 auto inserted = 485 auto inserted =
359 values_.insert(std::make_pair(persistent_name, ValueInfo())); 486 values_.insert(std::make_pair(persistent_name, ValueInfo()));
360 DCHECK(inserted.second); // True if inserted, false if existed. 487 DCHECK(inserted.second); // True if inserted, false if existed.
361 info = &inserted.first->second; 488 info = &inserted.first->second;
362 info->name = persistent_name; 489 info->name = persistent_name;
363 info->memory = value_memory; 490 info->memory = value_memory;
364 info->size_ptr = &header->value_size; 491 info->size_ptr = &header->value_size;
365 info->extent = full_size - sizeof(Header) - name_extent; 492 info->extent = full_size - sizeof(FieldHeader) - name_extent;
366 info->type = type; 493 info->type = type;
367 } 494 }
368 495
369 // Copy the value data to storage. The |size| is written last, atomically, to 496 // Copy the value data to storage. The |size| is written last, atomically, to
370 // release the copied data. Until then, a parallel reader will just ignore 497 // release the copied data. Until then, a parallel reader will just ignore
371 // records with a zero size. 498 // records with a zero size.
372 DCHECK_EQ(type, info->type); 499 DCHECK_EQ(type, info->type);
373 size = std::min(size, info->extent); 500 size = std::min(size, info->extent);
374 info->size_ptr->store(0, std::memory_order_seq_cst); 501 info->size_ptr->store(0, std::memory_order_seq_cst);
375 memcpy(info->memory, memory, size); 502 memcpy(info->memory, memory, size);
376 info->size_ptr->store(size, std::memory_order_release); 503 info->size_ptr->store(size, std::memory_order_release);
377 } 504 }
378 505
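The tail of Set() is the writer half of a publish protocol: the size is zeroed (so concurrent readers skip the record), the bytes are copied, and the size is stored last with release ordering so a reader's acquire load of the size also acquires the copied bytes. A compact sketch of both halves (a simplification; the real record layout is the FieldHeader described above, and the analyzer side additionally revalidates against concurrent rewrites):

    #include <atomic>
    #include <cstring>

    struct Field {
      std::atomic<uint16_t> value_size{0};
      char value[64];
    };

    // Writer: invalidate, copy, publish.
    void WriteValue(Field* f, const void* src, uint16_t size) {
      f->value_size.store(0, std::memory_order_seq_cst);
      std::memcpy(f->value, src, size);
      f->value_size.store(size, std::memory_order_release);
    }

    // Reader: a zero size means the record is being rewritten and should be
    // skipped; an acquire load of a non-zero size makes the bytes of that
    // write visible before they are copied out.
    size_t ReadValue(const Field& f, char* out) {
      uint16_t size = f.value_size.load(std::memory_order_acquire);
      std::memcpy(out, f.value, size);
      return size;
    }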
379 void ActivityUserData::SetReference(StringPiece name, 506 void ActivityUserData::SetReference(StringPiece name,
380 ValueType type, 507 ValueType type,
381 const void* memory, 508 const void* memory,
382 size_t size) { 509 size_t size) {
383 ReferenceRecord rec; 510 ReferenceRecord rec;
384 rec.address = reinterpret_cast<uintptr_t>(memory); 511 rec.address = reinterpret_cast<uintptr_t>(memory);
385 rec.size = size; 512 rec.size = size;
386 Set(name, type, &rec, sizeof(rec)); 513 Set(name, type, &rec, sizeof(rec));
387 } 514 }
388 515
389 void ActivityUserData::ImportExistingData() const { 516 void ActivityUserData::ImportExistingData() const {
390 while (available_ > sizeof(Header)) { 517 while (available_ > sizeof(FieldHeader)) {
391 Header* header = reinterpret_cast<Header*>(memory_); 518 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
392 ValueType type = 519 ValueType type =
393 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 520 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
394 if (type == END_OF_VALUES) 521 if (type == END_OF_VALUES)
395 return; 522 return;
396 if (header->record_size > available_) 523 if (header->record_size > available_)
397 return; 524 return;
398 525
399 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, 526 size_t value_offset = RoundUpToAlignment(
400 kMemoryAlignment); 527 sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
401 if (header->record_size == value_offset && 528 if (header->record_size == value_offset &&
402 header->value_size.load(std::memory_order_relaxed) == 1) { 529 header->value_size.load(std::memory_order_relaxed) == 1) {
403 value_offset -= 1; 530 value_offset -= 1;
404 } 531 }
405 if (value_offset + header->value_size > header->record_size) 532 if (value_offset + header->value_size > header->record_size)
406 return; 533 return;
407 534
408 ValueInfo info; 535 ValueInfo info;
409 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); 536 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
410 info.type = type; 537 info.type = type;
411 info.memory = memory_ + value_offset; 538 info.memory = memory_ + value_offset;
412 info.size_ptr = &header->value_size; 539 info.size_ptr = &header->value_size;
413 info.extent = header->record_size - value_offset; 540 info.extent = header->record_size - value_offset;
414 541
415 StringPiece key(info.name); 542 StringPiece key(info.name);
416 values_.insert(std::make_pair(key, std::move(info))); 543 values_.insert(std::make_pair(key, std::move(info)));
417 544
418 memory_ += header->record_size; 545 memory_ += header->record_size;
419 available_ -= header->record_size; 546 available_ -= header->record_size;
420 } 547 }
421 } 548 }
422 549
423 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
424 DCHECK(output_snapshot);
425 DCHECK(output_snapshot->empty());
426
427 // Find any new data that may have been added by an active instance of this
428 // class that is adding records.
429 ImportExistingData();
430
431 for (const auto& entry : values_) {
432 TypedValue value;
433 value.type_ = entry.second.type;
434 DCHECK_GE(entry.second.extent,
435 entry.second.size_ptr->load(std::memory_order_relaxed));
436
437 switch (entry.second.type) {
438 case RAW_VALUE:
439 case STRING_VALUE:
440 value.long_value_ =
441 std::string(reinterpret_cast<char*>(entry.second.memory),
442 entry.second.size_ptr->load(std::memory_order_relaxed));
443 break;
444 case RAW_VALUE_REFERENCE:
445 case STRING_VALUE_REFERENCE: {
446 ReferenceRecord* ref =
447 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
448 value.ref_value_ = StringPiece(
449 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
450 static_cast<size_t>(ref->size));
451 } break;
452 case BOOL_VALUE:
453 case CHAR_VALUE:
454 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
455 break;
456 case SIGNED_VALUE:
457 case UNSIGNED_VALUE:
458 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
459 break;
460 case END_OF_VALUES: // Included for completeness purposes.
461 NOTREACHED();
462 }
463 auto inserted = output_snapshot->insert(
464 std::make_pair(entry.second.name.as_string(), std::move(value)));
465 DCHECK(inserted.second); // True if inserted, false if existed.
466 }
467
468 return true;
469 }
470
471 const void* ActivityUserData::GetBaseAddress() {
472 // The |memory_| pointer advances as elements are written but the |id_|
473 // value is always at the start of the block so just return that.
474 return id_;
475 }
476
477 // This information is kept for every thread that is tracked. It is filled 550 // This information is kept for every thread that is tracked. It is filled
478 // the very first time the thread is seen. All fields must be of exact sizes 551 // the very first time the thread is seen. All fields must be of exact sizes
479 // so there is no issue moving between 32 and 64-bit builds. 552 // so there is no issue moving between 32 and 64-bit builds.
480 struct ThreadActivityTracker::Header { 553 struct ThreadActivityTracker::Header {
481 // Defined in .h for analyzer access. Increment this if structure changes! 554 // Defined in .h for analyzer access. Increment this if structure changes!
482 static constexpr uint32_t kPersistentTypeId = 555 static constexpr uint32_t kPersistentTypeId =
483 GlobalActivityTracker::kTypeIdActivityTracker; 556 GlobalActivityTracker::kTypeIdActivityTracker;
484 557
485 // Expected size for 32/64-bit check. 558 // Expected size for 32/64-bit check.
486 static constexpr size_t kExpectedInstanceSize = 80; 559 static constexpr size_t kExpectedInstanceSize =
560 ProcessInfo::kExpectedInstanceSize + 72;
487 561
488 // This unique number indicates a valid initialization of the memory. 562 // This information uniquely identifies a process.
489 std::atomic<uint32_t> cookie; 563 ProcessInfo process_info;
490 564
491 // The number of Activity slots (spaces that can hold an Activity) that 565 // The thread-id (thread_ref.as_id) to which this data belongs. This number
492 // immediately follow this structure in memory. 566 // is not guaranteed to mean anything but combined with the process-id from
493 uint32_t stack_slots; 567 // ProcessInfo is unique among all active trackers.
494
495 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
496 // These identifiers are not guaranteed to mean anything but are unique, in
497 // combination, among all active trackers. It would be nice to always have
498 // the process_id be a 64-bit value but the necessity of having it atomic
499 // (for the memory barriers it provides) limits it to the natural word size
500 // of the machine.
501 #ifdef ARCH_CPU_64_BITS
502 std::atomic<int64_t> process_id;
503 #else
504 std::atomic<int32_t> process_id;
505 int32_t process_id_padding;
506 #endif
507 ThreadRef thread_ref; 568 ThreadRef thread_ref;
508 569
509 // The start-time and start-ticks when the data was created. Each activity 570 // The start-time and start-ticks when the data was created. Each activity
510 // record has a |time_internal| value that can be converted to a "wall time" 571 // record has a |time_internal| value that can be converted to a "wall time"
511 // with these two values. 572 // with these two values.
512 int64_t start_time; 573 int64_t start_time;
513 int64_t start_ticks; 574 int64_t start_ticks;
514 575
576 // The number of Activity slots (spaces that can hold an Activity) that
577 // immediately follow this structure in memory.
578 uint32_t stack_slots;
579
580 // Some padding to keep everything 64-bit aligned.
581 uint32_t padding;
582
515 // The current depth of the stack. This may be greater than the number of 583 // The current depth of the stack. This may be greater than the number of
516 // slots. If the depth exceeds the number of slots, the newest entries 584 // slots. If the depth exceeds the number of slots, the newest entries
517 // won't be recorded. 585 // won't be recorded.
518 std::atomic<uint32_t> current_depth; 586 std::atomic<uint32_t> current_depth;
519 587
520 // A memory location used to indicate if changes have been made to the stack 588 // A memory location used to indicate if changes have been made to the stack
521 // that would invalidate an in-progress read of its contents. The active 589 // that would invalidate an in-progress read of its contents. The active
522 // tracker will zero the value whenever something gets popped from the 590 // tracker will zero the value whenever something gets popped from the
523 // stack. A monitoring tracker can write a non-zero value here, copy the 591 // stack. A monitoring tracker can write a non-zero value here, copy the
524 // stack contents, and read the value to know, if it is still non-zero, that 592 // stack contents, and read the value to know, if it is still non-zero, that
(...skipping 62 matching lines...)
587 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), 655 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
588 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); 656 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
589 657
590 // Ensure that the alignment of Activity.data is properly aligned to a 658 // Ensure that the alignment of Activity.data is properly aligned to a
591 // 64-bit boundary so there are no interoperability-issues across cpu 659 // 64-bit boundary so there are no interoperability-issues across cpu
592 // architectures. 660 // architectures.
593 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, 661 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
594 "ActivityData.data is not 64-bit aligned"); 662 "ActivityData.data is not 64-bit aligned");
595 663
596 // Provided memory should either be completely initialized or all zeros. 664 // Provided memory should either be completely initialized or all zeros.
597 if (header_->cookie.load(std::memory_order_relaxed) == 0) { 665 if (header_->process_info.data_id.load(std::memory_order_relaxed) == 0) {
598 // This is a new file. Double-check other fields and then initialize. 666 // This is a new file. Double-check other fields and then initialize.
599 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); 667 DCHECK_EQ(0, header_->process_info.process_id);
668 DCHECK_EQ(0, header_->process_info.create_stamp);
600 DCHECK_EQ(0, header_->thread_ref.as_id); 669 DCHECK_EQ(0, header_->thread_ref.as_id);
601 DCHECK_EQ(0, header_->start_time); 670 DCHECK_EQ(0, header_->start_time);
602 DCHECK_EQ(0, header_->start_ticks); 671 DCHECK_EQ(0, header_->start_ticks);
603 DCHECK_EQ(0U, header_->stack_slots); 672 DCHECK_EQ(0U, header_->stack_slots);
604 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); 673 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
605 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); 674 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
606 DCHECK_EQ(0, stack_[0].time_internal); 675 DCHECK_EQ(0, stack_[0].time_internal);
607 DCHECK_EQ(0U, stack_[0].origin_address); 676 DCHECK_EQ(0U, stack_[0].origin_address);
608 DCHECK_EQ(0U, stack_[0].call_stack[0]); 677 DCHECK_EQ(0U, stack_[0].call_stack[0]);
609 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); 678 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
610 679
611 #if defined(OS_WIN) 680 #if defined(OS_WIN)
612 header_->thread_ref.as_tid = PlatformThread::CurrentId(); 681 header_->thread_ref.as_tid = PlatformThread::CurrentId();
613 #elif defined(OS_POSIX) 682 #elif defined(OS_POSIX)
614 header_->thread_ref.as_handle = 683 header_->thread_ref.as_handle =
615 PlatformThread::CurrentHandle().platform_handle(); 684 PlatformThread::CurrentHandle().platform_handle();
616 #endif 685 #endif
617 header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
618 686
619 header_->start_time = base::Time::Now().ToInternalValue(); 687 header_->start_time = base::Time::Now().ToInternalValue();
620 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); 688 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
621 header_->stack_slots = stack_slots_; 689 header_->stack_slots = stack_slots_;
622 strlcpy(header_->thread_name, PlatformThread::GetName(), 690 strlcpy(header_->thread_name, PlatformThread::GetName(),
623 sizeof(header_->thread_name)); 691 sizeof(header_->thread_name));
624 692
625 // This is done last so as to guarantee that everything above is "released" 693 // This is done last so as to guarantee that everything above is "released"
626 // by the time this value gets written. 694 // by the time this value gets written.
627 header_->cookie.store(kHeaderCookie, std::memory_order_release); 695 header_->process_info.Release_Initialize();
628 696
629 valid_ = true; 697 valid_ = true;
630 DCHECK(IsValid()); 698 DCHECK(IsValid());
631 } else { 699 } else {
632 // This is a file with existing data. Perform basic consistency checks. 700 // This is a file with existing data. Perform basic consistency checks.
633 valid_ = true; 701 valid_ = true;
634 valid_ = IsValid(); 702 valid_ = IsValid();
635 } 703 }
636 } 704 }
637 705
(...skipping 124 matching lines...)
762 ActivityId id, 830 ActivityId id,
763 ActivityTrackerMemoryAllocator* allocator) { 831 ActivityTrackerMemoryAllocator* allocator) {
764 // User-data is only stored for activities actually held in the stack. 832 // User-data is only stored for activities actually held in the stack.
765 if (id < stack_slots_ && stack_[id].user_data_ref) { 833 if (id < stack_slots_ && stack_[id].user_data_ref) {
766 allocator->ReleaseObjectReference(stack_[id].user_data_ref); 834 allocator->ReleaseObjectReference(stack_[id].user_data_ref);
767 stack_[id].user_data_ref = 0; 835 stack_[id].user_data_ref = 0;
768 } 836 }
769 } 837 }
770 838
771 bool ThreadActivityTracker::IsValid() const { 839 bool ThreadActivityTracker::IsValid() const {
772 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 840 if (header_->process_info.data_id.load(std::memory_order_acquire) == 0 ||
773 header_->process_id.load(std::memory_order_relaxed) == 0 || 841 header_->process_info.process_id == 0 || header_->thread_ref.as_id == 0 ||
774 header_->thread_ref.as_id == 0 || 842 header_->start_time == 0 || header_->start_ticks == 0 ||
775 header_->start_time == 0 ||
776 header_->start_ticks == 0 ||
777 header_->stack_slots != stack_slots_ || 843 header_->stack_slots != stack_slots_ ||
778 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 844 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
779 return false; 845 return false;
780 } 846 }
781 847
782 return valid_; 848 return valid_;
783 } 849 }
784 850
785 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { 851 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
786 DCHECK(output_snapshot); 852 DCHECK(output_snapshot);
(...skipping 10 matching lines...)
797 // Stop here if the data isn't valid. 863 // Stop here if the data isn't valid.
798 if (!IsValid()) 864 if (!IsValid())
799 return false; 865 return false;
800 866
801 // Allocate the maximum size for the stack so it doesn't have to be done 867 // Allocate the maximum size for the stack so it doesn't have to be done
802 // during the time-sensitive snapshot operation. It is shrunk once the 868 // during the time-sensitive snapshot operation. It is shrunk once the
803 // actual size is known. 869 // actual size is known.
804 output_snapshot->activity_stack.reserve(stack_slots_); 870 output_snapshot->activity_stack.reserve(stack_slots_);
805 871
806 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { 872 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
807 // Remember the process and thread IDs to ensure they aren't replaced 873 // Remember the data IDs to ensure nothing is replaced during the snapshot
808 // during the snapshot operation. Use "acquire" to ensure that all the 874 // operation. Use "acquire" so that all the non-atomic fields of the
809 // non-atomic fields of the structure are valid (at least at the current 875 // structure are valid (at least at the current moment in time).
810 // moment in time). 876 const uint32_t starting_id =
811 const int64_t starting_process_id = 877 header_->process_info.data_id.load(std::memory_order_acquire);
812 header_->process_id.load(std::memory_order_acquire); 878 const int64_t starting_process_id = header_->process_info.process_id;
813 const int64_t starting_thread_id = header_->thread_ref.as_id; 879 const int64_t starting_thread_id = header_->thread_ref.as_id;
814 880
815 // Write a non-zero value to |stack_unchanged| so it's possible to detect 881 // Write a non-zero value to |stack_unchanged| so it's possible to detect
816 // at the end that nothing has changed since copying the data began. A 882 // at the end that nothing has changed since copying the data began. A
817 // "cst" operation is required to ensure it occurs before everything else. 883 // "cst" operation is required to ensure it occurs before everything else.
818 // Using "cst" memory ordering is relatively expensive but this is only 884 // Using "cst" memory ordering is relatively expensive but this is only
819 // done during analysis so doesn't directly affect the worker threads. 885 // done during analysis so doesn't directly affect the worker threads.
820 header_->stack_unchanged.store(1, std::memory_order_seq_cst); 886 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
821 887
822 // Fetching the current depth also "acquires" the contents of the stack. 888 // Fetching the current depth also "acquires" the contents of the stack.
823 depth = header_->current_depth.load(std::memory_order_acquire); 889 depth = header_->current_depth.load(std::memory_order_acquire);
824 uint32_t count = std::min(depth, stack_slots_); 890 uint32_t count = std::min(depth, stack_slots_);
825 output_snapshot->activity_stack.resize(count); 891 output_snapshot->activity_stack.resize(count);
826 if (count > 0) { 892 if (count > 0) {
827 // Copy the existing contents. Memcpy is used for speed. 893 // Copy the existing contents. Memcpy is used for speed.
828 memcpy(&output_snapshot->activity_stack[0], stack_, 894 memcpy(&output_snapshot->activity_stack[0], stack_,
829 count * sizeof(Activity)); 895 count * sizeof(Activity));
830 } 896 }
831 897
832 // Retry if something changed during the copy. A "cst" operation ensures 898 // Retry if something changed during the copy. A "cst" operation ensures
833 // it must happen after all the above operations. 899 // it must happen after all the above operations.
834 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) 900 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
835 continue; 901 continue;
836 902
837 // Stack copied. Record its full depth. 903 // Stack copied. Record its full depth.
838 output_snapshot->activity_stack_depth = depth; 904 output_snapshot->activity_stack_depth = depth;
839 905
840 // TODO(bcwhite): Snapshot other things here. 906 // TODO(bcwhite): Snapshot other things here.
841 907
842 // Get the general thread information. Loading of "process_id" is guaranteed 908 // Get the general thread information. Loading of "process_id" is guaranteed
manzagop (departed) 2017/02/22 20:44:16 Update comment about motivation for process_id bei
bcwhite 2017/02/22 22:13:04 Done.
843 // to be last so that it's possible to detect below if any content has 909 // to be last so that it's possible to detect below if any content has
844 // changed while reading it. It's technically possible for a thread to end, 910 // changed while reading it. It's technically possible for a thread to end,
845 // have its data cleared, a new thread get created with the same IDs, and 911 // have its data cleared, a new thread get created with the same IDs, and
846 // it perform an action which starts tracking all in the time since the 912 // it perform an action which starts tracking all in the time since the
847 // ID reads above but the chance is so unlikely that it's not worth the 913 // ID reads above but the chance is so unlikely that it's not worth the
848 // effort and complexity of protecting against it (perhaps with an 914 // effort and complexity of protecting against it (perhaps with an
849 // "unchanged" field like is done for the stack). 915 // "unchanged" field like is done for the stack).
850 output_snapshot->thread_name = 916 output_snapshot->thread_name =
851 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); 917 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
852 output_snapshot->thread_id = header_->thread_ref.as_id; 918 output_snapshot->thread_id = header_->thread_ref.as_id;
853 output_snapshot->process_id = 919 output_snapshot->process_id = header_->process_info.process_id;
854 header_->process_id.load(std::memory_order_seq_cst);
855 920
856 // All characters of the thread-name buffer were copied so as to not break 921 // All characters of the thread-name buffer were copied so as to not break
857 // if the trailing NUL were missing. Now limit the length if the actual 922 // if the trailing NUL were missing. Now limit the length if the actual
858 // name is shorter. 923 // name is shorter.
859 output_snapshot->thread_name.resize( 924 output_snapshot->thread_name.resize(
860 strlen(output_snapshot->thread_name.c_str())); 925 strlen(output_snapshot->thread_name.c_str()));
861 926
862 // If the process or thread ID has changed then the tracker has exited and 927 // If the data ID has changed then the tracker has exited and the memory
manzagop (departed) 2017/02/22 20:44:16 Same question about the freeing operation: do we n
bcwhite 2017/02/22 22:13:04 Acknowledged.
863 // the memory reused by a new one. Try again. 928 // reused by a new one. Try again.
864 if (output_snapshot->process_id != starting_process_id || 929 if (header_->process_info.data_id.load(std::memory_order_seq_cst) !=
930 starting_id ||
931 output_snapshot->process_id != starting_process_id ||
865 output_snapshot->thread_id != starting_thread_id) { 932 output_snapshot->thread_id != starting_thread_id) {
866 continue; 933 continue;
867 } 934 }
868 935
869 // Only successful if the data is still valid once everything is done since 936 // Only successful if the data is still valid once everything is done since
870 // it's possible for the thread to end somewhere in the middle and all its 937 // it's possible for the thread to end somewhere in the middle and all its
871 // values become garbage. 938 // values become garbage.
872 if (!IsValid()) 939 if (!IsValid())
873 return false; 940 return false;
874 941
875 // Change all the timestamps in the activities from "ticks" to "wall" time. 942 // Change all the timestamps in the activities from "ticks" to "wall" time.
876 const Time start_time = Time::FromInternalValue(header_->start_time); 943 const Time start_time = Time::FromInternalValue(header_->start_time);
877 const int64_t start_ticks = header_->start_ticks; 944 const int64_t start_ticks = header_->start_ticks;
878 for (Activity& activity : output_snapshot->activity_stack) { 945 for (Activity& activity : output_snapshot->activity_stack) {
879 activity.time_internal = 946 activity.time_internal =
880 (start_time + 947 (start_time +
881 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) 948 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
882 .ToInternalValue(); 949 .ToInternalValue();
883 } 950 }
884 951
885 // Success! 952 // Success!
886 return true; 953 return true;
887 } 954 }
888 955
889 // Too many attempts. 956 // Too many attempts.
890 return false; 957 return false;
891 } 958 }
892 959
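CreateSnapshot() is the reader side of a sequence lock in which the tracked thread is the writer: the reader sets stack_unchanged, copies the stack, and only trusts the copy if stack_unchanged is still set afterwards (the tracker zeroes it on every pop), retrying a fixed number of times. Reduced to its shape (single writer, single analyzer, as in the tracker; kMaxAttempts stands in for the real constant):

    #include <atomic>
    #include <cstring>

    struct Shared {
      std::atomic<uint32_t> stack_unchanged{0};
      char stack[256];  // mutated by the tracked thread
    };

    // The writer stores 0 to stack_unchanged whenever it mutates |stack|,
    // which invalidates any copy that was in flight.
    bool Snapshot(Shared* s, char* out) {
      const int kMaxAttempts = 10;
      for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
        s->stack_unchanged.store(1, std::memory_order_seq_cst);
        std::memcpy(out, s->stack, sizeof(s->stack));
        if (s->stack_unchanged.load(std::memory_order_seq_cst))
          return true;  // nothing was popped during the copy
      }
      return false;  // too much churn; give up
    }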
960 const void* ThreadActivityTracker::GetBaseAddress() {
961 return header_;
962 }
963
964 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
965 int64_t stamp) {
966 header_->process_info.SetOwningProcessIdForTesting(pid, stamp);
967 }
968
969 // static
970 bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
971 ProcessId* out_id,
972 int64_t* out_stamp) {
973 const Header* header = reinterpret_cast<const Header*>(memory);
974 return ProcessInfo::GetOwningProcessId(&header->process_info, out_id,
975 out_stamp);
976 }
977
893 // static 978 // static
894 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 979 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
895 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 980 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
896 } 981 }
897 982
898 // The instantiation of the GlobalActivityTracker object. 983 // The instantiation of the GlobalActivityTracker object.
899 // The object held here will obviously not be destructed at process exit 984 // The object held here will obviously not be destructed at process exit
900 // but that's best since PersistentMemoryAllocator objects (that underlie 985 // but that's best since PersistentMemoryAllocator objects (that underlie
901 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 986 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
902 // essential at exit anyway due to the fact that they depend on data managed 987 // essential at exit anyway due to the fact that they depend on data managed
(...skipping 67 matching lines...)
970 // These fields never change and are done before the record is made 1055 // These fields never change and are done before the record is made
971 // iterable so no thread protection is necessary. 1056 // iterable so no thread protection is necessary.
972 size = info.size; 1057 size = info.size;
973 timestamp = info.timestamp; 1058 timestamp = info.timestamp;
974 age = info.age; 1059 age = info.age;
975 memcpy(identifier, info.identifier, sizeof(identifier)); 1060 memcpy(identifier, info.identifier, sizeof(identifier));
976 memcpy(pickle, pickler.data(), pickler.size()); 1061 memcpy(pickle, pickler.data(), pickler.size());
977 pickle_size = pickler.size(); 1062 pickle_size = pickler.size();
978 changes.store(0, std::memory_order_relaxed); 1063 changes.store(0, std::memory_order_relaxed);
979 1064
1065 // Initialize the process info.
1066 process_info.Release_Initialize();
1067
980 // Now set those fields that can change. 1068 // Now set those fields that can change.
981 return UpdateFrom(info); 1069 return UpdateFrom(info);
982 } 1070 }
983 1071
984 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( 1072 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
985 const GlobalActivityTracker::ModuleInfo& info) { 1073 const GlobalActivityTracker::ModuleInfo& info) {
986 // Updates can occur after the record is made visible so make changes atomic. 1074 // Updates can occur after the record is made visible so make changes atomic.
987 // A "strong" exchange ensures no false failures. 1075 // A "strong" exchange ensures no false failures.
988 uint32_t old_changes = changes.load(std::memory_order_relaxed); 1076 uint32_t old_changes = changes.load(std::memory_order_relaxed);
989 uint32_t new_changes = old_changes | kModuleInformationChanging; 1077 uint32_t new_changes = old_changes | kModuleInformationChanging;
(...skipping 54 matching lines...)
1044 AutoLock lock(global->user_data_allocator_lock_); 1132 AutoLock lock(global->user_data_allocator_lock_);
1045 user_data_ = 1133 user_data_ =
1046 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); 1134 tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1047 } else { 1135 } else {
1048 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); 1136 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
1049 } 1137 }
1050 } 1138 }
1051 return *user_data_; 1139 return *user_data_;
1052 } 1140 }
1053 1141
1054 GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) 1142 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1143 size_t size)
1055 : ActivityUserData(memory, size) {} 1144 : ActivityUserData(memory, size) {}
1056 1145
1057 GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} 1146 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
1058 1147
1059 void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, 1148 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
1060 ValueType type, 1149 ValueType type,
1061 const void* memory, 1150 const void* memory,
1062 size_t size) { 1151 size_t size) {
1063 AutoLock lock(data_lock_); 1152 AutoLock lock(data_lock_);
1064 ActivityUserData::Set(name, type, memory, size); 1153 ActivityUserData::Set(name, type, memory, size);
1065 } 1154 }
1066 1155
1067 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( 1156 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
1068 PersistentMemoryAllocator::Reference mem_reference, 1157 PersistentMemoryAllocator::Reference mem_reference,
1069 void* base, 1158 void* base,
1070 size_t size) 1159 size_t size)
1071 : ThreadActivityTracker(base, size), 1160 : ThreadActivityTracker(base, size),
1072 mem_reference_(mem_reference), 1161 mem_reference_(mem_reference),
(...skipping 104 matching lines...)
1177 return tracker; 1266 return tracker;
1178 } 1267 }
1179 1268
1180 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 1269 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1181 ThreadActivityTracker* tracker = 1270 ThreadActivityTracker* tracker =
1182 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 1271 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1183 if (tracker) 1272 if (tracker)
1184 delete tracker; 1273 delete tracker;
1185 } 1274 }
1186 1275
1276 void GlobalActivityTracker::SetBackgroundTaskRunner(
1277 const scoped_refptr<TaskRunner>& runner) {
1278 AutoLock lock(global_tracker_lock_);
1279 background_task_runner_ = runner;
1280 }
1281
1282 void GlobalActivityTracker::SetProcessExitCallback(
1283 ProcessExitCallback callback) {
1284 AutoLock lock(global_tracker_lock_);
1285 process_exit_callback_ = callback;
1286 }
1287
1288 void GlobalActivityTracker::RecordProcessLaunch(ProcessId process_id) {
1289 DCHECK_NE(GetCurrentProcId(), process_id);
1290
1291 base::AutoLock lock(global_tracker_lock_);
1292 DCHECK(!base::ContainsKey(known_processes_, process_id));
1293 known_processes_.insert(process_id);
1294 }
1295
1296 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1297 int exit_code) {
1298 DCHECK_NE(GetCurrentProcId(), process_id);
1299
1300 scoped_refptr<TaskRunner> task_runner;
1301 {
1302 base::AutoLock lock(global_tracker_lock_);
1303 task_runner = background_task_runner_;
1304 auto found = known_processes_.find(process_id);
1305 if (found != known_processes_.end())
1306 known_processes_.erase(found);
1307 else
1308 DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
1309 }
1310
1311 int64_t now_stamp = Time::Now().ToInternalValue();
1312
1313 // The persistent allocator is thread-safe so run the iteration and
1314 // adjustments on a worker thread if one was provided.
1315 if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
1316 task_runner->PostTask(
1317 FROM_HERE, Bind(&GlobalActivityTracker::CleanupAfterProcess,
1318 Unretained(this), process_id, now_stamp, exit_code));
1319 return;
1320 }
1321
1322 CleanupAfterProcess(process_id, now_stamp, exit_code);
manzagop (departed) 2017/02/22 20:44:15 Same as other comment, pid recycling may be an iss
bcwhite 2017/02/22 22:13:04 In this case, any new process will have a "create_
1323 }
1324
1325 void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
1326 process_data().SetInt(kProcessPhaseDataKey, phase);
1327 }
1328
1329 void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
1330 int64_t exit_stamp,
1331 int exit_code) {
1332 // The process may not have exited cleanly so it's necessary to go through
1333 // all the data structures it may have allocated in the persistent memory
1334 // segment and mark them as "released". This will allow them to be reused
1335 // later on.
1336
1337 PersistentMemoryAllocator::Iterator iter(allocator_.get());
1338 PersistentMemoryAllocator::Reference ref;
1339
1340 ProcessExitCallback process_exit_callback;
1341 {
1342 AutoLock lock(global_tracker_lock_);
1343 process_exit_callback = process_exit_callback_;
1344 }
1345 if (process_exit_callback) {
1346 // Find the process's user-data record so the process phase can be passed
1347 // to the callback.
1348 ActivityUserData::Snapshot process_data_snapshot;
1349 while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
1350 const void* memory = allocator_->GetAsArray<char>(
1351 ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
1352 ProcessId found_id;
1353 int64_t create_stamp;
1354 if (ActivityUserData::GetOwningProcessId(memory, &found_id,
1355 &create_stamp)) {
1356 if (found_id == process_id && create_stamp < exit_stamp) {
1357 const ActivityUserData process_data(const_cast<void*>(memory),
1358 allocator_->GetAllocSize(ref));
1359 process_data.CreateSnapshot(&process_data_snapshot);
1360 break; // No need to look for any others.
1361 }
1362 }
1363 }
1364 iter.Reset(); // So it starts anew when used below.
1365
1366 // Record the process's phase at exit so the callback doesn't need to
1367 // go searching based on a private key value.
1369 ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
1370 auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
1371 if (phase != process_data_snapshot.end())
1372 exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
1373
1374 // Perform the callback.
1375 process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
1376 std::move(process_data_snapshot));
1377 }
1378
1379 // Find all allocations associated with the exited process and free them.
1380 uint32_t type;
1381 while ((ref = iter.GetNext(&type)) != 0) {
1382 switch (type) {
1383 case kTypeIdActivityTracker:
1384 case kTypeIdUserDataRecord:
1385 case kTypeIdProcessDataRecord:
1386 case ModuleInfoRecord::kPersistentTypeId: {
1387 const void* memory = allocator_->GetAsArray<char>(
1388 ref, type, PersistentMemoryAllocator::kSizeAny);
1389 ProcessId found_id;
1390 int64_t create_stamp;
1391
1392 // By convention, the ProcessInfo structure is always the first
1393 // field of each of these structures, so there's no need to handle
1394 // the cases separately.
1395 if (ProcessInfo::GetOwningProcessId(memory, &found_id, &create_stamp)) {
1396 // Only change the type to be "free" if the process ID matches and
1397 // the creation time is before the exit time (so PID re-use doesn't
1398 // cause the erasure of something that is in-use). Memory is cleared
1399 // here, rather than when it's needed, so as to limit the impact at
1400 // that critical time.
1401 if (found_id == process_id && create_stamp < exit_stamp)
1402 allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1403 }
1404 } break;
1405 }
1406 }
1407 }
1408
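CleanupAfterProcess() only forwards to a callback that was registered beforehand. A sketch of the consumer side; SetProcessExitCallback() is an assumption inferred from the process_exit_callback_ member read under the lock above, and the handler's parameter list mirrors the Run() call:

```cpp
#include "base/bind.h"
#include "base/debug/activity_tracker.h"

// Parameters mirror process_exit_callback.Run() in CleanupAfterProcess().
void OnTrackedProcessExit(
    base::ProcessId process_id,
    int64_t exit_stamp,
    int exit_code,
    base::debug::GlobalActivityTracker::ProcessPhase exit_phase,
    base::debug::ActivityUserData::Snapshot process_data) {
  // e.g. fold the exit phase and process keys into a stability report.
}

void RegisterExitObserver() {
  base::debug::GlobalActivityTracker::Get()->SetProcessExitCallback(
      base::Bind(&OnTrackedProcessExit));
}
```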
1187 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { 1409 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1188 // Allocate at least one extra byte so the string is NUL terminated. All 1410 // Allocate at least one extra byte so the string is NUL terminated. All
1189 // memory returned by the allocator is guaranteed to be zeroed. 1411 // memory returned by the allocator is guaranteed to be zeroed.
1190 PersistentMemoryAllocator::Reference ref = 1412 PersistentMemoryAllocator::Reference ref =
1191 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); 1413 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1192 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, 1414 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1193 message.size() + 1); 1415 message.size() + 1);
1194 if (memory) { 1416 if (memory) {
1195 memcpy(memory, message.data(), message.size()); 1417 memcpy(memory, message.data(), message.size());
1196 allocator_->MakeIterable(ref); 1418 allocator_->MakeIterable(ref);
(...skipping 43 matching lines...)
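A short usage sketch for RecordLogMessage() above; Get() returns null when tracking is disabled, so the check is required:

```cpp
if (base::debug::GlobalActivityTracker* tracker =
        base::debug::GlobalActivityTracker::Get()) {
  // Stored NUL-terminated in the persistent segment and made iterable,
  // so an analyzer can recover it after a crash.
  tracker->RecordLogMessage("gpu-channel: handshake complete");
}
```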
1240 kTypeIdActivityTracker, 1462 kTypeIdActivityTracker,
1241 kTypeIdActivityTrackerFree, 1463 kTypeIdActivityTrackerFree,
1242 stack_memory_size_, 1464 stack_memory_size_,
1243 kCachedThreadMemories, 1465 kCachedThreadMemories,
1244 /*make_iterable=*/true), 1466 /*make_iterable=*/true),
1245 user_data_allocator_(allocator_.get(), 1467 user_data_allocator_(allocator_.get(),
1246 kTypeIdUserDataRecord, 1468 kTypeIdUserDataRecord,
1247 kTypeIdUserDataRecordFree, 1469 kTypeIdUserDataRecordFree,
1248 kUserDataSize, 1470 kUserDataSize,
1249 kCachedUserDataMemories, 1471 kCachedUserDataMemories,
1250 /*make_iterable=*/false), 1472 /*make_iterable=*/true),
1473 process_data_(allocator_->GetAsArray<char>(
1474 AllocateFrom(allocator_.get(),
1475 kTypeIdProcessDataRecordFree,
1476 kProcessDataSize,
1477 kTypeIdProcessDataRecord),
1478 kTypeIdProcessDataRecord,
1479 kProcessDataSize),
1480 kProcessDataSize),
1251 global_data_( 1481 global_data_(
1252 allocator_->GetAsArray<char>( 1482 allocator_->GetAsArray<char>(
1253 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), 1483 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
1254 kTypeIdGlobalDataRecord, 1484 kTypeIdGlobalDataRecord,
1255 PersistentMemoryAllocator::kSizeAny), 1485 kGlobalDataSize),
1256 kGlobalDataSize) { 1486 kGlobalDataSize) {
1257 // Ensure the passed memory is valid and empty (iterator finds nothing). 1487 // Ensure the passed memory is valid and empty (iterator finds nothing).
1258 uint32_t type; 1488 uint32_t type;
1259 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 1489 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
1260 1490
1261 // Ensure that there is no other global object and then make this one such. 1491 // Ensure that there is no other global object and then make this one such.
1262 DCHECK(!g_tracker_); 1492 DCHECK(!g_tracker_);
1263 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); 1493 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1264 1494
1265 // The global records must be iterable in order to be found by an analyzer. 1495 // The data records must be iterable in order to be found by an analyzer.
1496 allocator_->MakeIterable(allocator_->GetAsReference(
1497 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1266 allocator_->MakeIterable(allocator_->GetAsReference( 1498 allocator_->MakeIterable(allocator_->GetAsReference(
1267 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); 1499 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
1268 1500
1501 // Note that this process has launched.
1502 SetProcessPhase(PROCESS_LAUNCHED);
1503
1269 // Fetch and record all activated field trials. 1504 // Fetch and record all activated field trials.
1270 FieldTrial::ActiveGroups active_groups; 1505 FieldTrial::ActiveGroups active_groups;
1271 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); 1506 FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1272 for (auto& group : active_groups) 1507 for (auto& group : active_groups)
1273 RecordFieldTrial(group.trial_name, group.group_name); 1508 RecordFieldTrial(group.trial_name, group.group_name);
1274 } 1509 }
1275 1510
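The constructor above carves the process-data and global-data records out of the shared allocator, makes both iterable, and records PROCESS_LAUNCHED. A sketch of typical start-up wiring; CreateWithLocalMemory() and its parameters are assumptions modelled on this class's CreateWith* factories, which are not shown in this diff:

```cpp
#include "base/debug/activity_tracker.h"

void EnableActivityTracking() {
  // Sizes and stack depth are illustrative only.
  constexpr size_t kTrackerMemory = 2 << 20;  // 2 MiB backing store.
  base::debug::GlobalActivityTracker::CreateWithLocalMemory(
      kTrackerMemory, /*id=*/0, /*name=*/"", /*stack_depth=*/4);

  // The per-process record is immediately usable for key/value data and
  // already carries the PROCESS_LAUNCHED phase set by the constructor.
  base::debug::GlobalActivityTracker::Get()->process_data().SetString(
      "channel", "canary");
}
```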
1276 GlobalActivityTracker::~GlobalActivityTracker() { 1511 GlobalActivityTracker::~GlobalActivityTracker() {
1277 DCHECK_EQ(Get(), this); 1512 DCHECK_EQ(Get(), this);
1278 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); 1513 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
1279 subtle::Release_Store(&g_tracker_, 0); 1514 subtle::Release_Store(&g_tracker_, 0);
1515 SetProcessPhase(PROCESS_EXITED_CLEANLY);
1280 } 1516 }
1281 1517
1282 void GlobalActivityTracker::ReturnTrackerMemory( 1518 void GlobalActivityTracker::ReturnTrackerMemory(
1283 ManagedActivityTracker* tracker) { 1519 ManagedActivityTracker* tracker) {
1284 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; 1520 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
1285 void* mem_base = tracker->mem_base_; 1521 void* mem_base = tracker->mem_base_;
1286 DCHECK(mem_reference); 1522 DCHECK(mem_reference);
1287 DCHECK(mem_base); 1523 DCHECK(mem_base);
1288 1524
1289 // Remove the destructed tracker from the set of known ones. 1525 // Remove the destructed tracker from the set of known ones.
(...skipping 88 matching lines...)
1378 : GlobalActivityTracker::ScopedThreadActivity( 1614 : GlobalActivityTracker::ScopedThreadActivity(
1379 program_counter, 1615 program_counter,
1380 nullptr, 1616 nullptr,
1381 Activity::ACT_PROCESS_WAIT, 1617 Activity::ACT_PROCESS_WAIT,
1382 ActivityData::ForProcess(process->Pid()), 1618 ActivityData::ForProcess(process->Pid()),
1383 /*lock_allowed=*/true) {} 1619 /*lock_allowed=*/true) {}
1384 #endif 1620 #endif
1385 1621
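A sketch of using the process-wait helper defined above. Passing nullptr as the program counter is a simplification (real call sites typically capture the caller's PC via a wrapper), and the platform guard would mirror the #endif above:

```cpp
#include "base/debug/activity_tracker.h"
#include "base/process/process.h"

// Records an ACT_PROCESS_WAIT activity on the current thread, tagged with
// the child's PID, for the duration of the blocking wait.
bool WaitForChildWithTracking(const base::Process& process, int* exit_code) {
  base::debug::ScopedProcessWaitActivity wait_activity(
      /*program_counter=*/nullptr, &process);
  return process.WaitForExit(exit_code);
}
```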
1386 } // namespace debug 1622 } // namespace debug
1387 } // namespace base 1623 } // namespace base