Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2680123003: Multi-Process Tracking Support (Closed)
Patch Set: refactor process-info tracking; add process-info to module tracking Created 3 years, 10 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 12 matching lines...)
23 #include "base/process/process_handle.h" 23 #include "base/process/process_handle.h"
24 #include "base/stl_util.h" 24 #include "base/stl_util.h"
25 #include "base/strings/string_util.h" 25 #include "base/strings/string_util.h"
26 #include "base/threading/platform_thread.h" 26 #include "base/threading/platform_thread.h"
27 27
28 namespace base { 28 namespace base {
29 namespace debug { 29 namespace debug {
30 30
31 namespace { 31 namespace {
32 32
33 // A number that identifies the memory as having been initialized. It's
34 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
35 // A version number is added on so that major structure changes won't try to
36 // read an older version (since the cookie won't match).
37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
38
39 // The minimum depth a stack should support. 33 // The minimum depth a stack should support.
40 const int kMinStackDepth = 2; 34 const int kMinStackDepth = 2;
41 35
42 // The amount of memory set aside for holding arbitrary user data (key/value 36 // The amount of memory set aside for holding arbitrary user data (key/value
43 // pairs) globally or associated with ActivityData entries. 37 // pairs) globally or associated with ActivityData entries.
44 const size_t kUserDataSize = 1 << 10; // 1 KiB 38 const size_t kUserDataSize = 1 << 10; // 1 KiB
39 const size_t kProcessDataSize = 4 << 10; // 4 KiB
45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB 40 const size_t kGlobalDataSize = 16 << 10; // 16 KiB
46 const size_t kMaxUserDataNameLength = 41 const size_t kMaxUserDataNameLength =
47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); 42 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
48 43
49 // A constant used to indicate that module information is changing. 44 // A constant used to indicate that module information is changing.
50 const uint32_t kModuleInformationChanging = 0x80000000; 45 const uint32_t kModuleInformationChanging = 0x80000000;
51 46
47 // An atomically incrementing number, used to check for recreations of objects
48 // in the same memory space.
49 StaticAtomicSequenceNumber g_next_id;
50
52 union ThreadRef { 51 union ThreadRef {
53 int64_t as_id; 52 int64_t as_id;
54 #if defined(OS_WIN) 53 #if defined(OS_WIN)
55 // On Windows, the handle itself is often a pseudo-handle with a common 54 // On Windows, the handle itself is often a pseudo-handle with a common
56 // value meaning "this thread" and so the thread-id is used. The former 55 // value meaning "this thread" and so the thread-id is used. The former
57 // can be converted to a thread-id with a system call. 56 // can be converted to a thread-id with a system call.
58 PlatformThreadId as_tid; 57 PlatformThreadId as_tid;
59 #elif defined(OS_POSIX) 58 #elif defined(OS_POSIX)
60 // On Posix, the handle is always a unique identifier so no conversion 59 // On Posix, the handle is always a unique identifier so no conversion
61 // needs to be done. However, its value is officially opaque so there 60 // needs to be done. However, its value is officially opaque so there
62 // is no one correct way to convert it to a numerical identifier. 61 // is no one correct way to convert it to a numerical identifier.
63 PlatformThreadHandle::Handle as_handle; 62 PlatformThreadHandle::Handle as_handle;
64 #endif 63 #endif
65 }; 64 };
66 65
66 // Get the next non-zero identifier. It is only unique within a process.
67 uint32_t GetNextDataId() {
68 uint32_t id;
69 while ((id = g_next_id.GetNext()) == 0)
70 ;
71 return id;
72 }
73
74 // Finds and reuses a specific allocation or creates a new one.
75 PersistentMemoryAllocator::Reference AllocateFrom(
76 PersistentMemoryAllocator* allocator,
77 uint32_t from_type,
78 size_t size,
79 uint32_t to_type) {
80 PersistentMemoryAllocator::Iterator iter(allocator);
81 PersistentMemoryAllocator::Reference ref;
82 while ((ref = iter.GetNextOfType(from_type)) != 0) {
83 DCHECK_LE(size, allocator->GetAllocSize(ref));
84 // This can fail if another thread has just taken it. It is assumed that
85 // the memory is cleared during the "free" operation.
86 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
87 return ref;
88 }
89
90 return allocator->Allocate(size, to_type);
91 }
92
67 // Determines the previous aligned index. 93 // Determines the previous aligned index.
68 size_t RoundDownToAlignment(size_t index, size_t alignment) { 94 size_t RoundDownToAlignment(size_t index, size_t alignment) {
69 return index & (0 - alignment); 95 return index & (0 - alignment);
70 } 96 }
71 97
72 // Determines the next aligned index. 98 // Determines the next aligned index.
73 size_t RoundUpToAlignment(size_t index, size_t alignment) { 99 size_t RoundUpToAlignment(size_t index, size_t alignment) {
74 return (index + (alignment - 1)) & (0 - alignment); 100 return (index + (alignment - 1)) & (0 - alignment);
75 } 101 }
76 102
77 } // namespace 103 } // namespace
78 104
105 ProcessInfo::ProcessInfo() {}
106 ProcessInfo::~ProcessInfo() {}
107
108 void ProcessInfo::Release_Initialize() {
109 uint32_t old_id = data_id.load(std::memory_order_acquire);
110 DCHECK_EQ(0U, old_id);
111 process_id = GetCurrentProcId();
112 create_stamp = Time::Now().ToInternalValue();
113 data_id.store(GetNextDataId(), std::memory_order_release);
114 }
115
116 void ProcessInfo::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
117 process_id = pid;
118 create_stamp = stamp;
manzagop (departed) 2017/02/22 20:44:15 Do you need/want to set data_id so that ProcessInf
bcwhite 2017/02/22 22:13:02 All three already have valid, non-zero values. Th
manzagop (departed) 2017/02/24 15:56:35 Is that because calling Release_Initialize is a pr
bcwhite 2017/03/06 16:33:51 Done.
119 }
120
121 // static
122 bool ProcessInfo::OwningProcessId(const void* memory,
123 ProcessId* out_id,
124 int64_t* out_stamp) {
125 const ProcessInfo* info = reinterpret_cast<const ProcessInfo*>(memory);
126 uint32_t id = info->data_id.load(std::memory_order_acquire);
127 if (id == 0)
128 return false;
129
130 *out_id = static_cast<ProcessId>(info->process_id);
131 *out_stamp = info->create_stamp;
132 return id == info->data_id.load(std::memory_order_seq_cst);
manzagop (departed) 2017/02/22 20:44:15 Can you say more about this operation? IIUC there
bcwhite 2017/02/22 22:13:02 It could since memset doesn't define the order in
manzagop (departed) 2017/02/24 15:56:35 I'm totally going to forget about this! :) Could y
bcwhite 2017/03/06 16:33:51 It's already out for review: https://codereview.ch
133 }
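
OwningProcessId() is a lock-free consistency check: acquire-load |data_id| (zero means never initialized), copy out the plain fields, then re-read |data_id| to confirm the record was not wiped and recycled mid-copy. A self-contained sketch of that handshake, with an illustrative Record type standing in for ProcessInfo:

#include <atomic>
#include <cstdint>

struct Record {
  std::atomic<uint32_t> data_id;  // 0 means "not initialized"
  int64_t process_id;
  int64_t create_stamp;
};

// Writer: fill the plain fields first, then release-store a non-zero ID so
// a reader that acquires the ID is guaranteed to see the fields.
void Initialize(Record* r, int64_t pid, int64_t stamp, uint32_t id) {
  r->process_id = pid;
  r->create_stamp = stamp;
  r->data_id.store(id, std::memory_order_release);
}

// Reader: false if uninitialized or if the ID changed while copying, i.e.
// the values written to |*pid| and |*stamp| cannot be trusted.
bool ReadConsistent(const Record* r, int64_t* pid, int64_t* stamp) {
  uint32_t id = r->data_id.load(std::memory_order_acquire);
  if (id == 0)
    return false;
  *pid = r->process_id;
  *stamp = r->create_stamp;
  return id == r->data_id.load(std::memory_order_seq_cst);
}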
79 134
80 // It doesn't matter what is contained in this (though it will be all zeros) 135 // It doesn't matter what is contained in this (though it will be all zeros)
81 // as only the address of it is important. 136 // as only the address of it is important.
82 const ActivityData kNullActivityData = {}; 137 const ActivityData kNullActivityData = {};
83 138
84 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 139 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
85 ThreadRef thread_ref; 140 ThreadRef thread_ref;
86 thread_ref.as_id = 0; // Zero the union in case other is smaller. 141 thread_ref.as_id = 0; // Zero the union in case other is smaller.
87 #if defined(OS_WIN) 142 #if defined(OS_WIN)
88 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 143 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
(...skipping 150 matching lines...)
239 StringPiece ActivityUserData::TypedValue::GetReference() const { 294 StringPiece ActivityUserData::TypedValue::GetReference() const {
240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); 295 DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
241 return ref_value_; 296 return ref_value_;
242 } 297 }
243 298
244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { 299 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); 300 DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
246 return ref_value_; 301 return ref_value_;
247 } 302 }
248 303
304 // These are required because std::atomic is (currently) not a POD type and
305 // thus clang requires explicit out-of-line constructors and destructors even
306 // when they do nothing.
249 ActivityUserData::ValueInfo::ValueInfo() {} 307 ActivityUserData::ValueInfo::ValueInfo() {}
250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 308 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
251 ActivityUserData::ValueInfo::~ValueInfo() {} 309 ActivityUserData::ValueInfo::~ValueInfo() {}
252 310 ActivityUserData::MemoryHeader::MemoryHeader() {}
253 StaticAtomicSequenceNumber ActivityUserData::next_id_; 311 ActivityUserData::MemoryHeader::~MemoryHeader() {}
312 ActivityUserData::FieldHeader::FieldHeader() {}
313 ActivityUserData::FieldHeader::~FieldHeader() {}
254 314
255 ActivityUserData::ActivityUserData(void* memory, size_t size) 315 ActivityUserData::ActivityUserData(void* memory, size_t size)
256 : memory_(reinterpret_cast<char*>(memory)), 316 : memory_(reinterpret_cast<char*>(memory)),
257 available_(RoundDownToAlignment(size, kMemoryAlignment)), 317 available_(RoundDownToAlignment(size, kMemoryAlignment)),
258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { 318 header_(reinterpret_cast<MemoryHeader*>(memory)) {
259 // It's possible that no user data is being stored. 319 // It's possible that no user data is being stored.
260 if (!memory_) 320 if (!memory_)
261 return; 321 return;
262 322
263 DCHECK_LT(kMemoryAlignment, available_); 323 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
264 if (id_->load(std::memory_order_relaxed) == 0) { 324 DCHECK_LT(sizeof(MemoryHeader), available_);
265 // Generate a new ID and store it in the first 32-bit word of memory_. 325 if (header_->process_info.data_id.load(std::memory_order_acquire) == 0)
266 // |id_| must be non-zero for non-sink instances. 326 header_->process_info.Release_Initialize();
267 uint32_t id; 327 memory_ += sizeof(MemoryHeader);
268 while ((id = next_id_.GetNext()) == 0) 328 available_ -= sizeof(MemoryHeader);
269 ;
270 id_->store(id, std::memory_order_relaxed);
271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
272 }
273 memory_ += kMemoryAlignment;
274 available_ -= kMemoryAlignment;
275 329
276 // If there is already data present, load that. This allows the same class 330 // If there is already data present, load that. This allows the same class
277 // to be used for analysis through snapshots. 331 // to be used for analysis through snapshots.
278 ImportExistingData(); 332 ImportExistingData();
279 } 333 }
280 334
281 ActivityUserData::~ActivityUserData() {} 335 ActivityUserData::~ActivityUserData() {}
282 336
337 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
338 DCHECK(output_snapshot);
339 DCHECK(output_snapshot->empty());
340
341 // Find any new data that may have been added by an active instance of this
342 // class that is adding records.
343 ImportExistingData();
344
345 for (const auto& entry : values_) {
346 TypedValue value;
347 value.type_ = entry.second.type;
348 DCHECK_GE(entry.second.extent,
349 entry.second.size_ptr->load(std::memory_order_relaxed));
350
351 switch (entry.second.type) {
352 case RAW_VALUE:
353 case STRING_VALUE:
354 value.long_value_ =
355 std::string(reinterpret_cast<char*>(entry.second.memory),
356 entry.second.size_ptr->load(std::memory_order_relaxed));
357 break;
358 case RAW_VALUE_REFERENCE:
359 case STRING_VALUE_REFERENCE: {
360 ReferenceRecord* ref =
361 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
362 value.ref_value_ = StringPiece(
363 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
364 static_cast<size_t>(ref->size));
365 } break;
366 case BOOL_VALUE:
367 case CHAR_VALUE:
368 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
369 break;
370 case SIGNED_VALUE:
371 case UNSIGNED_VALUE:
372 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
373 break;
374 case END_OF_VALUES: // Included for completeness purposes.
375 NOTREACHED();
376 }
377 auto inserted = output_snapshot->insert(
378 std::make_pair(entry.second.name.as_string(), std::move(value)));
379 DCHECK(inserted.second); // True if inserted, false if existed.
380 }
381
382 return true;
383 }
384
385 const void* ActivityUserData::GetBaseAddress() {
386 // The |memory_| pointer advances as elements are written but the |header_|
387 // value is always at the start of the block so just return that.
388 return header_;
389 }
390
391 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
392 int64_t stamp) {
393 if (!header_)
394 return;
395 header_->process_info.SetOwningProcessIdForTesting(pid, stamp);
396 }
397
398 // static
399 bool ActivityUserData::OwningProcessId(const void* memory,
400 ProcessId* out_id,
401 int64_t* out_stamp) {
402 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
403 return ProcessInfo::OwningProcessId(&header->process_info, out_id, out_stamp);
404 }
405
283 void ActivityUserData::Set(StringPiece name, 406 void ActivityUserData::Set(StringPiece name,
284 ValueType type, 407 ValueType type,
285 const void* memory, 408 const void* memory,
286 size_t size) { 409 size_t size) {
287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); 410 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), 411 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
289 size); 412 size);
290 413
291 // It's possible that no user data is being stored. 414 // It's possible that no user data is being stored.
292 if (!memory_) 415 if (!memory_)
293 return; 416 return;
294 417
295 // The storage of a name is limited so use that limit during lookup. 418 // The storage of a name is limited so use that limit during lookup.
296 if (name.length() > kMaxUserDataNameLength) 419 if (name.length() > kMaxUserDataNameLength)
297 name.set(name.data(), kMaxUserDataNameLength); 420 name.set(name.data(), kMaxUserDataNameLength);
298 421
299 ValueInfo* info; 422 ValueInfo* info;
300 auto existing = values_.find(name); 423 auto existing = values_.find(name);
301 if (existing != values_.end()) { 424 if (existing != values_.end()) {
302 info = &existing->second; 425 info = &existing->second;
303 } else { 426 } else {
304 // The name size is limited to what can be held in a single byte but 427 // The name size is limited to what can be held in a single byte but
305 // because there are no alignment constraints on strings, it's set tight 428 // because there are no alignment constraints on strings, it's set tight
306 // against the header. Its extent (the reserved space, even if it's not 429 // against the header. Its extent (the reserved space, even if it's not
307 // all used) is calculated so that, when pressed against the header, the 430 // all used) is calculated so that, when pressed against the header, the
308 // following field will be aligned properly. 431 // following field will be aligned properly.
309 size_t name_size = name.length(); 432 size_t name_size = name.length();
310 size_t name_extent = 433 size_t name_extent =
311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - 434 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
312 sizeof(Header); 435 sizeof(FieldHeader);
313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); 436 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
314 437
315 // The "base size" is the size of the header and (padded) string key. Stop 438 // The "base size" is the size of the header and (padded) string key. Stop
316 // now if there's not room enough for even this. 439 // now if there's not room enough for even this.
317 size_t base_size = sizeof(Header) + name_extent; 440 size_t base_size = sizeof(FieldHeader) + name_extent;
318 if (base_size > available_) 441 if (base_size > available_)
319 return; 442 return;
320 443
321 // The "full size" is the size for storing the entire value. 444 // The "full size" is the size for storing the entire value.
322 size_t full_size = std::min(base_size + value_extent, available_); 445 size_t full_size = std::min(base_size + value_extent, available_);
323 446
324 // If the value is actually a single byte, see if it can be stuffed at the 447 // If the value is actually a single byte, see if it can be stuffed at the
325 // end of the name extent rather than wasting kMemoryAlignment bytes. 448 // end of the name extent rather than wasting kMemoryAlignment bytes.
326 if (size == 1 && name_extent > name_size) { 449 if (size == 1 && name_extent > name_size) {
327 full_size = base_size; 450 full_size = base_size;
328 --name_extent; 451 --name_extent;
329 --base_size; 452 --base_size;
330 } 453 }
331 454
332 // Truncate the stored size to the amount of available memory. Stop now if 455 // Truncate the stored size to the amount of available memory. Stop now if
333 // there's not any room for even part of the value. 456 // there's not any room for even part of the value.
334 size = std::min(full_size - base_size, size); 457 size = std::min(full_size - base_size, size);
335 if (size == 0) 458 if (size == 0)
336 return; 459 return;
337 460
338 // Allocate a chunk of memory. 461 // Allocate a chunk of memory.
339 Header* header = reinterpret_cast<Header*>(memory_); 462 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
340 memory_ += full_size; 463 memory_ += full_size;
341 available_ -= full_size; 464 available_ -= full_size;
342 465
343 // Datafill the header and name records. Memory must be zeroed. The |type| 466 // Datafill the header and name records. Memory must be zeroed. The |type|
344 // is written last, atomically, to release all the other values. 467 // is written last, atomically, to release all the other values.
345 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); 468 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
346 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); 469 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
347 header->name_size = static_cast<uint8_t>(name_size); 470 header->name_size = static_cast<uint8_t>(name_size);
348 header->record_size = full_size; 471 header->record_size = full_size;
349 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); 472 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
350 void* value_memory = 473 void* value_memory =
351 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; 474 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
352 memcpy(name_memory, name.data(), name_size); 475 memcpy(name_memory, name.data(), name_size);
353 header->type.store(type, std::memory_order_release); 476 header->type.store(type, std::memory_order_release);
354 477
355 // Create an entry in |values_| so that this field can be found and changed 478 // Create an entry in |values_| so that this field can be found and changed
356 // later on without having to allocate new entries. 479 // later on without having to allocate new entries.
357 StringPiece persistent_name(name_memory, name_size); 480 StringPiece persistent_name(name_memory, name_size);
358 auto inserted = 481 auto inserted =
359 values_.insert(std::make_pair(persistent_name, ValueInfo())); 482 values_.insert(std::make_pair(persistent_name, ValueInfo()));
360 DCHECK(inserted.second); // True if inserted, false if existed. 483 DCHECK(inserted.second); // True if inserted, false if existed.
361 info = &inserted.first->second; 484 info = &inserted.first->second;
362 info->name = persistent_name; 485 info->name = persistent_name;
363 info->memory = value_memory; 486 info->memory = value_memory;
364 info->size_ptr = &header->value_size; 487 info->size_ptr = &header->value_size;
365 info->extent = full_size - sizeof(Header) - name_extent; 488 info->extent = full_size - sizeof(FieldHeader) - name_extent;
366 info->type = type; 489 info->type = type;
367 } 490 }
368 491
369 // Copy the value data to storage. The |size| is written last, atomically, to 492 // Copy the value data to storage. The |size| is written last, atomically, to
370 // release the copied data. Until then, a parallel reader will just ignore 493 // release the copied data. Until then, a parallel reader will just ignore
371 // records with a zero size. 494 // records with a zero size.
372 DCHECK_EQ(type, info->type); 495 DCHECK_EQ(type, info->type);
373 size = std::min(size, info->extent); 496 size = std::min(size, info->extent);
374 info->size_ptr->store(0, std::memory_order_seq_cst); 497 info->size_ptr->store(0, std::memory_order_seq_cst);
375 memcpy(info->memory, memory, size); 498 memcpy(info->memory, memory, size);
376 info->size_ptr->store(size, std::memory_order_release); 499 info->size_ptr->store(size, std::memory_order_release);
377 } 500 }
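
The tail of Set() is a small publication protocol: readers treat a zero |value_size| as "no value here", so the writer zeroes the size with a full barrier, copies the bytes, then release-stores the real size. In miniature, with a simplified Field record standing in for FieldHeader plus its value area:

#include <atomic>
#include <cstdint>
#include <cstring>

struct Field {
  std::atomic<uint16_t> value_size;  // 0 means "no valid value"
  char value[64];
};

// Writer: the seq_cst zero-store keeps any reader from pairing new bytes
// with the old size; the release-store publishes the finished copy.
void Publish(Field* f, const void* data, uint16_t size) {
  f->value_size.store(0, std::memory_order_seq_cst);
  memcpy(f->value, data, size);
  f->value_size.store(size, std::memory_order_release);
}

// Reader: skip records whose size is zero (either empty or mid-update).
uint16_t Read(const Field* f, char* out, uint16_t out_size) {
  uint16_t size = f->value_size.load(std::memory_order_acquire);
  if (size == 0 || size > out_size || size > sizeof(f->value))
    return 0;
  memcpy(out, f->value, size);
  return size;
}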
378 501
379 void ActivityUserData::SetReference(StringPiece name, 502 void ActivityUserData::SetReference(StringPiece name,
380 ValueType type, 503 ValueType type,
381 const void* memory, 504 const void* memory,
382 size_t size) { 505 size_t size) {
383 ReferenceRecord rec; 506 ReferenceRecord rec;
384 rec.address = reinterpret_cast<uintptr_t>(memory); 507 rec.address = reinterpret_cast<uintptr_t>(memory);
385 rec.size = size; 508 rec.size = size;
386 Set(name, type, &rec, sizeof(rec)); 509 Set(name, type, &rec, sizeof(rec));
387 } 510 }
388 511
389 void ActivityUserData::ImportExistingData() const { 512 void ActivityUserData::ImportExistingData() const {
390 while (available_ > sizeof(Header)) { 513 while (available_ > sizeof(FieldHeader)) {
391 Header* header = reinterpret_cast<Header*>(memory_); 514 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
392 ValueType type = 515 ValueType type =
393 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 516 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
394 if (type == END_OF_VALUES) 517 if (type == END_OF_VALUES)
395 return; 518 return;
396 if (header->record_size > available_) 519 if (header->record_size > available_)
397 return; 520 return;
398 521
399 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, 522 size_t value_offset = RoundUpToAlignment(
400 kMemoryAlignment); 523 sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
401 if (header->record_size == value_offset && 524 if (header->record_size == value_offset &&
402 header->value_size.load(std::memory_order_relaxed) == 1) { 525 header->value_size.load(std::memory_order_relaxed) == 1) {
403 value_offset -= 1; 526 value_offset -= 1;
404 } 527 }
405 if (value_offset + header->value_size > header->record_size) 528 if (value_offset + header->value_size > header->record_size)
406 return; 529 return;
407 530
408 ValueInfo info; 531 ValueInfo info;
409 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); 532 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
410 info.type = type; 533 info.type = type;
411 info.memory = memory_ + value_offset; 534 info.memory = memory_ + value_offset;
412 info.size_ptr = &header->value_size; 535 info.size_ptr = &header->value_size;
413 info.extent = header->record_size - value_offset; 536 info.extent = header->record_size - value_offset;
414 537
415 StringPiece key(info.name); 538 StringPiece key(info.name);
416 values_.insert(std::make_pair(key, std::move(info))); 539 values_.insert(std::make_pair(key, std::move(info)));
417 540
418 memory_ += header->record_size; 541 memory_ += header->record_size;
419 available_ -= header->record_size; 542 available_ -= header->record_size;
420 } 543 }
421 } 544 }
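
ImportExistingData() is the other half of the handshake in Set(): header->type is stored last with "release" semantics there, so once the acquire-load here sees something other than END_OF_VALUES, the name and size fields written before it are visible. The record-publication pattern in miniature (FieldHeaderModel is an illustrative stand-in):

#include <atomic>
#include <cstdint>

enum ValueType : uint8_t { END_OF_VALUES = 0, STRING_VALUE = 1 };

struct FieldHeaderModel {
  std::atomic<uint8_t> type;  // END_OF_VALUES until the record is complete
  uint8_t name_size;
  uint16_t record_size;
};

// Writer: plain-store the metadata, then release the record via |type|.
void PublishField(FieldHeaderModel* h, uint8_t name_size,
                  uint16_t record_size) {
  h->name_size = name_size;
  h->record_size = record_size;
  h->type.store(STRING_VALUE, std::memory_order_release);
}

// Reader: an acquire on |type| makes the metadata above safe to read.
bool ReadField(const FieldHeaderModel* h, uint8_t* name_size) {
  if (h->type.load(std::memory_order_acquire) == END_OF_VALUES)
    return false;  // record not (yet) published
  *name_size = h->name_size;
  return true;
}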
422 545
423 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
424 DCHECK(output_snapshot);
425 DCHECK(output_snapshot->empty());
426
427 // Find any new data that may have been added by an active instance of this
428 // class that is adding records.
429 ImportExistingData();
430
431 for (const auto& entry : values_) {
432 TypedValue value;
433 value.type_ = entry.second.type;
434 DCHECK_GE(entry.second.extent,
435 entry.second.size_ptr->load(std::memory_order_relaxed));
436
437 switch (entry.second.type) {
438 case RAW_VALUE:
439 case STRING_VALUE:
440 value.long_value_ =
441 std::string(reinterpret_cast<char*>(entry.second.memory),
442 entry.second.size_ptr->load(std::memory_order_relaxed));
443 break;
444 case RAW_VALUE_REFERENCE:
445 case STRING_VALUE_REFERENCE: {
446 ReferenceRecord* ref =
447 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
448 value.ref_value_ = StringPiece(
449 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
450 static_cast<size_t>(ref->size));
451 } break;
452 case BOOL_VALUE:
453 case CHAR_VALUE:
454 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
455 break;
456 case SIGNED_VALUE:
457 case UNSIGNED_VALUE:
458 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
459 break;
460 case END_OF_VALUES: // Included for completeness purposes.
461 NOTREACHED();
462 }
463 auto inserted = output_snapshot->insert(
464 std::make_pair(entry.second.name.as_string(), std::move(value)));
465 DCHECK(inserted.second); // True if inserted, false if existed.
466 }
467
468 return true;
469 }
470
471 const void* ActivityUserData::GetBaseAddress() {
472 // The |memory_| pointer advances as elements are written but the |id_|
473 // value is always at the start of the block so just return that.
474 return id_;
475 }
476
477 // This information is kept for every thread that is tracked. It is filled 546 // This information is kept for every thread that is tracked. It is filled
478 // the very first time the thread is seen. All fields must be of exact sizes 547 // the very first time the thread is seen. All fields must be of exact sizes
479 // so there is no issue moving between 32 and 64-bit builds. 548 // so there is no issue moving between 32 and 64-bit builds.
480 struct ThreadActivityTracker::Header { 549 struct ThreadActivityTracker::Header {
481 // Defined in .h for analyzer access. Increment this if structure changes! 550 // Defined in .h for analyzer access. Increment this if structure changes!
482 static constexpr uint32_t kPersistentTypeId = 551 static constexpr uint32_t kPersistentTypeId =
483 GlobalActivityTracker::kTypeIdActivityTracker; 552 GlobalActivityTracker::kTypeIdActivityTracker;
484 553
485 // Expected size for 32/64-bit check. 554 // Expected size for 32/64-bit check.
486 static constexpr size_t kExpectedInstanceSize = 80; 555 static constexpr size_t kExpectedInstanceSize =
556 ProcessInfo::kExpectedInstanceSize + 72;
487 557
488 // This unique number indicates a valid initialization of the memory. 558 // This information uniquely identifies a process.
489 std::atomic<uint32_t> cookie; 559 ProcessInfo process_info;
490 560
491 // The number of Activity slots (spaces that can hold an Activity) that 561 // The thread-id (thread_ref.as_id) to which this data belongs. This number
492 // immediately follow this structure in memory. 562 // is not guaranteed to mean anything but combined with the process-id from
493 uint32_t stack_slots; 563 // ProcessInfo is unique among all active trackers.
494
495 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
496 // These identifiers are not guaranteed to mean anything but are unique, in
497 // combination, among all active trackers. It would be nice to always have
498 // the process_id be a 64-bit value but the necessity of having it atomic
499 // (for the memory barriers it provides) limits it to the natural word size
500 // of the machine.
501 #ifdef ARCH_CPU_64_BITS
502 std::atomic<int64_t> process_id;
503 #else
504 std::atomic<int32_t> process_id;
505 int32_t process_id_padding;
506 #endif
507 ThreadRef thread_ref; 564 ThreadRef thread_ref;
508 565
509 // The start-time and start-ticks when the data was created. Each activity 566 // The start-time and start-ticks when the data was created. Each activity
510 // record has a |time_internal| value that can be converted to a "wall time" 567 // record has a |time_internal| value that can be converted to a "wall time"
511 // with these two values. 568 // with these two values.
512 int64_t start_time; 569 int64_t start_time;
513 int64_t start_ticks; 570 int64_t start_ticks;
514 571
572 // The number of Activity slots (spaces that can hold an Activity) that
573 // immediately follow this structure in memory.
574 uint32_t stack_slots;
575
576 // Some padding to keep everything 64-bit aligned.
577 uint32_t padding;
578
515 // The current depth of the stack. This may be greater than the number of 579 // The current depth of the stack. This may be greater than the number of
516 // slots. If the depth exceeds the number of slots, the newest entries 580 // slots. If the depth exceeds the number of slots, the newest entries
517 // won't be recorded. 581 // won't be recorded.
518 std::atomic<uint32_t> current_depth; 582 std::atomic<uint32_t> current_depth;
519 583
520 // A memory location used to indicate if changes have been made to the stack 584 // A memory location used to indicate if changes have been made to the stack
521 // that would invalidate an in-progress read of its contents. The active 585 // that would invalidate an in-progress read of its contents. The active
522 // tracker will zero the value whenever something gets popped from the 586 // tracker will zero the value whenever something gets popped from the
523 // stack. A monitoring tracker can write a non-zero value here, copy the 587 // stack. A monitoring tracker can write a non-zero value here, copy the
524 // stack contents, and read the value to know, if it is still non-zero, that 588 // stack contents, and read the value to know, if it is still non-zero, that
(...skipping 62 matching lines...)
587 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), 651 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
588 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); 652 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
589 653
590 // Ensure that the alignment of Activity.data is properly aligned to a 654 // Ensure that the alignment of Activity.data is properly aligned to a
591 // 64-bit boundary so there are no interoperability-issues across cpu 655 // 64-bit boundary so there are no interoperability-issues across cpu
592 // architectures. 656 // architectures.
593 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0, 657 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
594 "ActivityData.data is not 64-bit aligned"); 658 "ActivityData.data is not 64-bit aligned");
595 659
596 // Provided memory should either be completely initialized or all zeros. 660 // Provided memory should either be completely initialized or all zeros.
597 if (header_->cookie.load(std::memory_order_relaxed) == 0) { 661 if (header_->process_info.data_id.load(std::memory_order_relaxed) == 0) {
598 // This is a new file. Double-check other fields and then initialize. 662 // This is a new file. Double-check other fields and then initialize.
599 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed)); 663 DCHECK_EQ(0, header_->process_info.process_id);
664 DCHECK_EQ(0, header_->process_info.create_stamp);
600 DCHECK_EQ(0, header_->thread_ref.as_id); 665 DCHECK_EQ(0, header_->thread_ref.as_id);
601 DCHECK_EQ(0, header_->start_time); 666 DCHECK_EQ(0, header_->start_time);
602 DCHECK_EQ(0, header_->start_ticks); 667 DCHECK_EQ(0, header_->start_ticks);
603 DCHECK_EQ(0U, header_->stack_slots); 668 DCHECK_EQ(0U, header_->stack_slots);
604 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); 669 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
605 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); 670 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
606 DCHECK_EQ(0, stack_[0].time_internal); 671 DCHECK_EQ(0, stack_[0].time_internal);
607 DCHECK_EQ(0U, stack_[0].origin_address); 672 DCHECK_EQ(0U, stack_[0].origin_address);
608 DCHECK_EQ(0U, stack_[0].call_stack[0]); 673 DCHECK_EQ(0U, stack_[0].call_stack[0]);
609 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); 674 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
610 675
611 #if defined(OS_WIN) 676 #if defined(OS_WIN)
612 header_->thread_ref.as_tid = PlatformThread::CurrentId(); 677 header_->thread_ref.as_tid = PlatformThread::CurrentId();
613 #elif defined(OS_POSIX) 678 #elif defined(OS_POSIX)
614 header_->thread_ref.as_handle = 679 header_->thread_ref.as_handle =
615 PlatformThread::CurrentHandle().platform_handle(); 680 PlatformThread::CurrentHandle().platform_handle();
616 #endif 681 #endif
617 header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
618 682
619 header_->start_time = base::Time::Now().ToInternalValue(); 683 header_->start_time = base::Time::Now().ToInternalValue();
620 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); 684 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
621 header_->stack_slots = stack_slots_; 685 header_->stack_slots = stack_slots_;
622 strlcpy(header_->thread_name, PlatformThread::GetName(), 686 strlcpy(header_->thread_name, PlatformThread::GetName(),
623 sizeof(header_->thread_name)); 687 sizeof(header_->thread_name));
624 688
625 // This is done last so as to guarantee that everything above is "released" 689 // This is done last so as to guarantee that everything above is "released"
626 // by the time this value gets written. 690 // by the time this value gets written.
627 header_->cookie.store(kHeaderCookie, std::memory_order_release); 691 header_->process_info.Release_Initialize();
628 692
629 valid_ = true; 693 valid_ = true;
630 DCHECK(IsValid()); 694 DCHECK(IsValid());
631 } else { 695 } else {
632 // This is a file with existing data. Perform basic consistency checks. 696 // This is a file with existing data. Perform basic consistency checks.
633 valid_ = true; 697 valid_ = true;
634 valid_ = IsValid(); 698 valid_ = IsValid();
635 } 699 }
636 } 700 }
637 701
(...skipping 124 matching lines...)
762 ActivityId id, 826 ActivityId id,
763 ActivityTrackerMemoryAllocator* allocator) { 827 ActivityTrackerMemoryAllocator* allocator) {
764 // User-data is only stored for activities actually held in the stack. 828 // User-data is only stored for activities actually held in the stack.
765 if (id < stack_slots_ && stack_[id].user_data_ref) { 829 if (id < stack_slots_ && stack_[id].user_data_ref) {
766 allocator->ReleaseObjectReference(stack_[id].user_data_ref); 830 allocator->ReleaseObjectReference(stack_[id].user_data_ref);
767 stack_[id].user_data_ref = 0; 831 stack_[id].user_data_ref = 0;
768 } 832 }
769 } 833 }
770 834
771 bool ThreadActivityTracker::IsValid() const { 835 bool ThreadActivityTracker::IsValid() const {
772 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 836 if (header_->process_info.data_id.load(std::memory_order_acquire) == 0 ||
773 header_->process_id.load(std::memory_order_relaxed) == 0 || 837 header_->process_info.process_id == 0 || header_->thread_ref.as_id == 0 ||
774 header_->thread_ref.as_id == 0 || 838 header_->start_time == 0 || header_->start_ticks == 0 ||
775 header_->start_time == 0 ||
776 header_->start_ticks == 0 ||
777 header_->stack_slots != stack_slots_ || 839 header_->stack_slots != stack_slots_ ||
778 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 840 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
779 return false; 841 return false;
780 } 842 }
781 843
782 return valid_; 844 return valid_;
783 } 845 }
784 846
785 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const { 847 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
786 DCHECK(output_snapshot); 848 DCHECK(output_snapshot);
(...skipping 10 matching lines...)
797 // Stop here if the data isn't valid. 859 // Stop here if the data isn't valid.
798 if (!IsValid()) 860 if (!IsValid())
799 return false; 861 return false;
800 862
801 // Allocate the maximum size for the stack so it doesn't have to be done 863 // Allocate the maximum size for the stack so it doesn't have to be done
802 // during the time-sensitive snapshot operation. It is shrunk once the 864 // during the time-sensitive snapshot operation. It is shrunk once the
803 // actual size is known. 865 // actual size is known.
804 output_snapshot->activity_stack.reserve(stack_slots_); 866 output_snapshot->activity_stack.reserve(stack_slots_);
805 867
806 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { 868 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
807 // Remember the process and thread IDs to ensure they aren't replaced 869 // Remember the data IDs to ensure nothing is replaced during the snapshot
808 // during the snapshot operation. Use "acquire" to ensure that all the 870 // operation. Use "acquire" so that all the non-atomic fields of the
809 // non-atomic fields of the structure are valid (at least at the current 871 // structure are valid (at least at the current moment in time).
810 // moment in time). 872 const uint32_t starting_id =
811 const int64_t starting_process_id = 873 header_->process_info.data_id.load(std::memory_order_acquire);
812 header_->process_id.load(std::memory_order_acquire); 874 const int64_t starting_process_id = header_->process_info.process_id;
813 const int64_t starting_thread_id = header_->thread_ref.as_id; 875 const int64_t starting_thread_id = header_->thread_ref.as_id;
814 876
815 // Write a non-zero value to |stack_unchanged| so it's possible to detect 877 // Write a non-zero value to |stack_unchanged| so it's possible to detect
816 // at the end that nothing has changed since copying the data began. A 878 // at the end that nothing has changed since copying the data began. A
817 // "cst" operation is required to ensure it occurs before everything else. 879 // "cst" operation is required to ensure it occurs before everything else.
818 // Using "cst" memory ordering is relatively expensive but this is only 880 // Using "cst" memory ordering is relatively expensive but this is only
819 // done during analysis so doesn't directly affect the worker threads. 881 // done during analysis so doesn't directly affect the worker threads.
820 header_->stack_unchanged.store(1, std::memory_order_seq_cst); 882 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
821 883
822 // Fetching the current depth also "acquires" the contents of the stack. 884 // Fetching the current depth also "acquires" the contents of the stack.
(...skipping 20 matching lines...)
843 // to be last so that it's possible to detect below if any content has 905 // to be last so that it's possible to detect below if any content has
844 // changed while reading it. It's technically possible for a thread to end, 906 // changed while reading it. It's technically possible for a thread to end,
845 // have its data cleared, a new thread get created with the same IDs, and 907 // have its data cleared, a new thread get created with the same IDs, and
846 // it perform an action which starts tracking all in the time since the 908 // it perform an action which starts tracking all in the time since the
847 // ID reads above but the chance is so unlikely that it's not worth the 909 // ID reads above but the chance is so unlikely that it's not worth the
848 // effort and complexity of protecting against it (perhaps with an 910 // effort and complexity of protecting against it (perhaps with an
849 // "unchanged" field like is done for the stack). 911 // "unchanged" field like is done for the stack).
850 output_snapshot->thread_name = 912 output_snapshot->thread_name =
851 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); 913 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
852 output_snapshot->thread_id = header_->thread_ref.as_id; 914 output_snapshot->thread_id = header_->thread_ref.as_id;
853 output_snapshot->process_id = 915 output_snapshot->process_id = header_->process_info.process_id;
854 header_->process_id.load(std::memory_order_seq_cst);
855 916
856 // All characters of the thread-name buffer were copied so as to not break 917 // All characters of the thread-name buffer were copied so as to not break
857 // if the trailing NUL were missing. Now limit the length if the actual 918 // if the trailing NUL were missing. Now limit the length if the actual
858 // name is shorter. 919 // name is shorter.
859 output_snapshot->thread_name.resize( 920 output_snapshot->thread_name.resize(
860 strlen(output_snapshot->thread_name.c_str())); 921 strlen(output_snapshot->thread_name.c_str()));
861 922
862 // If the process or thread ID has changed then the tracker has exited and 923 // If the data ID has changed then the tracker has exited and the memory
863 // the memory reused by a new one. Try again. 924 // reused by a new one. Try again.
864 if (output_snapshot->process_id != starting_process_id || 925 if (header_->process_info.data_id.load(std::memory_order_seq_cst) !=
926 starting_id ||
927 output_snapshot->process_id != starting_process_id ||
865 output_snapshot->thread_id != starting_thread_id) { 928 output_snapshot->thread_id != starting_thread_id) {
866 continue; 929 continue;
867 } 930 }
868 931
869 // Only successful if the data is still valid once everything is done since 932 // Only successful if the data is still valid once everything is done since
870 // it's possible for the thread to end somewhere in the middle and all its 933 // it's possible for the thread to end somewhere in the middle and all its
871 // values become garbage. 934 // values become garbage.
872 if (!IsValid()) 935 if (!IsValid())
873 return false; 936 return false;
874 937
875 // Change all the timestamps in the activities from "ticks" to "wall" time. 938 // Change all the timestamps in the activities from "ticks" to "wall" time.
876 const Time start_time = Time::FromInternalValue(header_->start_time); 939 const Time start_time = Time::FromInternalValue(header_->start_time);
877 const int64_t start_ticks = header_->start_ticks; 940 const int64_t start_ticks = header_->start_ticks;
878 for (Activity& activity : output_snapshot->activity_stack) { 941 for (Activity& activity : output_snapshot->activity_stack) {
879 activity.time_internal = 942 activity.time_internal =
880 (start_time + 943 (start_time +
881 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) 944 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
882 .ToInternalValue(); 945 .ToInternalValue();
883 } 946 }
884 947
885 // Success! 948 // Success!
886 return true; 949 return true;
887 } 950 }
888 951
889 // Too many attempts. 952 // Too many attempts.
890 return false; 953 return false;
891 } 954 }
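
The snapshot loop is essentially a seqlock read: capture the identity markers with an acquire, arm |stack_unchanged|, copy everything, then verify both the flag and the identity, retrying on any churn. Condensed to its control flow (kMaxAttempts and CopyStack() are illustrative stand-ins for the real bound and the real copy):

#include <atomic>
#include <cstdint>

constexpr int kMaxAttempts = 10;  // illustrative retry bound

struct TrackerHeader {
  std::atomic<uint32_t> data_id;          // from ProcessInfo; 0 = unused
  std::atomic<uint32_t> stack_unchanged;  // zeroed by the owner on every pop
};

// Stand-in for copying the stack; it may observe torn data, which the
// post-copy checks below will cause to be discarded.
void CopyStack(const TrackerHeader* /*header*/) {}

bool Snapshot(TrackerHeader* header) {
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // "Acquire" so non-atomic fields written before |data_id| are visible.
    const uint32_t starting_id =
        header->data_id.load(std::memory_order_acquire);
    if (starting_id == 0)
      return false;  // never initialized

    header->stack_unchanged.store(1, std::memory_order_seq_cst);
    CopyStack(header);

    // Retry if the stack moved or the memory was recycled for a new thread.
    if (header->stack_unchanged.load(std::memory_order_seq_cst) != 1)
      continue;
    if (header->data_id.load(std::memory_order_seq_cst) != starting_id)
      continue;
    return true;
  }
  return false;  // too much churn to get a clean copy
}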
892 955
956 const void* ThreadActivityTracker::GetBaseAddress() {
957 return header_;
958 }
959
960 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
961 int64_t stamp) {
962 header_->process_info.SetOwningProcessIdForTesting(pid, stamp);
963 }
964
965 // static
966 bool ThreadActivityTracker::OwningProcessId(const void* memory,
967 ProcessId* out_id,
968 int64_t* out_stamp) {
969 const Header* header = reinterpret_cast<const Header*>(memory);
970 return ProcessInfo::OwningProcessId(&header->process_info, out_id, out_stamp);
971 }
972
893 // static 973 // static
894 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 974 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
895 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 975 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
896 } 976 }
897 977
898 // The instantiation of the GlobalActivityTracker object. 978 // The instantiation of the GlobalActivityTracker object.
899 // The object held here will obviously not be destructed at process exit 979 // The object held here will obviously not be destructed at process exit
900 // but that's best since PersistentMemoryAllocator objects (that underlie 980 // but that's best since PersistentMemoryAllocator objects (that underlie
901 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 981 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
902 // essential at exit anyway due to the fact that they depend on data managed 982 // essential at exit anyway due to the fact that they depend on data managed
(...skipping 67 matching lines...)
970 // These fields never change and are done before the record is made 1050 // These fields never change and are done before the record is made
971 // iterable so no thread protection is necessary. 1051 // iterable so no thread protection is necessary.
972 size = info.size; 1052 size = info.size;
973 timestamp = info.timestamp; 1053 timestamp = info.timestamp;
974 age = info.age; 1054 age = info.age;
975 memcpy(identifier, info.identifier, sizeof(identifier)); 1055 memcpy(identifier, info.identifier, sizeof(identifier));
976 memcpy(pickle, pickler.data(), pickler.size()); 1056 memcpy(pickle, pickler.data(), pickler.size());
977 pickle_size = pickler.size(); 1057 pickle_size = pickler.size();
978 changes.store(0, std::memory_order_relaxed); 1058 changes.store(0, std::memory_order_relaxed);
979 1059
1060 // Initialize the process info.
1061 process_info.Release_Initialize();
1062
980 // Now set those fields that can change. 1063 // Now set those fields that can change.
981 return UpdateFrom(info); 1064 return UpdateFrom(info);
982 } 1065 }
983 1066
984 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom( 1067 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
985 const GlobalActivityTracker::ModuleInfo& info) { 1068 const GlobalActivityTracker::ModuleInfo& info) {
986 // Updates can occur after the record is made visible so make changes atomic. 1069 // Updates can occur after the record is made visible so make changes atomic.
987 // A "strong" exchange ensures no false failures. 1070 // A "strong" exchange ensures no false failures.
988 uint32_t old_changes = changes.load(std::memory_order_relaxed); 1071 uint32_t old_changes = changes.load(std::memory_order_relaxed);
989 uint32_t new_changes = old_changes | kModuleInformationChanging; 1072 uint32_t new_changes = old_changes | kModuleInformationChanging;
(...skipping 54 matching lines...)
1044 AutoLock lock(global->user_data_allocator_lock_); 1127 AutoLock lock(global->user_data_allocator_lock_);
1045 user_data_ = 1128 user_data_ =
1046 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); 1129 tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1047 } else { 1130 } else {
1048 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); 1131 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
1049 } 1132 }
1050 } 1133 }
1051 return *user_data_; 1134 return *user_data_;
1052 } 1135 }
1053 1136
1054 GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size) 1137 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1138 size_t size)
1055 : ActivityUserData(memory, size) {} 1139 : ActivityUserData(memory, size) {}
1056 1140
1057 GlobalActivityTracker::GlobalUserData::~GlobalUserData() {} 1141 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
1058 1142
1059 void GlobalActivityTracker::GlobalUserData::Set(StringPiece name, 1143 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
1060 ValueType type, 1144 ValueType type,
1061 const void* memory, 1145 const void* memory,
1062 size_t size) { 1146 size_t size) {
1063 AutoLock lock(data_lock_); 1147 AutoLock lock(data_lock_);
1064 ActivityUserData::Set(name, type, memory, size); 1148 ActivityUserData::Set(name, type, memory, size);
1065 } 1149 }
1066 1150
1067 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( 1151 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
1068 PersistentMemoryAllocator::Reference mem_reference, 1152 PersistentMemoryAllocator::Reference mem_reference,
1069 void* base, 1153 void* base,
1070 size_t size) 1154 size_t size)
1071 : ThreadActivityTracker(base, size), 1155 : ThreadActivityTracker(base, size),
1072 mem_reference_(mem_reference), 1156 mem_reference_(mem_reference),
(...skipping 104 matching lines...)
1177 return tracker; 1261 return tracker;
1178 } 1262 }
1179 1263
1180 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 1264 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1181 ThreadActivityTracker* tracker = 1265 ThreadActivityTracker* tracker =
1182 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 1266 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1183 if (tracker) 1267 if (tracker)
1184 delete tracker; 1268 delete tracker;
1185 } 1269 }
1186 1270
1271 void GlobalActivityTracker::SetBackgroundTaskRunner(
1272 const scoped_refptr<TaskRunner>& runner) {
1273 AutoLock lock(global_tracker_lock_);
1274 background_task_runner_ = runner;
1275 }
1276
1277 void GlobalActivityTracker::RecordProcessLaunch(ProcessId process_id) {
1278 base::AutoLock lock(global_tracker_lock_);
1279 DCHECK(!base::ContainsKey(known_processes_, process_id));
manzagop (departed) 2017/02/22 20:44:15 This is possible due to pid recycling. The map sho
bcwhite 2017/02/22 22:13:02 It's possible only if there was no corresponding R
1280 known_processes_.insert(process_id);
1281 }
1282
1283 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1284 int exit_code) {
1285 DCHECK_NE(GetCurrentProcId(), process_id);
1286
1287 scoped_refptr<TaskRunner> task_runner;
1288 {
1289 base::AutoLock lock(global_tracker_lock_);
1290 task_runner = background_task_runner_;
1291 auto found = known_processes_.find(process_id);
1292 if (found != known_processes_.end())
1293 known_processes_.erase(found);
1294 else
1295 DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
1296 }
1297
1298 int64_t now_stamp = Time::Now().ToInternalValue();
1299
1300 // The persistent allocator is thread-safe so run the iteration and
1301 // adjustments on a worker thread if one was provided.
1302 if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
1303 task_runner->PostTask(FROM_HERE,
1304 Bind(&GlobalActivityTracker::CleanupAfterProcess,
1305 Unretained(this), process_id, now_stamp));
1306 return;
1307 }
1308
1309 CleanupAfterProcess(process_id, now_stamp);
1310 }
1311
1312 void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
1313 int64_t exit_stamp) {
1314 // The process may not have exited cleanly so it's necessary to go through
1315 // all the data structures it may have allocated in the persistent memory
1316 // segment and mark them as "released". This will allow them to be reused
1317 // later on. Memory is cleared here, rather than when it's needed, so as to
1318 // limit the impact at that critical time.
1319 PersistentMemoryAllocator::Iterator iter(allocator_.get());
1320 PersistentMemoryAllocator::Reference ref;
1321 uint32_t type;
1322 while ((ref = iter.GetNext(&type)) != 0) {
1323 const void* memory = allocator_->GetAsArray<char>(
1324 ref, type, PersistentMemoryAllocator::kSizeAny);
1325 ProcessId found_id;
1326 int64_t create_stamp;
1327
1328 switch (type) {
1329 case kTypeIdActivityTracker:
1330 case kTypeIdUserDataRecord:
1331 case kTypeIdProcessDataRecord:
1332 case ModuleInfoRecord::kPersistentTypeId:
1333 // By convention, the ProcessInfo structure is always the first
1334 // field of the structure so there's no need to handle all the
1335 // cases separately.
1336 if (ProcessInfo::OwningProcessId(memory, &found_id, &create_stamp)) {
1337 // Only change the type to be "free" if the process ID matches and
1338 // the creation time is before the exit time (so PID re-use doesn't
1339 // cause the erasure of something that is in-use).
1340 if (found_id == process_id && create_stamp < exit_stamp)
1341 allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1342 }
1343 break;
1344 }
1345 }
1346 }
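
Freeing here is just an atomic type flip with an eager clear: ChangeType(ref, ~type, type, /*clear=*/true) wipes the payload now, which is what lets AllocateFrom() near the top of this file hand the block back out later without a memset on the allocation path. The essential call in isolation (the type IDs are illustrative, and the include path is an assumption about where PersistentMemoryAllocator lives):

#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

constexpr uint32_t kTypeIdExampleRecord = 0x24D9F2DA;  // illustrative
constexpr uint32_t kTypeIdExampleRecordFree = ~kTypeIdExampleRecord;

// Mark a record free-for-reuse. The flip only succeeds if the record still
// has the expected "in use" type, so two cleaners cannot double-free it.
bool FreeRecord(base::PersistentMemoryAllocator* allocator,
                base::PersistentMemoryAllocator::Reference ref) {
  return allocator->ChangeType(ref, kTypeIdExampleRecordFree,
                               kTypeIdExampleRecord, /*clear=*/true);
}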
1347
1187 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { 1348 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1188 // Allocate at least one extra byte so the string is NUL terminated. All 1349 // Allocate at least one extra byte so the string is NUL terminated. All
1189 // memory returned by the allocator is guaranteed to be zeroed. 1350 // memory returned by the allocator is guaranteed to be zeroed.
1190 PersistentMemoryAllocator::Reference ref = 1351 PersistentMemoryAllocator::Reference ref =
1191 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); 1352 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1192 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, 1353 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1193 message.size() + 1); 1354 message.size() + 1);
1194 if (memory) { 1355 if (memory) {
1195 memcpy(memory, message.data(), message.size()); 1356 memcpy(memory, message.data(), message.size());
1196 allocator_->MakeIterable(ref); 1357 allocator_->MakeIterable(ref);
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1240 kTypeIdActivityTracker, 1401 kTypeIdActivityTracker,
1241 kTypeIdActivityTrackerFree, 1402 kTypeIdActivityTrackerFree,
1242 stack_memory_size_, 1403 stack_memory_size_,
1243 kCachedThreadMemories, 1404 kCachedThreadMemories,
1244 /*make_iterable=*/true), 1405 /*make_iterable=*/true),
1245 user_data_allocator_(allocator_.get(), 1406 user_data_allocator_(allocator_.get(),
1246 kTypeIdUserDataRecord, 1407 kTypeIdUserDataRecord,
1247 kTypeIdUserDataRecordFree, 1408 kTypeIdUserDataRecordFree,
1248 kUserDataSize, 1409 kUserDataSize,
1249 kCachedUserDataMemories, 1410 kCachedUserDataMemories,
1250 /*make_iterable=*/false), 1411 /*make_iterable=*/true),
1412 process_data_(allocator_->GetAsArray<char>(
1413 AllocateFrom(allocator_.get(),
1414 kTypeIdProcessDataRecordFree,
1415 kProcessDataSize,
1416 kTypeIdProcessDataRecord),
1417 kTypeIdProcessDataRecord,
1418 kProcessDataSize),
1419 kProcessDataSize),
1251 global_data_( 1420 global_data_(
1252 allocator_->GetAsArray<char>( 1421 allocator_->GetAsArray<char>(
1253 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), 1422 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
1254 kTypeIdGlobalDataRecord, 1423 kTypeIdGlobalDataRecord,
1255 PersistentMemoryAllocator::kSizeAny), 1424 kGlobalDataSize),
1256 kGlobalDataSize) { 1425 kGlobalDataSize) {
1257 // Ensure the passed memory is valid and empty (iterator finds nothing). 1426 // Ensure the passed memory is valid and empty (iterator finds nothing).
1258 uint32_t type; 1427 uint32_t type;
1259 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 1428 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
1260 1429
1261 // Ensure that there is no other global object and then make this one such. 1430 // Ensure that there is no other global object and then make this one such.
1262 DCHECK(!g_tracker_); 1431 DCHECK(!g_tracker_);
1263 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); 1432 subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1264 1433
1265 // The global records must be iterable in order to be found by an analyzer. 1434 // The data records must be iterable in order to be found by an analyzer.
1435 allocator_->MakeIterable(allocator_->GetAsReference(
1436 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1266 allocator_->MakeIterable(allocator_->GetAsReference( 1437 allocator_->MakeIterable(allocator_->GetAsReference(
1267 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); 1438 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
1268 1439
1269 // Fetch and record all activated field trials. 1440 // Fetch and record all activated field trials.
1270 FieldTrial::ActiveGroups active_groups; 1441 FieldTrial::ActiveGroups active_groups;
1271 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); 1442 FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1272 for (auto& group : active_groups) 1443 for (auto& group : active_groups)
1273 RecordFieldTrial(group.trial_name, group.group_name); 1444 RecordFieldTrial(group.trial_name, group.group_name);
1274 } 1445 }
1275 1446
(...skipping 102 matching lines...)
1378 : GlobalActivityTracker::ScopedThreadActivity( 1549 : GlobalActivityTracker::ScopedThreadActivity(
1379 program_counter, 1550 program_counter,
1380 nullptr, 1551 nullptr,
1381 Activity::ACT_PROCESS_WAIT, 1552 Activity::ACT_PROCESS_WAIT,
1382 ActivityData::ForProcess(process->Pid()), 1553 ActivityData::ForProcess(process->Pid()),
1383 /*lock_allowed=*/true) {} 1554 /*lock_allowed=*/true) {}
1384 #endif 1555 #endif
1385 1556
1386 } // namespace debug 1557 } // namespace debug
1387 } // namespace base 1558 } // namespace base