Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2680123003: Multi-Process Tracking Support (Closed)
Patch Set: wire actual process launch/exit to global tracker Created 3 years, 10 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 24 matching lines...)
35 // A version number is added on so that major structure changes won't try to 35 // A version number is added on so that major structure changes won't try to
36 // read an older version (since the cookie won't match). 36 // read an older version (since the cookie won't match).
37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 37 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
38 38
39 // The minimum depth a stack should support. 39 // The minimum depth a stack should support.
40 const int kMinStackDepth = 2; 40 const int kMinStackDepth = 2;
41 41
42 // The amount of memory set aside for holding arbitrary user data (key/value 42 // The amount of memory set aside for holding arbitrary user data (key/value
43 // pairs) globally or associated with ActivityData entries. 43 // pairs) globally or associated with ActivityData entries.
44 const size_t kUserDataSize = 1 << 10; // 1 KiB 44 const size_t kUserDataSize = 1 << 10; // 1 KiB
45 const size_t kProcessDataSize = 4 << 10; // 4 KiB
45 const size_t kGlobalDataSize = 16 << 10; // 16 KiB 46 const size_t kGlobalDataSize = 16 << 10; // 16 KiB
46 const size_t kMaxUserDataNameLength = 47 const size_t kMaxUserDataNameLength =
47 static_cast<size_t>(std::numeric_limits<uint8_t>::max()); 48 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
48 49
49 // A constant used to indicate that module information is changing. 50 // A constant used to indicate that module information is changing.
50 const uint32_t kModuleInformationChanging = 0x80000000; 51 const uint32_t kModuleInformationChanging = 0x80000000;
51 52
52 union ThreadRef { 53 union ThreadRef {
53 int64_t as_id; 54 int64_t as_id;
54 #if defined(OS_WIN) 55 #if defined(OS_WIN)
55 // On Windows, the handle itself is often a pseudo-handle with a common 56 // On Windows, the handle itself is often a pseudo-handle with a common
56 // value meaning "this thread" and so the thread-id is used. The former 57 // value meaning "this thread" and so the thread-id is used. The former
57 // can be converted to a thread-id with a system call. 58 // can be converted to a thread-id with a system call.
58 PlatformThreadId as_tid; 59 PlatformThreadId as_tid;
59 #elif defined(OS_POSIX) 60 #elif defined(OS_POSIX)
60 // On Posix, the handle is always a unique identifier so no conversion 61 // On Posix, the handle is always a unique identifier so no conversion
61 // needs to be done. However, its value is officially opaque so there 62 // needs to be done. However, its value is officially opaque so there
62 // is no one correct way to convert it to a numerical identifier. 63 // is no one correct way to convert it to a numerical identifier.
63 PlatformThreadHandle::Handle as_handle; 64 PlatformThreadHandle::Handle as_handle;
64 #endif 65 #endif
65 }; 66 };
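[Note: a minimal usage sketch, not part of this patch — it assumes base's PlatformThread API and a hypothetical AsThreadId() helper. The union is meant to be zeroed, filled with the platform-specific value, and read back through |as_id|:]

  int64_t AsThreadId() {
    ThreadRef thread_ref;
    thread_ref.as_id = 0;  // Clear unused high bytes on 32-bit platforms.
  #if defined(OS_WIN)
    thread_ref.as_tid = PlatformThread::CurrentId();
  #elif defined(OS_POSIX)
    thread_ref.as_handle = PlatformThread::CurrentHandle().platform_handle();
  #endif
    return thread_ref.as_id;
  }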
66 67
68 // Finds and reuses a specific allocation or creates a new one.
69 PersistentMemoryAllocator::Reference AllocateFrom(
70 PersistentMemoryAllocator* allocator,
71 uint32_t from_type,
72 size_t size,
73 uint32_t to_type) {
74 PersistentMemoryAllocator::Iterator iter(allocator);
75 PersistentMemoryAllocator::Reference ref;
76 while ((ref = iter.GetNextOfType(from_type)) != 0) {
77 DCHECK_LE(size, allocator->GetAllocSize(ref));
78 // This can fail if another thread has just taken it. It is assumed that
79 // the memory is cleared during the "free" operation.
80 if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
81 return ref;
82 }
83
84 return allocator->Allocate(size, to_type);
85 }
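[Note: an illustrative call, using only the type constants defined in this file — reclaim a freed process-data record or allocate a fresh one, as done in the GlobalActivityTracker constructor below:]

  PersistentMemoryAllocator::Reference ref = AllocateFrom(
      allocator, kTypeIdProcessDataRecordFree, kProcessDataSize,
      kTypeIdProcessDataRecord);
  if (!ref) {
    // Zero means the arena is exhausted; callers must tolerate failure.
  }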
86
67 // Determines the previous aligned index. 87 // Determines the previous aligned index.
68 size_t RoundDownToAlignment(size_t index, size_t alignment) { 88 size_t RoundDownToAlignment(size_t index, size_t alignment) {
69 return index & (0 - alignment); 89 return index & (0 - alignment);
70 } 90 }
71 91
72 // Determines the next aligned index. 92 // Determines the next aligned index.
73 size_t RoundUpToAlignment(size_t index, size_t alignment) { 93 size_t RoundUpToAlignment(size_t index, size_t alignment) {
74 return (index + (alignment - 1)) & (0 - alignment); 94 return (index + (alignment - 1)) & (0 - alignment);
75 } 95 }
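[Note: worked example, assuming alignment == 8.] Since (0 - alignment) in size_t arithmetic equals ~(alignment - 1), RoundDownToAlignment(13, 8) == 13 & ~7 == 8 and RoundUpToAlignment(13, 8) == (13 + 7) & ~7 == 16; both helpers require a power-of-two alignment.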
76 96
(...skipping 162 matching lines...)
239 StringPiece ActivityUserData::TypedValue::GetReference() const { 259 StringPiece ActivityUserData::TypedValue::GetReference() const {
240 DCHECK_EQ(RAW_VALUE_REFERENCE, type_); 260 DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
241 return ref_value_; 261 return ref_value_;
242 } 262 }
243 263
244 StringPiece ActivityUserData::TypedValue::GetStringReference() const { 264 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
245 DCHECK_EQ(STRING_VALUE_REFERENCE, type_); 265 DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
246 return ref_value_; 266 return ref_value_;
247 } 267 }
248 268
269 // These are required because std::atomic is (currently) not a POD type and
270 // thus clang requires explicit out-of-line constructors and destructors even
271 // when they do nothing.
249 ActivityUserData::ValueInfo::ValueInfo() {} 272 ActivityUserData::ValueInfo::ValueInfo() {}
250 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 273 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
251 ActivityUserData::ValueInfo::~ValueInfo() {} 274 ActivityUserData::ValueInfo::~ValueInfo() {}
275 ActivityUserData::MemoryHeader::MemoryHeader() {}
276 ActivityUserData::MemoryHeader::~MemoryHeader() {}
277 ActivityUserData::FieldHeader::FieldHeader() {}
278 ActivityUserData::FieldHeader::~FieldHeader() {}
252 279
253 StaticAtomicSequenceNumber ActivityUserData::next_id_; 280 StaticAtomicSequenceNumber ActivityUserData::next_id_;
254 281
255 ActivityUserData::ActivityUserData(void* memory, size_t size) 282 ActivityUserData::ActivityUserData(void* memory, size_t size)
256 : memory_(reinterpret_cast<char*>(memory)), 283 : memory_(reinterpret_cast<char*>(memory)),
257 available_(RoundDownToAlignment(size, kMemoryAlignment)), 284 available_(RoundDownToAlignment(size, kMemoryAlignment)),
258 id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) { 285 header_(reinterpret_cast<MemoryHeader*>(memory)) {
259 // It's possible that no user data is being stored. 286 // It's possible that no user data is being stored.
260 if (!memory_) 287 if (!memory_)
261 return; 288 return;
262 289
263 DCHECK_LT(kMemoryAlignment, available_); 290 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
264 if (id_->load(std::memory_order_relaxed) == 0) { 291 DCHECK_LT(sizeof(MemoryHeader), available_);
265 // Generate a new ID and store it in the first 32-bit word of memory_. 292 if (header_->data_id.load(std::memory_order_acquire) == 0) {
266 // |id_| must be non-zero for non-sink instances. 293 // Store the current process ID so analysis can determine which process
294 // generated the data. This is done first so it can be released later.
295 header_->process_id = GetCurrentProcId();
296
297 // Generate a new ID and store it in the header.
298 // |data_id| must be non-zero for non-sink instances.
267 uint32_t id; 299 uint32_t id;
268 while ((id = next_id_.GetNext()) == 0) 300 while ((id = next_id_.GetNext()) == 0)
269 ; 301 ;
270 id_->store(id, std::memory_order_relaxed); 302 header_->data_id.store(id, std::memory_order_release);
271 DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
272 } 303 }
273 memory_ += kMemoryAlignment; 304 memory_ += sizeof(MemoryHeader);
274 available_ -= kMemoryAlignment; 305 available_ -= sizeof(MemoryHeader);
275 306
276 // If there is already data present, load that. This allows the same class 307 // If there is already data present, load that. This allows the same class
277 // to be used for analysis through snapshots. 308 // to be used for analysis through snapshots.
278 ImportExistingData(); 309 ImportExistingData();
279 } 310 }
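[Note: the MemoryHeader definition lives in activity_tracker.h; this sketch of the fields used in this file is an assumption for readability.]

  struct MemoryHeader {
    std::atomic<uint32_t> data_id;  // Unique, non-zero once initialized.
    int64_t process_id;             // Which process generated this data.
    int64_t create_stamp;           // When the data area was created.
  };

The acquire-load of |data_id| in the constructor pairs with its release-store: any reader that observes a non-zero |data_id| is guaranteed to also see the |process_id| written before it.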
280 311
281 ActivityUserData::~ActivityUserData() {} 312 ActivityUserData::~ActivityUserData() {}
282 313
314 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
315 DCHECK(output_snapshot);
316 DCHECK(output_snapshot->empty());
317
318 // Find any new data that may have been added by an active instance of this
319 // class that is adding records.
320 ImportExistingData();
321
322 for (const auto& entry : values_) {
323 TypedValue value;
324 value.type_ = entry.second.type;
325 DCHECK_GE(entry.second.extent,
326 entry.second.size_ptr->load(std::memory_order_relaxed));
327
328 switch (entry.second.type) {
329 case RAW_VALUE:
330 case STRING_VALUE:
331 value.long_value_ =
332 std::string(reinterpret_cast<char*>(entry.second.memory),
333 entry.second.size_ptr->load(std::memory_order_relaxed));
334 break;
335 case RAW_VALUE_REFERENCE:
336 case STRING_VALUE_REFERENCE: {
337 ReferenceRecord* ref =
338 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
339 value.ref_value_ = StringPiece(
340 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
341 static_cast<size_t>(ref->size));
342 } break;
343 case BOOL_VALUE:
344 case CHAR_VALUE:
345 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
346 break;
347 case SIGNED_VALUE:
348 case UNSIGNED_VALUE:
349 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
350 break;
351 case END_OF_VALUES: // Included for completeness.
352 NOTREACHED();
353 }
354 auto inserted = output_snapshot->insert(
355 std::make_pair(entry.second.name.as_string(), std::move(value)));
356 DCHECK(inserted.second); // True if inserted, false if existed.
357 }
358
359 return true;
360 }
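[Note: a hypothetical analyzer-side call, using only names from this file:]

  ActivityUserData::Snapshot snapshot;
  if (user_data->CreateSnapshot(&snapshot)) {
    for (const auto& entry : snapshot) {
      // entry.first is the field name; entry.second is a TypedValue whose
      // accessor (e.g. GetStringReference() above) must match its type.
    }
  }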
361
362 const void* ActivityUserData::GetBaseAddress() {
363 // The |memory_| pointer advances as elements are written but the |header_|
364 // value is always at the start of the block so just return that.
365 return header_;
366 }
367
368 void ActivityUserData::SetOwningProcessIdForTesting(ProcessId id,
369 int64_t stamp) {
370 if (!header_)
371 return;
372 header_->process_id = id;
373 header_->create_stamp = stamp;
374 }
375
376 // static
377 bool ActivityUserData::OwningProcessId(const void* memory,
378 ProcessId* out_id,
379 int64_t* out_stamp) {
380 const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
381 if (header->data_id.load(std::memory_order_acquire) == 0)
382 return false;
383
384 *out_id = static_cast<ProcessId>(header->process_id);
385 *out_stamp = header->create_stamp;
386 return true;
387 }
388
283 void ActivityUserData::Set(StringPiece name, 389 void ActivityUserData::Set(StringPiece name,
284 ValueType type, 390 ValueType type,
285 const void* memory, 391 const void* memory,
286 size_t size) { 392 size_t size) {
287 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length()); 393 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
288 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1), 394 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
289 size); 395 size);
290 396
291 // It's possible that no user data is being stored. 397 // It's possible that no user data is being stored.
292 if (!memory_) 398 if (!memory_)
293 return; 399 return;
294 400
295 // The storage of a name is limited so use that limit during lookup. 401 // The storage of a name is limited so use that limit during lookup.
296 if (name.length() > kMaxUserDataNameLength) 402 if (name.length() > kMaxUserDataNameLength)
297 name.set(name.data(), kMaxUserDataNameLength); 403 name.set(name.data(), kMaxUserDataNameLength);
298 404
299 ValueInfo* info; 405 ValueInfo* info;
300 auto existing = values_.find(name); 406 auto existing = values_.find(name);
301 if (existing != values_.end()) { 407 if (existing != values_.end()) {
302 info = &existing->second; 408 info = &existing->second;
303 } else { 409 } else {
304 // The name size is limited to what can be held in a single byte but 410 // The name size is limited to what can be held in a single byte but
305 // because there are no alignment constraints on strings, it's set tight 411 // because there are no alignment constraints on strings, it's set tight
306 // against the header. Its extent (the reserved space, even if it's not 412 // against the header. Its extent (the reserved space, even if it's not
307 // all used) is calculated so that, when pressed against the header, the 413 // all used) is calculated so that, when pressed against the header, the
308 // following field will be aligned properly. 414 // following field will be aligned properly.
309 size_t name_size = name.length(); 415 size_t name_size = name.length();
310 size_t name_extent = 416 size_t name_extent =
311 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) - 417 RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
312 sizeof(Header); 418 sizeof(FieldHeader);
313 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment); 419 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
314 420
315 // The "base size" is the size of the header and (padded) string key. Stop 421 // The "base size" is the size of the header and (padded) string key. Stop
316 // now if there's not room enough for even this. 422 // now if there's not room enough for even this.
317 size_t base_size = sizeof(Header) + name_extent; 423 size_t base_size = sizeof(FieldHeader) + name_extent;
318 if (base_size > available_) 424 if (base_size > available_)
319 return; 425 return;
320 426
321 // The "full size" is the size for storing the entire value. 427 // The "full size" is the size for storing the entire value.
322 size_t full_size = std::min(base_size + value_extent, available_); 428 size_t full_size = std::min(base_size + value_extent, available_);
323 429
324 // If the value is actually a single byte, see if it can be stuffed at the 430 // If the value is actually a single byte, see if it can be stuffed at the
325 // end of the name extent rather than wasting kMemoryAlignment bytes. 431 // end of the name extent rather than wasting kMemoryAlignment bytes.
326 if (size == 1 && name_extent > name_size) { 432 if (size == 1 && name_extent > name_size) {
327 full_size = base_size; 433 full_size = base_size;
328 --name_extent; 434 --name_extent;
329 --base_size; 435 --base_size;
330 } 436 }
331 437
332 // Truncate the stored size to the amount of available memory. Stop now if 438 // Truncate the stored size to the amount of available memory. Stop now if
333 // there's not any room for even part of the value. 439 // there's not any room for even part of the value.
334 size = std::min(full_size - base_size, size); 440 size = std::min(full_size - base_size, size);
335 if (size == 0) 441 if (size == 0)
336 return; 442 return;
337 443
338 // Allocate a chunk of memory. 444 // Allocate a chunk of memory.
339 Header* header = reinterpret_cast<Header*>(memory_); 445 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
340 memory_ += full_size; 446 memory_ += full_size;
341 available_ -= full_size; 447 available_ -= full_size;
342 448
343 // Datafill the header and name records. Memory must be zeroed. The |type| 449 // Datafill the header and name records. Memory must be zeroed. The |type|
344 // is written last, atomically, to release all the other values. 450 // is written last, atomically, to release all the other values.
345 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed)); 451 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
346 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed)); 452 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
347 header->name_size = static_cast<uint8_t>(name_size); 453 header->name_size = static_cast<uint8_t>(name_size);
348 header->record_size = full_size; 454 header->record_size = full_size;
349 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header); 455 char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
350 void* value_memory = 456 void* value_memory =
351 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent; 457 reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
352 memcpy(name_memory, name.data(), name_size); 458 memcpy(name_memory, name.data(), name_size);
353 header->type.store(type, std::memory_order_release); 459 header->type.store(type, std::memory_order_release);
354 460
355 // Create an entry in |values_| so that this field can be found and changed 461 // Create an entry in |values_| so that this field can be found and changed
356 // later on without having to allocate new entries. 462 // later on without having to allocate new entries.
357 StringPiece persistent_name(name_memory, name_size); 463 StringPiece persistent_name(name_memory, name_size);
358 auto inserted = 464 auto inserted =
359 values_.insert(std::make_pair(persistent_name, ValueInfo())); 465 values_.insert(std::make_pair(persistent_name, ValueInfo()));
360 DCHECK(inserted.second); // True if inserted, false if existed. 466 DCHECK(inserted.second); // True if inserted, false if existed.
361 info = &inserted.first->second; 467 info = &inserted.first->second;
362 info->name = persistent_name; 468 info->name = persistent_name;
363 info->memory = value_memory; 469 info->memory = value_memory;
364 info->size_ptr = &header->value_size; 470 info->size_ptr = &header->value_size;
365 info->extent = full_size - sizeof(Header) - name_extent; 471 info->extent = full_size - sizeof(FieldHeader) - name_extent;
366 info->type = type; 472 info->type = type;
367 } 473 }
368 474
369 // Copy the value data to storage. The |size| is written last, atomically, to 475 // Copy the value data to storage. The |size| is written last, atomically, to
370 // release the copied data. Until then, a parallel reader will just ignore 476 // release the copied data. Until then, a parallel reader will just ignore
371 // records with a zero size. 477 // records with a zero size.
372 DCHECK_EQ(type, info->type); 478 DCHECK_EQ(type, info->type);
373 size = std::min(size, info->extent); 479 size = std::min(size, info->extent);
374 info->size_ptr->store(0, std::memory_order_seq_cst); 480 info->size_ptr->store(0, std::memory_order_seq_cst);
375 memcpy(info->memory, memory, size); 481 memcpy(info->memory, memory, size);
376 info->size_ptr->store(size, std::memory_order_release); 482 info->size_ptr->store(size, std::memory_order_release);
377 } 483 }
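[Note: the zero-then-copy-then-release sequence above implies the following reader pattern, sketched here with the ValueInfo fields from this file:]

  size_t size = info.size_ptr->load(std::memory_order_acquire);
  if (size != 0) {
    // The first |size| bytes at info.memory were written before the size
    // was published; a zero size means "no value yet", so skip the record.
  }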
378 484
379 void ActivityUserData::SetReference(StringPiece name, 485 void ActivityUserData::SetReference(StringPiece name,
380 ValueType type, 486 ValueType type,
381 const void* memory, 487 const void* memory,
382 size_t size) { 488 size_t size) {
383 ReferenceRecord rec; 489 ReferenceRecord rec;
384 rec.address = reinterpret_cast<uintptr_t>(memory); 490 rec.address = reinterpret_cast<uintptr_t>(memory);
385 rec.size = size; 491 rec.size = size;
386 Set(name, type, &rec, sizeof(rec)); 492 Set(name, type, &rec, sizeof(rec));
387 } 493 }
388 494
389 void ActivityUserData::ImportExistingData() const { 495 void ActivityUserData::ImportExistingData() const {
390 while (available_ > sizeof(Header)) { 496 while (available_ > sizeof(FieldHeader)) {
391 Header* header = reinterpret_cast<Header*>(memory_); 497 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
392 ValueType type = 498 ValueType type =
393 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 499 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
394 if (type == END_OF_VALUES) 500 if (type == END_OF_VALUES)
395 return; 501 return;
396 if (header->record_size > available_) 502 if (header->record_size > available_)
397 return; 503 return;
398 504
399 size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size, 505 size_t value_offset = RoundUpToAlignment(
400 kMemoryAlignment); 506 sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
401 if (header->record_size == value_offset && 507 if (header->record_size == value_offset &&
402 header->value_size.load(std::memory_order_relaxed) == 1) { 508 header->value_size.load(std::memory_order_relaxed) == 1) {
403 value_offset -= 1; 509 value_offset -= 1;
404 } 510 }
405 if (value_offset + header->value_size > header->record_size) 511 if (value_offset + header->value_size > header->record_size)
406 return; 512 return;
407 513
408 ValueInfo info; 514 ValueInfo info;
409 info.name = StringPiece(memory_ + sizeof(Header), header->name_size); 515 info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
410 info.type = type; 516 info.type = type;
411 info.memory = memory_ + value_offset; 517 info.memory = memory_ + value_offset;
412 info.size_ptr = &header->value_size; 518 info.size_ptr = &header->value_size;
413 info.extent = header->record_size - value_offset; 519 info.extent = header->record_size - value_offset;
414 520
415 StringPiece key(info.name); 521 StringPiece key(info.name);
416 values_.insert(std::make_pair(key, std::move(info))); 522 values_.insert(std::make_pair(key, std::move(info)));
417 523
418 memory_ += header->record_size; 524 memory_ += header->record_size;
419 available_ -= header->record_size; 525 available_ -= header->record_size;
420 } 526 }
421 } 527 }
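[Note: the record layout this loop walks, widths illustrative:]

  +-------------+------------------------------+----------------------------+
  | FieldHeader | name, padded out to the next | value, |value_size| of the |
  | type/sizes  | kMemoryAlignment boundary    | |extent| bytes in use      |
  +-------------+------------------------------+----------------------------+

A single-byte value may be packed into the name's padding, which is why the loop subtracts one from value_offset when record_size equals the padded name extent.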
422 528
423 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
424 DCHECK(output_snapshot);
425 DCHECK(output_snapshot->empty());
426
427 // Find any new data that may have been added by an active instance of this
428 // class that is adding records.
429 ImportExistingData();
430
431 for (const auto& entry : values_) {
432 TypedValue value;
433 value.type_ = entry.second.type;
434 DCHECK_GE(entry.second.extent,
435 entry.second.size_ptr->load(std::memory_order_relaxed));
436
437 switch (entry.second.type) {
438 case RAW_VALUE:
439 case STRING_VALUE:
440 value.long_value_ =
441 std::string(reinterpret_cast<char*>(entry.second.memory),
442 entry.second.size_ptr->load(std::memory_order_relaxed));
443 break;
444 case RAW_VALUE_REFERENCE:
445 case STRING_VALUE_REFERENCE: {
446 ReferenceRecord* ref =
447 reinterpret_cast<ReferenceRecord*>(entry.second.memory);
448 value.ref_value_ = StringPiece(
449 reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
450 static_cast<size_t>(ref->size));
451 } break;
452 case BOOL_VALUE:
453 case CHAR_VALUE:
454 value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
455 break;
456 case SIGNED_VALUE:
457 case UNSIGNED_VALUE:
458 value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
459 break;
460 case END_OF_VALUES: // Included for completeness.
461 NOTREACHED();
462 }
463 auto inserted = output_snapshot->insert(
464 std::make_pair(entry.second.name.as_string(), std::move(value)));
465 DCHECK(inserted.second); // True if inserted, false if existed.
466 }
467
468 return true;
469 }
470
471 const void* ActivityUserData::GetBaseAddress() {
472 // The |memory_| pointer advances as elements are written but the |id_|
473 // value is always at the start of the block so just return that.
474 return id_;
475 }
476
477 // This information is kept for every thread that is tracked. It is filled 529 // This information is kept for every thread that is tracked. It is filled
478 // the very first time the thread is seen. All fields must be of exact sizes 530 // the very first time the thread is seen. All fields must be of exact sizes
479 // so there is no issue moving between 32 and 64-bit builds. 531 // so there is no issue moving between 32 and 64-bit builds.
480 struct ThreadActivityTracker::Header { 532 struct ThreadActivityTracker::Header {
481 // Defined in .h for analyzer access. Increment this if structure changes! 533 // Defined in .h for analyzer access. Increment this if structure changes!
482 static constexpr uint32_t kPersistentTypeId = 534 static constexpr uint32_t kPersistentTypeId =
483 GlobalActivityTracker::kTypeIdActivityTracker; 535 GlobalActivityTracker::kTypeIdActivityTracker;
484 536
485 // Expected size for 32/64-bit check. 537 // Expected size for 32/64-bit check.
486 static constexpr size_t kExpectedInstanceSize = 80; 538 static constexpr size_t kExpectedInstanceSize = 80;
(...skipping 396 matching lines...)
883 } 935 }
884 936
885 // Success! 937 // Success!
886 return true; 938 return true;
887 } 939 }
888 940
889 // Too many attempts. 941 // Too many attempts.
890 return false; 942 return false;
891 } 943 }
892 944
945 const void* ThreadActivityTracker::GetBaseAddress() {
946 return header_;
947 }
948
949 void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId id,
950 int64_t stamp) {
951 header_->process_id.store(id, std::memory_order_relaxed);
952 header_->start_time = stamp;
953 }
954
955 // static
956 bool ThreadActivityTracker::OwningProcessId(const void* memory,
957 ProcessId* out_id,
958 int64_t* out_stamp) {
959 const Header* header = reinterpret_cast<const Header*>(memory);
960 if (header->cookie.load(std::memory_order_acquire) != kHeaderCookie)
961 return false;
962
963 *out_id = static_cast<ProcessId>(
964 header->process_id.load(std::memory_order_relaxed));
965 *out_stamp = header->start_time;
966 return true;
967 }
968
893 // static 969 // static
894 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 970 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
895 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 971 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
896 } 972 }
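[Note: worked example with the 80-byte Header above and a hypothetical 40-byte Activity: SizeForStackDepth(48) == 48 * 40 + 80 == 2000 bytes — the activity stack is an array placed after the header.]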
897 973
898 // The instantiation of the GlobalActivityTracker object. 974 // The instantiation of the GlobalActivityTracker object.
899 // The object held here will obviously not be destructed at process exit 975 // The object held here will obviously not be destructed at process exit
900 // but that's best since PersistentMemoryAllocator objects (that underlie 976 // but that's best since PersistentMemoryAllocator objects (that underlie
901 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 977 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
902 // essential at exit anyway due to the fact that they depend on data managed 978 // essential at exit anyway due to the fact that they depend on data managed
(...skipping 274 matching lines...)
1177 return tracker; 1253 return tracker;
1178 } 1254 }
1179 1255
1180 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 1256 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1181 ThreadActivityTracker* tracker = 1257 ThreadActivityTracker* tracker =
1182 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 1258 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1183 if (tracker) 1259 if (tracker)
1184 delete tracker; 1260 delete tracker;
1185 } 1261 }
1186 1262
1263 void GlobalActivityTracker::SetBackgroundTaskRunner(
1264 const scoped_refptr<TaskRunner>& runner) {
1265 AutoLock lock(global_tracker_lock_);
1266 background_task_runner_ = runner;
1267 }
1268
1269 void GlobalActivityTracker::RecordProcessLaunch(ProcessId process_id) {}
1270
1271 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1272 int exit_code) {
1273 DCHECK_NE(GetCurrentProcId(), process_id);
1274
1275 int64_t now_stamp = Time::Now().ToInternalValue();
1276
1277 // The persistent allocator is thread-safe so run the iteration and
1278 // adjustments on a worker thread if one was provided.
1279 {
1280 AutoLock lock(global_tracker_lock_);
1281 if (background_task_runner_ &&
1282 !background_task_runner_->RunsTasksOnCurrentThread()) {
1283 background_task_runner_->PostTask(
1284 FROM_HERE, Bind(&GlobalActivityTracker::RecordProcessExitImpl,
1285 Unretained(this), process_id, exit_code, now_stamp));
1286 return;
1287 }
1288 }
1289
1290 RecordProcessExitImpl(process_id, exit_code, now_stamp);
1291 }
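[Note: a hypothetical call site in process-lifecycle code, per this patch set's goal of wiring launch/exit to the global tracker; it assumes the class's Get() accessor, and |child_pid| and |exit_code| are placeholders:]

  GlobalActivityTracker* tracker = GlobalActivityTracker::Get();
  if (tracker) {
    tracker->RecordProcessLaunch(child_pid);
    // ... later, once the child process has been reaped:
    tracker->RecordProcessExit(child_pid, exit_code);
  }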
1292
1293 void GlobalActivityTracker::RecordProcessExitImpl(ProcessId process_id,
1294 int exit_code,
1295 int64_t exit_stamp) {
1296 // The process may not have exited cleanly so it's necessary to go through
1297 // all the data structures it may have allocated in the persistent memory
1298 // segment and mark them as "released". This will allow them to be reused
1299 // later on. Memory is cleared here, rather than when it's needed, so as to
1300 // limit the impact at that critical time.
1301 PersistentMemoryAllocator::Iterator iter(allocator_.get());
1302 PersistentMemoryAllocator::Reference ref;
1303 uint32_t type;
1304 while ((ref = iter.GetNext(&type)) != 0) {
1305 const void* memory = allocator_->GetAsArray<char>(
1306 ref, type, PersistentMemoryAllocator::kSizeAny);
1307 ProcessId found_id;
1308 int64_t create_stamp;
1309
1310 switch (type) {
1311 case kTypeIdActivityTracker:
1312 if (ThreadActivityTracker::OwningProcessId(memory, &found_id,
1313 &create_stamp)) {
1314 break;
1315 }
1316 continue;
1317
1318 case kTypeIdUserDataRecord:
1319 case kTypeIdProcessDataRecord:
1320 if (ActivityUserData::OwningProcessId(memory, &found_id,
1321 &create_stamp)) {
1322 break;
1323 }
1324 continue;
1325
1326 default:
1327 continue;
1328 }
1329
1330 // Only change the type to be "free" if the process ID matches and the
1331 // creation time is before the exit time (so PID re-use doesn't cause
1332 // the erasure of something that is in-use).
1333 if (found_id == process_id && create_stamp < exit_stamp)
1334 allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1335 }
1336 }
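[Note: why the create_stamp < exit_stamp guard matters — an illustrative timeline:
  t0: process 1234 starts; its records carry stamp t0.
  t1: process 1234 exits; this routine frees records with matching PID and stamp < t1.
  t2 > t1: the OS recycles PID 1234; the new process's records carry stamps >= t2, so a late exit notification for the old process cannot free them.]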
1337
1187 void GlobalActivityTracker::RecordLogMessage(StringPiece message) { 1338 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1188 // Allocate at least one extra byte so the string is NUL terminated. All 1339 // Allocate at least one extra byte so the string is NUL terminated. All
1189 // memory returned by the allocator is guaranteed to be zeroed. 1340 // memory returned by the allocator is guaranteed to be zeroed.
1190 PersistentMemoryAllocator::Reference ref = 1341 PersistentMemoryAllocator::Reference ref =
1191 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage); 1342 allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1192 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage, 1343 char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1193 message.size() + 1); 1344 message.size() + 1);
1194 if (memory) { 1345 if (memory) {
1195 memcpy(memory, message.data(), message.size()); 1346 memcpy(memory, message.data(), message.size());
1196 allocator_->MakeIterable(ref); 1347 allocator_->MakeIterable(ref);
(...skipping 43 matching lines...)
1240 kTypeIdActivityTracker, 1391 kTypeIdActivityTracker,
1241 kTypeIdActivityTrackerFree, 1392 kTypeIdActivityTrackerFree,
1242 stack_memory_size_, 1393 stack_memory_size_,
1243 kCachedThreadMemories, 1394 kCachedThreadMemories,
1244 /*make_iterable=*/true), 1395 /*make_iterable=*/true),
1245 user_data_allocator_(allocator_.get(), 1396 user_data_allocator_(allocator_.get(),
1246 kTypeIdUserDataRecord, 1397 kTypeIdUserDataRecord,
1247 kTypeIdUserDataRecordFree, 1398 kTypeIdUserDataRecordFree,
1248 kUserDataSize, 1399 kUserDataSize,
1249 kCachedUserDataMemories, 1400 kCachedUserDataMemories,
1250 /*make_iterable=*/false), 1401 /*make_iterable=*/true),
1402 process_data_(allocator_->GetAsArray<char>(
1403 AllocateFrom(allocator_.get(),
1404 kTypeIdProcessDataRecordFree,
1405 kProcessDataSize,
1406 kTypeIdProcessDataRecord),
1407 kTypeIdProcessDataRecord,
1408 kProcessDataSize),
1409 kProcessDataSize),
1251 global_data_( 1410 global_data_(
1252 allocator_->GetAsArray<char>( 1411 allocator_->GetAsArray<char>(
1253 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord), 1412 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
1254 kTypeIdGlobalDataRecord, 1413 kTypeIdGlobalDataRecord,
1255 PersistentMemoryAllocator::kSizeAny), 1414 kGlobalDataSize),
1256 kGlobalDataSize) { 1415 kGlobalDataSize) {
1257 // Ensure the passed memory is valid and empty (iterator finds nothing). 1416 // Ensure the passed memory is valid and empty (iterator finds nothing).
1258 uint32_t type; 1417 uint32_t type;
1259 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 1418 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
1260 1419
1261 // Ensure that there is no other global object and then make this one such. 1420 // Ensure that there is no other global object and then make this one such.
1262 DCHECK(!g_tracker_); 1421 DCHECK(!g_tracker_);
1263 subtle::NoBarrier_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this)); 1422 subtle::NoBarrier_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1264 1423
1265 // The global records must be iterable in order to be found by an analyzer. 1424 // The data records must be iterable in order to be found by an analyzer.
1425 allocator_->MakeIterable(allocator_->GetAsReference(
1426 process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1266 allocator_->MakeIterable(allocator_->GetAsReference( 1427 allocator_->MakeIterable(allocator_->GetAsReference(
1267 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord)); 1428 global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
1268 1429
1269 // Fetch and record all activated field trials. 1430 // Fetch and record all activated field trials.
1270 FieldTrial::ActiveGroups active_groups; 1431 FieldTrial::ActiveGroups active_groups;
1271 FieldTrialList::GetActiveFieldTrialGroups(&active_groups); 1432 FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1272 for (auto& group : active_groups) 1433 for (auto& group : active_groups)
1273 RecordFieldTrial(group.trial_name, group.group_name); 1434 RecordFieldTrial(group.trial_name, group.group_name);
1274 } 1435 }
1275 1436
(...skipping 102 matching lines...)
1378 : GlobalActivityTracker::ScopedThreadActivity( 1539 : GlobalActivityTracker::ScopedThreadActivity(
1379 program_counter, 1540 program_counter,
1380 nullptr, 1541 nullptr,
1381 Activity::ACT_PROCESS_WAIT, 1542 Activity::ACT_PROCESS_WAIT,
1382 ActivityData::ForProcess(process->Pid()), 1543 ActivityData::ForProcess(process->Pid()),
1383 /*lock_allowed=*/true) {} 1544 /*lock_allowed=*/true) {}
1384 #endif 1545 #endif
1385 1546
1386 } // namespace debug 1547 } // namespace debug
1387 } // namespace base 1548 } // namespace base