Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include "base/debug/stack_trace.h" | 7 #include "base/debug/stack_trace.h" |
| 8 #include "base/files/file.h" | 8 #include "base/files/file.h" |
| 9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
| 10 #include "base/files/memory_mapped_file.h" | 10 #include "base/files/memory_mapped_file.h" |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 59 ThreadRef thread_ref; | 59 ThreadRef thread_ref; |
| 60 thread_ref.as_id = 0; // Zero the union in case other is smaller. | 60 thread_ref.as_id = 0; // Zero the union in case other is smaller. |
| 61 #if defined(OS_WIN) | 61 #if defined(OS_WIN) |
| 62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); | 62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); |
| 63 #elif defined(OS_POSIX) | 63 #elif defined(OS_POSIX) |
| 64 thread_ref.as_handle = handle.platform_handle(); | 64 thread_ref.as_handle = handle.platform_handle(); |
| 65 #endif | 65 #endif |
| 66 return ForThread(thread_ref.as_id); | 66 return ForThread(thread_ref.as_id); |
| 67 } | 67 } |
| 68 | 68 |
| 69 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator( | |
| 70 PersistentMemoryAllocator* allocator, | |
| 71 uint32_t object_type, | |
| 72 size_t object_size, | |
| 73 size_t cache_size) | |
| 74 : allocator_(allocator), | |
| 75 object_type_(object_type), | |
| 76 object_size_(object_size), | |
| 77 cache_size_(cache_size), | |
| 78 iterator_(allocator), | |
| 79 cache_values_(new Reference[cache_size]), | |
| 80 cache_used_(0) { | |
| 81 DCHECK(allocator); | |
| 82 } | |
| 83 | |
| 84 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {} | |
| 85 | |
| 86 ActivityTrackerMemoryAllocator::Reference | |
| 87 ActivityTrackerMemoryAllocator::GetObjectReference() { | |
| 88 // First see if there is a cached value that can be returned. This is much | |
| 89 // faster than searching the memory system for free blocks. | |
| 90 while (cache_used_ > 0) { | |
| 91 Reference cached = cache_values_[--cache_used_]; | |
| 92 // Change the type of the cached object to the proper type and return it. | |
| 93 // If the type-change fails that means another thread has taken this from | |
| 94 // under us (via the search below) so ignore it and keep trying. | |
| 95 if (allocator_->ChangeType(cached, object_type_, ~object_type_)) | |
| 96 return cached; | |
| 97 } | |
| 98 | |
| 99 // Fetch the next "free" (~type) object from persistent memory. Rather | |
| 100 // than restart the iterator at the head each time and likely waste time | |
| 101 // going again through objects that aren't relevant, the iterator continues | |
| 102 // from where it last left off and is only reset when the end is reached. | |
| 103 // If the returned reference matches |last|, then it has wrapped without | |
| 104 // finding anything. | |
| 105 const Reference last = iterator_.GetLast(); | |
| 106 while (true) { | |
| 107 uint32_t type; | |
| 108 Reference found = iterator_.GetNext(&type); | |
| 109 if (found && type == ~object_type_) { | |
|
Alexei Svitkine (slow)
2016/10/13 15:25:21
I don't like you inlining this ~ logic in multiple
bcwhite
2016/10/13 17:03:22
Done.
| |
| 110 // Found a free object. Change it to the proper type and return it. If | |
| 111 // the type-change fails that means another thread has taken this from | |
| 112 // under us so ignore it and keep trying. | |
| 113 if (allocator_->ChangeType(found, object_type_, ~object_type_)) | |
| 114 return found; | |
| 115 } | |
| 116 if (found == last) { | |
| 117 // Wrapped. No desired object was found. | |
| 118 break; | |
| 119 } | |
| 120 if (!found) { | |
| 121 // Reached end; start over at the beginning. | |
| 122 iterator_.Reset(); | |
| 123 } | |
| 124 } | |
| 125 | |
| 126 // No free block was found so instead allocate a new one. | |
| 127 Reference allocated = allocator_->Allocate(object_size_, object_type_); | |
| 128 if (allocated) | |
| 129 allocator_->MakeIterable(allocated); | |
| 130 return allocated; | |
| 131 } | |
| 132 | |
| 133 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) { | |
| 134 // Zero the memory so that it is ready for immediate use if needed later. | |
| 135 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_); | |
| 136 DCHECK(mem_base); | |
| 137 memset(mem_base, 0, object_size_); | |
| 138 | |
| 139 // Mark object as free (use ~type as free-type). | |
| 140 bool success = allocator_->ChangeType(ref, ~object_type_, object_type_); | |
| 141 DCHECK(success); | |
| 142 | |
| 143 // Add this reference to our "free" cache if there is space. | |
|
Alexei Svitkine (slow)
2016/10/13 15:25:21
Nit: Can you expand comment to mention what the im
bcwhite
2016/10/13 17:03:22
Done.
| |
| 144 if (cache_used_ < cache_size_) | |
| 145 cache_values_[cache_used_++] = ref; | |
| 146 } | |
| 147 | |
| 69 // static | 148 // static |
| 70 void Activity::FillFrom(Activity* activity, | 149 void Activity::FillFrom(Activity* activity, |
| 71 const void* origin, | 150 const void* origin, |
| 72 Type type, | 151 Type type, |
| 73 const ActivityData& data) { | 152 const ActivityData& data) { |
| 74 activity->time_internal = base::TimeTicks::Now().ToInternalValue(); | 153 activity->time_internal = base::TimeTicks::Now().ToInternalValue(); |
| 75 activity->origin_address = reinterpret_cast<uintptr_t>(origin); | 154 activity->origin_address = reinterpret_cast<uintptr_t>(origin); |
| 76 activity->activity_type = type; | 155 activity->activity_type = type; |
| 77 activity->data = data; | 156 activity->data = data; |
| 78 | 157 |
| (...skipping 413 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 492 uint64_t id, | 571 uint64_t id, |
| 493 StringPiece name, | 572 StringPiece name, |
| 494 int stack_depth) { | 573 int stack_depth) { |
| 495 CreateWithAllocator( | 574 CreateWithAllocator( |
| 496 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth); | 575 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth); |
| 497 } | 576 } |
| 498 | 577 |
| 499 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { | 578 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { |
| 500 DCHECK(!this_thread_tracker_.Get()); | 579 DCHECK(!this_thread_tracker_.Get()); |
| 501 | 580 |
| 502 PersistentMemoryAllocator::Reference mem_reference = | 581 PersistentMemoryAllocator::Reference mem_reference; |
| 503 PersistentMemoryAllocator::kReferenceNull; | |
| 504 DCHECK(!mem_reference); // invalid_value should be checkable with ! | |
| 505 | 582 |
| 506 while (true) { | 583 { |
| 507 // Get the first available memory from the top of the FIFO. | 584 base::AutoLock autolock(thread_tracker_allocator_lock_); |
| 508 if (!available_memories_.pop(&mem_reference)) | 585 mem_reference = thread_tracker_allocator_.GetObjectReference(); |
| 509 break; | |
| 510 | |
| 511 // Turn the reference back into one of the activity-tracker type. This can | |
| 512 // fail if something else has already taken the block and changed its type. | |
| 513 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
| 514 kTypeIdActivityTrackerFree)) { | |
| 515 break; | |
| 516 } | |
| 517 } | 586 } |
| 518 | 587 |
| 519 // Handle the case where no known available memories were found. | |
| 520 if (!mem_reference) { | 588 if (!mem_reference) { |
| 521 // Allocate a block of memory from the persistent segment. | 589 // Failure. This shouldn't happen. But be graceful if it does, probably |
| 522 mem_reference = | 590 // because the underlying allocator wasn't given enough memory to satisfy |
| 523 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); | 591 // all possible requests. |
| 524 if (mem_reference) { | 592 NOTREACHED(); |
| 525 // Success. Make the allocation iterable so it can be found later. | 593 // Report the thread-count at which the allocator was full so that the |
| 526 allocator_->MakeIterable(mem_reference); | 594 // failure can be seen and underlying memory resized appropriately. |
| 527 } else { | 595 UMA_HISTOGRAM_COUNTS_1000( |
| 528 // Failure. Look for any free blocks that weren't held in the cache | 596 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", |
| 529 // of available memories and try to claim it. This can happen if the | 597 thread_tracker_count_.load(std::memory_order_relaxed)); |
| 530 // |available_memories_| stack isn't sufficiently large to hold all | 598 // Return null, just as if tracking wasn't enabled. |
| 531 // released memories or if multiple independent processes are sharing | 599 return nullptr; |
| 532 // the memory segment. | |
| 533 PersistentMemoryAllocator::Iterator iter(allocator_.get()); | |
| 534 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) != | |
| 535 0) { | |
| 536 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
| 537 kTypeIdActivityTrackerFree)) { | |
| 538 break; | |
| 539 } | |
| 540 mem_reference = 0; | |
| 541 } | |
| 542 if (!mem_reference) { | |
| 543 // Double Failure. This shouldn't happen. But be graceful if it does, | |
| 544 // probably because the underlying allocator wasn't given enough memory | |
| 545 // to satisfy all possible requests. | |
| 546 NOTREACHED(); | |
| 547 // Report the thread-count at which the allocator was full so that the | |
| 548 // failure can be seen and underlying memory resized appropriately. | |
| 549 UMA_HISTOGRAM_COUNTS_1000( | |
| 550 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", | |
| 551 thread_tracker_count_.load(std::memory_order_relaxed)); | |
| 552 // Return null, just as if tracking wasn't enabled. | |
| 553 return nullptr; | |
| 554 } | |
| 555 } | |
| 556 } | 600 } |
| 557 | 601 |
| 558 // Convert the memory block found above into an actual memory address. | 602 // Convert the memory block found above into an actual memory address. |
| 559 DCHECK(mem_reference); | 603 DCHECK(mem_reference); |
| 560 void* mem_base = | 604 void* mem_base = |
| 561 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); | 605 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); |
| 562 DCHECK(mem_base); | 606 DCHECK(mem_base); |
| 563 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); | 607 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); |
| 564 | 608 |
| 565 // Create a tracker with the acquired memory and set it as the tracker | 609 // Create a tracker with the acquired memory and set it as the tracker |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 582 delete tracker; | 626 delete tracker; |
| 583 } | 627 } |
| 584 | 628 |
// Takes ownership of |allocator| (the persistent segment backing all
// tracking data) and installs this instance as the process-wide tracker.
// |stack_depth| sizes the per-thread activity stacks.
GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      this_thread_tracker_(&OnTLSDestroy),
      thread_tracker_count_(0),
      // Per-thread tracker memory is carved out of the shared persistent
      // allocator, with a small cache of recently released blocks for reuse.
      thread_tracker_allocator_(allocator_.get(),
                                kTypeIdActivityTracker,
                                stack_memory_size_,
                                kCachedThreadMemories) {
  // Ensure the passed memory is valid and empty (iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;
}
| 601 | 648 |
| 602 GlobalActivityTracker::~GlobalActivityTracker() { | 649 GlobalActivityTracker::~GlobalActivityTracker() { |
| 603 DCHECK_EQ(g_tracker_, this); | 650 DCHECK_EQ(g_tracker_, this); |
| 604 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 651 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 605 g_tracker_ = nullptr; | 652 g_tracker_ = nullptr; |
| 606 } | 653 } |
| 607 | 654 |
| 608 void GlobalActivityTracker::ReturnTrackerMemory( | 655 void GlobalActivityTracker::ReturnTrackerMemory( |
| 609 ManagedActivityTracker* tracker) { | 656 ManagedActivityTracker* tracker) { |
| 610 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; | 657 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; |
| 611 void* mem_base = tracker->mem_base_; | 658 void* mem_base = tracker->mem_base_; |
| 612 DCHECK(mem_reference); | 659 DCHECK(mem_reference); |
| 613 DCHECK(mem_base); | 660 DCHECK(mem_base); |
| 614 | 661 |
| 615 // Zero the memory so that it is ready for use if needed again later. It's | |
| 616 // better to clear the memory now, when a thread is exiting, than to do it | |
| 617 // when it is first needed by a thread doing actual work. | |
| 618 memset(mem_base, 0, stack_memory_size_); | |
| 619 | |
| 620 // Remove the destructed tracker from the set of known ones. | 662 // Remove the destructed tracker from the set of known ones. |
| 621 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 663 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 622 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 664 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
| 623 | 665 |
| 624 // The memory was within the persistent memory allocator. Change its type | 666 // Release this memory for re-use at a later time. |
| 625 // so it is effectively marked as "free". | 667 base::AutoLock autolock(thread_tracker_allocator_lock_); |
| 626 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, | 668 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); |
| 627 kTypeIdActivityTracker); | |
| 628 | |
| 629 // Push this on the internal cache of available memory blocks so it can | |
| 630 // be found and reused quickly. If the push somehow exceeds the maximum | |
| 631 // size of the cache, it will fail but a fallback check in CreateTracker | |
| 632 // will find it by (slow) iteration. | |
| 633 available_memories_.push(mem_reference); | |
| 634 } | 669 } |
| 635 | 670 |
| 636 // static | 671 // static |
| 637 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 672 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
| 638 delete reinterpret_cast<ManagedActivityTracker*>(value); | 673 delete reinterpret_cast<ManagedActivityTracker*>(value); |
| 639 } | 674 } |
| 640 | 675 |
| 641 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, | 676 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, |
| 642 uint8_t action, | 677 uint8_t action, |
| 643 uint32_t id, | 678 uint32_t id, |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 704 const base::Process* process) | 739 const base::Process* process) |
| 705 : GlobalActivityTracker::ScopedThreadActivity( | 740 : GlobalActivityTracker::ScopedThreadActivity( |
| 706 nullptr, | 741 nullptr, |
| 707 Activity::ACT_PROCESS_WAIT, | 742 Activity::ACT_PROCESS_WAIT, |
| 708 ActivityData::ForProcess(process->Pid()), | 743 ActivityData::ForProcess(process->Pid()), |
| 709 /*lock_allowed=*/true) {} | 744 /*lock_allowed=*/true) {} |
| 710 #endif | 745 #endif |
| 711 | 746 |
| 712 } // namespace debug | 747 } // namespace debug |
| 713 } // namespace base | 748 } // namespace base |
| OLD | NEW |