Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(46)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2387733002: Move memory management code into separate class for future reuse. (Closed)
Patch Set: use method for determining 'free' type Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include "base/debug/stack_trace.h" 7 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 8 #include "base/files/file.h"
9 #include "base/files/file_path.h" 9 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
59 ThreadRef thread_ref; 59 ThreadRef thread_ref;
60 thread_ref.as_id = 0; // Zero the union in case other is smaller. 60 thread_ref.as_id = 0; // Zero the union in case other is smaller.
61 #if defined(OS_WIN) 61 #if defined(OS_WIN)
62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
63 #elif defined(OS_POSIX) 63 #elif defined(OS_POSIX)
64 thread_ref.as_handle = handle.platform_handle(); 64 thread_ref.as_handle = handle.platform_handle();
65 #endif 65 #endif
66 return ForThread(thread_ref.as_id); 66 return ForThread(thread_ref.as_id);
67 } 67 }
68 68
// Manages reusable objects of a single |object_type| inside a
// PersistentMemoryAllocator. Up to |cache_size| released references are kept
// locally for fast reuse. |allocator| is borrowed, not owned, and must
// outlive this object.
ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
    PersistentMemoryAllocator* allocator,
    uint32_t object_type,
    size_t object_size,
    size_t cache_size)
    : allocator_(allocator),
      object_type_(object_type),
      object_size_(object_size),
      cache_size_(cache_size),
      iterator_(allocator),
      // Fixed-capacity backing store for the free-reference cache; sized once
      // here and never grown.
      cache_values_(new Reference[cache_size]),
      cache_used_(0) {
  DCHECK(allocator);
}
83
84 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
85
// Acquires a reference to a block of the configured type/size, trying in
// order: (1) the local cache of previously-released objects, (2) a "free"
// typed object found by iterating persistent memory, (3) a brand-new
// allocation. Returns a null reference only if the final allocation fails.
// Claiming an object is done via ChangeType(), which can fail if another
// thread claims the same block first; such losses are simply retried.
ActivityTrackerMemoryAllocator::Reference
ActivityTrackerMemoryAllocator::GetObjectReference() {
  // First see if there is a cached value that can be returned. This is much
  // faster than searching the memory system for free blocks.
  while (cache_used_ > 0) {
    Reference cached = cache_values_[--cache_used_];
    // Change the type of the cached object to the proper type and return it.
    // If the type-change fails that means another thread has taken this from
    // under us (via the search below) so ignore it and keep trying.
    if (allocator_->ChangeType(cached, object_type_, FreeTypeOf(object_type_)))
      return cached;
  }

  // Fetch the next "free" object from persistent memory. Rather than restart
  // the iterator at the head each time and likely waste time going again
  // through objects that aren't relevant, the iterator continues from where
  // it last left off and is only reset when the end is reached. If the
  // returned reference matches |last|, then it has wrapped without finding
  // anything.
  const Reference last = iterator_.GetLast();
  while (true) {
    uint32_t type;
    Reference found = iterator_.GetNext(&type);
    if (found && type == FreeTypeOf(object_type_)) {
      // Found a free object. Change it to the proper type and return it. If
      // the type-change fails that means another thread has taken this from
      // under us so ignore it and keep trying.
      if (allocator_->ChangeType(found, object_type_, FreeTypeOf(object_type_)))
        return found;
    }
    // The wrap check comes after the type check so the object at |last|
    // itself is still considered a candidate before giving up.
    if (found == last) {
      // Wrapped. No desired object was found.
      break;
    }
    if (!found) {
      // Reached end; start over at the beginning.
      iterator_.Reset();
    }
  }

  // No free block was found so instead allocate a new one.
  Reference allocated = allocator_->Allocate(object_size_, object_type_);
  if (allocated)
    allocator_->MakeIterable(allocated);
  return allocated;
}
131
132 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
133 // Zero the memory so that it is ready for immediate use if needed later.
134 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
135 DCHECK(mem_base);
136 memset(mem_base, 0, object_size_);
137
138 // Mark object as free.
139 bool success =
140 allocator_->ChangeType(ref, FreeTypeOf(object_type_), object_type_);
141 DCHECK(success);
142
143 // Add this reference to our "free" cache if there is space. If not, the type
144 // has still been changed to indicate that it is free so this (or another)
145 // thread can find it, albeit more slowly, using the iteration method above.
146 if (cache_used_ < cache_size_)
147 cache_values_[cache_used_++] = ref;
148 }
149
69 // static 150 // static
70 void Activity::FillFrom(Activity* activity, 151 void Activity::FillFrom(Activity* activity,
71 const void* origin, 152 const void* origin,
72 Type type, 153 Type type,
73 const ActivityData& data) { 154 const ActivityData& data) {
74 activity->time_internal = base::TimeTicks::Now().ToInternalValue(); 155 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
75 activity->origin_address = reinterpret_cast<uintptr_t>(origin); 156 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
76 activity->activity_type = type; 157 activity->activity_type = type;
77 activity->data = data; 158 activity->data = data;
78 159
(...skipping 413 matching lines...) Expand 10 before | Expand all | Expand 10 after
492 uint64_t id, 573 uint64_t id,
493 StringPiece name, 574 StringPiece name,
494 int stack_depth) { 575 int stack_depth) {
495 CreateWithAllocator( 576 CreateWithAllocator(
496 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth); 577 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
497 } 578 }
498 579
499 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { 580 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
500 DCHECK(!this_thread_tracker_.Get()); 581 DCHECK(!this_thread_tracker_.Get());
501 582
502 PersistentMemoryAllocator::Reference mem_reference = 583 PersistentMemoryAllocator::Reference mem_reference;
503 PersistentMemoryAllocator::kReferenceNull;
504 DCHECK(!mem_reference); // invalid_value should be checkable with !
505 584
506 while (true) { 585 {
507 // Get the first available memory from the top of the FIFO. 586 base::AutoLock autolock(thread_tracker_allocator_lock_);
508 if (!available_memories_.pop(&mem_reference)) 587 mem_reference = thread_tracker_allocator_.GetObjectReference();
509 break;
510
511 // Turn the reference back into one of the activity-tracker type. This can
512 // fail if something else has already taken the block and changed its type.
513 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
514 kTypeIdActivityTrackerFree)) {
515 break;
516 }
517 } 588 }
518 589
519 // Handle the case where no known available memories were found.
520 if (!mem_reference) { 590 if (!mem_reference) {
521 // Allocate a block of memory from the persistent segment. 591 // Failure. This shouldn't happen. But be graceful if it does, probably
522 mem_reference = 592 // because the underlying allocator wasn't given enough memory to satisfy
 523 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); 593 all possible requests.
524 if (mem_reference) { 594 NOTREACHED();
525 // Success. Make the allocation iterable so it can be found later. 595 // Report the thread-count at which the allocator was full so that the
526 allocator_->MakeIterable(mem_reference); 596 // failure can be seen and underlying memory resized appropriately.
527 } else { 597 UMA_HISTOGRAM_COUNTS_1000(
528 // Failure. Look for any free blocks that weren't held in the cache 598 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
529 // of available memories and try to claim it. This can happen if the 599 thread_tracker_count_.load(std::memory_order_relaxed));
530 // |available_memories_| stack isn't sufficiently large to hold all 600 // Return null, just as if tracking wasn't enabled.
531 // released memories or if multiple independent processes are sharing 601 return nullptr;
532 // the memory segment.
533 PersistentMemoryAllocator::Iterator iter(allocator_.get());
534 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) !=
535 0) {
536 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
537 kTypeIdActivityTrackerFree)) {
538 break;
539 }
540 mem_reference = 0;
541 }
542 if (!mem_reference) {
 543 // Double Failure. This shouldn't happen. But be graceful if it does,
544 // probably because the underlying allocator wasn't given enough memory
545 // to satisfy all possible requests.
546 NOTREACHED();
547 // Report the thread-count at which the allocator was full so that the
548 // failure can be seen and underlying memory resized appropriately.
549 UMA_HISTOGRAM_COUNTS_1000(
550 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
551 thread_tracker_count_.load(std::memory_order_relaxed));
552 // Return null, just as if tracking wasn't enabled.
553 return nullptr;
554 }
555 }
556 } 602 }
557 603
558 // Convert the memory block found above into an actual memory address. 604 // Convert the memory block found above into an actual memory address.
559 DCHECK(mem_reference); 605 DCHECK(mem_reference);
560 void* mem_base = 606 void* mem_base =
561 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); 607 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
562 DCHECK(mem_base); 608 DCHECK(mem_base);
563 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); 609 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
564 610
565 // Create a tracker with the acquired memory and set it as the tracker 611 // Create a tracker with the acquired memory and set it as the tracker
(...skipping 16 matching lines...) Expand all
582 delete tracker; 628 delete tracker;
583 } 629 }
584 630
585 GlobalActivityTracker::GlobalActivityTracker( 631 GlobalActivityTracker::GlobalActivityTracker(
586 std::unique_ptr<PersistentMemoryAllocator> allocator, 632 std::unique_ptr<PersistentMemoryAllocator> allocator,
587 int stack_depth) 633 int stack_depth)
588 : allocator_(std::move(allocator)), 634 : allocator_(std::move(allocator)),
589 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 635 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
590 this_thread_tracker_(&OnTLSDestroy), 636 this_thread_tracker_(&OnTLSDestroy),
591 thread_tracker_count_(0), 637 thread_tracker_count_(0),
592 available_memories_(kMaxThreadCount) { 638 thread_tracker_allocator_(allocator_.get(),
639 kTypeIdActivityTracker,
640 stack_memory_size_,
641 kCachedThreadMemories) {
593 // Ensure the passed memory is valid and empty (iterator finds nothing). 642 // Ensure the passed memory is valid and empty (iterator finds nothing).
594 uint32_t type; 643 uint32_t type;
595 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 644 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
596 645
597 // Ensure that there is no other global object and then make this one such. 646 // Ensure that there is no other global object and then make this one such.
598 DCHECK(!g_tracker_); 647 DCHECK(!g_tracker_);
599 g_tracker_ = this; 648 g_tracker_ = this;
600 } 649 }
601 650
602 GlobalActivityTracker::~GlobalActivityTracker() { 651 GlobalActivityTracker::~GlobalActivityTracker() {
603 DCHECK_EQ(g_tracker_, this); 652 DCHECK_EQ(g_tracker_, this);
604 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); 653 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
605 g_tracker_ = nullptr; 654 g_tracker_ = nullptr;
606 } 655 }
607 656
608 void GlobalActivityTracker::ReturnTrackerMemory( 657 void GlobalActivityTracker::ReturnTrackerMemory(
609 ManagedActivityTracker* tracker) { 658 ManagedActivityTracker* tracker) {
610 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; 659 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
611 void* mem_base = tracker->mem_base_; 660 void* mem_base = tracker->mem_base_;
612 DCHECK(mem_reference); 661 DCHECK(mem_reference);
613 DCHECK(mem_base); 662 DCHECK(mem_base);
614 663
615 // Zero the memory so that it is ready for use if needed again later. It's
616 // better to clear the memory now, when a thread is exiting, than to do it
617 // when it is first needed by a thread doing actual work.
618 memset(mem_base, 0, stack_memory_size_);
619
620 // Remove the destructed tracker from the set of known ones. 664 // Remove the destructed tracker from the set of known ones.
621 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); 665 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
622 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); 666 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
623 667
624 // The memory was within the persistent memory allocator. Change its type 668 // Release this memory for re-use at a later time.
625 // so it is effectively marked as "free". 669 base::AutoLock autolock(thread_tracker_allocator_lock_);
626 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, 670 thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
627 kTypeIdActivityTracker);
628
629 // Push this on the internal cache of available memory blocks so it can
630 // be found and reused quickly. If the push somehow exceeds the maximum
631 // size of the cache, it will fail but a fallback check in CreateTracker
632 // will find it by (slow) iteration.
633 available_memories_.push(mem_reference);
634 } 671 }
635 672
636 // static 673 // static
637 void GlobalActivityTracker::OnTLSDestroy(void* value) { 674 void GlobalActivityTracker::OnTLSDestroy(void* value) {
638 delete reinterpret_cast<ManagedActivityTracker*>(value); 675 delete reinterpret_cast<ManagedActivityTracker*>(value);
639 } 676 }
640 677
641 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, 678 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
642 uint8_t action, 679 uint8_t action,
643 uint32_t id, 680 uint32_t id,
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
704 const base::Process* process) 741 const base::Process* process)
705 : GlobalActivityTracker::ScopedThreadActivity( 742 : GlobalActivityTracker::ScopedThreadActivity(
706 nullptr, 743 nullptr,
707 Activity::ACT_PROCESS_WAIT, 744 Activity::ACT_PROCESS_WAIT,
708 ActivityData::ForProcess(process->Pid()), 745 ActivityData::ForProcess(process->Pid()),
709 /*lock_allowed=*/true) {} 746 /*lock_allowed=*/true) {}
710 #endif 747 #endif
711 748
712 } // namespace debug 749 } // namespace debug
713 } // namespace base 750 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698