Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(77)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2387733002: Move memory management code into separate class for future reuse. (Closed)
Patch Set: object_free -> object_free_type Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include "base/debug/stack_trace.h" 7 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 8 #include "base/files/file.h"
9 #include "base/files/file_path.h" 9 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
59 ThreadRef thread_ref; 59 ThreadRef thread_ref;
60 thread_ref.as_id = 0; // Zero the union in case other is smaller. 60 thread_ref.as_id = 0; // Zero the union in case other is smaller.
61 #if defined(OS_WIN) 61 #if defined(OS_WIN)
62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
63 #elif defined(OS_POSIX) 63 #elif defined(OS_POSIX)
64 thread_ref.as_handle = handle.platform_handle(); 64 thread_ref.as_handle = handle.platform_handle();
65 #endif 65 #endif
66 return ForThread(thread_ref.as_id); 66 return ForThread(thread_ref.as_id);
67 } 67 }
68 68
// Constructs an allocator that manages fixed-size, typed objects inside an
// existing PersistentMemoryAllocator. Released objects are remembered in a
// small local cache (up to |cache_size| references) so they can be handed
// out again without searching the underlying persistent memory.
//
// |allocator| must outlive this object (held as a raw, non-owning pointer).
// |object_type| tags blocks that are in use; |object_free_type| tags blocks
// that are available for re-use. |object_size| is the byte size requested
// for each new allocation.
ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
    PersistentMemoryAllocator* allocator,
    uint32_t object_type,
    uint32_t object_free_type,
    size_t object_size,
    size_t cache_size)
    : allocator_(allocator),
      object_type_(object_type),
      object_free_type_(object_free_type),
      object_size_(object_size),
      cache_size_(cache_size),
      iterator_(allocator),
      cache_values_(new Reference[cache_size]),
      cache_used_(0) {
  DCHECK(allocator);
}
85
86 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
87
// Returns a reference to a block of |object_size_| bytes tagged with
// |object_type_|. Re-uses previously released blocks when possible: first
// from the local cache, then by scanning the persistent memory for blocks
// tagged |object_free_type_|. Only if neither yields a block is a fresh one
// allocated. Returns the null/zero reference if allocation fails (i.e. the
// underlying allocator is out of memory).
//
// Claiming a block is done via an atomic type-change from "free" to "in
// use"; a failed ChangeType() means another thread claimed the block first,
// so the search simply continues.
ActivityTrackerMemoryAllocator::Reference
ActivityTrackerMemoryAllocator::GetObjectReference() {
  // First see if there is a cached value that can be returned. This is much
  // faster than searching the memory system for free blocks.
  while (cache_used_ > 0) {
    Reference cached = cache_values_[--cache_used_];
    // Change the type of the cached object to the proper type and return it.
    // If the type-change fails that means another thread has taken this from
    // under us (via the search below) so ignore it and keep trying.
    if (allocator_->ChangeType(cached, object_type_, object_free_type_))
      return cached;
  }

  // Fetch the next "free" object from persistent memory. Rather than restart
  // the iterator at the head each time and likely waste time going again
  // through objects that aren't relevant, the iterator continues from where
  // it last left off and is only reset when the end is reached. If the
  // returned reference matches |last|, then it has wrapped without finding
  // anything.
  const Reference last = iterator_.GetLast();
  while (true) {
    uint32_t type;
    Reference found = iterator_.GetNext(&type);
    if (found && type == object_free_type_) {
      // Found a free object. Change it to the proper type and return it. If
      // the type-change fails that means another thread has taken this from
      // under us so ignore it and keep trying.
      if (allocator_->ChangeType(found, object_type_, object_free_type_))
        return found;
    }
    if (found == last) {
      // Wrapped. No desired object was found.
      break;
    }
    if (!found) {
      // Reached end; start over at the beginning.
      iterator_.Reset();
    }
  }

  // No free block was found so instead allocate a new one.
  Reference allocated = allocator_->Allocate(object_size_, object_type_);
  if (allocated)
    allocator_->MakeIterable(allocated);  // Make it findable by later scans.
  return allocated;
}
134
135 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
136 // Zero the memory so that it is ready for immediate use if needed later.
137 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
138 DCHECK(mem_base);
139 memset(mem_base, 0, object_size_);
140
141 // Mark object as free.
142 bool success = allocator_->ChangeType(ref, object_free_type_, object_type_);
143 DCHECK(success);
144
145 // Add this reference to our "free" cache if there is space. If not, the type
146 // has still been changed to indicate that it is free so this (or another)
147 // thread can find it, albeit more slowly, using the iteration method above.
148 if (cache_used_ < cache_size_)
149 cache_values_[cache_used_++] = ref;
150 }
151
69 // static 152 // static
70 void Activity::FillFrom(Activity* activity, 153 void Activity::FillFrom(Activity* activity,
71 const void* origin, 154 const void* origin,
72 Type type, 155 Type type,
73 const ActivityData& data) { 156 const ActivityData& data) {
74 activity->time_internal = base::TimeTicks::Now().ToInternalValue(); 157 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
75 activity->origin_address = reinterpret_cast<uintptr_t>(origin); 158 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
76 activity->activity_type = type; 159 activity->activity_type = type;
77 activity->data = data; 160 activity->data = data;
78 161
(...skipping 413 matching lines...) Expand 10 before | Expand all | Expand 10 after
492 uint64_t id, 575 uint64_t id,
493 StringPiece name, 576 StringPiece name,
494 int stack_depth) { 577 int stack_depth) {
495 CreateWithAllocator( 578 CreateWithAllocator(
496 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth); 579 MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
497 } 580 }
498 581
499 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { 582 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
500 DCHECK(!this_thread_tracker_.Get()); 583 DCHECK(!this_thread_tracker_.Get());
501 584
502 PersistentMemoryAllocator::Reference mem_reference = 585 PersistentMemoryAllocator::Reference mem_reference;
503 PersistentMemoryAllocator::kReferenceNull;
504 DCHECK(!mem_reference); // invalid_value should be checkable with !
505 586
506 while (true) { 587 {
507 // Get the first available memory from the top of the FIFO. 588 base::AutoLock autolock(thread_tracker_allocator_lock_);
508 if (!available_memories_.pop(&mem_reference)) 589 mem_reference = thread_tracker_allocator_.GetObjectReference();
509 break;
510
511 // Turn the reference back into one of the activity-tracker type. This can
512 // fail if something else has already taken the block and changed its type.
513 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
514 kTypeIdActivityTrackerFree)) {
515 break;
516 }
517 } 590 }
518 591
519 // Handle the case where no known available memories were found.
520 if (!mem_reference) { 592 if (!mem_reference) {
521 // Allocate a block of memory from the persistent segment. 593 // Failure. This shouldn't happen. But be graceful if it does, probably
522 mem_reference = 594 // because the underlying allocator wasn't given enough memory to satisfy
523 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); 595 // all possible requests.
524 if (mem_reference) { 596 NOTREACHED();
525 // Success. Make the allocation iterable so it can be found later. 597 // Report the thread-count at which the allocator was full so that the
526 allocator_->MakeIterable(mem_reference); 598 // failure can be seen and underlying memory resized appropriately.
527 } else { 599 UMA_HISTOGRAM_COUNTS_1000(
528 // Failure. Look for any free blocks that weren't held in the cache 600 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
529 // of available memories and try to claim it. This can happen if the 601 thread_tracker_count_.load(std::memory_order_relaxed));
530 // |available_memories_| stack isn't sufficiently large to hold all 602 // Return null, just as if tracking wasn't enabled.
531 // released memories or if multiple independent processes are sharing 603 return nullptr;
532 // the memory segment.
533 PersistentMemoryAllocator::Iterator iter(allocator_.get());
534 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) !=
535 0) {
536 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
537 kTypeIdActivityTrackerFree)) {
538 break;
539 }
540 mem_reference = 0;
541 }
542 if (!mem_reference) {
543 // Double Failure. This shouldn't happen. But be graceful if it does,
544 // probably because the underlying allocator wasn't given enough memory
545 // to satisfy all possible requests.
546 NOTREACHED();
547 // Report the thread-count at which the allocator was full so that the
548 // failure can be seen and underlying memory resized appropriately.
549 UMA_HISTOGRAM_COUNTS_1000(
550 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
551 thread_tracker_count_.load(std::memory_order_relaxed));
552 // Return null, just as if tracking wasn't enabled.
553 return nullptr;
554 }
555 }
556 } 604 }
557 605
558 // Convert the memory block found above into an actual memory address. 606 // Convert the memory block found above into an actual memory address.
559 DCHECK(mem_reference); 607 DCHECK(mem_reference);
560 void* mem_base = 608 void* mem_base =
561 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); 609 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
562 DCHECK(mem_base); 610 DCHECK(mem_base);
563 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); 611 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
564 612
565 // Create a tracker with the acquired memory and set it as the tracker 613 // Create a tracker with the acquired memory and set it as the tracker
(...skipping 16 matching lines...) Expand all
582 delete tracker; 630 delete tracker;
583 } 631 }
584 632
585 GlobalActivityTracker::GlobalActivityTracker( 633 GlobalActivityTracker::GlobalActivityTracker(
586 std::unique_ptr<PersistentMemoryAllocator> allocator, 634 std::unique_ptr<PersistentMemoryAllocator> allocator,
587 int stack_depth) 635 int stack_depth)
588 : allocator_(std::move(allocator)), 636 : allocator_(std::move(allocator)),
589 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 637 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
590 this_thread_tracker_(&OnTLSDestroy), 638 this_thread_tracker_(&OnTLSDestroy),
591 thread_tracker_count_(0), 639 thread_tracker_count_(0),
592 available_memories_(kMaxThreadCount) { 640 thread_tracker_allocator_(allocator_.get(),
641 kTypeIdActivityTracker,
642 kTypeIdActivityTrackerFree,
643 stack_memory_size_,
644 kCachedThreadMemories) {
593 // Ensure the passed memory is valid and empty (iterator finds nothing). 645 // Ensure the passed memory is valid and empty (iterator finds nothing).
594 uint32_t type; 646 uint32_t type;
595 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 647 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
596 648
597 // Ensure that there is no other global object and then make this one such. 649 // Ensure that there is no other global object and then make this one such.
598 DCHECK(!g_tracker_); 650 DCHECK(!g_tracker_);
599 g_tracker_ = this; 651 g_tracker_ = this;
600 } 652 }
601 653
602 GlobalActivityTracker::~GlobalActivityTracker() { 654 GlobalActivityTracker::~GlobalActivityTracker() {
603 DCHECK_EQ(g_tracker_, this); 655 DCHECK_EQ(g_tracker_, this);
604 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); 656 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
605 g_tracker_ = nullptr; 657 g_tracker_ = nullptr;
606 } 658 }
607 659
608 void GlobalActivityTracker::ReturnTrackerMemory( 660 void GlobalActivityTracker::ReturnTrackerMemory(
609 ManagedActivityTracker* tracker) { 661 ManagedActivityTracker* tracker) {
610 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; 662 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
611 void* mem_base = tracker->mem_base_; 663 void* mem_base = tracker->mem_base_;
612 DCHECK(mem_reference); 664 DCHECK(mem_reference);
613 DCHECK(mem_base); 665 DCHECK(mem_base);
614 666
615 // Zero the memory so that it is ready for use if needed again later. It's
616 // better to clear the memory now, when a thread is exiting, than to do it
617 // when it is first needed by a thread doing actual work.
618 memset(mem_base, 0, stack_memory_size_);
619
620 // Remove the destructed tracker from the set of known ones. 667 // Remove the destructed tracker from the set of known ones.
621 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); 668 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
622 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); 669 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
623 670
624 // The memory was within the persistent memory allocator. Change its type 671 // Release this memory for re-use at a later time.
625 // so it is effectively marked as "free". 672 base::AutoLock autolock(thread_tracker_allocator_lock_);
626 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, 673 thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
627 kTypeIdActivityTracker);
628
629 // Push this on the internal cache of available memory blocks so it can
630 // be found and reused quickly. If the push somehow exceeds the maximum
631 // size of the cache, it will fail but a fallback check in CreateTracker
632 // will find it by (slow) iteration.
633 available_memories_.push(mem_reference);
634 } 674 }
635 675
636 // static 676 // static
637 void GlobalActivityTracker::OnTLSDestroy(void* value) { 677 void GlobalActivityTracker::OnTLSDestroy(void* value) {
638 delete reinterpret_cast<ManagedActivityTracker*>(value); 678 delete reinterpret_cast<ManagedActivityTracker*>(value);
639 } 679 }
640 680
641 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, 681 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
642 uint8_t action, 682 uint8_t action,
643 uint32_t id, 683 uint32_t id,
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
704 const base::Process* process) 744 const base::Process* process)
705 : GlobalActivityTracker::ScopedThreadActivity( 745 : GlobalActivityTracker::ScopedThreadActivity(
706 nullptr, 746 nullptr,
707 Activity::ACT_PROCESS_WAIT, 747 Activity::ACT_PROCESS_WAIT,
708 ActivityData::ForProcess(process->Pid()), 748 ActivityData::ForProcess(process->Pid()),
709 /*lock_allowed=*/true) {} 749 /*lock_allowed=*/true) {}
710 #endif 750 #endif
711 751
712 } // namespace debug 752 } // namespace debug
713 } // namespace base 753 } // namespace base
OLDNEW
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698