Chromium Code Reviews

Side by Side Diff: base/trace_event/heap_profiler_allocation_register.cc

Issue 2089253002: [tracing] Optimize AllocationRegister and increase max backtrace depth. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix Windows (created 4 years, 5 months ago)
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_register.h"

+#include <algorithm>
+
#include "base/trace_event/trace_event_memory_overhead.h"

namespace base {
namespace trace_event {

-AllocationRegister::AllocationRegister()
-    : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
-
-AllocationRegister::AllocationRegister(uint32_t num_cells)
-    // Reserve enough address space to store |num_cells_| entries if necessary,
-    // with a guard page after it to crash the program when attempting to store
-    // more entries.
-    : num_cells_(num_cells),
-      cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
-                                                      sizeof(Cell)))),
-      buckets_(static_cast<CellIndex*>(
-          AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
-
-      // The free list is empty. The first unused cell is cell 1, because index
-      // 0 is used as list terminator.
-      free_list_(0),
-      next_unused_cell_(1) {}
-
-AllocationRegister::~AllocationRegister() {
-  FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
-  FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
-}
-
-void AllocationRegister::Insert(void* address,
-                                size_t size,
-                                AllocationContext context) {
-  DCHECK(address != nullptr);
-  if (size == 0)
-    return;
-
-  CellIndex* idx_ptr = Lookup(address);
-
-  // If the index is 0, the address is not yet present, so insert it.
-  if (*idx_ptr == 0) {
-    *idx_ptr = GetFreeCell();
-
-    // The address stored in a cell is const as long as it is exposed (via the
-    // iterators or |Get|), but because cells are re-used, a const cast is
-    // required to set it on insert and remove.
-    void* const& allocation_address = cells_[*idx_ptr].allocation.address;
-    const_cast<void*&>(allocation_address) = address;
-    cells_[*idx_ptr].next = 0;
-  }
-
-  cells_[*idx_ptr].allocation.size = size;
-  cells_[*idx_ptr].allocation.context = context;
-}
-
-void AllocationRegister::Remove(void* address) {
-  // Get a pointer to the index of the cell that stores |address|. The index can
-  // be an element of |buckets_| or the |next| member of a cell.
-  CellIndex* idx_ptr = Lookup(address);
-  CellIndex freed_idx = *idx_ptr;
-
-  // If the index is 0, the address was not there in the first place.
-  if (freed_idx == 0)
-    return;
-
-  // The cell at the index is now free, remove it from the linked list for
-  // |Hash(address)|.
-  Cell* freed_cell = &cells_[freed_idx];
-  *idx_ptr = freed_cell->next;
-
-  // Put the free cell at the front of the free list.
-  freed_cell->next = free_list_;
-  free_list_ = freed_idx;
-
-  // Reset the address, so that on iteration the free cell is ignored.
-  const_cast<void*&>(freed_cell->allocation.address) = nullptr;
-}
-
-AllocationRegister::Allocation* AllocationRegister::Get(void* address) {
-  CellIndex* idx_ptr = Lookup(address);
-
-  // If the index is 0, the address is not present in the table.
-  return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::begin() const {
-  // Initialize the iterator's index to 0. Cell 0 never stores an entry.
-  ConstIterator iterator(*this, 0);
-  // Incrementing will advance the iterator to the first used cell.
-  ++iterator;
-  return iterator;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::end() const {
-  // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
-  // so index |next_unused_cell_| is an iterator past the last element, in line
-  // with the STL iterator conventions.
-  return ConstIterator(*this, next_unused_cell_);
-}
-
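The removed implementation above threads a free list through the cell array itself: cell 0 is reserved as the list terminator, freed cells are pushed onto the front of the list, and fresh cells are only taken once the list is empty. A minimal standalone sketch of that scheme follows; CellPool and Cell are hypothetical names, and a std::vector plus an assert stand in for the reserved address space and guard page used by the real code.

#include <assert.h>
#include <stdint.h>
#include <vector>

struct Cell {
  uint32_t next = 0;        // Next free cell, or next cell in a bucket's chain.
  void* address = nullptr;  // nullptr marks a free cell.
};

class CellPool {
 public:
  explicit CellPool(uint32_t num_cells) : cells_(num_cells) {}

  // Returns the index of a usable cell. Freed cells are re-used before fresh
  // ones, so |next_unused_cell_| only grows when the free list is empty.
  uint32_t GetFreeCell() {
    if (free_list_ != 0) {
      uint32_t index = free_list_;
      free_list_ = cells_[index].next;
      return index;
    }
    assert(next_unused_cell_ < cells_.size());  // Real code: guard page.
    return next_unused_cell_++;
  }

  // Puts a cell back at the front of the free list.
  void FreeCell(uint32_t index) {
    cells_[index].next = free_list_;
    free_list_ = index;
  }

 private:
  std::vector<Cell> cells_;
  uint32_t free_list_ = 0;         // 0 means the free list is empty.
  uint32_t next_unused_cell_ = 1;  // Cell 0 is reserved as the terminator.
};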
AllocationRegister::ConstIterator::ConstIterator(
-    const AllocationRegister& alloc_register,
-    CellIndex index)
-    : register_(alloc_register), index_(index) {}
+    const AllocationRegister& alloc_register, AllocationIndex index)
+    : register_(alloc_register),
+      index_(index) {}

void AllocationRegister::ConstIterator::operator++() {
-  // Find the next cell with a non-null address until all cells that could
-  // possibly be used have been iterated. A null address indicates a free cell.
-  do {
-    index_++;
-  } while (index_ < register_.next_unused_cell_ &&
-           register_.cells_[index_].allocation.address == nullptr);
+  index_ = register_.allocations_.Next(index_ + 1);
}

bool AllocationRegister::ConstIterator::operator!=(
    const ConstIterator& other) const {
  return index_ != other.index_;
}

-const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
-operator*() const {
-  return register_.cells_[index_].allocation;
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+  return register_.GetAllocation(index_);
}

-AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
-  // The list head is in |buckets_| at the hash offset.
-  CellIndex* idx_ptr = &buckets_[Hash(address)];
-
-  // Chase down the list until the cell that holds |address| is found,
-  // or until the list ends.
-  while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
-    idx_ptr = &cells_[*idx_ptr].next;
-
-  return idx_ptr;
+size_t AllocationRegister::BacktraceHasher::operator () (
+    const Backtrace& backtrace) const {
+  const size_t kSampleLength = 10;
+
+  uintptr_t total_value = 0;
+
+  size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+  for (size_t i = 0; i != head_end; ++i) {
+    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+  }
+
+  size_t tail_start = backtrace.frame_count -
+      std::min(backtrace.frame_count - head_end, kSampleLength);
+  for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+  }
+
+  total_value += backtrace.frame_count;
+
+  // These magic constants give best results in terms of average collisions
+  // per backtrace. They were found by replaying real backtraces from Linux
+  // and Android against different hash functions.
+  return (total_value * 131101) >> 14;
}

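The new BacktraceHasher above is what keeps the deeper backtraces introduced by this change cheap to hash: only the first and last kSampleLength frames plus the frame count feed the hash. A sketch of the same sampling over a plain vector of frame addresses; HashBacktraceFrames is a hypothetical name and this is not part of the patch.

#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <vector>

size_t HashBacktraceFrames(const std::vector<uintptr_t>& frames) {
  const size_t kSampleLength = 10;
  uintptr_t total_value = 0;

  // Sum the first min(n, 10) frames.
  size_t head_end = std::min(frames.size(), kSampleLength);
  for (size_t i = 0; i != head_end; ++i)
    total_value += frames[i];

  // Sum up to 10 trailing frames that were not already covered by the head.
  size_t tail_start =
      frames.size() - std::min(frames.size() - head_end, kSampleLength);
  for (size_t i = tail_start; i != frames.size(); ++i)
    total_value += frames[i];

  // Mix in the depth and apply the same multiply-shift as the patch.
  total_value += frames.size();
  return (total_value * 131101) >> 14;
}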
-AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
-  // First try to re-use a cell from the freelist.
-  if (free_list_) {
-    CellIndex idx = free_list_;
-    free_list_ = cells_[idx].next;
-    return idx;
-  }
-
-  // Otherwise pick the next cell that has not been touched before.
-  CellIndex idx = next_unused_cell_;
-  next_unused_cell_++;
-
-  // If the hash table has too little capacity (when too little address space
-  // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
-  // the allocated storage. A guard page is allocated there to crash the
-  // program in that case. There are alternative solutions:
-  // - Deal with it, increase capacity by reallocating |cells_|.
-  // - Refuse to insert and let the caller deal with it.
-  // Because free cells are re-used before accessing fresh cells with a higher
-  // index, and because reserving address space without touching it is cheap,
-  // the simplest solution is to just allocate a humongous chunk of address
-  // space.
-
-  DCHECK_LT(next_unused_cell_, num_cells_ + 1);
-
-  return idx;
-}
-
-// static
-uint32_t AllocationRegister::Hash(void* address) {
+size_t AllocationRegister::AddressHasher::operator () (
+    const void* address) const {
  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
  // been chosen carefully based on measurements with real-world data (addresses
  // recorded from a Chrome trace run). It is the first prime after 2^17. For
  // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
  // buckets. Microbenchmarks show that this simple scheme outperforms fancy
  // hashes like Murmur3 by 20 to 40 percent.
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;
  const uintptr_t shift = 14;
  const uintptr_t h = (key * a) >> shift;
-  return static_cast<uint32_t>(h) & kNumBucketsMask;
+  return h;
+}
+
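The multiply-shift scheme in AddressHasher is easy to reason about in isolation: two keys that differ by d end up roughly d * 131101 / 2^14 slots apart, so addresses only 16 bytes apart already land about 128 slots apart instead of clustering in neighbouring buckets. A standalone sketch mirroring the function above; MultiplyShiftHash is a hypothetical name.

#include <stdint.h>

uintptr_t MultiplyShiftHash(const void* address) {
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;  // First prime after 2^17, as noted above.
  const uintptr_t shift = 14;
  return (key * a) >> shift;   // Keep the higher-order, well-mixed bits.
}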
+AllocationRegister::AllocationRegister()
+    : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+                                       size_t backtrace_capacity)
+    : allocations_(allocation_capacity),
+      backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+                                size_t size,
+                                const AllocationContext& context) {
+  DCHECK(address != nullptr);
+  if (size == 0) {
+    return;
+  }
+
+  AllocationInfo info = {
+      size,
+      context.type_name,
+      InsertBacktrace(context.backtrace)
+  };
+
+  // Try to insert the allocation.
+  auto index_and_flag = allocations_.Insert(address, info);
+  if (!index_and_flag.second) {
+    // |address| is already there - overwrite the allocation info.
+    auto& old_info = allocations_.Get(index_and_flag.first).second;
+    RemoveBacktrace(old_info.backtrace_index);
+    old_info = info;
+  }
+}
+
+void AllocationRegister::Remove(const void* address) {
+  auto index = allocations_.Find(address);
+  if (index == AllocationMap::kInvalidKVIndex) {
+    return;
+  }
+
+  const AllocationInfo& info = allocations_.Get(index).second;
+  RemoveBacktrace(info.backtrace_index);
+  allocations_.Remove(index);
+}
+
+bool AllocationRegister::Get(const void* address,
+                             Allocation* out_allocation) const {
+  auto index = allocations_.Find(address);
+  if (index == AllocationMap::kInvalidKVIndex) {
+    return false;
+  }
+
+  if (out_allocation) {
+    *out_allocation = GetAllocation(index);
+  }
+  return true;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+  return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+  return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}

void AllocationRegister::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) const {
-  // Estimate memory overhead by counting all of the cells that have ever been
-  // touched. Don't report mmapped memory as allocated, because it has not been
-  // allocated by malloc.
  size_t allocated = sizeof(AllocationRegister);
  size_t resident = sizeof(AllocationRegister)
-      // Include size of touched cells (size of |*cells_|).
-      + sizeof(Cell) * next_unused_cell_
-      // Size of |*buckets_|.
-      + sizeof(CellIndex) * kNumBuckets;
+                    + allocations_.EstimateUsedMemory()
+                    + backtraces_.EstimateUsedMemory();
  overhead->Add("AllocationRegister", allocated, resident);
}

+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+    const Backtrace& backtrace) {
+  auto index = backtraces_.Insert(backtrace, 0).first;
+  auto& backtrace_and_count = backtraces_.Get(index);
+  backtrace_and_count.second++;
+  return index;
+}
+
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+  auto& backtrace_and_count = backtraces_.Get(index);
+  if (--backtrace_and_count.second == 0) {
+    // Backtrace is not referenced anymore - remove it.
+    backtraces_.Remove(index);
+  }
+}
+
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+    AllocationMap::KVIndex index) const {
+  const auto& address_and_info = allocations_.Get(index);
+  const auto& backtrace_and_count = backtraces_.Get(
+      address_and_info.second.backtrace_index);
+  return {
+      address_and_info.first,
+      address_and_info.second.size,
+      AllocationContext(
+          backtrace_and_count.first,
+          address_and_info.second.type_name)
+  };
+}
+
}  // namespace trace_event
}  // namespace base
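The InsertBacktrace()/RemoveBacktrace() pair added above implements reference-counted interning: each distinct backtrace is stored once in |backtraces_| with a use count, and the entry is dropped when the last allocation referring to it goes away. A rough standalone analogue of that idea, using std::unordered_map in place of the patch's fixed-size map and std::string standing in for Backtrace; InternTable is a hypothetical name.

#include <stddef.h>
#include <string>
#include <unordered_map>

class InternTable {
 public:
  // Returns a stable handle for |value|, creating an entry with count 1 on
  // first use and bumping the count on every further Insert of the same value.
  const std::string* Insert(const std::string& value) {
    auto result = counts_.emplace(value, 0);
    ++result.first->second;
    return &result.first->first;
  }

  // Drops one reference; the entry disappears once nothing refers to it.
  void Remove(const std::string* handle) {
    auto it = counts_.find(*handle);
    if (it != counts_.end() && --it->second == 0)
      counts_.erase(it);
  }

 private:
  std::unordered_map<std::string, size_t> counts_;
};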
