Chromium Code Reviews

Diff: base/trace_event/heap_profiler_allocation_register.cc

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: comments from primiano, thakis. Created 3 years, 7 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/heap_profiler_allocation_register.h"

 #include <algorithm>
 #include <limits>

-#include "base/trace_event/trace_event_memory_overhead.h"
-
 namespace base {
 namespace trace_event {

+size_t AllocationRegister::AddressHasher::operator()(
+    const void* address) const {
+  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
+  // been chosen carefully based on measurements with real-world data
+  // (addresses recorded from a Chrome trace run). It is the first prime after
+  // 2^17. For |shift|, 15 yields good results for both 2^18 and 2^19 bucket
+  // sizes. Microbenchmarks show that this simple scheme outperforms fancy
+  // hashes like Murmur3 by 20 to 40 percent.
+  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+  const uintptr_t a = 131101;
+  const uintptr_t shift = 15;
+  const uintptr_t h = (key * a) >> shift;
+  return h;
+}
+
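A minimal standalone sketch of the multiplicative scheme described in the
comment above: multiply the address by the prime 131101 and keep the
high-order bits via the shift. The bucket count and sample addresses are
illustrative assumptions, not values taken from this CL.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t a = 131101;          // first prime after 2^17
      const uintptr_t shift = 15;          // works well for 2^18/2^19 buckets
      const uintptr_t kBuckets = 1 << 18;  // hypothetical bucket count
      // Addresses of consecutive 16-byte allocations spread out instead of
      // clustering in adjacent buckets.
      for (uintptr_t addr = 0x5000; addr < 0x5040; addr += 16) {
        const uintptr_t h = (addr * a) >> shift;
        std::printf("%#llx -> bucket %llu\n",
                    static_cast<unsigned long long>(addr),
                    static_cast<unsigned long long>(h % kBuckets));
      }
      return 0;
    }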
 AllocationRegister::ConstIterator::ConstIterator(
     const AllocationRegister& alloc_register,
     AllocationIndex index)
     : register_(alloc_register), index_(index) {}

 void AllocationRegister::ConstIterator::operator++() {
   index_ = register_.allocations_.Next(index_ + 1);
 }

 bool AllocationRegister::ConstIterator::operator!=(
(...skipping 24 matching lines...)
 }

   total_value += backtrace.frame_count;

   // These magic constants give best results in terms of average collisions
   // per backtrace. They were found by replaying real backtraces from Linux
   // and Android against different hash functions.
   return (total_value * 131101) >> 14;
 }

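For context, a hypothetical sketch of how such a backtrace hash might fold
the frames into total_value (the accumulation loop sits in the lines elided
above, so the struct and loop here are illustrative; only the tail mirrors
the visible code):

    #include <cstddef>
    #include <cstdint>

    struct Frame { const void* value; };  // stand-in for a stack frame entry

    size_t HashBacktraceSketch(const Frame* frames, size_t frame_count) {
      uintptr_t total_value = 0;
      // Assumed accumulation step: fold every frame into a single value.
      for (size_t i = 0; i < frame_count; i++)
        total_value += reinterpret_cast<uintptr_t>(frames[i].value);
      total_value += frame_count;           // mirrors the visible tail above
      return (total_value * 131101) >> 14;  // magic constants from this file
    }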
-size_t AllocationRegister::AddressHasher::operator()(
-    const void* address) const {
-  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
-  // been chosen carefully based on measurements with real-world data
-  // (addresses recorded from a Chrome trace run). It is the first prime after
-  // 2^17. For |shift|, 15 yields good results for both 2^18 and 2^19 bucket
-  // sizes. Microbenchmarks show that this simple scheme outperforms fancy
-  // hashes like Murmur3 by 20 to 40 percent.
-  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
-  const uintptr_t a = 131101;
-  const uintptr_t shift = 15;
-  const uintptr_t h = (key * a) >> shift;
-  return h;
-}
-
 AllocationRegister::AllocationRegister()
     : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}

 AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                        size_t backtrace_capacity)
     : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
   Backtrace sentinel = {};
   sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
   sentinel.frame_count = 1;

(...skipping 61 matching lines...)
 }

 AllocationRegister::ConstIterator AllocationRegister::begin() const {
   return ConstIterator(*this, allocations_.Next(0));
 }

 AllocationRegister::ConstIterator AllocationRegister::end() const {
   return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
 }

-void AllocationRegister::EstimateTraceMemoryOverhead(
-    TraceEventMemoryOverhead* overhead) const {
-  size_t allocated = sizeof(AllocationRegister);
-  size_t resident = sizeof(AllocationRegister) +
-                    allocations_.EstimateUsedMemory() +
-                    backtraces_.EstimateUsedMemory();
-  overhead->Add(TraceEventMemoryOverhead::kHeapProfilerAllocationRegister,
-                allocated, resident);
-}
+size_t AllocationRegister::EstimateAllocatedMemory() const {
+  return sizeof(AllocationRegister);
+}
+
+size_t AllocationRegister::EstimateResidentMemory() const {
+  return sizeof(AllocationRegister) + allocations_.EstimateUsedMemory() +
+         backtraces_.EstimateUsedMemory();
+}
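The change above splits the old EstimateTraceMemoryOverhead() into two const
accessors, which is what lets the #include of trace_event_memory_overhead.h
be dropped at the top of this file. A sketch of how a caller might now do
the reporting itself; the free function and its name are hypothetical, but
the Add() call mirrors the removed code:

    #include "base/trace_event/heap_profiler_allocation_register.h"
    #include "base/trace_event/trace_event_memory_overhead.h"

    namespace base {
    namespace trace_event {

    // Hypothetical call site: report the register's overhead using the new
    // accessors, exactly as the removed member function used to.
    void ReportRegisterOverhead(const AllocationRegister& reg,
                                TraceEventMemoryOverhead* overhead) {
      overhead->Add(TraceEventMemoryOverhead::kHeapProfilerAllocationRegister,
                    reg.EstimateAllocatedMemory(),  // allocated: object itself
                    reg.EstimateResidentMemory());  // resident: plus map storage
    }

    }  // namespace trace_event
    }  // namespace base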

 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
     const Backtrace& backtrace) {
   auto index = backtraces_.Insert(backtrace, 0).first;
   if (index == BacktraceMap::kInvalidKVIndex)
     return kOutOfStorageBacktraceIndex;
   auto& backtrace_and_count = backtraces_.Get(index);
   backtrace_and_count.second++;
   return index;
(...skipping 13 matching lines...)
   const auto& address_and_info = allocations_.Get(index);
   const auto& backtrace_and_count =
       backtraces_.Get(address_and_info.second.backtrace_index);
   return {address_and_info.first, address_and_info.second.size,
           AllocationContext(backtrace_and_count.first,
                             address_and_info.second.type_name)};
 }

 } // namespace trace_event
 } // namespace base
