Chromium Code Reviews

Diff: base/trace_event/heap_profiler_allocation_register.cc

Issue 2787383002: On heap tracking datastructure overflow, degrade instead of CHECK() (Closed)
Patch Set: fix sign comparison in test that made Windows unhappy (created 3 years, 8 months ago)
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/heap_profiler_allocation_register.h"

 #include <algorithm>
+#include <limits>

 #include "base/trace_event/trace_event_memory_overhead.h"

 namespace base {
 namespace trace_event {

 AllocationRegister::ConstIterator::ConstIterator(
-    const AllocationRegister& alloc_register, AllocationIndex index)
-    : register_(alloc_register),
-      index_(index) {}
+    const AllocationRegister& alloc_register,
+    AllocationIndex index)
+    : register_(alloc_register), index_(index) {}

 void AllocationRegister::ConstIterator::operator++() {
   index_ = register_.allocations_.Next(index_ + 1);
 }

 bool AllocationRegister::ConstIterator::operator!=(
     const ConstIterator& other) const {
   return index_ != other.index_;
 }

-AllocationRegister::Allocation
-AllocationRegister::ConstIterator::operator*() const {
+AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
+    const {
   return register_.GetAllocation(index_);
 }

-size_t AllocationRegister::BacktraceHasher::operator () (
+size_t AllocationRegister::BacktraceHasher::operator()(
     const Backtrace& backtrace) const {
   const size_t kSampleLength = 10;

   uintptr_t total_value = 0;

   size_t head_end = std::min(backtrace.frame_count, kSampleLength);
   for (size_t i = 0; i != head_end; ++i) {
     total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
   }

   size_t tail_start = backtrace.frame_count -
       std::min(backtrace.frame_count - head_end, kSampleLength);
   for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
     total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
   }

   total_value += backtrace.frame_count;

   // These magic constants give best results in terms of average collisions
   // per backtrace. They were found by replaying real backtraces from Linux
   // and Android against different hash functions.
   return (total_value * 131101) >> 14;
 }

-size_t AllocationRegister::AddressHasher::operator () (
+size_t AllocationRegister::AddressHasher::operator()(
     const void* address) const {
   // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
   // been chosen carefully based on measurements with real-world data
   // (addresses recorded from a Chrome trace run). It is the first prime after
   // 2^17. For |shift|, 15 yields good results for both 2^18 and 2^19 bucket
   // sizes. Microbenchmarks show that this simple scheme outperforms fancy
   // hashes like Murmur3 by 20 to 40 percent.
   const uintptr_t key = reinterpret_cast<uintptr_t>(address);
   const uintptr_t a = 131101;
   const uintptr_t shift = 15;
   const uintptr_t h = (key * a) >> shift;
   return h;
 }
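Note: as a reference for the comment above, the multiplicative scheme is simple enough to try in isolation. The standalone sketch below is not part of the patch; MultiplicativeHash is an invented name, but it applies the same constants as AddressHasher (multiply by 131101, the first prime after 2^17, then shift right by 15):

#include <cstdint>
#include <cstdio>

// Standalone illustration of the multiplicative hash used by AddressHasher:
// multiply the pointer value by the first prime after 2^17 and keep the
// higher-order bits by shifting right. A sketch, not Chromium code.
uintptr_t MultiplicativeHash(const void* address) {
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;  // first prime after 2^17
  const uintptr_t shift = 15;
  return (key * a) >> shift;
}

int main() {
  int locals[4] = {};
  for (int i = 0; i < 4; ++i) {
    printf("%p -> %llu\n", static_cast<void*>(&locals[i]),
           static_cast<unsigned long long>(MultiplicativeHash(&locals[i])));
  }
  return 0;
}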

 AllocationRegister::AllocationRegister()
     : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}

 AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                        size_t backtrace_capacity)
-    : allocations_(allocation_capacity),
-      backtraces_(backtrace_capacity) {}
+    : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
+  Backtrace sentinel = {};
+  sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
+  sentinel.frame_count = 1;
+
+  // Rationale for max / 2: in theory we could just start the sentinel with a
+  // refcount == 0. However, this optimization avoids hitting the 2nd condition
+  // of the "if" in RemoveBacktrace, hence reducing the chances of hurting the
+  // fastpath. From a functional viewpoint, the sentinel is safe even if we
+  // wrap over the refcount.
+  BacktraceMap::KVPair::second_type sentinel_refcount =
+      std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
+  auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
+  DCHECK(index_and_flag.second);
+  out_of_storage_backtrace_index_ = index_and_flag.first;
+}

-AllocationRegister::~AllocationRegister() {
-}
+AllocationRegister::~AllocationRegister() {}

-void AllocationRegister::Insert(const void* address,
+bool AllocationRegister::Insert(const void* address,
                                 size_t size,
                                 const AllocationContext& context) {
   DCHECK(address != nullptr);
   if (size == 0) {
-    return;
+    return false;
   }

-  AllocationInfo info = {
-      size,
-      context.type_name,
-      InsertBacktrace(context.backtrace)
-  };
+  AllocationInfo info = {size, context.type_name,
+                         InsertBacktrace(context.backtrace)};

   // Try to insert the allocation.
   auto index_and_flag = allocations_.Insert(address, info);
-  if (!index_and_flag.second) {
+  if (!index_and_flag.second &&
+      index_and_flag.first != AllocationMap::kInvalidKVIndex) {
     // |address| is already there - overwrite the allocation info.
     auto& old_info = allocations_.Get(index_and_flag.first).second;
     RemoveBacktrace(old_info.backtrace_index);
     old_info = info;
+    return true;
   }
+
+  return index_and_flag.second;
 }
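Note: the behavioral change here is that Insert() now reports failure instead of CHECK()-ing when the underlying map runs out of storage. A minimal sketch of the same pattern, with a hypothetical BoundedRegister standing in for the real fixed-capacity map (the real AllocationRegister does not use std::unordered_map), shows how a caller can degrade gracefully:

#include <cstddef>
#include <unordered_map>

// Hypothetical stand-in for the register, illustrating the
// "degrade instead of CHECK()" idea: when capacity is exhausted,
// Insert() returns false and the caller decides what to do.
class BoundedRegister {
 public:
  explicit BoundedRegister(size_t capacity) : capacity_(capacity) {}

  bool Insert(const void* address, size_t size) {
    if (size == 0)
      return false;
    auto it = map_.find(address);
    if (it != map_.end()) {
      it->second = size;  // Overwrite an existing entry, as the real code does.
      return true;
    }
    if (map_.size() >= capacity_)
      return false;  // Out of storage: degrade instead of crashing.
    map_.emplace(address, size);
    return true;
  }

 private:
  size_t capacity_;
  std::unordered_map<const void*, size_t> map_;
};

A caller can then do something like `if (!reg.Insert(ptr, size)) ++dropped_allocations_;` rather than relying on the old abort-on-overflow behavior.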

 void AllocationRegister::Remove(const void* address) {
   auto index = allocations_.Find(address);
   if (index == AllocationMap::kInvalidKVIndex) {
     return;
   }

   const AllocationInfo& info = allocations_.Get(index).second;
   RemoveBacktrace(info.backtrace_index);
(...skipping 17 matching lines...)
   return ConstIterator(*this, allocations_.Next(0));
 }

 AllocationRegister::ConstIterator AllocationRegister::end() const {
   return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
 }

 void AllocationRegister::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) const {
   size_t allocated = sizeof(AllocationRegister);
-  size_t resident = sizeof(AllocationRegister)
-      + allocations_.EstimateUsedMemory()
-      + backtraces_.EstimateUsedMemory();
+  size_t resident = sizeof(AllocationRegister) +
+                    allocations_.EstimateUsedMemory() +
+                    backtraces_.EstimateUsedMemory();
   overhead->Add("AllocationRegister", allocated, resident);
 }

 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
     const Backtrace& backtrace) {
   auto index = backtraces_.Insert(backtrace, 0).first;
+  if (index == BacktraceMap::kInvalidKVIndex)
+    return out_of_storage_backtrace_index_;
   auto& backtrace_and_count = backtraces_.Get(index);
   backtrace_and_count.second++;
   return index;
 }

 void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
   auto& backtrace_and_count = backtraces_.Get(index);
-  if (--backtrace_and_count.second == 0) {
+  if (--backtrace_and_count.second == 0 &&
+      index != out_of_storage_backtrace_index_) {
     // Backtrace is not referenced anymore - remove it.
     backtraces_.Remove(index);
   }
 }
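Note: to make the interplay between the sentinel refcount (seeded with max/2 in the constructor) and the extra index check above concrete, here is a small self-contained sketch with invented names. It only illustrates the refcount/sentinel logic; the real BacktraceMap is implemented differently. Once the toy table is full, every Insert() hands out the sentinel index, and Remove() never reclaims that slot:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Toy refcounted table, illustrating (not reproducing) the sentinel scheme:
// slot 0 is the "out of storage" sentinel with a huge starting refcount, and
// Remove() never frees it, even if its count were to reach zero.
class ToyBacktraceTable {
 public:
  ToyBacktraceTable() {
    slots_.push_back({std::numeric_limits<uint32_t>::max() / 2, true});
  }

  // Returns the index to reference; falls back to the sentinel when full.
  size_t Insert() {
    for (size_t i = 1; i < slots_.size(); ++i) {
      if (!slots_[i].live) {
        slots_[i] = {1, true};
        return i;
      }
    }
    if (slots_.size() >= kCapacity)
      return kSentinelIndex;  // Out of storage: share the sentinel entry.
    slots_.push_back({1, true});
    return slots_.size() - 1;
  }

  void Remove(size_t index) {
    // Mirrors RemoveBacktrace: only free a slot when its count hits zero
    // and it is not the sentinel.
    if (--slots_[index].refcount == 0 && index != kSentinelIndex)
      slots_[index].live = false;
  }

 private:
  static constexpr size_t kSentinelIndex = 0;
  static constexpr size_t kCapacity = 4;  // tiny on purpose
  struct Slot {
    uint32_t refcount;
    bool live;
  };
  std::vector<Slot> slots_;
};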

 AllocationRegister::Allocation AllocationRegister::GetAllocation(
     AllocationMap::KVIndex index) const {
   const auto& address_and_info = allocations_.Get(index);
-  const auto& backtrace_and_count = backtraces_.Get(
-      address_and_info.second.backtrace_index);
-  return {
-      address_and_info.first,
-      address_and_info.second.size,
-      AllocationContext(
-          backtrace_and_count.first,
-          address_and_info.second.type_name)
-  };
+  const auto& backtrace_and_count =
+      backtraces_.Get(address_and_info.second.backtrace_index);
+  return {address_and_info.first, address_and_info.second.size,
+          AllocationContext(backtrace_and_count.first,
+                            address_and_info.second.type_name)};
 }

 }  // namespace trace_event
 }  // namespace base