OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 #include <stdint.h> | 9 #include <stdint.h> |
10 | 10 |
11 #include <utility> | 11 #include <utility> |
12 | 12 |
13 #include "base/bits.h" | 13 #include "base/bits.h" |
14 #include "base/logging.h" | 14 #include "base/logging.h" |
15 #include "base/macros.h" | 15 #include "base/macros.h" |
16 #include "base/process/process_metrics.h" | 16 #include "base/process/process_metrics.h" |
17 #include "base/template_util.h" | 17 #include "base/template_util.h" |
18 #include "base/trace_event/heap_profiler_allocation_context.h" | 18 #include "base/trace_event/heap_profiler_allocation_context.h" |
| 19 #include "build/build_config.h" |
19 | 20 |
20 namespace base { | 21 namespace base { |
21 namespace trace_event { | 22 namespace trace_event { |
22 | 23 |
23 class AllocationRegisterTest; | 24 class AllocationRegisterTest; |
24 | 25 |
25 namespace internal { | 26 namespace internal { |
26 | 27 |
27 // Allocates a region of virtual address space of |size| rounded up to the | 28 // Allocates a region of virtual address space of |size| rounded up to the |
28 // system page size. The memory is zeroed by the system. A guard page is | 29 // system page size. The memory is zeroed by the system. A guard page is |
(...skipping 162 matching lines...) |
191 // was reserved for |cells_|), |next_unused_cell_| can be an index outside | 192 // was reserved for |cells_|), |next_unused_cell_| can be an index outside |
192 // of the allocated storage. A guard page is allocated there to crash the | 193 // of the allocated storage. A guard page is allocated there to crash the |
193 // program in that case. There are alternative solutions: | 194 // program in that case. There are alternative solutions: |
194 // - Deal with it, increase capacity by reallocating |cells_|. | 195 // - Deal with it, increase capacity by reallocating |cells_|. |
195 // - Refuse to insert and let the caller deal with it. | 196 // - Refuse to insert and let the caller deal with it. |
196 // Because free cells are re-used before accessing fresh cells with a higher | 197 // Because free cells are re-used before accessing fresh cells with a higher |
197 // index, and because reserving address space without touching it is cheap, | 198 // index, and because reserving address space without touching it is cheap, |
198 // the simplest solution is to just allocate a humongous chunk of address | 199 // the simplest solution is to just allocate a humongous chunk of address |
199 // space. | 200 // space. |
200 | 201 |
201 DCHECK_LT(next_unused_cell_, num_cells_ + 1); | 202 CHECK_LT(next_unused_cell_, num_cells_ + 1) |
| 203 << "Allocation Register hash table has too little capacity. Increase " |
| 204 "the capacity to run heap profiler in large sessions."; |
202 | 205 |
203 return &cells_[idx]; | 206 return &cells_[idx]; |
204 } | 207 } |
205 | 208 |
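The comment above is the crux of the design: reserving address space without touching it is nearly free on modern OSes, and a trailing inaccessible guard page turns an out-of-bounds |next_unused_cell_| into an immediate crash rather than silent corruption. A minimal POSIX sketch of the idea (hypothetical helper name; the real per-platform logic lives in AllocateVirtualMemory in the .cc file):

    #include <cstddef>
    #include <sys/mman.h>
    #include <unistd.h>

    // Reserve |size| bytes rounded up to the page size, plus one guard page.
    // Pages are zeroed by the kernel and consume physical memory only once
    // they are first touched.
    void* ReserveWithGuardPage(size_t size) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t rounded = (size + page_size - 1) & ~(page_size - 1);
      void* region = mmap(nullptr, rounded + page_size,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (region == MAP_FAILED)
        return nullptr;
      // Make the trailing page inaccessible: a write one past the reserved
      // cells faults immediately, which is exactly the crash the comment
      // above relies on.
      mprotect(static_cast<char*>(region) + rounded, page_size, PROT_NONE);
      return region;
    }
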
206 // Returns a value in the range [0, NumBuckets - 1] (inclusive). | 209 // Returns a value in the range [0, NumBuckets - 1] (inclusive). |
207 size_t Hash(const Key& key) const { | 210 size_t Hash(const Key& key) const { |
208 if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) { | 211 if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) { |
209 // NumBuckets is a power of 2. | 212 // NumBuckets is a power of 2. |
210 return KeyHasher()(key) & (NumBuckets - 1); | 213 return KeyHasher()(key) & (NumBuckets - 1); |
211 } else { | 214 } else { |
(...skipping 80 matching lines...) |
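The power-of-two fast path above uses two standard bit tricks: N is a power of two exactly when N == (N & ~(N - 1)) (a power of two has a single set bit, so clearing everything below the highest bit is a no-op), and for such N, h % N == h & (N - 1). A self-contained check (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstddef>

    bool IsPowerOfTwo(size_t n) {
      return n != 0 && n == (n & ~(n - 1));
    }

    int main() {
      assert(IsPowerOfTwo(1u << 18));
      assert(!IsPowerOfTwo((1u << 18) + 24));
      // For power-of-two N, masking matches (and is cheaper than) modulo.
      const size_t N = 1 << 18;
      for (size_t h : {0u, 7u, 262143u, 262144u, 123456789u})
        assert((h & (N - 1)) == h % N);
      return 0;
    }
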
292 | 295 |
293 ConstIterator begin() const; | 296 ConstIterator begin() const; |
294 ConstIterator end() const; | 297 ConstIterator end() const; |
295 | 298 |
296 // Estimates memory overhead including |sizeof(AllocationRegister)|. | 299 // Estimates memory overhead including |sizeof(AllocationRegister)|. |
297 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; | 300 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; |
298 | 301 |
299 private: | 302 private: |
300 friend AllocationRegisterTest; | 303 friend AllocationRegisterTest; |
301 | 304 |
302 // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal | 305 // Expect a lower number of allocations on mobile platforms. The load |
303 // hashing and should be changed together with AddressHasher. | 306 // factor (capacity / bucket count) is kept below 10 for optimal hashing. The |
| 307 // number of buckets should be changed together with AddressHasher. |
| 308 #if defined(OS_ANDROID) || defined(OS_IOS) |
304 static const size_t kAllocationBuckets = 1 << 18; | 309 static const size_t kAllocationBuckets = 1 << 18; |
305 static const size_t kAllocationCapacity = 1500000; | 310 static const size_t kAllocationCapacity = 1500000; |
| 311 #else |
| 312 static const size_t kAllocationBuckets = 1 << 19; |
| 313 static const size_t kAllocationCapacity = 5000000; |
| 314 #endif |
306 | 315 |
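With these constants the stated invariant holds on both configurations: 1,500,000 / 2^18 ≈ 5.7 on mobile and 5,000,000 / 2^19 ≈ 9.5 elsewhere, both below 10. If desired, the invariant could be pinned at compile time (a sketch; this assertion is not in the patch):

    static_assert(kAllocationCapacity < 10 * kAllocationBuckets,
                  "load factor (capacity / buckets) must stay below 10");
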
307 // 2^16 works well with BacktraceHasher. When increasing this number make | 316 // 2^16 works well with BacktraceHasher. When increasing this number make |
308 // sure BacktraceHasher still produces low number of collisions. | 317 // sure BacktraceHasher still produces low number of collisions. |
309 static const size_t kBacktraceBuckets = 1 << 16; | 318 static const size_t kBacktraceBuckets = 1 << 16; |
310 #if defined(OS_ANDROID) | 319 #if defined(OS_ANDROID) |
311 static const size_t kBacktraceCapacity = 32000; // 22K was observed | 320 static const size_t kBacktraceCapacity = 32000; // 22K was observed |
312 #else | 321 #else |
313 static const size_t kBacktraceCapacity = 55000; // 45K was observed on Linux | 322 static const size_t kBacktraceCapacity = 55000; // 45K was observed on Linux |
314 #endif | 323 #endif |
315 | 324 |
(...skipping 34 matching lines...) |
350 AllocationMap allocations_; | 359 AllocationMap allocations_; |
351 BacktraceMap backtraces_; | 360 BacktraceMap backtraces_; |
352 | 361 |
353 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); | 362 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); |
354 }; | 363 }; |
355 | 364 |
356 } // namespace trace_event | 365 } // namespace trace_event |
357 } // namespace base | 366 } // namespace base |
358 | 367 |
359 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 368 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |