Chromium Code Reviews

Side by Side Diff: base/trace_event/heap_profiler_allocation_register.h

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: comment from primiano. Created 3 years, 7 months ago
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_

#include <stddef.h>
#include <stdint.h>

(...skipping 229 matching lines...)
  // The index of the first element of |cells_| that has not been used before.
  // If the free list is empty and a new cell is needed, the cell at this index
  // is used. This is the high water mark for the number of entries stored.
  size_t next_unused_cell_;

  DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
};

}  // namespace internal

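The |next_unused_cell_| comment above describes FixedHashMap's cell allocation policy: reuse a freed cell if the free list has one, otherwise take the next never-used cell and advance the high water mark. Below is a minimal standalone sketch of that policy; the struct, its members, and the use of a vector as the free list are assumptions for illustration, not the actual FixedHashMap internals.

#include <cstddef>
#include <vector>

// Hypothetical sketch, not the real FixedHashMap: freed cells are recycled
// through a free list; when the free list is empty, the next never-used cell
// (the high water mark) is handed out.
struct CellAllocatorSketch {
  std::vector<size_t> free_list;  // indices of cells that were freed
  size_t next_unused_cell = 0;    // high water mark

  size_t GetFreeCellIndex() {
    if (!free_list.empty()) {
      size_t index = free_list.back();  // reuse a previously freed cell
      free_list.pop_back();
      return index;
    }
    return next_unused_cell++;  // take a never-used cell
  }

  void FreeCell(size_t index) { free_list.push_back(index); }
};
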
-class TraceEventMemoryOverhead;
-
// The allocation register keeps track of all allocations that have not been
// freed. Internally it has two hashtables: one for Backtraces and one for
// actual allocations. Sizes of both hashtables are fixed, and this class
// allocates (mmaps) only in its constructor.
//
// When either hash table hits max size, new inserts are dropped.
class BASE_EXPORT AllocationRegister {
 public:
  // Details about an allocation.
  struct Allocation {
(...skipping 37 matching lines...)
  // Removes the address from the table if it is present. It is ok to call this
  // with a null pointer.
  void Remove(const void* address);

  // Finds allocation for the address and fills |out_allocation|.
  bool Get(const void* address, Allocation* out_allocation) const;

  ConstIterator begin() const;
  ConstIterator end() const;

-  // Estimates memory overhead including |sizeof(AllocationRegister)|.
-  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
+  // Estimates memory in use.
+  size_t EstimateAllocatedMemory() const;
+  size_t EstimateResidentMemory() const;
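Replacing EstimateTraceMemoryOverhead() with EstimateAllocatedMemory() and EstimateResidentMemory() lets the caller aggregate the two figures itself, which matters once allocations are sharded over multiple registers (see the comments further down and "base/trace_event/malloc_dump_provider.cc"). A minimal sketch of such aggregation; the function name and the way shards are passed in are assumptions, not the actual dump-provider code.

// Hypothetical aggregation across sharded registers.
size_t EstimateTotalAllocatedMemory(
    const base::trace_event::AllocationRegister* const* shards,
    size_t shard_count) {
  size_t total = 0;
  for (size_t i = 0; i < shard_count; ++i)
    total += shards[i]->EstimateAllocatedMemory();  // per-shard estimate
  return total;
}
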

 private:
  friend AllocationRegisterTest;

  // Expect lower number of allocations from mobile platforms. Load factor
  // (capacity / bucket count) is kept less than 10 for optimal hashing. The
  // number of buckets should be changed together with AddressHasher.
#if defined(OS_ANDROID) || defined(OS_IOS)
+  // Note that allocations are currently sharded over 1 different instance of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
  static const size_t kAllocationBuckets = 1 << 18;
  static const size_t kAllocationCapacity = 1500000;
#else
-  static const size_t kAllocationBuckets = 1 << 19;
-  static const size_t kAllocationCapacity = 5000000;
+  // Note that allocations are currently sharded over 256 different instances of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
+  static const size_t kAllocationBuckets = 1 << 16;
+  static const size_t kAllocationCapacity = 400000;
#endif

-  // 2^16 works well with BacktraceHasher. When increasing this number make
-  // sure BacktraceHasher still produces low number of collisions.
+#if defined(OS_ANDROID) || defined(OS_IOS)
+  // Note that allocations are currently sharded over 1 different instance of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
  static const size_t kBacktraceBuckets = 1 << 16;
-#if defined(OS_ANDROID)
  static const size_t kBacktraceCapacity = 32000;  // 22K was observed
#else
-  static const size_t kBacktraceCapacity = 55000;  // 45K was observed on Linux
+  // Note that allocations are currently sharded over 256 different instances of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
+  static const size_t kBacktraceBuckets = 1 << 12;
+  static const size_t kBacktraceCapacity = 10000;  // 45K was observed on Linux
#endif
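As a quick check on the load-factor comment above (capacity / bucket count kept below 10): with the new desktop constants the allocation table sits at 400000 / 2^16 ≈ 6.1 and the backtrace table at 10000 / 2^12 ≈ 2.4, while on Android/iOS the figures are 1500000 / 2^18 ≈ 5.7 and 32000 / 2^16 ≈ 0.5. A hypothetical compile-time restatement of the same arithmetic, not part of the header:

// Hypothetical static_asserts restating the load factors implied by the
// constants above; integer division is sufficient for a less-than-10 check.
static_assert(1500000 / (1 << 18) < 10, "mobile allocation table load factor");
static_assert(400000 / (1 << 16) < 10, "desktop allocation table load factor");
static_assert(32000 / (1 << 16) < 10, "mobile backtrace table load factor");
static_assert(10000 / (1 << 12) < 10, "desktop backtrace table load factor");
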

  struct BacktraceHasher {
    size_t operator () (const Backtrace& backtrace) const;
  };

  using BacktraceMap = internal::FixedHashMap<
      kBacktraceBuckets,
      Backtrace,
      size_t,  // Number of references to the backtrace (the key). Incremented
(...skipping 34 matching lines...)
  // this assumption.
  enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };

  DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};

}  // namespace trace_event
}  // namespace base

#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
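For reference, a minimal usage sketch of the public surface visible in this diff (Get(), Remove(), and iteration). The caller function is hypothetical, and Insert(), which lives in the elided lines, is not shown.

#include "base/trace_event/heap_profiler_allocation_register.h"

// Hypothetical caller; how the register is constructed and populated is
// outside the lines shown in this diff.
void InspectRegister(base::trace_event::AllocationRegister* reg,
                     const void* address) {
  base::trace_event::AllocationRegister::Allocation allocation;
  if (reg->Get(address, &allocation)) {
    // |allocation| now describes the live allocation at |address|.
  }

  // Iteration visits every allocation that has not been freed.
  for (auto it = reg->begin(); it != reg->end(); ++it) {
    // Each entry describes one live allocation.
  }

  // Per the header comment, Remove() only removes the address if it is
  // present, and it is ok to call it with a null pointer.
  reg->Remove(address);
}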
