Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_allocation_register.h

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: comments from primiano, thakis. Created 3 years, 7 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
 
 #include <stddef.h>
 #include <stdint.h>
 
(...skipping 229 matching lines...)
   // The index of the first element of |cells_| that has not been used before.
   // If the free list is empty and a new cell is needed, the cell at this index
   // is used. This is the high water mark for the number of entries stored.
   size_t next_unused_cell_;
 
   DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
 };
 
 }  // namespace internal
 
-class TraceEventMemoryOverhead;
-
 // The allocation register keeps track of all allocations that have not been
 // freed. Internally it has two hashtables: one for Backtraces and one for
 // actual allocations. Sizes of both hashtables are fixed, and this class
 // allocates (mmaps) only in its constructor.
 //
 // When either hash table hits max size, new inserts are dropped.
 class BASE_EXPORT AllocationRegister {
  public:
   // Details about an allocation.
   struct Allocation {
     const void* address;
     size_t size;
     AllocationContext context;
   };
 
+  struct BASE_EXPORT AddressHasher {
+    size_t operator()(const void* address) const;
+  };
+
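AddressHasher moves out of the private section (its old private declaration is removed further down) and gains BASE_EXPORT, so code outside this header can hash addresses the same way the register does; the sharding code referenced in the comments below is the likely consumer. Its implementation lives in the .cc file and is not shown here, so the following is only a hypothetical sketch of what a pointer hasher feeding a power-of-two bucket count has to do: spread the strongly aligned low bits of heap addresses across the whole hash.

    // Illustration only -- not the real AddressHasher. Heap addresses are
    // aligned, so the hash must mix high bits into the low bits that end up
    // selecting one of the 2^N buckets.
    #include <stddef.h>
    #include <stdint.h>

    size_t ExampleAddressHash(const void* address) {
      uint64_t key = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(address));
      key ^= key >> 33;
      key *= 0xff51afd7ed558ccdULL;  // 64-bit mixing constant (MurmurHash3 fmix64)
      key ^= key >> 33;
      return static_cast<size_t>(key);
    }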
   // An iterator that iterates entries in no particular order.
   class BASE_EXPORT ConstIterator {
    public:
     void operator++();
     bool operator!=(const ConstIterator& other) const;
     Allocation operator*() const;
 
    private:
     friend class AllocationRegister;
     using AllocationIndex = size_t;
(...skipping 22 matching lines...)
   // Removes the address from the table if it is present. It is ok to call this
   // with a null pointer.
   void Remove(const void* address);
 
   // Finds allocation for the address and fills |out_allocation|.
   bool Get(const void* address, Allocation* out_allocation) const;
 
   ConstIterator begin() const;
   ConstIterator end() const;
 
-  // Estimates memory overhead including |sizeof(AllocationRegister)|.
-  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
+  // Estimates memory in use.
+  size_t EstimateAllocatedMemory() const;
+  size_t EstimateResidentMemory() const;
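For orientation, a minimal usage sketch of this interface, not part of the patch. The Insert declaration falls inside the elided lines above, so its signature below is assumed (address, size, context, with the insert dropped when a table is full); AllocationContext is the type from heap_profiler_allocation_context.h.

    #include <stdlib.h>
    #include "base/trace_event/heap_profiler_allocation_register.h"

    void Example() {
      using base::trace_event::AllocationRegister;
      AllocationRegister reg;  // mmaps both fixed-size tables up front.

      base::trace_event::AllocationContext ctx;
      void* ptr = malloc(32);
      reg.Insert(ptr, 32, ctx);  // Assumed signature; dropped if the table is full.

      AllocationRegister::Allocation found;
      if (reg.Get(ptr, &found)) {
        // found.size == 32; found.context describes the allocation site.
      }

      for (const AllocationRegister::Allocation& a : reg) {
        // Entries are visited in no particular order.
        (void)a;
      }

      reg.Remove(ptr);  // Passing null (or an unknown address) is also fine.
      free(ptr);
    }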
 
  private:
   friend AllocationRegisterTest;
 
   // Expect lower number of allocations from mobile platforms. Load factor
   // (capacity / bucket count) is kept less than 10 for optimal hashing. The
   // number of buckets should be changed together with AddressHasher.
 #if defined(OS_ANDROID) || defined(OS_IOS)
+  // Note that allocations are currently sharded over 1 different instance of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
   static const size_t kAllocationBuckets = 1 << 18;
   static const size_t kAllocationCapacity = 1500000;
 #else
-  static const size_t kAllocationBuckets = 1 << 19;
-  static const size_t kAllocationCapacity = 5000000;
+  // Note that allocations are currently sharded over 256 different instances of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
+  static const size_t kAllocationBuckets = 1 << 16;
+  static const size_t kAllocationCapacity = 400000;
 #endif
 
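As a quick check of the load-factor rule stated above (capacity / bucket count kept below 10), the new desktop constants and the unchanged mobile constants both pass; the static_asserts below are illustrative only, not part of the header.

    // Desktop after this patch: 400000 / (1 << 16)  = 400000 / 65536   ~ 6.1
    // Mobile (unchanged):       1500000 / (1 << 18) = 1500000 / 262144 ~ 5.7
    static_assert(400000 / (1 << 16) < 10, "desktop load factor under 10");
    static_assert(1500000 / (1 << 18) < 10, "mobile load factor under 10");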
-  // 2^16 works well with BacktraceHasher. When increasing this number make
-  // sure BacktraceHasher still produces low number of collisions.
-  static const size_t kBacktraceBuckets = 1 << 16;
-#if defined(OS_ANDROID)
+#if defined(OS_ANDROID) || defined(OS_IOS)
+  // Note that allocations are currently sharded over 1 different instance of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
+  static const size_t kBacktraceBuckets = 1 << 16;
   static const size_t kBacktraceCapacity = 32000;  // 22K was observed
 #else
-  static const size_t kBacktraceCapacity = 55000;  // 45K was observed on Linux
+  // Note that allocations are currently sharded over 256 different instances of
+  // AllocationRegister. See "base/trace_event/malloc_dump_provider.cc".
+  static const size_t kBacktraceBuckets = 1 << 12;
+  static const size_t kBacktraceCapacity = 10000;  // 45K was observed on Linux
 #endif
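The sharding the two comments refer to lives in base/trace_event/malloc_dump_provider.cc and is not part of this file; the sketch below is only a plausible shape for it (names and layout are hypothetical), showing how the now-public AddressHasher can pick one of the 256 desktop registers from an allocation's address.

    #include "base/trace_event/heap_profiler_allocation_register.h"

    using base::trace_event::AllocationRegister;

    // Hypothetical shard selection; the real code is in malloc_dump_provider.cc
    // and may look different.
    constexpr size_t kNumRegisters = 256;  // 1 on Android/iOS per the comments.
    AllocationRegister* g_registers[kNumRegisters];

    AllocationRegister* RegisterForAddress(const void* address) {
      size_t hash = AllocationRegister::AddressHasher()(address);
      return g_registers[hash % kNumRegisters];
    }

    // Insert, Remove and Get for a given address must all resolve to the same
    // shard, which this deterministic mapping guarantees.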
 
   struct BacktraceHasher {
     size_t operator () (const Backtrace& backtrace) const;
   };
 
   using BacktraceMap = internal::FixedHashMap<
       kBacktraceBuckets,
       Backtrace,
       size_t,  // Number of references to the backtrace (the key). Incremented
                // when an allocation that references the backtrace is inserted,
                // and decremented when the allocation is removed. When the
                // number drops to zero, the backtrace is removed from the map.
       BacktraceHasher>;
 
   struct AllocationInfo {
     size_t size;
     const char* type_name;
     BacktraceMap::KVIndex backtrace_index;
   };
 
-  struct AddressHasher {
-    size_t operator () (const void* address) const;
-  };
-
   using AllocationMap = internal::FixedHashMap<
       kAllocationBuckets,
       const void*,
       AllocationInfo,
       AddressHasher>;
 
   BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
   void RemoveBacktrace(BacktraceMap::KVIndex index);
 
   Allocation GetAllocation(AllocationMap::KVIndex) const;
 
   AllocationMap allocations_;
   BacktraceMap backtraces_;
 
   // Sentinel used when the |backtraces_| table is full.
   //
   // This is a slight abstraction to allow for constant propagation. It
   // knows that the sentinel will be the first item inserted into the table
   // and that the first index returned will be 0. The constructor DCHECKs
   // this assumption.
   enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
 
   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
 
 }  // namespace trace_event
 }  // namespace base
 
 #endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_