Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/heap-profile-table.h

Issue 12388070: Count m(un)map for each stacktrace in MemoryRegionMap instead of HeapProfileTable. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: nit fix (created 7 years, 9 months ago)
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
(...skipping 20 matching lines...)
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"   // for RawFD
+#include "heap-profile-stats.h"

#if defined(TYPE_PROFILING)
#include <gperftools/type_profiler_map.h>
#endif  // defined(TYPE_PROFILING)

// Table to maintain heap profile data, i.e. the set of
// currently active heap memory allocations.
// Thread-unsafe and non-reentrant code:
// each instance object must be used by one thread
// at a time, without self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
 public:

  // Extension to be used for heap profile files.
  static const char kFileExt[];

  // Longest stack trace we record.
  static const int kMaxStackDepth = 32;

  // data types ----------------------------

  // Profile stats.
-  struct Stats {
-    int32 allocs;      // Number of allocation calls
-    int32 frees;       // Number of free calls
-    int64 alloc_size;  // Total size of all allocated objects so far
-    int64 free_size;   // Total size of all freed objects so far
-
-    // semantic equality
-    bool Equivalent(const Stats& x) const {
-      return allocs - frees == x.allocs - x.frees &&
-             alloc_size - free_size == x.alloc_size - x.free_size;
-    }
-  };
+  typedef HeapProfileStats::Stats Stats;
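The Stats type moving to heap-profile-stats.h keeps its "semantic equality" behavior: Equivalent() compares net live objects and net live bytes rather than raw counters. A minimal sketch of what that means in practice (field values invented for illustration):

    #include <assert.h>

    HeapProfileTable::Stats a = {};
    a.allocs = 10; a.frees = 4;             // 6 live objects
    a.alloc_size = 100; a.free_size = 40;   // 60 live bytes

    HeapProfileTable::Stats b = {};
    b.allocs = 7; b.frees = 1;              // also 6 live objects
    b.alloc_size = 90; b.free_size = 30;    // also 60 live bytes

    // Different absolute traffic, same net state:
    assert(a.Equivalent(b));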

  // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New
  // allocations are marked with UNMARKED by default.
  enum AllocationMark {
    UNMARKED = 0,
    MARK_ONE,
    MARK_TWO,
    MARK_THREE
  };

(...skipping 13 matching lines...)
    int stack_depth;                // Depth of stack trace
    const void* const* call_stack;  // Stack trace
  };

  // Memory (de)allocator interface we'll use.
  typedef void* (*Allocator)(size_t size);
  typedef void (*DeAllocator)(void* ptr);

  // interface ---------------------------

-  HeapProfileTable(Allocator alloc, DeAllocator dealloc);
+  HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
  ~HeapProfileTable();

  // Collect the stack trace for the function that asked to do the
  // allocation for passing to RecordAlloc() below.
  //
  // The stack trace is stored in 'stack'. The stack depth is returned.
  //
  // 'skip_count' gives the number of stack frames between this call
  // and the memory allocation function.
  static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);
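As a rough illustration of how a caller combines GetCallerStackTrace() with the recording API: the sketch below assumes a RecordAlloc(ptr, bytes, depth, stack) method as referenced in the comment above (its exact declaration falls in the elided lines), plus hypothetical g_table and underlying_malloc names.

    void* ProfiledMalloc(size_t size) {
      void* ptr = underlying_malloc(size);  // hypothetical real allocator
      void* stack[HeapProfileTable::kMaxStackDepth];
      // skip_count == 1 skips ProfiledMalloc itself, so the trace starts
      // at our caller, the code that actually asked for memory.
      int depth = HeapProfileTable::GetCallerStackTrace(1, stack);
      g_table->RecordAlloc(ptr, size, depth, stack);  // signature assumed
      return ptr;
    }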
(...skipping 41 matching lines...)
  // mmap'ed regions.
  const Stats& total() const { return total_; }

  // Allocation data iteration callback: gets passed object pointer and
  // fully-filled AllocInfo.
  typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);

  // Iterate over the allocation profile data calling "callback"
  // for every allocation.
  void IterateAllocs(AllocIterator callback) const {
-    alloc_address_map_->Iterate(MapArgsAllocIterator, callback);
+    address_map_->Iterate(MapArgsAllocIterator, callback);
  }

  // Callback for iterating through addresses of all allocated objects. Accepts
  // pointer to user data and object pointer.
  typedef void (*AddressIterator)(void* data, const void* ptr);

  // Iterate over the addresses of all allocated objects.
  void IterateAllocationAddresses(AddressIterator, void* data);

  // Allocation context profile data iteration callback
(...skipping 24 matching lines...)
  // Release a previously taken snapshot. snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-NULL, skip any objects present in "base".
  // As a side-effect, clears the "live" bit on every live object in *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);

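Putting the snapshot methods together, a leak check shaped like the one these comments describe might look as follows. TakeSnapshot() and ReportLeaks() are declared in the elided regions, so their exact signatures here are assumptions, and g_table is a hypothetical instance:

    HeapProfileTable::Snapshot* base = g_table->TakeSnapshot();
    DoWorkThatShouldFreeEverythingItAllocates();  // hypothetical workload
    // Objects allocated since 'base' that were never marked live:
    HeapProfileTable::Snapshot* leaks = g_table->NonLiveSnapshot(base);
    leaks->ReportLeaks("checker", "leaks.heap", true);  // signature assumed
    g_table->ReleaseSnapshot(leaks);
    g_table->ReleaseSnapshot(base);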
-  // Refresh the internal mmap information from MemoryRegionMap. Results of
-  // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed
-  // memory regions as of the call to RefreshMMapData.
-  // 'mmap_alloc' is an allocator for the address map. A function which calls
-  // LowLevelAlloc::AllocWithArena is expected, like the constructor.
-  // 'mmap_dealloc' is the deallocator corresponding to 'mmap_alloc'.
-  // They are introduced to avoid the memory fragmentation and bloat expected
-  // in an arena. A dedicated arena for this function allows disposing of the
-  // whole arena after ClearMMapData.
-  void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc);
-
-  // Clear the internal mmap information. Results of FillOrderedProfile and
-  // IterateOrderedAllocContexts won't contain mmap'ed memory regions after
-  // calling ClearMMapData.
-  void ClearMMapData();
-
  // Dump a list of allocations marked as "live" along with their creation
  // stack traces and sizes to a file named |file_name|. Together with
  // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used
  // to find objects that are created in a certain time span:
  //   1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the
  //      timespan.
  //   2. Perform whatever action you suspect allocates memory that is not
  //      correctly freed.
  //   3. Invoke MarkUnmarkedAllocations(MARK_TWO).
  //   4. Perform whatever action is supposed to free the memory again. New
(...skipping 10 matching lines...)
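The elided lines finish that recipe; end to end it comes down to a call sequence like the sketch below. DumpMarkedObjects' declaration falls in the elided region, so its signature is assumed, and 'table' and the action names are hypothetical:

    table->MarkCurrentAllocations(HeapProfileTable::MARK_ONE);   // step 1
    RunSuspectAction();                                          // step 2
    table->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO);  // step 3
    RunSupposedCleanup();                                        // step 4
    // Whatever still carries MARK_TWO was created in the window and
    // never freed:
    table->DumpMarkedObjects(HeapProfileTable::MARK_TWO, "window.heap");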
  void DumpTypeStatistics(const char* file_name) const;
#endif  // defined(TYPE_PROFILING)

 private:
  friend class DeepHeapProfile;

  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
  // for a given allocation call stack trace.
-  struct Bucket : public Stats {
-    uintptr_t    hash;   // Hash value of the stack trace
-    int          depth;  // Depth of stack trace
-    const void** stack;  // Stack trace
-    Bucket*      next;   // Next entry in hash-table
-  };
+  typedef HeapProfileStats::Bucket Bucket;

  // Info stored in the address map
  struct AllocValue {
    // Access to the stack-trace bucket
    Bucket* bucket() const {
      return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
    }
    // This also does set_live(false).
    void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
    size_t bytes;  // Number of bytes in this allocation
(...skipping 24 matching lines...)
    static const int kMask = kLive | kIgnore;

    uintptr_t bucket_rep;
  };
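AllocValue is a classic tagged pointer: a Bucket* is at least 4-byte aligned, so its two low bits are always zero and can carry the live and ignore flags, with kMask stripping them off on read. A self-contained sketch of the same trick (the bit values and names below are assumptions; the real kLive/kIgnore constants sit in the elided lines):

    #include <stdint.h>

    struct Bucket;  // 4+ byte alignment leaves the low two bits free

    static const uintptr_t kLiveBit   = 1;  // assumed value
    static const uintptr_t kIgnoreBit = 2;  // assumed value
    static const uintptr_t kMaskBits  = kLiveBit | kIgnoreBit;

    struct TaggedBucketRef {
      uintptr_t rep;

      Bucket* bucket() const {  // strip flag bits to recover the pointer
        return reinterpret_cast<Bucket*>(rep & ~kMaskBits);
      }
      void set_bucket(Bucket* b) {  // overwrites flags: set_live(false) too
        rep = reinterpret_cast<uintptr_t>(b);
      }
      bool live() const { return (rep & kLiveBit) != 0; }
      void set_live(bool l) {
        rep = (rep & ~kLiveBit) | (l ? kLiveBit : 0);
      }
    };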

  // helper for FindInsideAlloc
  static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }

  typedef AddressMap<AllocValue> AllocationMap;

+  // Arguments that need to be passed to the DumpBucketIterator callback below.
+  struct BufferArgs {
+    char* buf;

    Alexander Potapenko 2013/03/07 06:48:38  Please put the methods (including ctors) before members.
    Dai Mikurube (NOT FULLTIME) 2013/03/07 12:32:16  Done.

+    int buflen;
+    int bufsize;
+
+    BufferArgs(char* a, int b, int c)
+        : buf(a), buflen(b), bufsize(c) { }
+  };

    Alexander Potapenko 2013/03/07 06:48:38  Please add DISALLOW_COPY_AND_ASSIGN here and to other structs.
    Dai Mikurube (NOT FULLTIME) 2013/03/07 12:32:16  Done.
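For context on that request: in base/basictypes.h of this era (already #included above) the macro declares, but never defines, the copy constructor and assignment operator, turning accidental copies into compile or link errors. Roughly the classic pre-C++11 form:

    #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
      TypeName(const TypeName&);               \
      void operator=(const TypeName&)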
+
  // Arguments that need to be passed to the DumpNonLiveIterator callback below.
  struct DumpArgs {
    RawFD fd;              // file to write to
    Stats* profile_stats;  // stats to update (may be NULL)

    DumpArgs(RawFD a, Stats* d)
        : fd(a), profile_stats(d) { }
  };

  // Arguments that need to be passed to the DumpMarkedIterator callback below.
(...skipping 46 matching lines...)
  // counting bucket b.
  //
  // "extra" is appended to the unparsed bucket. Typically it is empty,
  // but may be set to something like " heapprofile" for the total
  // bucket to indicate the type of the profile.
  static int UnparseBucket(const Bucket& b,
                           char* buf, int buflen, int bufsize,
                           const char* extra,
                           Stats* profile_stats);

-  // Deallocate a given allocation map.
-  void DeallocateAllocationMap(AllocationMap* allocation);
-
-  // Deallocate a given bucket table.
-  void DeallocateBucketTable(Bucket** table);
-
-  // Get the bucket for the caller stack trace 'key' of depth 'depth' from a
-  // bucket hash map 'table', creating the bucket if needed. '*bucket_count'
-  // is incremented both when 'bucket_count' is not NULL and when a new
-  // bucket object is created.
-  Bucket* GetBucket(int depth, const void* const key[], Bucket** table,
-                    int* bucket_count);
+  // Get the bucket for the caller stack trace 'key' of depth 'depth',
+  // creating the bucket if needed.
+  Bucket* GetBucket(int depth, const void* const key[]);

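The dropped 'table' and 'bucket_count' parameters reflect that GetBucket now always works on the single member table. Its job is unchanged: hash the stack trace, probe a chained hash table, and allocate a new bucket through the injected allocator on a miss. A hedged sketch of that shape (the hash mixing, parameters, and helper name are illustrative, not the real heap-profile-table.cc code):

    #include <string.h>  // memcmp, memcpy, memset

    Bucket* GetBucketSketch(int depth, const void* const key[],
                            Bucket** table, int num_slots,
                            HeapProfileTable::Allocator alloc) {
      uintptr_t h = 0;
      for (int i = 0; i < depth; i++) {  // mix the return addresses
        h += reinterpret_cast<uintptr_t>(key[i]);
        h += h << 10;
        h ^= h >> 6;
      }
      const unsigned slot = static_cast<unsigned>(h % num_slots);
      for (Bucket* b = table[slot]; b != NULL; b = b->next) {
        if (b->hash == h && b->depth == depth &&
            memcmp(b->stack, key, depth * sizeof(key[0])) == 0) {
          return b;  // existing bucket for this exact stack trace
        }
      }
      // Miss: build a new bucket with the injected allocator (no malloc).
      const void** stack_copy =
          static_cast<const void**>(alloc(depth * sizeof(key[0])));
      memcpy(stack_copy, key, depth * sizeof(key[0]));
      Bucket* b = static_cast<Bucket*>(alloc(sizeof(Bucket)));
      memset(b, 0, sizeof(*b));  // zeroes the inherited Stats counters too
      b->hash = h;
      b->depth = depth;
      b->stack = stack_copy;
      b->next = table[slot];
      table[slot] = b;
      return b;
    }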
  // Helper for IterateAllocs to do callback signature conversion
  // from AllocationMap::Iterate to AllocIterator.
  static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
                                   AllocIterator callback) {
    AllocInfo info;
    info.object_size = v->bytes;
    info.call_stack = v->bucket()->stack;
    info.stack_depth = v->bucket()->depth;
    info.live = v->live();
    info.ignored = v->ignore();
    callback(ptr, info);
  }

+  // Helper to dump a bucket.
+  inline static void DumpBucketIterator(const Bucket* bucket,
+                                        BufferArgs* args);
+
  // Helper for IterateAllocationAddresses.
  inline static void AllocationAddressesIterator(
      const void* ptr,
      AllocValue* v,
      const AllocationAddressIteratorArgs& args);

  // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations.
  inline static void MarkIterator(const void* ptr, AllocValue* v,
                                  const MarkArgs& args);

  // Helper for DumpNonLiveProfile to do object-granularity
  // heap profile dumping. It gets passed to AllocationMap::Iterate.
  inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                         const DumpArgs& args);

  // Helper for DumpMarkedObjects to dump all allocations with a given mark. It
  // gets passed to AllocationMap::Iterate.
  inline static void DumpMarkedIterator(const void* ptr, AllocValue* v,
                                        const DumpMarkedArgs& args);

-  // Helper for filling size variables in buckets with zero.
-  inline static void ZeroBucketCountsIterator(
-      const void* ptr, AllocValue* v, HeapProfileTable* heap_profile);
-
#if defined(TYPE_PROFILING)
  inline static void TallyTypesItererator(const void* ptr,
                                          AllocValue* value,
                                          AddressMap<TypeCount>* type_size_map);

  inline static void DumpTypesIterator(const void* ptr,
                                       TypeCount* size,
                                       const DumpArgs& args);
#endif  // defined(TYPE_PROFILING)

  // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
-  // Creates a sorted list of Buckets whose length is num_alloc_buckets_ +
-  // num_available_mmap_buckets_.
+  // Creates a sorted list of Buckets whose length is num_buckets_.
  // The caller is responsible for deallocating the returned list.
  Bucket** MakeSortedBucketList() const;

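MakeSortedBucketList is the flattening step for profile output: walk every chain of the bucket hash table into one array of length num_buckets_ and sort it. A hedged sketch of that shape (the slot count, comparator, and use of std::sort are assumptions; as the comment says, the caller frees the returned array):

    #include <algorithm>

    static bool ByNetSizeDesc(const Bucket* a, const Bucket* b) {
      return (a->alloc_size - a->free_size) >
             (b->alloc_size - b->free_size);
    }

    Bucket** MakeSortedBucketListSketch(Bucket** table, int num_slots,
                                        int num_buckets,
                                        HeapProfileTable::Allocator alloc) {
      Bucket** list =
          static_cast<Bucket**>(alloc(sizeof(Bucket*) * num_buckets));
      int n = 0;
      for (int slot = 0; slot < num_slots; slot++)
        for (Bucket* b = table[slot]; b != NULL; b = b->next)
          list[n++] = b;
      // Biggest net consumers first, the usual order in profile dumps.
      std::sort(list, list + num_buckets, ByNetSizeDesc);
      return list;
    }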
  // Helper for TakeSnapshot. Saves object to snapshot.
  static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);

  // Arguments passed to AddIfNonLive
  struct AddNonLiveArgs {
    Snapshot* dest;
    Snapshot* base;
(...skipping 12 matching lines...)
                                 AllocationMap* allocations);

  // data ----------------------------

  // Memory (de)allocator that we use.
  Allocator alloc_;
  DeAllocator dealloc_;

  // Overall profile stats; we use only the Stats part,
  // but make it a Bucket to pass to UnparseBucket.
-  // It doesn't contain mmap'ed regions.
  Bucket total_;

+  bool profile_mmap_;
+
  // Bucket hash table for malloc.
  // We hand-craft one instead of using one of the pre-written
  // ones because we do not want to use malloc when operating on the table.
  // It is only a few lines of code, so no big deal.
-  Bucket** alloc_table_;
-  int num_alloc_buckets_;
+  Bucket** bucket_table_;
+  int num_buckets_;
-
-  // Bucket hash table for mmap.
-  // This table is filled with the information from MemoryRegionMap by calling
-  // RefreshMMapData.
-  Bucket** mmap_table_;
-  int num_available_mmap_buckets_;

  // Map of all currently allocated objects and mapped regions we know about.
-  AllocationMap* alloc_address_map_;
-  AllocationMap* mmap_address_map_;
+  AllocationMap* address_map_;

  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
(...skipping 37 matching lines...)
  // Helpers for sorting and generating leak reports
  struct Entry;
  struct ReportState;
  static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  static void ReportObject(const void* ptr, AllocValue* v, char*);

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif  // BASE_HEAP_PROFILE_TABLE_H_