OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 20 matching lines...) Expand all Loading... | |
31 // Author: Sanjay Ghemawat | 31 // Author: Sanjay Ghemawat |
32 // Maxim Lifantsev (refactoring) | 32 // Maxim Lifantsev (refactoring) |
33 // | 33 // |
34 | 34 |
35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ | 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ |
36 #define BASE_HEAP_PROFILE_TABLE_H_ | 36 #define BASE_HEAP_PROFILE_TABLE_H_ |
37 | 37 |
38 #include "addressmap-inl.h" | 38 #include "addressmap-inl.h" |
39 #include "base/basictypes.h" | 39 #include "base/basictypes.h" |
40 #include "base/logging.h" // for RawFD | 40 #include "base/logging.h" // for RawFD |
41 #include "heap-profile-stats.h" | |
41 | 42 |
42 #if defined(TYPE_PROFILING) | 43 #if defined(TYPE_PROFILING) |
43 #include <gperftools/type_profiler_map.h> | 44 #include <gperftools/type_profiler_map.h> |
44 #endif // defined(TYPE_PROFILING) | 45 #endif // defined(TYPE_PROFILING) |
45 | 46 |
46 // Table to maintain a heap profile data inside, | 47 // Table to maintain a heap profile data inside, |
47 // i.e. the set of currently active heap memory allocations. | 48 // i.e. the set of currently active heap memory allocations. |
48 // thread-unsafe and non-reentrant code: | 49 // thread-unsafe and non-reentrant code: |
49 // each instance object must be used by one thread | 50 // each instance object must be used by one thread |
50 // at a time w/o self-recursion. | 51 // at a time w/o self-recursion. |
51 // | 52 // |
52 // TODO(maxim): add a unittest for this class. | 53 // TODO(maxim): add a unittest for this class. |
53 class HeapProfileTable { | 54 class HeapProfileTable { |
54 public: | 55 public: |
55 | 56 |
56 // Extension to be used for heap profile files. | 57 // Extension to be used for heap profile files. |
57 static const char kFileExt[]; | 58 static const char kFileExt[]; |
58 | 59 |
59 // Longest stack trace we record. | 60 // Longest stack trace we record. |
60 static const int kMaxStackDepth = 32; | 61 static const int kMaxStackDepth = 32; |
61 | 62 |
62 // data types ---------------------------- | 63 // data types ---------------------------- |
63 | 64 |
64 // Profile stats. | 65 // Profile stats. |
65 struct Stats { | 66 typedef HeapProfileStats Stats; |
66 int32 allocs; // Number of allocation calls | |
67 int32 frees; // Number of free calls | |
68 int64 alloc_size; // Total size of all allocated objects so far | |
69 int64 free_size; // Total size of all freed objects so far | |
70 | |
71 // semantic equality | |
72 bool Equivalent(const Stats& x) const { | |
73 return allocs - frees == x.allocs - x.frees && | |
74 alloc_size - free_size == x.alloc_size - x.free_size; | |
75 } | |
76 }; | |
77 | 67 |
78 // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New | 68 // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New |
79 // allocations are marked with UNMARKED by default. | 69 // allocations are marked with UNMARKED by default. |
80 enum AllocationMark { | 70 enum AllocationMark { |
81 UNMARKED = 0, | 71 UNMARKED = 0, |
82 MARK_ONE, | 72 MARK_ONE, |
83 MARK_TWO, | 73 MARK_TWO, |
84 MARK_THREE | 74 MARK_THREE |
85 }; | 75 }; |
86 | 76 |
(...skipping 13 matching lines...) Expand all Loading... | |
100 int stack_depth; // Depth of stack trace | 90 int stack_depth; // Depth of stack trace |
101 const void* const* call_stack; // Stack trace | 91 const void* const* call_stack; // Stack trace |
102 }; | 92 }; |
103 | 93 |
104 // Memory (de)allocator interface we'll use. | 94 // Memory (de)allocator interface we'll use. |
105 typedef void* (*Allocator)(size_t size); | 95 typedef void* (*Allocator)(size_t size); |
106 typedef void (*DeAllocator)(void* ptr); | 96 typedef void (*DeAllocator)(void* ptr); |
107 | 97 |
108 // interface --------------------------- | 98 // interface --------------------------- |
109 | 99 |
110 HeapProfileTable(Allocator alloc, DeAllocator dealloc); | 100 HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap); |
111 ~HeapProfileTable(); | 101 ~HeapProfileTable(); |
112 | 102 |
113 // Collect the stack trace for the function that asked to do the | 103 // Collect the stack trace for the function that asked to do the |
114 // allocation for passing to RecordAlloc() below. | 104 // allocation for passing to RecordAlloc() below. |
115 // | 105 // |
116 // The stack trace is stored in 'stack'. The stack depth is returned. | 106 // The stack trace is stored in 'stack'. The stack depth is returned. |
117 // | 107 // |
118 // 'skip_count' gives the number of stack frames between this call | 108 // 'skip_count' gives the number of stack frames between this call |
119 // and the memory allocation function. | 109 // and the memory allocation function. |
120 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); | 110 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
162 // mmap'ed regions. | 152 // mmap'ed regions. |
163 const Stats& total() const { return total_; } | 153 const Stats& total() const { return total_; } |
164 | 154 |
165 // Allocation data iteration callback: gets passed object pointer and | 155 // Allocation data iteration callback: gets passed object pointer and |
166 // fully-filled AllocInfo. | 156 // fully-filled AllocInfo. |
167 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 157 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
168 | 158 |
169 // Iterate over the allocation profile data calling "callback" | 159 // Iterate over the allocation profile data calling "callback" |
170 // for every allocation. | 160 // for every allocation. |
171 void IterateAllocs(AllocIterator callback) const { | 161 void IterateAllocs(AllocIterator callback) const { |
172 alloc_address_map_->Iterate(MapArgsAllocIterator, callback); | 162 address_map_->Iterate(MapArgsAllocIterator, callback); |
173 } | 163 } |
174 | 164 |
175 // Callback for iterating through addresses of all allocated objects. Accepts | 165 // Callback for iterating through addresses of all allocated objects. Accepts |
176 // pointer to user data and object pointer. | 166 // pointer to user data and object pointer. |
177 typedef void (*AddressIterator)(void* data, const void* ptr); | 167 typedef void (*AddressIterator)(void* data, const void* ptr); |
178 | 168 |
179 // Iterate over the addresses of all allocated objects. | 169 // Iterate over the addresses of all allocated objects. |
180 void IterateAllocationAddresses(AddressIterator, void* data); | 170 void IterateAllocationAddresses(AddressIterator, void* data); |
181 | 171 |
182 // Allocation context profile data iteration callback | 172 // Allocation context profile data iteration callback |
(...skipping 24 matching lines...) Expand all Loading... | |
207 // Release a previously taken snapshot. snapshot must not | 197 // Release a previously taken snapshot. snapshot must not |
208 // be used after this call. | 198 // be used after this call. |
209 void ReleaseSnapshot(Snapshot* snapshot); | 199 void ReleaseSnapshot(Snapshot* snapshot); |
210 | 200 |
211 // Return a snapshot of every non-live, non-ignored object in *this. | 201 // Return a snapshot of every non-live, non-ignored object in *this. |
212 // If "base" is non-NULL, skip any objects present in "base". | 202 // If "base" is non-NULL, skip any objects present in "base". |
213 // As a side-effect, clears the "live" bit on every live object in *this. | 203 // As a side-effect, clears the "live" bit on every live object in *this. |
214 // Caller must call ReleaseSnapshot() on result when no longer needed. | 204 // Caller must call ReleaseSnapshot() on result when no longer needed. |
215 Snapshot* NonLiveSnapshot(Snapshot* base); | 205 Snapshot* NonLiveSnapshot(Snapshot* base); |
216 | 206 |
217 // Refresh the internal mmap information from MemoryRegionMap. Results of | |
218 // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed | |
219 // memory regions as at calling RefreshMMapData. | |
220 // 'mmap_alloc' is an allocator for an address map. A function which calls | |
221 // LowLevelAlloc::AllocWithArena is expected like the constructor. | |
222 // 'mmap_dealloc' is a corresponding deallocator to 'mmap_alloc'. | |
223 // They are introduced to avoid expected memory fragmentation and bloat in | |
224 // an arena. A dedicated arena for this function allows disposing of the | |
225 // whole arena after ClearMMapData. | |
226 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); | |
227 | |
228 // Clear the internal mmap information. Results of FillOrderedProfile and | |
229 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after | |
230 // calling ClearMMapData. | |
231 void ClearMMapData(); | |
232 | |
233 // Dump a list of allocations marked as "live" along with their creation | 207 // Dump a list of allocations marked as "live" along with their creation |
234 // stack traces and sizes to a file named |file_name|. Together with | 208 // stack traces and sizes to a file named |file_name|. Together with |
235 // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used | 209 // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used |
236 // to find objects that are created in a certain time span: | 210 // to find objects that are created in a certain time span: |
237 // 1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the | 211 // 1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the |
238 // timespan. | 212 // timespan. |
239 // 2. Perform whatever action you suspect allocates memory that is not | 213 // 2. Perform whatever action you suspect allocates memory that is not |
240 // correctly freed. | 214 // correctly freed. |
241 // 3. Invoke MarkUnmarkedAllocations(MARK_TWO). | 215 // 3. Invoke MarkUnmarkedAllocations(MARK_TWO). |
242 // 4. Perform whatever action is supposed to free the memory again. New | 216 // 4. Perform whatever action is supposed to free the memory again. New |
(...skipping 10 matching lines...) Expand all Loading... | |
253 void DumpTypeStatistics(const char* file_name) const; | 227 void DumpTypeStatistics(const char* file_name) const; |
254 #endif // defined(TYPE_PROFILING) | 228 #endif // defined(TYPE_PROFILING) |
255 | 229 |
256 private: | 230 private: |
257 friend class DeepHeapProfile; | 231 friend class DeepHeapProfile; |
258 | 232 |
259 // data types ---------------------------- | 233 // data types ---------------------------- |
260 | 234 |
261 // Hash table bucket to hold (de)allocation stats | 235 // Hash table bucket to hold (de)allocation stats |
262 // for a given allocation call stack trace. | 236 // for a given allocation call stack trace. |
263 struct Bucket : public Stats { | 237 typedef HeapProfileBucket Bucket; |
264 uintptr_t hash; // Hash value of the stack trace | |
265 int depth; // Depth of stack trace | |
266 const void** stack; // Stack trace | |
267 Bucket* next; // Next entry in hash-table | |
268 }; | |
269 | 238 |
270 // Info stored in the address map | 239 // Info stored in the address map |
271 struct AllocValue { | 240 struct AllocValue { |
272 // Access to the stack-trace bucket | 241 // Access to the stack-trace bucket |
273 Bucket* bucket() const { | 242 Bucket* bucket() const { |
274 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); | 243 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); |
275 } | 244 } |
276 // This also does set_live(false). | 245 // This also does set_live(false). |
277 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } | 246 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } |
278 size_t bytes; // Number of bytes in this allocation | 247 size_t bytes; // Number of bytes in this allocation |
(...skipping 24 matching lines...) Expand all Loading... | |
303 static const int kMask = kLive | kIgnore; | 272 static const int kMask = kLive | kIgnore; |
304 | 273 |
305 uintptr_t bucket_rep; | 274 uintptr_t bucket_rep; |
306 }; | 275 }; |
307 | 276 |
308 // helper for FindInsideAlloc | 277 // helper for FindInsideAlloc |
309 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } | 278 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } |
310 | 279 |
311 typedef AddressMap<AllocValue> AllocationMap; | 280 typedef AddressMap<AllocValue> AllocationMap; |
312 | 281 |
282 // Arguments that need to be passed DumpBucketIterator callback below. | |
283 struct BufferArgs { | |
284 BufferArgs(char* a, int b, int c) | |
jar (doing other things)
2013/03/08 04:43:31
nit: avoid using one character variable names.
Dai Mikurube (NOT FULLTIME)
2013/03/12 08:24:10
Ok, done.
Also, I fixed (refactored) other existi
| |
285 : buf(a), buflen(b), bufsize(c) { } | |
jar (doing other things)
2013/03/08 04:43:31
nit: indent
Dai Mikurube (NOT FULLTIME)
2013/03/12 08:24:10
Done, and same for others.
| |
286 | |
287 char* buf; | |
288 int buflen; | |
289 int bufsize; | |
290 | |
291 DISALLOW_COPY_AND_ASSIGN(BufferArgs); | |
292 }; | |
293 | |
313 // Arguments that need to be passed DumpNonLiveIterator callback below. | 294 // Arguments that need to be passed DumpNonLiveIterator callback below. |
314 struct DumpArgs { | 295 struct DumpArgs { |
315 RawFD fd; // file to write to | 296 RawFD fd; // file to write to |
316 Stats* profile_stats; // stats to update (may be NULL) | 297 Stats* profile_stats; // stats to update (may be NULL) |
317 | 298 |
318 DumpArgs(RawFD a, Stats* d) | 299 DumpArgs(RawFD a, Stats* d) |
319 : fd(a), profile_stats(d) { } | 300 : fd(a), profile_stats(d) { } |
320 }; | 301 }; |
321 | 302 |
322 // Arguments that need to be passed DumpMarkedIterator callback below. | 303 // Arguments that need to be passed DumpMarkedIterator callback below. |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
369 // counting bucket b. | 350 // counting bucket b. |
370 // | 351 // |
371 // "extra" is appended to the unparsed bucket. Typically it is empty, | 352 // "extra" is appended to the unparsed bucket. Typically it is empty, |
372 // but may be set to something like " heapprofile" for the total | 353 // but may be set to something like " heapprofile" for the total |
373 // bucket to indicate the type of the profile. | 354 // bucket to indicate the type of the profile. |
374 static int UnparseBucket(const Bucket& b, | 355 static int UnparseBucket(const Bucket& b, |
375 char* buf, int buflen, int bufsize, | 356 char* buf, int buflen, int bufsize, |
376 const char* extra, | 357 const char* extra, |
377 Stats* profile_stats); | 358 Stats* profile_stats); |
378 | 359 |
379 // Deallocate a given allocation map. | 360 // Get the bucket for the caller stack trace 'key' of depth 'depth' |
380 void DeallocateAllocationMap(AllocationMap* allocation); | 361 // creating the bucket if needed. |
381 | 362 Bucket* GetBucket(int depth, const void* const key[]); |
382 // Deallocate a given bucket table. | |
383 void DeallocateBucketTable(Bucket** table); | |
384 | |
385 // Get the bucket for the caller stack trace 'key' of depth 'depth' from a | |
386 // bucket hash map 'table' creating the bucket if needed. '*bucket_count' | |
387 // is incremented both when 'bucket_count' is not NULL and when a new | |
388 // bucket object is created. | |
389 Bucket* GetBucket(int depth, const void* const key[], Bucket** table, | |
390 int* bucket_count); | |
391 | 363 |
392 // Helper for IterateAllocs to do callback signature conversion | 364 // Helper for IterateAllocs to do callback signature conversion |
393 // from AllocationMap::Iterate to AllocIterator. | 365 // from AllocationMap::Iterate to AllocIterator. |
394 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, | 366 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, |
395 AllocIterator callback) { | 367 AllocIterator callback) { |
396 AllocInfo info; | 368 AllocInfo info; |
397 info.object_size = v->bytes; | 369 info.object_size = v->bytes; |
398 info.call_stack = v->bucket()->stack; | 370 info.call_stack = v->bucket()->stack; |
399 info.stack_depth = v->bucket()->depth; | 371 info.stack_depth = v->bucket()->depth; |
400 info.live = v->live(); | 372 info.live = v->live(); |
401 info.ignored = v->ignore(); | 373 info.ignored = v->ignore(); |
402 callback(ptr, info); | 374 callback(ptr, info); |
403 } | 375 } |
404 | 376 |
377 // Helper to dump a bucket. | |
378 inline static void DumpBucketIterator(const Bucket* bucket, | |
379 BufferArgs* args); | |
380 | |
405 // Helper for IterateAllocationAddresses. | 381 // Helper for IterateAllocationAddresses. |
406 inline static void AllocationAddressesIterator( | 382 inline static void AllocationAddressesIterator( |
407 const void* ptr, | 383 const void* ptr, |
408 AllocValue* v, | 384 AllocValue* v, |
409 const AllocationAddressIteratorArgs& args); | 385 const AllocationAddressIteratorArgs& args); |
410 | 386 |
411 // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations. | 387 // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations. |
412 inline static void MarkIterator(const void* ptr, AllocValue* v, | 388 inline static void MarkIterator(const void* ptr, AllocValue* v, |
413 const MarkArgs& args); | 389 const MarkArgs& args); |
414 | 390 |
415 // Helper for DumpNonLiveProfile to do object-granularity | 391 // Helper for DumpNonLiveProfile to do object-granularity |
416 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 392 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
417 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 393 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
418 const DumpArgs& args); | 394 const DumpArgs& args); |
419 | 395 |
420 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It | 396 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It |
421 // gets passed to AllocationMap::Iterate. | 397 // gets passed to AllocationMap::Iterate. |
422 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, | 398 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, |
423 const DumpMarkedArgs& args); | 399 const DumpMarkedArgs& args); |
424 | 400 |
425 // Helper for filling size variables in buckets by zero. | |
426 inline static void ZeroBucketCountsIterator( | |
427 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | |
428 | |
429 #if defined(TYPE_PROFILING) | 401 #if defined(TYPE_PROFILING) |
430 inline static void TallyTypesItererator(const void* ptr, | 402 inline static void TallyTypesItererator(const void* ptr, |
431 AllocValue* value, | 403 AllocValue* value, |
432 AddressMap<TypeCount>* type_size_map); | 404 AddressMap<TypeCount>* type_size_map); |
433 | 405 |
434 inline static void DumpTypesIterator(const void* ptr, | 406 inline static void DumpTypesIterator(const void* ptr, |
435 TypeCount* size, | 407 TypeCount* size, |
436 const DumpArgs& args); | 408 const DumpArgs& args); |
437 #endif // defined(TYPE_PROFILING) | 409 #endif // defined(TYPE_PROFILING) |
438 | 410 |
439 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 411 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
440 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 412 // Creates a sorted list of Buckets whose length is num_buckets_. |
441 // num_available_mmap_buckets_. | |
442 // The caller is responsible for deallocating the returned list. | 413 // The caller is responsible for deallocating the returned list. |
443 Bucket** MakeSortedBucketList() const; | 414 Bucket** MakeSortedBucketList() const; |
444 | 415 |
445 // Helper for TakeSnapshot. Saves object to snapshot. | 416 // Helper for TakeSnapshot. Saves object to snapshot. |
446 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 417 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
447 | 418 |
448 // Arguments passed to AddIfNonLive | 419 // Arguments passed to AddIfNonLive |
449 struct AddNonLiveArgs { | 420 struct AddNonLiveArgs { |
450 Snapshot* dest; | 421 Snapshot* dest; |
451 Snapshot* base; | 422 Snapshot* base; |
(...skipping 12 matching lines...) Expand all Loading... | |
464 AllocationMap* allocations); | 435 AllocationMap* allocations); |
465 | 436 |
466 // data ---------------------------- | 437 // data ---------------------------- |
467 | 438 |
468 // Memory (de)allocator that we use. | 439 // Memory (de)allocator that we use. |
469 Allocator alloc_; | 440 Allocator alloc_; |
470 DeAllocator dealloc_; | 441 DeAllocator dealloc_; |
471 | 442 |
472 // Overall profile stats; we use only the Stats part, | 443 // Overall profile stats; we use only the Stats part, |
473 // but make it a Bucket to pass to UnparseBucket. | 444 // but make it a Bucket to pass to UnparseBucket. |
474 // It doesn't contain mmap'ed regions. | |
475 Bucket total_; | 445 Bucket total_; |
476 | 446 |
447 bool profile_mmap_; | |
448 | |
477 // Bucket hash table for malloc. | 449 // Bucket hash table for malloc. |
478 // We hand-craft one instead of using one of the pre-written | 450 // We hand-craft one instead of using one of the pre-written |
479 // ones because we do not want to use malloc when operating on the table. | 451 // ones because we do not want to use malloc when operating on the table. |
480 // It is only few lines of code, so no big deal. | 452 // It is only few lines of code, so no big deal. |
481 Bucket** alloc_table_; | 453 Bucket** bucket_table_; |
482 int num_alloc_buckets_; | 454 int num_buckets_; |
483 | |
484 // Bucket hash table for mmap. | |
485 // This table is filled with the information from MemoryRegionMap by calling | |
486 // RefreshMMapData. | |
487 Bucket** mmap_table_; | |
488 int num_available_mmap_buckets_; | |
489 | 455 |
490 // Map of all currently allocated objects and mapped regions we know about. | 456 // Map of all currently allocated objects and mapped regions we know about. |
491 AllocationMap* alloc_address_map_; | 457 AllocationMap* address_map_; |
492 AllocationMap* mmap_address_map_; | |
493 | 458 |
494 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 459 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
495 }; | 460 }; |
496 | 461 |
497 class HeapProfileTable::Snapshot { | 462 class HeapProfileTable::Snapshot { |
498 public: | 463 public: |
499 const Stats& total() const { return total_; } | 464 const Stats& total() const { return total_; } |
500 | 465 |
501 // Report anything in this snapshot as a leak. | 466 // Report anything in this snapshot as a leak. |
502 // May use new/delete for temporary storage. | 467 // May use new/delete for temporary storage. |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
540 // Helpers for sorting and generating leak reports | 505 // Helpers for sorting and generating leak reports |
541 struct Entry; | 506 struct Entry; |
542 struct ReportState; | 507 struct ReportState; |
543 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 508 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
544 static void ReportObject(const void* ptr, AllocValue* v, char*); | 509 static void ReportObject(const void* ptr, AllocValue* v, char*); |
545 | 510 |
546 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 511 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
547 }; | 512 }; |
548 | 513 |
549 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 514 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |