OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 20 matching lines...) |
31 // Author: Sanjay Ghemawat | 31 // Author: Sanjay Ghemawat |
32 // Maxim Lifantsev (refactoring) | 32 // Maxim Lifantsev (refactoring) |
33 // | 33 // |
34 | 34 |
35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ | 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ |
36 #define BASE_HEAP_PROFILE_TABLE_H_ | 36 #define BASE_HEAP_PROFILE_TABLE_H_ |
37 | 37 |
38 #include "addressmap-inl.h" | 38 #include "addressmap-inl.h" |
39 #include "base/basictypes.h" | 39 #include "base/basictypes.h" |
40 #include "base/logging.h" // for RawFD | 40 #include "base/logging.h" // for RawFD |
| 41 #include "heap-profile-stats.h" |
41 | 42 |
42 #if defined(TYPE_PROFILING) | 43 #if defined(TYPE_PROFILING) |
43 #include <gperftools/type_profiler_map.h> | 44 #include <gperftools/type_profiler_map.h> |
44 #endif // defined(TYPE_PROFILING) | 45 #endif // defined(TYPE_PROFILING) |
45 | 46 |
46 // Table to maintain heap profile data, | 47 // Table to maintain heap profile data, |
47 // i.e. the set of currently active heap memory allocations. | 48 // i.e. the set of currently active heap memory allocations. |
48 // Thread-unsafe and non-reentrant code: | 49 // Thread-unsafe and non-reentrant code: |
49 // each instance object must be used by one thread | 50 // each instance object must be used by one thread |
50 // at a time w/o self-recursion. | 51 // at a time w/o self-recursion. |
51 // | 52 // |
52 // TODO(maxim): add a unittest for this class. | 53 // TODO(maxim): add a unittest for this class. |
53 class HeapProfileTable { | 54 class HeapProfileTable { |
54 public: | 55 public: |
55 | 56 |
56 // Extension to be used for heap profile files. | 57 // Extension to be used for heap profile files. |
57 static const char kFileExt[]; | 58 static const char kFileExt[]; |
58 | 59 |
59 // Longest stack trace we record. | 60 // Longest stack trace we record. |
60 static const int kMaxStackDepth = 32; | 61 static const int kMaxStackDepth = 32; |
61 | 62 |
62 // data types ---------------------------- | 63 // data types ---------------------------- |
63 | 64 |
64 // Profile stats. | 65 // Profile stats. |
65 struct Stats { | 66 typedef HeapProfileStats Stats; |
66 int32 allocs; // Number of allocation calls | |
67 int32 frees; // Number of free calls | |
68 int64 alloc_size; // Total size of all allocated objects so far | |
69 int64 free_size; // Total size of all freed objects so far | |
70 | |
71 // semantic equality | |
72 bool Equivalent(const Stats& x) const { | |
73 return allocs - frees == x.allocs - x.frees && | |
74 alloc_size - free_size == x.alloc_size - x.free_size; | |
75 } | |
76 }; | |
77 | 67 |
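Note on the refactoring above: the inline Stats struct moves behind the new heap-profile-stats.h include, presumably as HeapProfileStats. A minimal sketch of what that header would contain, reconstructed from the removed lines (the actual header contents are an assumption):

    // Reconstruction of the assumed heap-profile-stats.h definition.
    struct HeapProfileStats {
      int32 allocs;      // Number of allocation calls
      int32 frees;       // Number of free calls
      int64 alloc_size;  // Total size of all allocated objects so far
      int64 free_size;   // Total size of all freed objects so far

      // Semantic equality: stats agree on net allocations and net bytes,
      // even if the raw alloc/free totals differ.
      bool Equivalent(const HeapProfileStats& x) const {
        return allocs - frees == x.allocs - x.frees &&
               alloc_size - free_size == x.alloc_size - x.free_size;
      }
    };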
78 // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New | 68 // Possible marks for MarkCurrentAllocations and MarkUnmarkedAllocations. New |
79 // allocations are marked with UNMARKED by default. | 69 // allocations are marked with UNMARKED by default. |
80 enum AllocationMark { | 70 enum AllocationMark { |
81 UNMARKED = 0, | 71 UNMARKED = 0, |
82 MARK_ONE, | 72 MARK_ONE, |
83 MARK_TWO, | 73 MARK_TWO, |
84 MARK_THREE | 74 MARK_THREE |
85 }; | 75 }; |
86 | 76 |
(...skipping 13 matching lines...) |
100 int stack_depth; // Depth of stack trace | 90 int stack_depth; // Depth of stack trace |
101 const void* const* call_stack; // Stack trace | 91 const void* const* call_stack; // Stack trace |
102 }; | 92 }; |
103 | 93 |
104 // Memory (de)allocator interface we'll use. | 94 // Memory (de)allocator interface we'll use. |
105 typedef void* (*Allocator)(size_t size); | 95 typedef void* (*Allocator)(size_t size); |
106 typedef void (*DeAllocator)(void* ptr); | 96 typedef void (*DeAllocator)(void* ptr); |
107 | 97 |
108 // interface --------------------------- | 98 // interface --------------------------- |
109 | 99 |
110 HeapProfileTable(Allocator alloc, DeAllocator dealloc); | 100 HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap); |
111 ~HeapProfileTable(); | 101 ~HeapProfileTable(); |
112 | 102 |
113 // Collect the stack trace for the function that asked to do the | 103 // Collect the stack trace for the function that asked to do the |
114 // allocation for passing to RecordAlloc() below. | 104 // allocation for passing to RecordAlloc() below. |
115 // | 105 // |
116 // The stack trace is stored in 'stack'. The stack depth is returned. | 106 // The stack trace is stored in 'stack'. The stack depth is returned. |
117 // | 107 // |
118 // 'skip_count' gives the number of stack frames between this call | 108 // 'skip_count' gives the number of stack frames between this call |
119 // and the memory allocation function. | 109 // and the memory allocation function. |
120 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); | 110 static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]); |
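A hedged sketch of the two-step record flow this declaration supports; RecordAlloc's exact signature sits in the elided hunk below, so the call shown here is an assumption based on the surrounding comments:

    // Inside an allocation hook: capture the caller's stack, then record it.
    void* stack[HeapProfileTable::kMaxStackDepth];
    int depth = HeapProfileTable::GetCallerStackTrace(0 /* skip_count */, stack);
    table->RecordAlloc(ptr, bytes, depth, stack);  // assumed signature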
(...skipping 41 matching lines...) |
162 // mmap'ed regions. | 152 // mmap'ed regions. |
163 const Stats& total() const { return total_; } | 153 const Stats& total() const { return total_; } |
164 | 154 |
165 // Allocation data iteration callback: gets passed object pointer and | 155 // Allocation data iteration callback: gets passed object pointer and |
166 // fully-filled AllocInfo. | 156 // fully-filled AllocInfo. |
167 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 157 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
168 | 158 |
169 // Iterate over the allocation profile data calling "callback" | 159 // Iterate over the allocation profile data calling "callback" |
170 // for every allocation. | 160 // for every allocation. |
171 void IterateAllocs(AllocIterator callback) const { | 161 void IterateAllocs(AllocIterator callback) const { |
172 alloc_address_map_->Iterate(MapArgsAllocIterator, callback); | 162 address_map_->Iterate(MapArgsAllocIterator, callback); |
173 } | 163 } |
174 | 164 |
175 // Callback for iterating through addresses of all allocated objects. Accepts | 165 // Callback for iterating through addresses of all allocated objects. Accepts |
176 // pointer to user data and object pointer. | 166 // pointer to user data and object pointer. |
177 typedef void (*AddressIterator)(void* data, const void* ptr); | 167 typedef void (*AddressIterator)(void* data, const void* ptr); |
178 | 168 |
179 // Iterate over the addresses of all allocated objects. | 169 // Iterate over the addresses of all allocated objects. |
180 void IterateAllocationAddresses(AddressIterator, void* data); | 170 void IterateAllocationAddresses(AddressIterator, void* data); |
181 | 171 |
182 // Allocation context profile data iteration callback | 172 // Allocation context profile data iteration callback |
(...skipping 24 matching lines...) |
207 // Release a previously taken snapshot. snapshot must not | 197 // Release a previously taken snapshot. snapshot must not |
208 // be used after this call. | 198 // be used after this call. |
209 void ReleaseSnapshot(Snapshot* snapshot); | 199 void ReleaseSnapshot(Snapshot* snapshot); |
210 | 200 |
211 // Return a snapshot of every non-live, non-ignored object in *this. | 201 // Return a snapshot of every non-live, non-ignored object in *this. |
212 // If "base" is non-NULL, skip any objects present in "base". | 202 // If "base" is non-NULL, skip any objects present in "base". |
213 // As a side-effect, clears the "live" bit on every live object in *this. | 203 // As a side-effect, clears the "live" bit on every live object in *this. |
214 // Caller must call ReleaseSnapshot() on result when no longer needed. | 204 // Caller must call ReleaseSnapshot() on result when no longer needed. |
215 Snapshot* NonLiveSnapshot(Snapshot* base); | 205 Snapshot* NonLiveSnapshot(Snapshot* base); |
216 | 206 |
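The snapshot methods compose into a leak-check pattern along these lines; TakeSnapshot is assumed from the helper comments later in this header, and the surrounding calls are a sketch, not the profiler's actual flow:

    // Baseline before the work, non-live diff afterwards.
    HeapProfileTable::Snapshot* base = table->TakeSnapshot();
    DoWorkThatShouldCleanUpAfterItself();   // hypothetical workload
    HeapProfileTable::Snapshot* leaks = table->NonLiveSnapshot(base);
    // leaks->total() now summarizes candidate leaks; report, then release.
    table->ReleaseSnapshot(leaks);
    table->ReleaseSnapshot(base);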
217 // Refresh the internal mmap information from MemoryRegionMap. Results of | |
218 // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed | |
219 // memory regions as of the call to RefreshMMapData. | |
220 // 'mmap_alloc' is an allocator for an address map. A function that calls | |
221 // LowLevelAlloc::AllocWithArena, as in the constructor, is expected. | |
222 // 'mmap_dealloc' is the deallocator corresponding to 'mmap_alloc'. | |
223 // They are introduced to avoid the expected memory fragmentation and bloat | |
224 // in an arena. A dedicated arena for this function allows disposing of the | |
225 // whole arena after ClearMMapData. | |
226 void RefreshMMapData(Allocator mmap_alloc, DeAllocator mmap_dealloc); | |
227 | |
228 // Clear the internal mmap information. Results of FillOrderedProfile and | |
229 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after | |
230 // calling ClearMMapData. | |
231 void ClearMMapData(); | |
232 | |
233 // Dump a list of allocations marked as "live" along with their creation | 207 // Dump a list of allocations marked as "live" along with their creation |
234 // stack traces and sizes to a file named |file_name|. Together with | 208 // stack traces and sizes to a file named |file_name|. Together with |
235 // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used | 209 // MarkCurrentAllocations and MarkUnmarkedAllocations this can be used |
236 // to find objects that are created in a certain time span: | 210 // to find objects that are created in a certain time span: |
237 // 1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the | 211 // 1. Invoke MarkCurrentAllocations(MARK_ONE) to mark the start of the |
238 // timespan. | 212 // timespan. |
239 // 2. Perform whatever action you suspect allocates memory that is not | 213 // 2. Perform whatever action you suspect allocates memory that is not |
240 // correctly freed. | 214 // correctly freed. |
241 // 3. Invoke MarkUnmarkedAllocations(MARK_TWO). | 215 // 3. Invoke MarkUnmarkedAllocations(MARK_TWO). |
242 // 4. Perform whatever action is supposed to free the memory again. New | 216 // 4. Perform whatever action is supposed to free the memory again. New |
(...skipping 10 matching lines...) |
253 void DumpTypeStatistics(const char* file_name) const; | 227 void DumpTypeStatistics(const char* file_name) const; |
254 #endif // defined(TYPE_PROFILING) | 228 #endif // defined(TYPE_PROFILING) |
255 | 229 |
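The four-step mark-and-dump recipe in the comment above maps onto calls roughly like the following; DumpMarkedObjects' parameter order is assumed from DumpMarkedArgs further down:

    table->MarkCurrentAllocations(HeapProfileTable::MARK_ONE);  // step 1
    RunSuspectCode();                                           // step 2
    table->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO); // step 3
    RunCleanupCode();                                           // step 4
    // Objects still carrying MARK_TWO were created in the window and never
    // freed; dump them with their creation stacks (signature assumed).
    table->DumpMarkedObjects(HeapProfileTable::MARK_TWO, "leaked.heap");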
256 private: | 230 private: |
257 friend class DeepHeapProfile; | 231 friend class DeepHeapProfile; |
258 | 232 |
259 // data types ---------------------------- | 233 // data types ---------------------------- |
260 | 234 |
261 // Hash table bucket to hold (de)allocation stats | 235 // Hash table bucket to hold (de)allocation stats |
262 // for a given allocation call stack trace. | 236 // for a given allocation call stack trace. |
263 struct Bucket : public Stats { | 237 typedef HeapProfileBucket Bucket; |
264 uintptr_t hash; // Hash value of the stack trace | |
265 int depth; // Depth of stack trace | |
266 const void** stack; // Stack trace | |
267 Bucket* next; // Next entry in hash-table | |
268 }; | |
269 | 238 |
270 // Info stored in the address map | 239 // Info stored in the address map |
271 struct AllocValue { | 240 struct AllocValue { |
272 // Access to the stack-trace bucket | 241 // Access to the stack-trace bucket |
273 Bucket* bucket() const { | 242 Bucket* bucket() const { |
274 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); | 243 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); |
275 } | 244 } |
276 // This also does set_live(false). | 245 // This also does set_live(false). |
277 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } | 246 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } |
278 size_t bytes; // Number of bytes in this allocation | 247 size_t bytes; // Number of bytes in this allocation |
(...skipping 24 matching lines...) |
303 static const int kMask = kLive | kIgnore; | 272 static const int kMask = kLive | kIgnore; |
304 | 273 |
305 uintptr_t bucket_rep; | 274 uintptr_t bucket_rep; |
306 }; | 275 }; |
307 | 276 |
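AllocValue tags the low bits of the bucket pointer with the live/ignore flags; buckets are word-aligned, so those bits are otherwise zero. A standalone sketch of the scheme (the concrete kLive/kIgnore values are in an elided hunk and assumed here):

    const uintptr_t kLive = 1;    // assumed flag values
    const uintptr_t kIgnore = 2;
    const uintptr_t kMask = kLive | kIgnore;

    uintptr_t rep = reinterpret_cast<uintptr_t>(bucket);  // flags start clear
    rep |= kLive;                                          // set_live(true)
    Bucket* b = reinterpret_cast<Bucket*>(rep & ~kMask);   // strip flags
    bool live = (rep & kLive) != 0;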
308 // helper for FindInsideAlloc | 277 // helper for FindInsideAlloc |
309 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } | 278 static size_t AllocValueSize(const AllocValue& v) { return v.bytes; } |
310 | 279 |
311 typedef AddressMap<AllocValue> AllocationMap; | 280 typedef AddressMap<AllocValue> AllocationMap; |
312 | 281 |
| 282 // Arguments that need to be passed to the DumpBucketIterator callback below. |
| 283 struct BufferArgs { |
| 284 BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg) |
| 285 : buf(buf_arg), |
| 286 buflen(buflen_arg), |
| 287 bufsize(bufsize_arg) { |
| 288 } |
| 289 |
| 290 char* buf; |
| 291 int buflen; |
| 292 int bufsize; |
| 293 |
| 294 DISALLOW_COPY_AND_ASSIGN(BufferArgs); |
| 295 }; |
| 296 |
313 // Arguments that need to be passed to the DumpNonLiveIterator callback below. | 297 // Arguments that need to be passed to the DumpNonLiveIterator callback below. |
314 struct DumpArgs { | 298 struct DumpArgs { |
| 299 DumpArgs(RawFD fd_arg, Stats* profile_stats_arg) |
| 300 : fd(fd_arg), |
| 301 profile_stats(profile_stats_arg) { |
| 302 } |
| 303 |
315 RawFD fd; // file to write to | 304 RawFD fd; // file to write to |
316 Stats* profile_stats; // stats to update (may be NULL) | 305 Stats* profile_stats; // stats to update (may be NULL) |
317 | |
318 DumpArgs(RawFD a, Stats* d) | |
319 : fd(a), profile_stats(d) { } | |
320 }; | 306 }; |
321 | 307 |
322 // Arguments that need to be passed to the DumpMarkedIterator callback below. | 308 // Arguments that need to be passed to the DumpMarkedIterator callback below. |
323 struct DumpMarkedArgs { | 309 struct DumpMarkedArgs { |
| 310 DumpMarkedArgs(RawFD fd_arg, AllocationMark mark_arg) |
| 311 : fd(fd_arg), |
| 312 mark(mark_arg) { |
| 313 } |
| 314 |
324 RawFD fd; // file to write to. | 315 RawFD fd; // file to write to. |
325 AllocationMark mark; // The mark of the allocations to process. | 316 AllocationMark mark; // The mark of the allocations to process. |
326 | |
327 DumpMarkedArgs(RawFD a, AllocationMark m) : fd(a), mark(m) { } | |
328 }; | 317 }; |
329 | 318 |
330 // Arguments that need to be passed to the MarkIterator callback below. | 319 // Arguments that need to be passed to the MarkIterator callback below. |
331 struct MarkArgs { | 320 struct MarkArgs { |
| 321 MarkArgs(AllocationMark mark_arg, bool mark_all_arg) |
| 322 : mark(mark_arg), |
| 323 mark_all(mark_all_arg) { |
| 324 } |
| 325 |
332 AllocationMark mark; // The mark to put on allocations. | 326 AllocationMark mark; // The mark to put on allocations. |
333 bool mark_all; // True if all allocations should be marked. Otherwise just | 327 bool mark_all; // True if all allocations should be marked. Otherwise just |
334 // mark unmarked allocations. | 328 // mark unmarked allocations. |
335 | |
336 MarkArgs(AllocationMark m, bool a) : mark(m), mark_all(a) { } | |
337 }; | 329 }; |
338 | 330 |
339 #if defined(TYPE_PROFILING) | 331 #if defined(TYPE_PROFILING) |
340 struct TypeCount { | 332 struct TypeCount { |
341 size_t bytes; | |
342 unsigned int objects; | |
343 | |
344 TypeCount(size_t bytes_arg, unsigned int objects_arg) | 333 TypeCount(size_t bytes_arg, unsigned int objects_arg) |
345 : bytes(bytes_arg), | 334 : bytes(bytes_arg), |
346 objects(objects_arg) { | 335 objects(objects_arg) { |
347 } | 336 } |
| 337 |
| 338 size_t bytes; |
| 339 unsigned int objects; |
348 }; | 340 }; |
349 #endif // defined(TYPE_PROFILING) | 341 #endif // defined(TYPE_PROFILING) |
350 | 342 |
351 struct AllocationAddressIteratorArgs { | 343 struct AllocationAddressIteratorArgs { |
| 344 AllocationAddressIteratorArgs(AddressIterator callback_arg, void* data_arg) |
| 345 : callback(callback_arg), |
| 346 data(data_arg) { |
| 347 } |
| 348 |
352 AddressIterator callback; | 349 AddressIterator callback; |
353 void* data; | 350 void* data; |
354 | |
355 AllocationAddressIteratorArgs(AddressIterator iterator, void* d) | |
356 : callback(iterator), | |
357 data(d) { | |
358 } | |
359 }; | 351 }; |
360 | 352 |
361 // helpers ---------------------------- | 353 // helpers ---------------------------- |
362 | 354 |
363 // Unparse bucket b and print its portion of the profile dump into buf. | 355 // Unparse bucket b and print its portion of the profile dump into buf. |
364 // We return the amount of space in buf that we use. We start printing | 356 // We return the amount of space in buf that we use. We start printing |
365 // at buf + buflen, and promise not to go beyond buf + bufsize. | 357 // at buf + buflen, and promise not to go beyond buf + bufsize. |
366 // We make no provision for 0-terminating 'buf'. | 358 // We make no provision for 0-terminating 'buf'. |
367 // | 359 // |
368 // If profile_stats is non-NULL, we update *profile_stats by | 360 // If profile_stats is non-NULL, we update *profile_stats by |
369 // counting bucket b. | 361 // counting bucket b. |
370 // | 362 // |
371 // "extra" is appended to the unparsed bucket. Typically it is empty, | 363 // "extra" is appended to the unparsed bucket. Typically it is empty, |
372 // but may be set to something like " heapprofile" for the total | 364 // but may be set to something like " heapprofile" for the total |
373 // bucket to indicate the type of the profile. | 365 // bucket to indicate the type of the profile. |
374 static int UnparseBucket(const Bucket& b, | 366 static int UnparseBucket(const Bucket& b, |
375 char* buf, int buflen, int bufsize, | 367 char* buf, int buflen, int bufsize, |
376 const char* extra, | 368 const char* extra, |
377 Stats* profile_stats); | 369 Stats* profile_stats); |
378 | 370 |
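The buffer contract above is the usual append-with-snprintf pattern: write at buf + buflen, refuse to pass buf + bufsize, and report how far the buffer is now filled. A generic sketch (the format string is illustrative, not the profiler's actual dump format):

    int printed = snprintf(buf + buflen, bufsize - buflen,
                           "%6d: %8lld @", b.allocs - b.frees,
                           static_cast<long long>(b.alloc_size - b.free_size));
    if (printed > 0 && printed < bufsize - buflen)
      buflen += printed;   // accepted; on overflow leave buf untouched
    return buflen;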
379 // Deallocate a given allocation map. | 371 // Get the bucket for the caller stack trace 'key' of depth 'depth' |
380 void DeallocateAllocationMap(AllocationMap* allocation); | 372 // creating the bucket if needed. |
381 | 373 Bucket* GetBucket(int depth, const void* const key[]); |
382 // Deallocate a given bucket table. | |
383 void DeallocateBucketTable(Bucket** table); | |
384 | |
385 // Get the bucket for the caller stack trace 'key' of depth 'depth' from a | |
386 // bucket hash map 'table', creating the bucket if needed. '*bucket_count' | |
387 // is incremented when 'bucket_count' is not NULL and a new | |
388 // bucket object is created. | |
389 Bucket* GetBucket(int depth, const void* const key[], Bucket** table, | |
390 int* bucket_count); | |
391 | 374 |
392 // Helper for IterateAllocs to do callback signature conversion | 375 // Helper for IterateAllocs to do callback signature conversion |
393 // from AllocationMap::Iterate to AllocIterator. | 376 // from AllocationMap::Iterate to AllocIterator. |
394 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, | 377 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, |
395 AllocIterator callback) { | 378 AllocIterator callback) { |
396 AllocInfo info; | 379 AllocInfo info; |
397 info.object_size = v->bytes; | 380 info.object_size = v->bytes; |
398 info.call_stack = v->bucket()->stack; | 381 info.call_stack = v->bucket()->stack; |
399 info.stack_depth = v->bucket()->depth; | 382 info.stack_depth = v->bucket()->depth; |
400 info.live = v->live(); | 383 info.live = v->live(); |
401 info.ignored = v->ignore(); | 384 info.ignored = v->ignore(); |
402 callback(ptr, info); | 385 callback(ptr, info); |
403 } | 386 } |
404 | 387 |
| 388 // Helper to dump a bucket. |
| 389 inline static void DumpBucketIterator(const Bucket* bucket, |
| 390 BufferArgs* args); |
| 391 |
405 // Helper for IterateAllocationAddresses. | 392 // Helper for IterateAllocationAddresses. |
406 inline static void AllocationAddressesIterator( | 393 inline static void AllocationAddressesIterator( |
407 const void* ptr, | 394 const void* ptr, |
408 AllocValue* v, | 395 AllocValue* v, |
409 const AllocationAddressIteratorArgs& args); | 396 const AllocationAddressIteratorArgs& args); |
410 | 397 |
411 // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations. | 398 // Helper for MarkCurrentAllocations and MarkUnmarkedAllocations. |
412 inline static void MarkIterator(const void* ptr, AllocValue* v, | 399 inline static void MarkIterator(const void* ptr, AllocValue* v, |
413 const MarkArgs& args); | 400 const MarkArgs& args); |
414 | 401 |
415 // Helper for DumpNonLiveProfile to do object-granularity | 402 // Helper for DumpNonLiveProfile to do object-granularity |
416 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 403 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
417 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 404 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
418 const DumpArgs& args); | 405 const DumpArgs& args); |
419 | 406 |
420 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It | 407 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It |
421 // gets passed to AllocationMap::Iterate. | 408 // gets passed to AllocationMap::Iterate. |
422 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, | 409 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, |
423 const DumpMarkedArgs& args); | 410 const DumpMarkedArgs& args); |
424 | 411 |
425 // Helper for zero-filling size variables in buckets. | |
426 inline static void ZeroBucketCountsIterator( | |
427 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | |
428 | |
429 #if defined(TYPE_PROFILING) | 412 #if defined(TYPE_PROFILING) |
430 inline static void TallyTypesItererator(const void* ptr, | 413 inline static void TallyTypesItererator(const void* ptr, |
431 AllocValue* value, | 414 AllocValue* value, |
432 AddressMap<TypeCount>* type_size_map); | 415 AddressMap<TypeCount>* type_size_map); |
433 | 416 |
434 inline static void DumpTypesIterator(const void* ptr, | 417 inline static void DumpTypesIterator(const void* ptr, |
435 TypeCount* size, | 418 TypeCount* size, |
436 const DumpArgs& args); | 419 const DumpArgs& args); |
437 #endif // defined(TYPE_PROFILING) | 420 #endif // defined(TYPE_PROFILING) |
438 | 421 |
439 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 422 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
440 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 423 // Creates a sorted list of Buckets whose length is num_buckets_. |
441 // num_available_mmap_buckets_. | |
442 // The caller is responsible for deallocating the returned list. | 424 // The caller is responsible for deallocating the returned list. |
443 Bucket** MakeSortedBucketList() const; | 425 Bucket** MakeSortedBucketList() const; |
444 | 426 |
445 // Helper for TakeSnapshot. Saves object to snapshot. | 427 // Helper for TakeSnapshot. Saves object to snapshot. |
446 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 428 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
447 | 429 |
448 // Arguments passed to AddIfNonLive | 430 // Arguments passed to AddIfNonLive |
449 struct AddNonLiveArgs { | 431 struct AddNonLiveArgs { |
450 Snapshot* dest; | 432 Snapshot* dest; |
451 Snapshot* base; | 433 Snapshot* base; |
(...skipping 12 matching lines...) |
464 AllocationMap* allocations); | 446 AllocationMap* allocations); |
465 | 447 |
466 // data ---------------------------- | 448 // data ---------------------------- |
467 | 449 |
468 // Memory (de)allocator that we use. | 450 // Memory (de)allocator that we use. |
469 Allocator alloc_; | 451 Allocator alloc_; |
470 DeAllocator dealloc_; | 452 DeAllocator dealloc_; |
471 | 453 |
472 // Overall profile stats; we use only the Stats part, | 454 // Overall profile stats; we use only the Stats part, |
473 // but make it a Bucket to pass to UnparseBucket. | 455 // but make it a Bucket to pass to UnparseBucket. |
474 // It doesn't contain mmap'ed regions. | |
475 Bucket total_; | 456 Bucket total_; |
476 | 457 |
| 458 bool profile_mmap_; |
| 459 |
477 // Bucket hash table for malloc. | 460 // Bucket hash table for malloc. |
478 // We hand-craft one instead of using one of the pre-written | 461 // We hand-craft one instead of using one of the pre-written |
479 // ones because we do not want to use malloc when operating on the table. | 462 // ones because we do not want to use malloc when operating on the table. |
480 // It is only a few lines of code, so no big deal. | 463 // It is only a few lines of code, so no big deal. |
481 Bucket** alloc_table_; | 464 Bucket** bucket_table_; |
482 int num_alloc_buckets_; | 465 int num_buckets_; |
483 | |
484 // Bucket hash table for mmap. | |
485 // This table is filled with the information from MemoryRegionMap by calling | |
486 // RefreshMMapData. | |
487 Bucket** mmap_table_; | |
488 int num_available_mmap_buckets_; | |
489 | 466 |
490 // Map of all currently allocated objects and mapped regions we know about. | 467 // Map of all currently allocated objects and mapped regions we know about. |
491 AllocationMap* alloc_address_map_; | 468 AllocationMap* address_map_; |
492 AllocationMap* mmap_address_map_; | |
493 | 469 |
494 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 470 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
495 }; | 471 }; |
496 | 472 |
497 class HeapProfileTable::Snapshot { | 473 class HeapProfileTable::Snapshot { |
498 public: | 474 public: |
499 const Stats& total() const { return total_; } | 475 const Stats& total() const { return total_; } |
500 | 476 |
501 // Report anything in this snapshot as a leak. | 477 // Report anything in this snapshot as a leak. |
502 // May use new/delete for temporary storage. | 478 // May use new/delete for temporary storage. |
(...skipping 37 matching lines...) |
540 // Helpers for sorting and generating leak reports | 516 // Helpers for sorting and generating leak reports |
541 struct Entry; | 517 struct Entry; |
542 struct ReportState; | 518 struct ReportState; |
543 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 519 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
544 static void ReportObject(const void* ptr, AllocValue* v, char*); | 520 static void ReportObject(const void* ptr, AllocValue* v, char*); |
545 | 521 |
546 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 522 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
547 }; | 523 }; |
548 | 524 |
549 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 525 #endif // BASE_HEAP_PROFILE_TABLE_H_ |