OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 21 matching lines...) Expand all Loading... | |
32 // Maxim Lifantsev (refactoring) | 32 // Maxim Lifantsev (refactoring) |
33 // | 33 // |
34 | 34 |
35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ | 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ |
36 #define BASE_HEAP_PROFILE_TABLE_H_ | 36 #define BASE_HEAP_PROFILE_TABLE_H_ |
37 | 37 |
38 #include "addressmap-inl.h" | 38 #include "addressmap-inl.h" |
39 #include "base/basictypes.h" | 39 #include "base/basictypes.h" |
40 #include "base/logging.h" // for RawFD | 40 #include "base/logging.h" // for RawFD |
41 | 41 |
42 #if defined(TYPE_PROFILING) | |
43 #include <gperftools/type_profiler_map.h> | |
44 #endif // defined(TYPE_PROFILING) | |
45 | |
42 // Table to maintain a heap profile data inside, | 46 // Table to maintain a heap profile data inside, |
43 // i.e. the set of currently active heap memory allocations. | 47 // i.e. the set of currently active heap memory allocations. |
44 // thread-unsafe and non-reentrant code: | 48 // thread-unsafe and non-reentrant code: |
45 // each instance object must be used by one thread | 49 // each instance object must be used by one thread |
46 // at a time w/o self-recursion. | 50 // at a time w/o self-recursion. |
47 // | 51 // |
48 // TODO(maxim): add a unittest for this class. | 52 // TODO(maxim): add a unittest for this class. |
49 class HeapProfileTable { | 53 class HeapProfileTable { |
50 public: | 54 public: |
51 | 55 |
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
231 // 4. Perform whatever action is supposed to free the memory again. New | 235 // 4. Perform whatever action is supposed to free the memory again. New |
232 // allocations are not marked. So all allocations that are marked as | 236 // allocations are not marked. So all allocations that are marked as |
233 // "live" where created during step 2. | 237 // "live" where created during step 2. |
234 // 5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations that | 238 // 5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations that |
235 // were created during step 2, but survived step 4. | 239 // were created during step 2, but survived step 4. |
236 // | 240 // |
237 // Note that this functionality cannot be used if the HeapProfileTable is | 241 // Note that this functionality cannot be used if the HeapProfileTable is |
238 // used for leak checking (using HeapLeakChecker). | 242 // used for leak checking (using HeapLeakChecker). |
239 void DumpMarkedObjects(AllocationMark mark, const char* file_name); | 243 void DumpMarkedObjects(AllocationMark mark, const char* file_name); |
240 | 244 |
245 #if defined(TYPE_PROFILING) | |
246 void DumpTypeStatistics(const char* file_name) const; | |
247 #endif // defined(TYPE_PROFILING) | |
248 | |
241 private: | 249 private: |
242 friend class DeepHeapProfile; | 250 friend class DeepHeapProfile; |
243 | 251 |
244 // data types ---------------------------- | 252 // data types ---------------------------- |
245 | 253 |
246 // Hash table bucket to hold (de)allocation stats | 254 // Hash table bucket to hold (de)allocation stats |
247 // for a given allocation call stack trace. | 255 // for a given allocation call stack trace. |
248 struct Bucket : public Stats { | 256 struct Bucket : public Stats { |
249 uintptr_t hash; // Hash value of the stack trace | 257 uintptr_t hash; // Hash value of the stack trace |
250 int depth; // Depth of stack trace | 258 int depth; // Depth of stack trace |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
314 | 322 |
315 // Arguments that need to be passed MarkIterator callback below. | 323 // Arguments that need to be passed MarkIterator callback below. |
316 struct MarkArgs { | 324 struct MarkArgs { |
317 AllocationMark mark; // The mark to put on allocations. | 325 AllocationMark mark; // The mark to put on allocations. |
318 bool mark_all; // True if all allocations should be marked. Otherwise just | 326 bool mark_all; // True if all allocations should be marked. Otherwise just |
319 // mark unmarked allocations. | 327 // mark unmarked allocations. |
320 | 328 |
321 MarkArgs(AllocationMark m, bool a) : mark(m), mark_all(a) { } | 329 MarkArgs(AllocationMark m, bool a) : mark(m), mark_all(a) { } |
322 }; | 330 }; |
323 | 331 |
332 #if defined(TYPE_PROFILING) | |
333 struct TypeCount { | |
334 size_t bytes; | |
335 unsigned int objects; | |
336 | |
337 TypeCount(size_t bytes_arg, unsigned int objects_arg) | |
jar (doing other things)
2012/10/01 21:59:44
nit: constructors are always listed first in structs
Dai Mikurube (NOT FULLTIME)
2012/10/02 06:35:12
Yeah, I thought that, but I followed the others in
| |
338 : bytes(bytes_arg), | |
339 objects(objects_arg) { | |
340 } | |
341 }; | |
342 #endif // defined(TYPE_PROFILING) | |
343 | |
324 // helpers ---------------------------- | 344 // helpers ---------------------------- |
325 | 345 |
326 // Unparse bucket b and print its portion of profile dump into buf. | 346 // Unparse bucket b and print its portion of profile dump into buf. |
327 // We return the amount of space in buf that we use. We start printing | 347 // We return the amount of space in buf that we use. We start printing |
328 // at buf + buflen, and promise not to go beyond buf + bufsize. | 348 // at buf + buflen, and promise not to go beyond buf + bufsize. |
329 // We do not provision for 0-terminating 'buf'. | 349 // We do not provision for 0-terminating 'buf'. |
330 // | 350 // |
331 // If profile_stats is non-NULL, we update *profile_stats by | 351 // If profile_stats is non-NULL, we update *profile_stats by |
332 // counting bucket b. | 352 // counting bucket b. |
333 // | 353 // |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
376 | 396 |
377 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It | 397 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It |
378 // gets passed to AllocationMap::Iterate. | 398 // gets passed to AllocationMap::Iterate. |
379 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, | 399 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, |
380 const DumpMarkedArgs& args); | 400 const DumpMarkedArgs& args); |
381 | 401 |
382 // Helper for filling size variables in buckets by zero. | 402 // Helper for filling size variables in buckets by zero. |
383 inline static void ZeroBucketCountsIterator( | 403 inline static void ZeroBucketCountsIterator( |
384 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | 404 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
385 | 405 |
406 #if defined(TYPE_PROFILING) | |
407 inline static void CountUpTypeIterator(const void* ptr, | |
408 AllocValue* value, | |
409 AddressMap<TypeCount>* type_size_map); | |
410 | |
411 inline static void DumpTypeIterator(const void* ptr, | |
412 TypeCount* size, | |
413 const DumpArgs& args); | |
414 #endif // defined(TYPE_PROFILING) | |
415 | |
386 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 416 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
387 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 417 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
388 // num_avaliable_mmap_buckets_. | 418 // num_avaliable_mmap_buckets_. |
389 // The caller is responsible for deallocating the returned list. | 419 // The caller is responsible for deallocating the returned list. |
390 Bucket** MakeSortedBucketList() const; | 420 Bucket** MakeSortedBucketList() const; |
391 | 421 |
392 // Helper for TakeSnapshot. Saves object to snapshot. | 422 // Helper for TakeSnapshot. Saves object to snapshot. |
393 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 423 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
394 | 424 |
395 // Arguments passed to AddIfNonLive | 425 // Arguments passed to AddIfNonLive |
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
487 // Helpers for sorting and generating leak reports | 517 // Helpers for sorting and generating leak reports |
488 struct Entry; | 518 struct Entry; |
489 struct ReportState; | 519 struct ReportState; |
490 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 520 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
491 static void ReportObject(const void* ptr, AllocValue* v, char*); | 521 static void ReportObject(const void* ptr, AllocValue* v, char*); |
492 | 522 |
493 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 523 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
494 }; | 524 }; |
495 | 525 |
496 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 526 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |