| OLD | NEW |
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 21 matching lines...) Expand all Loading... |
| 32 // Maxim Lifantsev (refactoring) | 32 // Maxim Lifantsev (refactoring) |
| 33 // | 33 // |
| 34 | 34 |
| 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ | 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ |
| 36 #define BASE_HEAP_PROFILE_TABLE_H_ | 36 #define BASE_HEAP_PROFILE_TABLE_H_ |
| 37 | 37 |
| 38 #include "addressmap-inl.h" | 38 #include "addressmap-inl.h" |
| 39 #include "base/basictypes.h" | 39 #include "base/basictypes.h" |
| 40 #include "base/logging.h" // for RawFD | 40 #include "base/logging.h" // for RawFD |
| 41 | 41 |
| 42 #if defined(PROFILING_ALLOCATED_TYPE) |
| 43 #include <gperftools/allocated_type_map.h> |
| 44 #endif // PROFILING_ALLOCATED_TYPE |
| 45 |
| 42 // Table to maintain a heap profile data inside, | 46 // Table to maintain a heap profile data inside, |
| 43 // i.e. the set of currently active heap memory allocations. | 47 // i.e. the set of currently active heap memory allocations. |
| 44 // thread-unsafe and non-reentrant code: | 48 // thread-unsafe and non-reentrant code: |
| 45 // each instance object must be used by one thread | 49 // each instance object must be used by one thread |
| 46 // at a time w/o self-recursion. | 50 // at a time w/o self-recursion. |
| 47 // | 51 // |
| 48 // TODO(maxim): add a unittest for this class. | 52 // TODO(maxim): add a unittest for this class. |
| 49 class HeapProfileTable { | 53 class HeapProfileTable { |
| 50 public: | 54 public: |
| 51 | 55 |
| (...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 231 // 4. Perform whatever action is supposed to free the memory again. New | 235 // 4. Perform whatever action is supposed to free the memory again. New |
| 232 // allocations are not marked. So all allocations that are marked as | 236 // allocations are not marked. So all allocations that are marked as |
| 233 // "live" were created during step 2. | 237 // "live" were created during step 2. |
| 234 // 5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations that | 238 // 5. Invoke DumpMarkedObjects(MARK_TWO) to get the list of allocations that |
| 235 // were created during step 2, but survived step 4. | 239 // were created during step 2, but survived step 4. |
| 236 // | 240 // |
| 237 // Note that this functionality cannot be used if the HeapProfileTable is | 241 // Note that this functionality cannot be used if the HeapProfileTable is |
| 238 // used for leak checking (using HeapLeakChecker). | 242 // used for leak checking (using HeapLeakChecker). |
| 239 void DumpMarkedObjects(AllocationMark mark, const char* file_name); | 243 void DumpMarkedObjects(AllocationMark mark, const char* file_name); |
| 240 | 244 |
| 245 #if defined(PROFILING_ALLOCATED_TYPE) |
| 246 void DumpAllocatedTypeStatistics(const char* file_name) const; |
| 247 #endif // PROFILING_ALLOCATED_TYPE |
| 248 |
| 241 private: | 249 private: |
| 242 friend class DeepHeapProfile; | 250 friend class DeepHeapProfile; |
| 243 | 251 |
| 244 // data types ---------------------------- | 252 // data types ---------------------------- |
| 245 | 253 |
| 246 // Hash table bucket to hold (de)allocation stats | 254 // Hash table bucket to hold (de)allocation stats |
| 247 // for a given allocation call stack trace. | 255 // for a given allocation call stack trace. |
| 248 struct Bucket : public Stats { | 256 struct Bucket : public Stats { |
| 249 uintptr_t hash; // Hash value of the stack trace | 257 uintptr_t hash; // Hash value of the stack trace |
| 250 int depth; // Depth of stack trace | 258 int depth; // Depth of stack trace |
| (...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 314 | 322 |
| 315 // Arguments that need to be passed to the MarkIterator callback below. | 323 // Arguments that need to be passed to the MarkIterator callback below. |
| 316 struct MarkArgs { | 324 struct MarkArgs { |
| 317 AllocationMark mark; // The mark to put on allocations. | 325 AllocationMark mark; // The mark to put on allocations. |
| 318 bool mark_all; // True if all allocations should be marked. Otherwise just | 326 bool mark_all; // True if all allocations should be marked. Otherwise just |
| 319 // mark unmarked allocations. | 327 // mark unmarked allocations. |
| 320 | 328 |
| 321 MarkArgs(AllocationMark m, bool a) : mark(m), mark_all(a) { } | 329 MarkArgs(AllocationMark m, bool a) : mark(m), mark_all(a) { } |
| 322 }; | 330 }; |
| 323 | 331 |
| 332 #if defined(PROFILING_ALLOCATED_TYPE) |
| 333 struct AllocatedTypeCount { |
| 334 size_t bytes; |
| 335 unsigned int objects; |
| 336 |
| 337 AllocatedTypeCount(size_t a, unsigned int b) |
| 338 : bytes(a), objects(b) { } |
| 339 }; |
| 340 #endif // PROFILING_ALLOCATED_TYPE |
| 341 |
| 324 // helpers ---------------------------- | 342 // helpers ---------------------------- |
| 325 | 343 |
| 326 // Unparse bucket b and print its portion of profile dump into buf. | 344 // Unparse bucket b and print its portion of profile dump into buf. |
| 327 // We return the amount of space in buf that we use. We start printing | 345 // We return the amount of space in buf that we use. We start printing |
| 328 // at buf + buflen, and promise not to go beyond buf + bufsize. | 346 // at buf + buflen, and promise not to go beyond buf + bufsize. |
| 329 // We do not provision for 0-terminating 'buf'. | 347 // We do not provision for 0-terminating 'buf'. |
| 330 // | 348 // |
| 331 // If profile_stats is non-NULL, we update *profile_stats by | 349 // If profile_stats is non-NULL, we update *profile_stats by |
| 332 // counting bucket b. | 350 // counting bucket b. |
| 333 // | 351 // |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 376 | 394 |
| 377 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It | 395 // Helper for DumpMarkedObjects to dump all allocations with a given mark. It |
| 378 // gets passed to AllocationMap::Iterate. | 396 // gets passed to AllocationMap::Iterate. |
| 379 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, | 397 inline static void DumpMarkedIterator(const void* ptr, AllocValue* v, |
| 380 const DumpMarkedArgs& args); | 398 const DumpMarkedArgs& args); |
| 381 | 399 |
| 382 // Helper for filling size variables in buckets by zero. | 400 // Helper for filling size variables in buckets by zero. |
| 383 inline static void ZeroBucketCountsIterator( | 401 inline static void ZeroBucketCountsIterator( |
| 384 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); | 402 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
| 385 | 403 |
| 404 #if defined(PROFILING_ALLOCATED_TYPE) |
| 405 inline static void CountUpAllocatedTypeIterator( |
| 406 const void* ptr, AllocValue* v, |
| 407 AddressMap<AllocatedTypeCount>* type_size_map); |
| 408 |
| 409 inline static void DumpAllocatedTypeIterator( |
| 410 const void* ptr, AllocatedTypeCount* size, const DumpArgs& args); |
| 411 #endif // PROFILING_ALLOCATED_TYPE |
| 412 |
| 386 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 413 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
| 387 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + | 414 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
| 388 // num_avaliable_mmap_buckets_. | 415 // num_avaliable_mmap_buckets_. |
| 389 // The caller is responsible for deallocating the returned list. | 416 // The caller is responsible for deallocating the returned list. |
| 390 Bucket** MakeSortedBucketList() const; | 417 Bucket** MakeSortedBucketList() const; |
| 391 | 418 |
| 392 // Helper for TakeSnapshot. Saves object to snapshot. | 419 // Helper for TakeSnapshot. Saves object to snapshot. |
| 393 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 420 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
| 394 | 421 |
| 395 // Arguments passed to AddIfNonLive | 422 // Arguments passed to AddIfNonLive |
| (...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 487 // Helpers for sorting and generating leak reports | 514 // Helpers for sorting and generating leak reports |
| 488 struct Entry; | 515 struct Entry; |
| 489 struct ReportState; | 516 struct ReportState; |
| 490 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 517 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
| 491 static void ReportObject(const void* ptr, AllocValue* v, char*); | 518 static void ReportObject(const void* ptr, AllocValue* v, char*); |
| 492 | 519 |
| 493 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 520 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
| 494 }; | 521 }; |
| 495 | 522 |
| 496 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 523 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
| OLD | NEW |