OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 21 matching lines...) Expand all Loading... |
32 // Maxim Lifantsev (refactoring) | 32 // Maxim Lifantsev (refactoring) |
33 // | 33 // |
34 | 34 |
35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ | 35 #ifndef BASE_HEAP_PROFILE_TABLE_H_ |
36 #define BASE_HEAP_PROFILE_TABLE_H_ | 36 #define BASE_HEAP_PROFILE_TABLE_H_ |
37 | 37 |
38 #include "addressmap-inl.h" | 38 #include "addressmap-inl.h" |
39 #include "base/basictypes.h" | 39 #include "base/basictypes.h" |
40 #include "base/logging.h" // for RawFD | 40 #include "base/logging.h" // for RawFD |
41 | 41 |
| 42 #ifndef DEEP_PROFILER_ON |
| 43 #define DEEP_PROFILER_ON |
| 44 #endif |
| 45 |
42 // Table to maintain heap profile data inside, | 46 // Table to maintain heap profile data inside, |
43 // i.e. the set of currently active heap memory allocations. | 47 // i.e. the set of currently active heap memory allocations. |
44 // thread-unsafe and non-reentrant code: | 48 // thread-unsafe and non-reentrant code: |
45 // each instance object must be used by one thread | 49 // each instance object must be used by one thread |
46 // at a time w/o self-recursion. | 50 // at a time w/o self-recursion. |
47 // | 51 // |
48 // TODO(maxim): add a unittest for this class. | 52 // TODO(maxim): add a unittest for this class. |
49 class HeapProfileTable { | 53 class HeapProfileTable { |
50 public: | 54 public: |
51 | 55 |
(...skipping 10 matching lines...) Expand all Loading... |
62 int32 allocs; // Number of allocation calls | 66 int32 allocs; // Number of allocation calls |
63 int32 frees; // Number of free calls | 67 int32 frees; // Number of free calls |
64 int64 alloc_size; // Total size of all allocated objects so far | 68 int64 alloc_size; // Total size of all allocated objects so far |
65 int64 free_size; // Total size of all freed objects so far | 69 int64 free_size; // Total size of all freed objects so far |
66 | 70 |
67 // semantic equality | 71 // semantic equality |
68 bool Equivalent(const Stats& x) const { | 72 bool Equivalent(const Stats& x) const { |
69 return allocs - frees == x.allocs - x.frees && | 73 return allocs - frees == x.allocs - x.frees && |
70 alloc_size - free_size == x.alloc_size - x.free_size; | 74 alloc_size - free_size == x.alloc_size - x.free_size; |
71 } | 75 } |
| 76 |
| 77 #ifdef DEEP_PROFILER_ON |
| 78 // These are used in the deep memory profiler |
| 79 int64 committed_size; |
| 80 #endif |
72 }; | 81 }; |
73 | 82 |
74 // Info we can return about an allocation. | 83 // Info we can return about an allocation. |
75 struct AllocInfo { | 84 struct AllocInfo { |
76 size_t object_size; // size of the allocation | 85 size_t object_size; // size of the allocation |
77 const void* const* call_stack; // call stack that made the allocation call | 86 const void* const* call_stack; // call stack that made the allocation call |
78 int stack_depth; // depth of call_stack | 87 int stack_depth; // depth of call_stack |
79 bool live; | 88 bool live; |
80 bool ignored; | 89 bool ignored; |
81 }; | 90 }; |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
174 // Release a previously taken snapshot. snapshot must not | 183 // Release a previously taken snapshot. snapshot must not |
175 // be used after this call. | 184 // be used after this call. |
176 void ReleaseSnapshot(Snapshot* snapshot); | 185 void ReleaseSnapshot(Snapshot* snapshot); |
177 | 186 |
178 // Return a snapshot of every non-live, non-ignored object in *this. | 187 // Return a snapshot of every non-live, non-ignored object in *this. |
179 // If "base" is non-NULL, skip any objects present in "base". | 188 // If "base" is non-NULL, skip any objects present in "base". |
180 // As a side-effect, clears the "live" bit on every live object in *this. | 189 // As a side-effect, clears the "live" bit on every live object in *this. |
181 // Caller must call ReleaseSnapshot() on result when no longer needed. | 190 // Caller must call ReleaseSnapshot() on result when no longer needed. |
182 Snapshot* NonLiveSnapshot(Snapshot* base); | 191 Snapshot* NonLiveSnapshot(Snapshot* base); |
183 | 192 |
| 193 void MMapRecordBegin() { mmap_record_ = true; } |
| 194 void MMapRecordEnd() { mmap_record_ = false; } |
| 195 |
| 196 #ifdef DEEP_PROFILER_ON |
| 197 friend class DeepMemoryProfiler; |
| 198 #endif |
184 private: | 199 private: |
185 | 200 |
186 // data types ---------------------------- | 201 // data types ---------------------------- |
187 | 202 |
188 // Hash table bucket to hold (de)allocation stats | 203 // Hash table bucket to hold (de)allocation stats |
189 // for a given allocation call stack trace. | 204 // for a given allocation call stack trace. |
190 struct Bucket : public Stats { | 205 struct Bucket : public Stats { |
191 uintptr_t hash; // Hash value of the stack trace | 206 uintptr_t hash; // Hash value of the stack trace |
192 int depth; // Depth of stack trace | 207 int depth; // Depth of stack trace |
193 const void** stack; // Stack trace | 208 const void** stack; // Stack trace |
194 Bucket* next; // Next entry in hash-table | 209 Bucket* next; // Next entry in hash-table |
| 210 #ifdef DEEP_PROFILER_ON |
| 211 int id; // Unique ID of the bucket |
| 212 bool is_logged; // True if the stack trace is logged to a file |
| 213 #endif |
195 }; | 214 }; |
196 | 215 |
197 // Info stored in the address map | 216 // Info stored in the address map |
198 struct AllocValue { | 217 struct AllocValue { |
199 // Access to the stack-trace bucket | 218 // Access to the stack-trace bucket |
200 Bucket* bucket() const { | 219 Bucket* bucket() const { |
201 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); | 220 return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask)); |
202 } | 221 } |
203 // This also does set_live(false). | 222 // This also does set_live(false). |
204 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } | 223 void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); } |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
273 info.live = v->live(); | 292 info.live = v->live(); |
274 info.ignored = v->ignore(); | 293 info.ignored = v->ignore(); |
275 callback(ptr, info); | 294 callback(ptr, info); |
276 } | 295 } |
277 | 296 |
278 // Helper for DumpNonLiveProfile to do object-granularity | 297 // Helper for DumpNonLiveProfile to do object-granularity |
279 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 298 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
280 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 299 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
281 const DumpArgs& args); | 300 const DumpArgs& args); |
282 | 301 |
| 302 // Creates a list of Buckets whose length is num_buckets_. |
| 303 // The caller is responsible for deallocating the returned list. |
| 304 Bucket** MakeBucketList() const; |
| 305 |
283 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 306 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
284 // Creates a sorted list of Buckets whose length is num_buckets_. | 307 // Creates a sorted list of Buckets whose length is num_buckets_. |
285 // The caller is responsible for deallocating the returned list. | 308 // The caller is responsible for deallocating the returned list. |
286 Bucket** MakeSortedBucketList() const; | 309 Bucket** MakeSortedBucketList() const; |
287 | 310 |
288 // Helper for TakeSnapshot. Saves object to snapshot. | 311 // Helper for TakeSnapshot. Saves object to snapshot. |
289 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 312 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
290 | 313 |
291 // Arguments passed to AddIfNonLive | 314 // Arguments passed to AddIfNonLive |
292 struct AddNonLiveArgs { | 315 struct AddNonLiveArgs { |
(...skipping 25 matching lines...) Expand all Loading... |
318 | 341 |
319 // Bucket hash table. | 342 // Bucket hash table. |
320 // We hand-craft one instead of using one of the pre-written | 343 // We hand-craft one instead of using one of the pre-written |
321 // ones because we do not want to use malloc when operating on the table. | 344 // ones because we do not want to use malloc when operating on the table. |
322 // It is only few lines of code, so no big deal. | 345 // It is only few lines of code, so no big deal. |
323 Bucket** table_; | 346 Bucket** table_; |
324 int num_buckets_; | 347 int num_buckets_; |
325 | 348 |
326 // Map of all currently allocated objects we know about. | 349 // Map of all currently allocated objects we know about. |
327 AllocationMap* allocation_; | 350 AllocationMap* allocation_; |
| 351 // Mmap allocations are saved in a separate map |
| 352 // because mmap and tcmalloc allocations could have the same address |
| 353 AllocationMap* allocation_mmap_; |
| 354 bool mmap_record_; |
328 | 355 |
329 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 356 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
330 }; | 357 }; |
331 | 358 |
332 class HeapProfileTable::Snapshot { | 359 class HeapProfileTable::Snapshot { |
333 public: | 360 public: |
334 const Stats& total() const { return total_; } | 361 const Stats& total() const { return total_; } |
335 | 362 |
336 // Report anything in this snapshot as a leak. | 363 // Report anything in this snapshot as a leak. |
337 // May use new/delete for temporary storage. | 364 // May use new/delete for temporary storage. |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
375 // Helpers for sorting and generating leak reports | 402 // Helpers for sorting and generating leak reports |
376 struct Entry; | 403 struct Entry; |
377 struct ReportState; | 404 struct ReportState; |
378 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 405 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
379 static void ReportObject(const void* ptr, AllocValue* v, char*); | 406 static void ReportObject(const void* ptr, AllocValue* v, char*); |
380 | 407 |
381 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 408 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
382 }; | 409 }; |
383 | 410 |
384 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 411 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |