OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
126 // If "ptr" points to a recorded allocation and it's not marked as live | 126 // If "ptr" points to a recorded allocation and it's not marked as live |
127 // mark it as live and return true. Else return false. | 127 // mark it as live and return true. Else return false. |
128 // All allocations start as non-live. | 128 // All allocations start as non-live. |
129 bool MarkAsLive(const void* ptr); | 129 bool MarkAsLive(const void* ptr); |
130 | 130 |
131 // If "ptr" points to a recorded allocation, mark it as "ignored". | 131 // If "ptr" points to a recorded allocation, mark it as "ignored". |
132 // Ignored objects are treated like other objects, except that they | 132 // Ignored objects are treated like other objects, except that they |
133 // are skipped in heap checking reports. | 133 // are skipped in heap checking reports. |
134 void MarkAsIgnored(const void* ptr); | 134 void MarkAsIgnored(const void* ptr); |
135 | 135 |
136 // Return current total (de)allocation statistics. | 136 // Return current total (de)allocation statistics. It doesn't contain |
| 137 // mmap'ed regions. |
137 const Stats& total() const { return total_; } | 138 const Stats& total() const { return total_; } |
138 | 139 |
139 // Allocation data iteration callback: gets passed object pointer and | 140 // Allocation data iteration callback: gets passed object pointer and |
140 // fully-filled AllocInfo. | 141 // fully-filled AllocInfo. |
141 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); | 142 typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info); |
142 | 143 |
143 // Iterate over the allocation profile data calling "callback" | 144 // Iterate over the allocation profile data calling "callback" |
144 // for every allocation. | 145 // for every allocation. |
145 void IterateAllocs(AllocIterator callback) const { | 146 void IterateAllocs(AllocIterator callback) const { |
146 allocation_->Iterate(MapArgsAllocIterator, callback); | 147 alloc_address_map_->Iterate(MapArgsAllocIterator, callback); |
147 } | 148 } |
148 | 149 |
149 // Allocation context profile data iteration callback | 150 // Allocation context profile data iteration callback |
150 typedef void (*AllocContextIterator)(const AllocContextInfo& info); | 151 typedef void (*AllocContextIterator)(const AllocContextInfo& info); |
151 | 152 |
152 // Iterate over the allocation context profile data calling "callback" | 153 // Iterate over the allocation context profile data calling "callback" |
153 // for every allocation context. Allocation contexts are ordered by the | 154 // for every allocation context. Allocation contexts are ordered by the |
154 // size of allocated space. | 155 // size of allocated space. |
155 void IterateOrderedAllocContexts(AllocContextIterator callback) const; | 156 void IterateOrderedAllocContexts(AllocContextIterator callback) const; |
156 | 157 |
(...skipping 17 matching lines...) Expand all Loading... |
174 // Release a previously taken snapshot. snapshot must not | 175 // Release a previously taken snapshot. snapshot must not |
175 // be used after this call. | 176 // be used after this call. |
176 void ReleaseSnapshot(Snapshot* snapshot); | 177 void ReleaseSnapshot(Snapshot* snapshot); |
177 | 178 |
178 // Return a snapshot of every non-live, non-ignored object in *this. | 179 // Return a snapshot of every non-live, non-ignored object in *this. |
179 // If "base" is non-NULL, skip any objects present in "base". | 180 // If "base" is non-NULL, skip any objects present in "base". |
180 // As a side-effect, clears the "live" bit on every live object in *this. | 181 // As a side-effect, clears the "live" bit on every live object in *this. |
181 // Caller must call ReleaseSnapshot() on result when no longer needed. | 182 // Caller must call ReleaseSnapshot() on result when no longer needed. |
182 Snapshot* NonLiveSnapshot(Snapshot* base); | 183 Snapshot* NonLiveSnapshot(Snapshot* base); |
183 | 184 |
| 185 // Refresh the internal mmap information from MemoryRegionMap. Results of |
| 186 // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed |
| 187 // memory regions as of the time RefreshMMapData is called. |
| 188 void RefreshMMapData(); |
| 189 |
| 190 // Clear the internal mmap information. Results of FillOrderedProfile and |
| 191 // IterateOrderedAllocContexts won't contain mmap'ed memory regions after |
| 192 // calling ClearMMapData. |
| 193 void ClearMMapData(); |
| 194 |
184 private: | 195 private: |
185 | 196 |
186 // data types ---------------------------- | 197 // data types ---------------------------- |
187 | 198 |
188 // Hash table bucket to hold (de)allocation stats | 199 // Hash table bucket to hold (de)allocation stats |
189 // for a given allocation call stack trace. | 200 // for a given allocation call stack trace. |
190 struct Bucket : public Stats { | 201 struct Bucket : public Stats { |
191 uintptr_t hash; // Hash value of the stack trace | 202 uintptr_t hash; // Hash value of the stack trace |
192 int depth; // Depth of stack trace | 203 int depth; // Depth of stack trace |
193 const void** stack; // Stack trace | 204 const void** stack; // Stack trace |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
251 // counting bucket b. | 262 // counting bucket b. |
252 // | 263 // |
253 // "extra" is appended to the unparsed bucket. Typically it is empty, | 264 // "extra" is appended to the unparsed bucket. Typically it is empty, |
254 // but may be set to something like " heapprofile" for the total | 265 // but may be set to something like " heapprofile" for the total |
255 // bucket to indicate the type of the profile. | 266 // bucket to indicate the type of the profile. |
256 static int UnparseBucket(const Bucket& b, | 267 static int UnparseBucket(const Bucket& b, |
257 char* buf, int buflen, int bufsize, | 268 char* buf, int buflen, int bufsize, |
258 const char* extra, | 269 const char* extra, |
259 Stats* profile_stats); | 270 Stats* profile_stats); |
260 | 271 |
261 // Get the bucket for the caller stack trace 'key' of depth 'depth' | 272 // Deallocate a given allocation map. |
262 // creating the bucket if needed. | 273 void DeallocateAllocationMap(AllocationMap* allocation); |
263 Bucket* GetBucket(int depth, const void* const key[]); | 274 |
| 275 // Deallocate a given bucket table. |
| 276 void DeallocateBucketTable(Bucket** table); |
| 277 |
| 278 // Get the bucket for the caller stack trace 'key' of depth 'depth' from a |
| 279 // bucket hash map 'table' creating the bucket if needed. '*bucket_count' |
| 280 // is incremented both when 'bucket_count' is not NULL and when a new |
| 281 // bucket object is created. |
| 282 Bucket* GetBucket(int depth, const void* const key[], Bucket** table, |
| 283 int* bucket_count); |
264 | 284 |
265 // Helper for IterateAllocs to do callback signature conversion | 285 // Helper for IterateAllocs to do callback signature conversion |
266 // from AllocationMap::Iterate to AllocIterator. | 286 // from AllocationMap::Iterate to AllocIterator. |
267 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, | 287 static void MapArgsAllocIterator(const void* ptr, AllocValue* v, |
268 AllocIterator callback) { | 288 AllocIterator callback) { |
269 AllocInfo info; | 289 AllocInfo info; |
270 info.object_size = v->bytes; | 290 info.object_size = v->bytes; |
271 info.call_stack = v->bucket()->stack; | 291 info.call_stack = v->bucket()->stack; |
272 info.stack_depth = v->bucket()->depth; | 292 info.stack_depth = v->bucket()->depth; |
273 info.live = v->live(); | 293 info.live = v->live(); |
274 info.ignored = v->ignore(); | 294 info.ignored = v->ignore(); |
275 callback(ptr, info); | 295 callback(ptr, info); |
276 } | 296 } |
277 | 297 |
278 // Helper for DumpNonLiveProfile to do object-granularity | 298 // Helper for DumpNonLiveProfile to do object-granularity |
279 // heap profile dumping. It gets passed to AllocationMap::Iterate. | 299 // heap profile dumping. It gets passed to AllocationMap::Iterate. |
280 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, | 300 inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v, |
281 const DumpArgs& args); | 301 const DumpArgs& args); |
282 | 302 |
| 303 // Helper for zero-filling the size fields of buckets. |
| 304 inline static void ZeroBucketCountsIterator( |
| 305 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile); |
| 306 |
283 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. | 307 // Helper for IterateOrderedAllocContexts and FillOrderedProfile. |
284 // Creates a sorted list of Buckets whose length is num_buckets_. | 308 // Creates a sorted list of Buckets whose length is num_alloc_buckets_ + |
285 // The caller is responsible for dellocating the returned list. | 309 // num_available_mmap_buckets_. |
| 310 // The caller is responsible for deallocating the returned list. |
286 Bucket** MakeSortedBucketList() const; | 311 Bucket** MakeSortedBucketList() const; |
287 | 312 |
288 // Helper for TakeSnapshot. Saves object to snapshot. | 313 // Helper for TakeSnapshot. Saves object to snapshot. |
289 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); | 314 static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s); |
290 | 315 |
291 // Arguments passed to AddIfNonLive | 316 // Arguments passed to AddIfNonLive |
292 struct AddNonLiveArgs { | 317 struct AddNonLiveArgs { |
293 Snapshot* dest; | 318 Snapshot* dest; |
294 Snapshot* base; | 319 Snapshot* base; |
295 }; | 320 }; |
(...skipping 11 matching lines...) Expand all Loading... |
307 AllocationMap* allocations); | 332 AllocationMap* allocations); |
308 | 333 |
309 // data ---------------------------- | 334 // data ---------------------------- |
310 | 335 |
311 // Memory (de)allocator that we use. | 336 // Memory (de)allocator that we use. |
312 Allocator alloc_; | 337 Allocator alloc_; |
313 DeAllocator dealloc_; | 338 DeAllocator dealloc_; |
314 | 339 |
315 // Overall profile stats; we use only the Stats part, | 340 // Overall profile stats; we use only the Stats part, |
316 // but make it a Bucket to pass to UnparseBucket. | 341 // but make it a Bucket to pass to UnparseBucket. |
| 342 // It doesn't contain mmap'ed regions. |
317 Bucket total_; | 343 Bucket total_; |
318 | 344 |
319 // Bucket hash table. | 345 // Bucket hash table for malloc. |
320 // We hand-craft one instead of using one of the pre-written | 346 // We hand-craft one instead of using one of the pre-written |
321 // ones because we do not want to use malloc when operating on the table. | 347 // ones because we do not want to use malloc when operating on the table. |
322 // It is only few lines of code, so no big deal. | 348 // It is only few lines of code, so no big deal. |
323 Bucket** table_; | 349 Bucket** alloc_table_; |
324 int num_buckets_; | 350 int num_alloc_buckets_; |
325 | 351 |
326 // Map of all currently allocated objects we know about. | 352 // Bucket hash table for mmap. |
327 AllocationMap* allocation_; | 353 // This table is filled with the information from MemoryRegionMap by calling |
| 354 // RefreshMMapData. |
| 355 Bucket** mmap_table_; |
| 356 int num_available_mmap_buckets_; |
| 357 |
| 358 // Map of all currently allocated objects and mapped regions we know about. |
| 359 AllocationMap* alloc_address_map_; |
| 360 AllocationMap* mmap_address_map_; |
328 | 361 |
329 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); | 362 DISALLOW_COPY_AND_ASSIGN(HeapProfileTable); |
330 }; | 363 }; |
331 | 364 |
332 class HeapProfileTable::Snapshot { | 365 class HeapProfileTable::Snapshot { |
333 public: | 366 public: |
334 const Stats& total() const { return total_; } | 367 const Stats& total() const { return total_; } |
335 | 368 |
336 // Report anything in this snapshot as a leak. | 369 // Report anything in this snapshot as a leak. |
337 // May use new/delete for temporary storage. | 370 // May use new/delete for temporary storage. |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
375 // Helpers for sorting and generating leak reports | 408 // Helpers for sorting and generating leak reports |
376 struct Entry; | 409 struct Entry; |
377 struct ReportState; | 410 struct ReportState; |
378 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); | 411 static void ReportCallback(const void* ptr, AllocValue* v, ReportState*); |
379 static void ReportObject(const void* ptr, AllocValue* v, char*); | 412 static void ReportObject(const void* ptr, AllocValue* v, char*); |
380 | 413 |
381 DISALLOW_COPY_AND_ASSIGN(Snapshot); | 414 DISALLOW_COPY_AND_ASSIGN(Snapshot); |
382 }; | 415 }; |
383 | 416 |
384 #endif // BASE_HEAP_PROFILE_TABLE_H_ | 417 #endif // BASE_HEAP_PROFILE_TABLE_H_ |
OLD | NEW |