| OLD | NEW |
| 1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
| 2 * All rights reserved. | 2 * All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 27 matching lines...) Expand all Loading... |
| 38 | 38 |
| 39 #ifdef HAVE_PTHREAD | 39 #ifdef HAVE_PTHREAD |
| 40 #include <pthread.h> | 40 #include <pthread.h> |
| 41 #endif | 41 #endif |
| 42 #include <stddef.h> | 42 #include <stddef.h> |
| 43 #include <set> | 43 #include <set> |
| 44 #include "base/stl_allocator.h" | 44 #include "base/stl_allocator.h" |
| 45 #include "base/spinlock.h" | 45 #include "base/spinlock.h" |
| 46 #include "base/thread_annotations.h" | 46 #include "base/thread_annotations.h" |
| 47 #include "base/low_level_alloc.h" | 47 #include "base/low_level_alloc.h" |
| 48 #include "heap-profile-stats.h" |
| 48 | 49 |
| 49 // TODO(maxim): add a unittest: | 50 // TODO(maxim): add a unittest: |
| 50 // execute a bunch of mmaps and compare memory map with what strace logs | 51 // execute a bunch of mmaps and compare memory map with what strace logs |
| 51 // execute a bunch of mmap/munmap and compare memory map with | 52 // execute a bunch of mmap/munmap and compare memory map with |
| 52 // own accounting of what those mmaps generated | 53 // own accounting of what those mmaps generated |
| 53 | 54 |
| 54 // Thread-safe class to collect and query the map of all memory regions | 55 // Thread-safe class to collect and query the map of all memory regions |
| 55 // in a process that have been created with mmap, munmap, mremap, sbrk. | 56 // in a process that have been created with mmap, munmap, mremap, sbrk. |
| 56 // For each memory region, we keep track of (and provide to users) | 57 // For each memory region, we keep track of (and provide to users) |
| 57 // the stack trace that allocated that memory region. | 58 // the stack trace that allocated that memory region. |
| 58 // The recorded stack trace depth is bounded by | 59 // The recorded stack trace depth is bounded by |
| 59 // a user-supplied max_stack_depth parameter of Init(). | 60 // a user-supplied max_stack_depth parameter of Init(). |
| 60 // After initialization with Init() | 61 // After initialization with Init() |
| 61 // (which can happen even before global object constructor execution) | 62 // (which can happen even before global object constructor execution) |
| 62 // we collect the map by installing and monitoring MallocHook-s | 63 // we collect the map by installing and monitoring MallocHook-s |
| 63 // to mmap, munmap, mremap, sbrk. | 64 // to mmap, munmap, mremap, sbrk. |
| 64 // At any time one can query this map via provided interface. | 65 // At any time one can query this map via provided interface. |
| 65 // For more details on the design of MemoryRegionMap | 66 // For more details on the design of MemoryRegionMap |
| 66 // see the comment at the top of our .cc file. | 67 // see the comment at the top of our .cc file. |
| 67 class MemoryRegionMap { | 68 class MemoryRegionMap { |
| 68 private: | 69 private: |
| 69 // Max call stack recording depth supported by Init(). Set it to be | 70 // Max call stack recording depth supported by Init(). Set it to be |
| 70 // high enough for all our clients. Note: we do not define storage | 71 // high enough for all our clients. Note: we do not define storage |
| 71 // for this (doing that requires special handling in windows), so | 72 // for this (doing that requires special handling in windows), so |
| 72 // don't take the address of it! | 73 // don't take the address of it! |
| 73 static const int kMaxStackDepth = 32; | 74 static const int kMaxStackDepth = 32; |
| 74 | 75 |
| 76 // Size of the hash table of buckets. A structure of the bucket table is |
| 77 // described in heap-profile-stats.h. |
| 78 static const int kHashTableSize = 179999; |
| 79 |
| 75 public: | 80 public: |
| 76 // interface ================================================================ | 81 // interface ================================================================ |
| 77 | 82 |
| 78 // Every client of MemoryRegionMap must call Init() before first use, | 83 // Every client of MemoryRegionMap must call Init() before first use, |
| 79 // and Shutdown() after last use. This allows us to reference count | 84 // and Shutdown() after last use. This allows us to reference count |
| 80 // this (singleton) class properly. MemoryRegionMap assumes it's the | 85 // this (singleton) class properly. MemoryRegionMap assumes it's the |
| 81 // only client of MallocHooks, so a client can only register other | 86 // only client of MallocHooks, so a client can only register other |
| 82 // MallocHooks after calling Init() and must unregister them before | 87 // MallocHooks after calling Init() and must unregister them before |
| 83 // calling Shutdown(). | 88 // calling Shutdown(). |
| 84 | 89 |
| 85 // Initialize this module to record memory allocation stack traces. | 90 // Initialize this module to record memory allocation stack traces. |
| 86 // Stack traces that have more than "max_stack_depth" frames | 91 // Stack traces that have more than "max_stack_depth" frames |
| 87 // are automatically shrunk to "max_stack_depth" when they are recorded. | 92 // are automatically shrunk to "max_stack_depth" when they are recorded. |
| 88 // Init() can be called more than once w/o harm, largest max_stack_depth | 93 // Init() can be called more than once w/o harm, largest max_stack_depth |
| 89 // will be the effective one. | 94 // will be the effective one. |
| 95 // When "use_buckets" is true, then counts of mmap and munmap sizes will be |
| 96 // recorded with each stack trace. If Init() is called more than once, then |
| 97 // counting will be effective after any call that passed "use_buckets" as true. |
| 90 // It will install mmap, munmap, mremap, sbrk hooks | 98 // It will install mmap, munmap, mremap, sbrk hooks |
| 91 // and initialize arena_ and our hook and locks, hence one can use | 99 // and initialize arena_ and our hook and locks, hence one can use |
| 92 // MemoryRegionMap::Lock()/Unlock() to manage the locks. | 100 // MemoryRegionMap::Lock()/Unlock() to manage the locks. |
| 93 // Uses Lock/Unlock inside. | 101 // Uses Lock/Unlock inside. |
| 94 static void Init(int max_stack_depth); | 102 static void Init(int max_stack_depth, bool use_buckets); |
| 95 | 103 |
| 96 // Try to shutdown this module undoing what Init() did. | 104 // Try to shutdown this module undoing what Init() did. |
| 97 // Returns true iff could do full shutdown (or it was not attempted). | 105 // Returns true iff could do full shutdown (or it was not attempted). |
| 98 // Full shutdown is attempted when the number of Shutdown() calls equals | 106 // Full shutdown is attempted when the number of Shutdown() calls equals |
| 99 // the number of Init() calls. | 107 // the number of Init() calls. |
| 100 static bool Shutdown(); | 108 static bool Shutdown(); |
| 101 | 109 |
| 110 // Return true if MemoryRegionMap is initialized and recording, i.e. when |
| 111 // the number of Init() calls is more than the number of Shutdown() calls. |
| 112 static bool IsRecordingLocked(); |
| 113 |
| 102 // Locks to protect our internal data structures. | 114 // Locks to protect our internal data structures. |
| 103 // These also protect use of arena_ if our Init() has been done. | 115 // These also protect use of arena_ if our Init() has been done. |
| 104 // The lock is recursive. | 116 // The lock is recursive. |
| 105 static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_); | 117 static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_); |
| 106 static void Unlock() UNLOCK_FUNCTION(lock_); | 118 static void Unlock() UNLOCK_FUNCTION(lock_); |
| 107 | 119 |
| 108 // Returns true when the lock is held by this thread (for use in RAW_CHECK-s). | 120 // Returns true when the lock is held by this thread (for use in RAW_CHECK-s). |
| 109 static bool LockIsHeld(); | 121 static bool LockIsHeld(); |
| 110 | 122 |
| 111 // Locker object that acquires the MemoryRegionMap::Lock | 123 // Locker object that acquires the MemoryRegionMap::Lock |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 207 // Returns success. Uses Lock/Unlock inside. | 219 // Returns success. Uses Lock/Unlock inside. |
| 208 static bool FindRegion(uintptr_t addr, Region* result); | 220 static bool FindRegion(uintptr_t addr, Region* result); |
| 209 | 221 |
| 210 // Find the region that contains stack_top, mark that region as | 222 // Find the region that contains stack_top, mark that region as |
| 211 // a stack region, and write its data into *result if found, | 223 // a stack region, and write its data into *result if found, |
| 212 // in which case *result gets filled so that it stays fully functional | 224 // in which case *result gets filled so that it stays fully functional |
| 213 // even when the underlying region gets removed from MemoryRegionMap. | 225 // even when the underlying region gets removed from MemoryRegionMap. |
| 214 // Returns success. Uses Lock/Unlock inside. | 226 // Returns success. Uses Lock/Unlock inside. |
| 215 static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result); | 227 static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result); |
| 216 | 228 |
| 229 // Iterate over the buckets which store mmap and munmap counts per stack |
| 230 // trace. It calls "callback" for each bucket, and passes "arg" to it. |
| 231 template<class Type> |
| 232 static void IterateBuckets(void (*callback)(const HeapProfileBucket*, Type), |
| 233 Type arg); |
| 234 |
| 235 // Get the bucket whose caller stack trace is "key". The stack trace is |
| 236 // used to a depth of "depth" at most. The requested bucket is created if |
| 237 // needed. |
| 238 // The bucket table is described in heap-profile-stats.h. |
| 239 static HeapProfileBucket* GetBucket(int depth, const void* const key[]); |
| 240 |
| 217 private: // our internal types ============================================== | 241 private: // our internal types ============================================== |
| 218 | 242 |
| 219 // Region comparator for sorting with STL | 243 // Region comparator for sorting with STL |
| 220 struct RegionCmp { | 244 struct RegionCmp { |
| 221 bool operator()(const Region& x, const Region& y) const { | 245 bool operator()(const Region& x, const Region& y) const { |
| 222 return x.end_addr < y.end_addr; | 246 return x.end_addr < y.end_addr; |
| 223 } | 247 } |
| 224 }; | 248 }; |
| 225 | 249 |
| 226 // We allocate STL objects in our own arena. | 250 // We allocate STL objects in our own arena. |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 273 static LowLevelAlloc::Arena* arena_; | 297 static LowLevelAlloc::Arena* arena_; |
| 274 | 298 |
| 275 // Set of the mmap/sbrk/mremap-ed memory regions | 299 // Set of the mmap/sbrk/mremap-ed memory regions |
| 276 // To be accessed *only* when Lock() is held. | 300 // To be accessed *only* when Lock() is held. |
| 277 // Hence we protect the non-recursive lock used inside of arena_ | 301 // Hence we protect the non-recursive lock used inside of arena_ |
| 278 // with our recursive Lock(). This lets a user prevent deadlocks | 302 // with our recursive Lock(). This lets a user prevent deadlocks |
| 279 // when threads are stopped by ListAllProcessThreads at random spots | 303 // when threads are stopped by ListAllProcessThreads at random spots |
| 280 // simply by acquiring our recursive Lock() before that. | 304 // simply by acquiring our recursive Lock() before that. |
| 281 static RegionSet* regions_; | 305 static RegionSet* regions_; |
| 282 | 306 |
| 283 // Lock to protect regions_ variable and the data behind. | 307 // Lock to protect regions_ and buckets_ variables and the data behind. |
| 284 static SpinLock lock_; | 308 static SpinLock lock_; |
| 285 // Lock to protect the recursive lock itself. | 309 // Lock to protect the recursive lock itself. |
| 286 static SpinLock owner_lock_; | 310 static SpinLock owner_lock_; |
| 287 | 311 |
| 288 // Recursion count for the recursive lock. | 312 // Recursion count for the recursive lock. |
| 289 static int recursion_count_; | 313 static int recursion_count_; |
| 290 // The thread id of the thread that's inside the recursive lock. | 314 // The thread id of the thread that's inside the recursive lock. |
| 291 static pthread_t lock_owner_tid_; | 315 static pthread_t lock_owner_tid_; |
| 292 | 316 |
| 293 // Total size of all mapped pages so far | 317 // Total size of all mapped pages so far |
| 294 static int64 map_size_; | 318 static int64 map_size_; |
| 295 // Total size of all unmapped pages so far | 319 // Total size of all unmapped pages so far |
| 296 static int64 unmap_size_; | 320 static int64 unmap_size_; |
| 297 | 321 |
| 322 // Bucket hash table which is described in heap-profile-stats.h. |
| 323 static HeapProfileBucket** bucket_table_ GUARDED_BY(lock_); |
| 324 static int num_buckets_ GUARDED_BY(lock_); |
| 325 |
| 326 // The following members are local to MemoryRegionMap::GetBucket() |
| 327 // and MemoryRegionMap::HandleSavedBucketsLocked() |
| 328 // and are file-level to ensure that they are initialized at load time. |
| 329 // |
| 330 // These are used as temporary storage to break the infinite cycle of mmap |
| 331 // calling our hook which (sometimes) causes mmap. It must be a static |
| 332 // fixed-size array. The size 20 is just an expected value for safety. |
| 333 // The details are described in memory_region_map.cc. |
| 334 |
| 335 // Number of unprocessed bucket inserts. |
| 336 static int saved_buckets_count_ GUARDED_BY(lock_); |
| 337 |
| 338 // Unprocessed inserts (must be big enough to hold all mmaps that can be |
| 339 // caused by a GetBucket call). |
| 340 // Bucket has no constructor, so that c-tor execution does not interfere |
| 341 // with the any-time use of the static memory behind saved_buckets_. |
| 342 static HeapProfileBucket saved_buckets_[20] GUARDED_BY(lock_); |
| 343 |
| 344 static const void* saved_buckets_keys_[20][kMaxStackDepth] GUARDED_BY(lock_); |
| 345 |
| 298 // helpers ================================================================== | 346 // helpers ================================================================== |
| 299 | 347 |
| 300 // Helper for FindRegion and FindAndMarkStackRegion: | 348 // Helper for FindRegion and FindAndMarkStackRegion: |
| 301 // returns the region covering 'addr' or NULL; assumes our lock_ is held. | 349 // returns the region covering 'addr' or NULL; assumes our lock_ is held. |
| 302 static const Region* DoFindRegionLocked(uintptr_t addr); | 350 static const Region* DoFindRegionLocked(uintptr_t addr); |
| 303 | 351 |
| 304 // Verifying wrapper around regions_->insert(region) | 352 // Verifying wrapper around regions_->insert(region) |
| 305 // To be called to do InsertRegionLocked's work only! | 353 // To be called to do InsertRegionLocked's work only! |
| 306 inline static void DoInsertRegionLocked(const Region& region); | 354 inline static void DoInsertRegionLocked(const Region& region); |
| 307 // Handle regions saved by InsertRegionLocked into a tmp static array | 355 // Handle regions saved by InsertRegionLocked into a tmp static array |
| 308 // by calling insert_func on them. | 356 // by calling insert_func on them. |
| 309 inline static void HandleSavedRegionsLocked( | 357 inline static void HandleSavedRegionsLocked( |
| 310 void (*insert_func)(const Region& region)); | 358 void (*insert_func)(const Region& region)); |
| 359 |
| 360 // Restore buckets saved in a tmp static array by GetBucket to the bucket |
| 361 // table where all buckets eventually should be. |
| 362 static void RestoreSavedBucketsLocked(); |
| 363 |
| 311 // Wrapper around DoInsertRegionLocked | 364 // Wrapper around DoInsertRegionLocked |
| 312 // that handles the case of recursive allocator calls. | 365 // that handles the case of recursive allocator calls. |
| 313 inline static void InsertRegionLocked(const Region& region); | 366 inline static void InsertRegionLocked(const Region& region); |
| 314 | 367 |
| 315 // Record addition of a memory region at address "start" of size "size" | 368 // Record addition of a memory region at address "start" of size "size" |
| 316 // (called from our mmap/mremap/sbrk hooks). | 369 // (called from our mmap/mremap/sbrk hooks). |
| 317 static void RecordRegionAddition(const void* start, size_t size); | 370 static void RecordRegionAddition(const void* start, size_t size); |
| 318 // Record deletion of a memory region at address "start" of size "size" | 371 // Record deletion of a memory region at address "start" of size "size" |
| 319 // (called from our munmap/mremap/sbrk hooks). | 372 // (called from our munmap/mremap/sbrk hooks). |
| 320 static void RecordRegionRemoval(const void* start, size_t size); | 373 static void RecordRegionRemoval(const void* start, size_t size); |
| 321 | 374 |
| 375 // Record deletion of a memory region of size "size" in a bucket whose |
| 376 // caller stack trace is "key". The stack trace is used to a depth of |
| 377 // "depth" at most. |
| 378 static void RecordRegionRemovalInBucket(int depth, |
| 379 const void* const key[], |
| 380 size_t size); |
| 381 |
| 322 // Hooks for MallocHook | 382 // Hooks for MallocHook |
| 323 static void MmapHook(const void* result, | 383 static void MmapHook(const void* result, |
| 324 const void* start, size_t size, | 384 const void* start, size_t size, |
| 325 int prot, int flags, | 385 int prot, int flags, |
| 326 int fd, off_t offset); | 386 int fd, off_t offset); |
| 327 static void MunmapHook(const void* ptr, size_t size); | 387 static void MunmapHook(const void* ptr, size_t size); |
| 328 static void MremapHook(const void* result, const void* old_addr, | 388 static void MremapHook(const void* result, const void* old_addr, |
| 329 size_t old_size, size_t new_size, int flags, | 389 size_t old_size, size_t new_size, int flags, |
| 330 const void* new_addr); | 390 const void* new_addr); |
| 331 static void SbrkHook(const void* result, ptrdiff_t increment); | 391 static void SbrkHook(const void* result, ptrdiff_t increment); |
| 332 | 392 |
| 333 // Log all memory regions; Useful for debugging only. | 393 // Log all memory regions; Useful for debugging only. |
| 334 // Assumes Lock() is held | 394 // Assumes Lock() is held |
| 335 static void LogAllLocked(); | 395 static void LogAllLocked(); |
| 336 | 396 |
| 337 DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap); | 397 DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap); |
| 338 }; | 398 }; |
| 339 | 399 |
| 400 template <class Type> |
| 401 void MemoryRegionMap::IterateBuckets( |
| 402 void (*callback)(const HeapProfileBucket*, Type), Type callback_arg) { |
| 403 for (int index = 0; index < kHashTableSize; index++) { |
| 404 for (HeapProfileBucket* bucket = bucket_table_[index]; |
| 405 bucket != NULL; |
| 406 bucket = bucket->next) { |
| 407 callback(bucket, callback_arg); |
| 408 } |
| 409 } |
| 410 } |
| 411 |
| 340 #endif // BASE_MEMORY_REGION_MAP_H_ | 412 #endif // BASE_MEMORY_REGION_MAP_H_ |
| OLD | NEW |