Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/memory_region_map.h

Issue 12388070: Count m(un)map for each stacktrace in MemoryRegionMap instead of HeapProfileTable. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: nit fix Created 7 years, 9 months ago
1 /* Copyright (c) 2006, Google Inc. 1 /* Copyright (c) 2006, Google Inc.
2 * All rights reserved. 2 * All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 27 matching lines...)
38 38
39 #ifdef HAVE_PTHREAD 39 #ifdef HAVE_PTHREAD
40 #include <pthread.h> 40 #include <pthread.h>
41 #endif 41 #endif
42 #include <stddef.h> 42 #include <stddef.h>
43 #include <set> 43 #include <set>
44 #include "base/stl_allocator.h" 44 #include "base/stl_allocator.h"
45 #include "base/spinlock.h" 45 #include "base/spinlock.h"
46 #include "base/thread_annotations.h" 46 #include "base/thread_annotations.h"
47 #include "base/low_level_alloc.h" 47 #include "base/low_level_alloc.h"
48 #include "heap-profile-stats.h"
48 49
49 // TODO(maxim): add a unittest: 50 // TODO(maxim): add a unittest:
50 // execute a bunch of mmaps and compare memory map with what strace logs 51 // execute a bunch of mmaps and compare memory map with what strace logs
51 // execute a bunch of mmap/munmap and compare memory map with 52 // execute a bunch of mmap/munmap and compare memory map with
52 // own accounting of what those mmaps generated 53 // own accounting of what those mmaps generated
53 54
54 // Thread-safe class to collect and query the map of all memory regions 55 // Thread-safe class to collect and query the map of all memory regions
55 // in a process that have been created with mmap, munmap, mremap, sbrk. 56 // in a process that have been created with mmap, munmap, mremap, sbrk.
56 // For each memory region, we keep track of (and provide to users) 57 // For each memory region, we keep track of (and provide to users)
57 // the stack trace that allocated that memory region. 58 // the stack trace that allocated that memory region.
58 // The recorded stack trace depth is bounded by 59 // The recorded stack trace depth is bounded by
59 // a user-supplied max_stack_depth parameter of Init(). 60 // a user-supplied max_stack_depth parameter of Init().
60 // After initialization with Init() 61 // After initialization with Init()
61 // (which can happen even before global object constructor execution) 62 // (which can happen even before global object constructor execution)
62 // we collect the map by installing and monitoring MallocHook-s 63 // we collect the map by installing and monitoring MallocHook-s
63 // to mmap, munmap, mremap, sbrk. 64 // to mmap, munmap, mremap, sbrk.
64 // At any time one can query this map via provided interface. 65 // At any time one can query this map via provided interface.
65 // For more details on the design of MemoryRegionMap 66 // For more details on the design of MemoryRegionMap
66 // see the comment at the top of our .cc file. 67 // see the comment at the top of our .cc file.
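As a reading aid (not part of the patch): a minimal sketch of how a client might drive the interface described above. ProfilerStartup/QueryAddress/ProfilerShutdown are hypothetical names; Init(), Shutdown(), FindRegion() and Region are the declarations from this header, with Init() taking the use_buckets flag added by this change.

  // Sketch only: a hypothetical client of MemoryRegionMap.
  #include <stdint.h>
  #include "memory_region_map.h"

  void ProfilerStartup() {
    // Record up to 16 caller frames per region; enable bucket accounting.
    MemoryRegionMap::Init(16, /* use_buckets */ true);
  }

  bool QueryAddress(uintptr_t addr) {
    MemoryRegionMap::Region region;
    if (!MemoryRegionMap::FindRegion(addr, &region))
      return false;
    // region.call_stack[0 .. region.call_stack_depth) now holds the stack
    // that created the mmap/mremap/sbrk region covering addr.
    return true;
  }

  void ProfilerShutdown() {
    MemoryRegionMap::Shutdown();
  }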
67 class MemoryRegionMap { 68 class MemoryRegionMap {
68 private: 69 private:
69 // Max call stack recording depth supported by Init(). Set it to be 70 // Max call stack recording depth supported by Init(). Set it to be
70 // high enough for all our clients. Note: we do not define storage 71 // high enough for all our clients. Note: we do not define storage
71 // for this (doing that requires special handling in windows), so 72 // for this (doing that requires special handling in windows), so
72 // don't take the address of it! 73 // don't take the address of it!
73 static const int kMaxStackDepth = 32; 74 static const int kMaxStackDepth = 32;
74 75
76 static const int kHashTableSize = 179999;
77
75 public: 78 public:
76 // interface ================================================================ 79 // interface ================================================================
77 80
78 // Every client of MemoryRegionMap must call Init() before first use, 81 // Every client of MemoryRegionMap must call Init() before first use,
79 // and Shutdown() after last use. This allows us to reference count 82 // and Shutdown() after last use. This allows us to reference count
80 // this (singleton) class properly. MemoryRegionMap assumes it's the 83 // this (singleton) class properly. MemoryRegionMap assumes it's the
81 // only client of MallocHooks, so a client can only register other 84 // only client of MallocHooks, so a client can only register other
82 // MallocHooks after calling Init() and must unregister them before 85 // MallocHooks after calling Init() and must unregister them before
83 // calling Shutdown(). 86 // calling Shutdown().
84 87
85 // Initialize this module to record memory allocation stack traces. 88 // Initialize this module to record memory allocation stack traces.
86 // Stack traces that have more than "max_stack_depth" frames 89 // Stack traces that have more than "max_stack_depth" frames
87 // are automatically shrunk to "max_stack_depth" when they are recorded. 90 // are automatically shrunk to "max_stack_depth" when they are recorded.
88 // Init() can be called more than once w/o harm, largest max_stack_depth 91 // Init() can be called more than once w/o harm, largest max_stack_depth
89 // will be the effective one. 92 // will be the effective one.
90 // It will install mmap, munmap, mremap, sbrk hooks 93 // It will install mmap, munmap, mremap, sbrk hooks
91 // and initialize arena_ and our hook and locks, hence one can use 94 // and initialize arena_ and our hook and locks, hence one can use
92 // MemoryRegionMap::Lock()/Unlock() to manage the locks. 95 // MemoryRegionMap::Lock()/Unlock() to manage the locks.
93 // Uses Lock/Unlock inside. 96 // Uses Lock/Unlock inside.
94 static void Init(int max_stack_depth); 97 static void Init(int max_stack_depth, bool use_buckets);
95 98
96 // Try to shutdown this module undoing what Init() did. 99 // Try to shutdown this module undoing what Init() did.
97 // Returns true iff could do full shutdown (or it was not attempted). 100 // Returns true iff could do full shutdown (or it was not attempted).
98 // Full shutdown is attempted when the number of Shutdown() calls equals 101 // Full shutdown is attempted when the number of Shutdown() calls equals
99 // the number of Init() calls. 102 // the number of Init() calls.
100 static bool Shutdown(); 103 static bool Shutdown();
101 104
105 static bool IsWorking();
106
102 // Locks to protect our internal data structures. 107 // Locks to protect our internal data structures.
103 // These also protect use of arena_ if our Init() has been done. 108 // These also protect use of arena_ if our Init() has been done.
104 // The lock is recursive. 109 // The lock is recursive.
105 static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_); 110 static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_);
106 static void Unlock() UNLOCK_FUNCTION(lock_); 111 static void Unlock() UNLOCK_FUNCTION(lock_);
107 112
108 // Returns true when the lock is held by this thread (for use in RAW_CHECK-s). 113 // Returns true when the lock is held by this thread (for use in RAW_CHECK-s).
109 static bool LockIsHeld(); 114 static bool LockIsHeld();
110 115
111 // Locker object that acquires the MemoryRegionMap::Lock 116 // Locker object that acquires the MemoryRegionMap::Lock
112 // for the duration of its lifetime (a C++ scope). 117 // for the duration of its lifetime (a C++ scope).
113 class LockHolder { 118 class LockHolder {
114 public: 119 public:
115 LockHolder() { Lock(); } 120 LockHolder() { Lock(); }
116 ~LockHolder() { Unlock(); } 121 ~LockHolder() { Unlock(); }
117 private: 122 private:
118 DISALLOW_COPY_AND_ASSIGN(LockHolder); 123 DISALLOW_COPY_AND_ASSIGN(LockHolder);
119 }; 124 };
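An illustration of the LockHolder declared above (the surrounding function is hypothetical):

  void DumpRegionsLocked() {
    MemoryRegionMap::LockHolder lock_holder;   // Lock() on construction
    // ... read MemoryRegionMap state; LockIsHeld() is true here ...
  }                                            // Unlock() at scope exit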
120 125
126 // Profile stats.
127 typedef HeapProfileStats::Stats Stats;
128 typedef HeapProfileStats::Bucket Bucket;
129
121 // A memory region that we know about through malloc_hook-s. 130 // A memory region that we know about through malloc_hook-s.
122 // This is essentially an interface through which MemoryRegionMap 131 // This is essentially an interface through which MemoryRegionMap
123 // exports the collected data to its clients. Thread-compatible. 132 // exports the collected data to its clients. Thread-compatible.
124 struct Region { 133 struct Region {
125 uintptr_t start_addr; // region start address 134 uintptr_t start_addr; // region start address
126 uintptr_t end_addr; // region end address 135 uintptr_t end_addr; // region end address
127 int call_stack_depth; // number of caller stack frames that we saved 136 int call_stack_depth; // number of caller stack frames that we saved
128 const void* call_stack[kMaxStackDepth]; // caller address stack array 137 const void* call_stack[kMaxStackDepth]; // caller address stack array
129 // filled to call_stack_depth size 138 // filled to call_stack_depth size
130 bool is_stack; // does this region contain a thread's stack: 139 bool is_stack; // does this region contain a thread's stack:
(...skipping 76 matching lines...)
207 // Returns success. Uses Lock/Unlock inside. 216 // Returns success. Uses Lock/Unlock inside.
208 static bool FindRegion(uintptr_t addr, Region* result); 217 static bool FindRegion(uintptr_t addr, Region* result);
209 218
210 // Find the region that contains stack_top, mark that region as 219 // Find the region that contains stack_top, mark that region as
211 // a stack region, and write its data into *result if found, 220 // a stack region, and write its data into *result if found,
212 // in which case *result gets filled so that it stays fully functional 221 // in which case *result gets filled so that it stays fully functional
213 // even when the underlying region gets removed from MemoryRegionMap. 222 // even when the underlying region gets removed from MemoryRegionMap.
214 // Returns success. Uses Lock/Unlock inside. 223 // Returns success. Uses Lock/Unlock inside.
215 static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result); 224 static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result);
216 225
226 template<class Type>
227 static void IterateBuckets(void (*callback)(const Bucket*, Type), Type arg);
228
229 static Bucket* GetBucket(int depth, const void* const key[]);
230
217 private: // our internal types ============================================== 231 private: // our internal types ==============================================
218 232
219 // Region comparator for sorting with STL 233 // Region comparator for sorting with STL
220 struct RegionCmp { 234 struct RegionCmp {
221 bool operator()(const Region& x, const Region& y) const { 235 bool operator()(const Region& x, const Region& y) const {
222 return x.end_addr < y.end_addr; 236 return x.end_addr < y.end_addr;
223 } 237 }
224 }; 238 };
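RegionCmp orders regions by end_addr, which is what makes a point lookup a single lower_bound(): the candidate covering region is the first one whose end_addr is greater than the queried address. A self-contained sketch of that idea (R, RCmp and Find are hypothetical names; the exact key construction used in the .cc may differ):

  #include <set>
  #include <stddef.h>
  #include <stdint.h>

  struct R { uintptr_t start_addr, end_addr; };     // [start_addr, end_addr)
  struct RCmp {
    bool operator()(const R& x, const R& y) const {
      return x.end_addr < y.end_addr;
    }
  };

  const R* Find(const std::set<R, RCmp>& regions, uintptr_t addr) {
    R key = { 0, addr + 1 };                         // smallest end_addr > addr
    std::set<R, RCmp>::const_iterator it = regions.lower_bound(key);
    if (it != regions.end() && it->start_addr <= addr)
      return &*it;                                   // start_addr <= addr < end_addr
    return NULL;
  }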
225 239
226 // We allocate STL objects in our own arena. 240 // We allocate STL objects in our own arena.
(...skipping 61 matching lines...)
288 // Recursion count for the recursive lock. 302 // Recursion count for the recursive lock.
289 static int recursion_count_; 303 static int recursion_count_;
290 // The thread id of the thread that's inside the recursive lock. 304 // The thread id of the thread that's inside the recursive lock.
291 static pthread_t lock_owner_tid_; 305 static pthread_t lock_owner_tid_;
292 306
293 // Total size of all mapped pages so far 307 // Total size of all mapped pages so far
294 static int64 map_size_; 308 static int64 map_size_;
295 // Total size of all unmapped pages so far 309 // Total size of all unmapped pages so far
296 static int64 unmap_size_; 310 static int64 unmap_size_;
297 311
312 // Bucket hash table.
313 static Bucket** bucket_table_;
314 static int num_buckets_;
315
316 // Number of unprocessed bucket inserts.
317 static int saved_buckets_count_;
318
319 // Unprocessed inserts (must be big enough to hold all mmaps that can be
320 // caused by a GetBucket call).
321 // Bucket has no constructor, so that c-tor execution does not interfere
322 // with the any-time use of the static memory behind saved_buckets.
323 static Bucket saved_buckets_[20];
324
325 static const void* saved_buckets_keys_[20][kMaxStackDepth];
326
298 // helpers ================================================================== 327 // helpers ==================================================================
299 328
300 // Helper for FindRegion and FindAndMarkStackRegion: 329 // Helper for FindRegion and FindAndMarkStackRegion:
301 // returns the region covering 'addr' or NULL; assumes our lock_ is held. 330 // returns the region covering 'addr' or NULL; assumes our lock_ is held.
302 static const Region* DoFindRegionLocked(uintptr_t addr); 331 static const Region* DoFindRegionLocked(uintptr_t addr);
303 332
304 // Verifying wrapper around regions_->insert(region) 333 // Verifying wrapper around regions_->insert(region)
305 // To be called to do InsertRegionLocked's work only! 334 // To be called to do InsertRegionLocked's work only!
306 inline static void DoInsertRegionLocked(const Region& region); 335 inline static void DoInsertRegionLocked(const Region& region);
307 // Handle regions saved by InsertRegionLocked into a tmp static array 336 // Handle regions saved by InsertRegionLocked into a tmp static array
308 // by calling insert_func on them. 337 // by calling insert_func on them.
309 inline static void HandleSavedRegionsLocked( 338 inline static void HandleSavedRegionsLocked(
310 void (*insert_func)(const Region& region)); 339 void (*insert_func)(const Region& region));
340
341 inline static void HandleSavedBucketsLocked();
342
311 // Wrapper around DoInsertRegionLocked 343 // Wrapper around DoInsertRegionLocked
312 // that handles the case of recursive allocator calls. 344 // that handles the case of recursive allocator calls.
313 inline static void InsertRegionLocked(const Region& region); 345 inline static void InsertRegionLocked(const Region& region);
314 346
315 // Record addition of a memory region at address "start" of size "size" 347 // Record addition of a memory region at address "start" of size "size"
316 // (called from our mmap/mremap/sbrk hooks). 348 // (called from our mmap/mremap/sbrk hooks).
317 static void RecordRegionAddition(const void* start, size_t size); 349 static void RecordRegionAddition(const void* start, size_t size);
318 // Record deletion of a memory region at address "start" of size "size" 350 // Record deletion of a memory region at address "start" of size "size"
319 // (called from our munmap/mremap/sbrk hooks). 351 // (called from our munmap/mremap/sbrk hooks).
320 static void RecordRegionRemoval(const void* start, size_t size); 352 static void RecordRegionRemoval(const void* start, size_t size);
321 353
322 // Hooks for MallocHook 354 // Hooks for MallocHook
323 static void MmapHook(const void* result, 355 static void MmapHook(const void* result,
324 const void* start, size_t size, 356 const void* start, size_t size,
325 int prot, int flags, 357 int prot, int flags,
326 int fd, off_t offset); 358 int fd, off_t offset);
327 static void MunmapHook(const void* ptr, size_t size); 359 static void MunmapHook(const void* ptr, size_t size);
328 static void MremapHook(const void* result, const void* old_addr, 360 static void MremapHook(const void* result, const void* old_addr,
329 size_t old_size, size_t new_size, int flags, 361 size_t old_size, size_t new_size, int flags,
330 const void* new_addr); 362 const void* new_addr);
331 static void SbrkHook(const void* result, ptrdiff_t increment); 363 static void SbrkHook(const void* result, ptrdiff_t increment);
332 364
333 // Log all memory regions; Useful for debugging only. 365 // Log all memory regions; Useful for debugging only.
334 // Assumes Lock() is held 366 // Assumes Lock() is held
335 static void LogAllLocked(); 367 static void LogAllLocked();
336 368
337 DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap); 369 DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap);
338 }; 370 };
339 371
372 template <class Type>
373 void MemoryRegionMap::IterateBuckets(
374 void (*callback)(const Bucket*, Type), Type arg) {
375 for (int b = 0; b < kHashTableSize; b++) {
376 for (Bucket* x = bucket_table_[b]; x != 0; x = x->next) {
Alexander Potapenko 2013/03/07 06:08:25 x != NULL this is.
Dai Mikurube (NOT FULLTIME) 2013/03/07 12:32:16 Done.
377 callback(x, arg);
378 }
379 }
380 }
381
340 #endif // BASE_MEMORY_REGION_MAP_H_ 382 #endif // BASE_MEMORY_REGION_MAP_H_
