OLD | NEW |
---|---|
1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
2 * All rights reserved. | 2 * All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 66 matching lines...)
77 // to get memory, thus we are able to call LowLevelAlloc from | 77 // to get memory, thus we are able to call LowLevelAlloc from |
78 // our mmap/sbrk hooks without causing a deadlock in it. | 78 // our mmap/sbrk hooks without causing a deadlock in it. |
79 // For the same reason of deadlock prevention the locking in MemoryRegionMap | 79 // For the same reason of deadlock prevention the locking in MemoryRegionMap |
80 // itself is write-recursive which is an exception to Google's mutex usage. | 80 // itself is write-recursive which is an exception to Google's mutex usage. |
81 // | 81 // |
82 // We still need to break the infinite cycle of mmap calling our hook, | 82 // We still need to break the infinite cycle of mmap calling our hook, |
83 // which asks LowLevelAlloc for memory to record this mmap, | 83 // which asks LowLevelAlloc for memory to record this mmap, |
84 // which (sometimes) causes mmap, which calls our hook, and so on. | 84 // which (sometimes) causes mmap, which calls our hook, and so on. |
85 // We do this as follows: on a recursive call of MemoryRegionMap's | 85 // We do this as follows: on a recursive call of MemoryRegionMap's |
86 // mmap/sbrk/mremap hook we record the data about the allocation in a | 86 // mmap/sbrk/mremap hook we record the data about the allocation in a |
87 // static fixed-sized stack (saved_regions), when the recursion unwinds | 87 // static fixed-sized stack (saved_regions and saved_buckets), when the |
88 // but before returning from the outer hook call we unwind this stack and | 88 // recursion unwinds but before returning from the outer hook call we unwind |
89 // move the data from saved_regions to its permanent place in the RegionSet, | 89 // this stack and move the data from saved_regions and saved_buckets to its |
90 // permanent place in the RegionSet and "bucket_table" respectively, | |
90 // which can cause more allocations and mmap-s and recursion and unwinding, | 91 // which can cause more allocations and mmap-s and recursion and unwinding, |
91 // but the whole process ends eventually due to the fact that for the small | 92 // but the whole process ends eventually due to the fact that for the small |
92 // allocations we are doing LowLevelAlloc reuses one mmap call and parcels out | 93 // allocations we are doing LowLevelAlloc reuses one mmap call and parcels out |
93 // the memory it created to satisfy several of our allocation requests. | 94 // the memory it created to satisfy several of our allocation requests. |
94 // | 95 // |
95 | 96 |
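(Reviewer aside, not part of the patch: the buffering scheme described in the comment above is easier to see in a stripped-down form. The sketch below is illustrative only; SavedEntry, RecordAllocation, InsertIntoPermanentTable and in_hook are made-up stand-ins for saved_regions/saved_buckets, the mmap/sbrk hooks, insertion into RegionSet/bucket_table_, and recursive_insert.)

```cpp
#include <cstddef>
#include <cstdint>
#include <set>

// Sketch of the "buffer on recursion, drain on unwind" pattern (assumed names only).
struct SavedEntry { uintptr_t start; size_t size; };

static std::set<uintptr_t>* permanent_table = nullptr;  // built lazily; allocates
static const int kMaxSaved = 20;
static SavedEntry saved[kMaxSaved];  // fixed-size static buffer: never allocates
static int saved_count = 0;
static bool in_hook = false;         // plays the role of recursive_insert

static void InsertIntoPermanentTable(uintptr_t start, size_t size) {
  if (permanent_table == nullptr) permanent_table = new std::set<uintptr_t>;
  permanent_table->insert(start);  // in the real code this may mmap and
  (void)size;                      // re-enter the hook (RecordAllocation below)
}

static void RecordAllocation(uintptr_t start, size_t size) {
  if (in_hook) {  // recursive call: only record into static storage
    if (saved_count < kMaxSaved) saved[saved_count++] = SavedEntry{start, size};
    return;
  }
  in_hook = true;
  InsertIntoPermanentTable(start, size);
  // Drain whatever the recursion buffered.  Draining can allocate, recurse
  // and refill the buffer, but it terminates because the underlying
  // allocator serves many small requests from one mmap.
  while (saved_count > 0) {
    SavedEntry e = saved[--saved_count];
    InsertIntoPermanentTable(e.start, e.size);
  }
  in_hook = false;
}
```

The key property is that the recursive path touches only fixed-size static storage and never allocates.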
96 // ========================================================================= // | 97 // ========================================================================= // |
97 | 98 |
98 #include <config.h> | 99 #include <config.h> |
99 | 100 |
(...skipping 40 matching lines...)
140 int MemoryRegionMap::max_stack_depth_ = 0; | 141 int MemoryRegionMap::max_stack_depth_ = 0; |
141 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; | 142 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; |
142 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; | 143 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; |
143 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); | 144 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); |
144 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) | 145 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) |
145 SpinLock::LINKER_INITIALIZED); | 146 SpinLock::LINKER_INITIALIZED); |
146 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) | 147 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) |
147 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) | 148 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) |
148 int64 MemoryRegionMap::map_size_ = 0; | 149 int64 MemoryRegionMap::map_size_ = 0; |
149 int64 MemoryRegionMap::unmap_size_ = 0; | 150 int64 MemoryRegionMap::unmap_size_ = 0; |
151 HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL; | |
152 int MemoryRegionMap::num_buckets_ = 0; | |
153 int MemoryRegionMap::saved_buckets_count_ = 0; | |
willchan no longer on Chromium (2013/03/13 21:58:48):
Are these variables guarded by any locks? Should w…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Yes, they should be accessed between Lock() and Un…
154 HeapProfileBucket MemoryRegionMap::saved_buckets_[20]; | |
155 const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth]; | |
150 | 156 |
151 // ========================================================================= // | 157 // ========================================================================= // |
152 | 158 |
153 // Simple hook into execution of global object constructors, | 159 // Simple hook into execution of global object constructors, |
154 // so that we do not call pthread_self() when it does not yet work. | 160 // so that we do not call pthread_self() when it does not yet work. |
155 static bool libpthread_initialized = false; | 161 static bool libpthread_initialized = false; |
156 static bool initializer = (libpthread_initialized = true, true); | 162 static bool initializer = (libpthread_initialized = true, true); |
157 | 163 |
158 static inline bool current_thread_is(pthread_t should_be) { | 164 static inline bool current_thread_is(pthread_t should_be) { |
159 // Before main() runs, there's only one thread, so we're always that thread | 165 // Before main() runs, there's only one thread, so we're always that thread |
(...skipping 15 matching lines...)
175 // We use RegionSetRep with noop c-tor so that global construction | 181 // We use RegionSetRep with noop c-tor so that global construction |
176 // does not interfere. | 182 // does not interfere. |
177 static MemoryRegionMap::RegionSetRep regions_rep; | 183 static MemoryRegionMap::RegionSetRep regions_rep; |
178 | 184 |
179 // ========================================================================= // | 185 // ========================================================================= // |
180 | 186 |
181 // Has InsertRegionLocked been called recursively | 187 // Has InsertRegionLocked been called recursively |
182 // (or rather should we *not* use regions_ to record a hooked mmap). | 188 // (or rather should we *not* use regions_ to record a hooked mmap). |
183 static bool recursive_insert = false; | 189 static bool recursive_insert = false; |
184 | 190 |
185 void MemoryRegionMap::Init(int max_stack_depth) { | 191 void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) { |
186 RAW_VLOG(10, "MemoryRegionMap Init"); | 192 RAW_VLOG(10, "MemoryRegionMap Init"); |
187 RAW_CHECK(max_stack_depth >= 0, ""); | 193 RAW_CHECK(max_stack_depth >= 0, ""); |
188 // Make sure we don't overflow the memory in region stacks: | 194 // Make sure we don't overflow the memory in region stacks: |
189 RAW_CHECK(max_stack_depth <= kMaxStackDepth, | 195 RAW_CHECK(max_stack_depth <= kMaxStackDepth, |
190 "need to increase kMaxStackDepth?"); | 196 "need to increase kMaxStackDepth?"); |
191 Lock(); | 197 Lock(); |
192 client_count_ += 1; | 198 client_count_ += 1; |
193 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); | 199 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); |
194 if (client_count_ > 1) { | 200 if (client_count_ > 1) { |
195 // not first client: already did initialization-proper | 201 // not first client: already did initialization-proper |
(...skipping 11 matching lines...)
207 // recursive_insert allows us to buffer info about these mmap calls. | 213 // recursive_insert allows us to buffer info about these mmap calls. |
208 // Note that Init() can be (and is) sometimes called | 214 // Note that Init() can be (and is) sometimes called |
209 // already from within an mmap/sbrk hook. | 215 // already from within an mmap/sbrk hook. |
210 recursive_insert = true; | 216 recursive_insert = true; |
211 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); | 217 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); |
212 recursive_insert = false; | 218 recursive_insert = false; |
213 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones | 219 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones |
214 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before | 220 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before |
215 // recursive_insert = false; as InsertRegionLocked will also construct | 221 // recursive_insert = false; as InsertRegionLocked will also construct |
216 // regions_ on demand for us. | 222 // regions_ on demand for us. |
223 if (use_buckets) { | |
224 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); | |
225 recursive_insert = true; | |
226 bucket_table_ = reinterpret_cast<HeapProfileBucket**>( | |
willchan no longer on Chromium (2013/03/13 21:58:48):
static_cast<>?
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
static_cast works here. Replaced.
Many of existi…
227 MyAllocator::Allocate(table_bytes)); | |
228 recursive_insert = false; | |
229 memset(bucket_table_, 0, table_bytes); | |
230 num_buckets_ = 0; | |
231 } | |
217 Unlock(); | 232 Unlock(); |
218 RAW_VLOG(10, "MemoryRegionMap Init done"); | 233 RAW_VLOG(10, "MemoryRegionMap Init done"); |
219 } | 234 } |
220 | 235 |
221 bool MemoryRegionMap::Shutdown() { | 236 bool MemoryRegionMap::Shutdown() { |
222 RAW_VLOG(10, "MemoryRegionMap Shutdown"); | 237 RAW_VLOG(10, "MemoryRegionMap Shutdown"); |
223 Lock(); | 238 Lock(); |
224 RAW_CHECK(client_count_ > 0, ""); | 239 RAW_CHECK(client_count_ > 0, ""); |
225 client_count_ -= 1; | 240 client_count_ -= 1; |
226 if (client_count_ != 0) { // not last client; need not really shutdown | 241 if (client_count_ != 0) { // not last client; need not really shutdown |
227 Unlock(); | 242 Unlock(); |
228 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); | 243 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); |
229 return true; | 244 return true; |
230 } | 245 } |
246 if (bucket_table_ != NULL) { | |
247 for (int i = 0; i < kHashTableSize; i++) { | |
248 for (HeapProfileBucket* x = bucket_table_[i]; x != 0; /**/) { | |
249 HeapProfileBucket* b = x; | |
willchan no longer on Chromium (2013/03/13 21:58:48):
Commenting generally about this since you brought…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Sounds reasonable renaming. Done it (also in some…
250 x = x->next; | |
251 MyAllocator::Free(b->stack, 0); | |
252 MyAllocator::Free(b, 0); | |
253 } | |
254 } | |
255 MyAllocator::Free(bucket_table_, 0); | |
256 num_buckets_ = 0; | |
257 bucket_table_ = NULL; | |
258 } | |
231 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 259 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
232 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 260 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
233 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 261 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
234 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 262 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
235 if (regions_) regions_->~RegionSet(); | 263 if (regions_) regions_->~RegionSet(); |
236 regions_ = NULL; | 264 regions_ = NULL; |
237 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); | 265 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); |
238 if (deleted_arena) { | 266 if (deleted_arena) { |
239 arena_ = 0; | 267 arena_ = 0; |
240 } else { | 268 } else { |
241 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); | 269 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); |
242 } | 270 } |
243 Unlock(); | 271 Unlock(); |
244 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); | 272 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); |
245 return deleted_arena; | 273 return deleted_arena; |
246 } | 274 } |
247 | 275 |
276 bool MemoryRegionMap::IsRecording() { | |
277 RAW_VLOG(10, "MemoryRegionMap IsRecording"); | |
278 Lock(); | |
279 bool is_working = (client_count_ > 0); | |
280 Unlock(); | |
281 RAW_VLOG(10, "MemoryRegionMap IsRecording done"); | |
282 return is_working; | |
willchan no longer on Chromium (2013/03/13 21:58:48):
Just doublechecking...this looks like it could be…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Ahh, you're right. It could be a problem. Change…
283 } | |
284 | |
248 // Invariants (once libpthread_initialized is true): | 285 // Invariants (once libpthread_initialized is true): |
249 // * While lock_ is not held, recursion_count_ is 0 (and | 286 // * While lock_ is not held, recursion_count_ is 0 (and |
250 // lock_owner_tid_ is the previous owner, but we don't rely on | 287 // lock_owner_tid_ is the previous owner, but we don't rely on |
251 // that). | 288 // that). |
252 // * recursion_count_ and lock_owner_tid_ are only written while | 289 // * recursion_count_ and lock_owner_tid_ are only written while |
253 // both lock_ and owner_lock_ are held. They may be read under | 290 // both lock_ and owner_lock_ are held. They may be read under |
254 // just owner_lock_. | 291 // just owner_lock_. |
255 // * At entry and exit of Lock() and Unlock(), the current thread | 292 // * At entry and exit of Lock() and Unlock(), the current thread |
256 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) | 293 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) |
257 // && recursion_count_ > 0. | 294 // && recursion_count_ > 0. |
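(Reviewer aside: for readers not familiar with the write-recursive locking these invariants describe, here is a hedged sketch of a Lock()/Unlock() pair that satisfies them, using plain pthread mutexes instead of SpinLock. It is not the code in the elided lines below, which additionally handles the pre-pthread startup case via current_thread_is().)

```cpp
#include <cassert>
#include <pthread.h>

// Illustrative stand-ins for lock_, owner_lock_, recursion_count_, lock_owner_tid_.
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t g_owner_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_recursion_count = 0;  // > 0 only while some thread owns g_lock
static pthread_t g_owner_tid;      // meaningful only while g_recursion_count > 0

static void LockSketch() {
  pthread_mutex_lock(&g_owner_lock);
  const bool already_mine =
      g_recursion_count > 0 && pthread_equal(g_owner_tid, pthread_self());
  pthread_mutex_unlock(&g_owner_lock);
  if (!already_mine) pthread_mutex_lock(&g_lock);  // first acquisition by this thread
  pthread_mutex_lock(&g_owner_lock);               // both locks held: safe to write
  g_owner_tid = pthread_self();
  ++g_recursion_count;
  pthread_mutex_unlock(&g_owner_lock);
}

static void UnlockSketch() {
  pthread_mutex_lock(&g_owner_lock);
  assert(g_recursion_count > 0 && pthread_equal(g_owner_tid, pthread_self()));
  const bool release_outer = (--g_recursion_count == 0);
  pthread_mutex_unlock(&g_owner_lock);
  if (release_outer) pthread_mutex_unlock(&g_lock);  // outermost Unlock releases g_lock
}
```

The point of the second lock is that the ownership fields can be read without holding the outer lock, which is what lets a thread detect that it already owns it and recurse instead of deadlocking on itself.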
(...skipping 71 matching lines...)
329 reinterpret_cast<void*>(region->start_addr), | 366 reinterpret_cast<void*>(region->start_addr), |
330 reinterpret_cast<void*>(region->end_addr)); | 367 reinterpret_cast<void*>(region->end_addr)); |
331 const_cast<Region*>(region)->set_is_stack(); // now we know | 368 const_cast<Region*>(region)->set_is_stack(); // now we know |
332 // cast is safe (set_is_stack does not change the set ordering key) | 369 // cast is safe (set_is_stack does not change the set ordering key) |
333 *result = *region; // create *result as an independent copy | 370 *result = *region; // create *result as an independent copy |
334 } | 371 } |
335 Unlock(); | 372 Unlock(); |
336 return region != NULL; | 373 return region != NULL; |
337 } | 374 } |
338 | 375 |
376 HeapProfileBucket* MemoryRegionMap::GetBucket(int depth, | |
377 const void* const key[]) { | |
378 // Make hash-value | |
379 uintptr_t h = 0; | |
380 for (int i = 0; i < depth; i++) { | |
381 h += reinterpret_cast<uintptr_t>(key[i]); | |
382 h += h << 10; | |
383 h ^= h >> 6; | |
384 } | |
385 h += h << 3; | |
386 h ^= h >> 11; | |
387 | |
388 // Lookup stack trace in table | |
389 unsigned int buck = ((unsigned int) h) % kHashTableSize; | |
willchan no longer on Chromium (2013/03/13 21:58:48):
Is |buck| short for |bucket|? As the style guide i…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
It actually comes from heap-profile-table's GetBuc…
390 for (HeapProfileBucket* b = bucket_table_[buck]; b != 0; b = b->next) { | |
391 if ((b->hash == h) && | |
392 (b->depth == depth) && | |
393 std::equal(key, key + depth, b->stack)) { | |
394 return b; | |
395 } | |
396 } | |
397 | |
398 // Create new bucket | |
399 const size_t key_size = sizeof(key[0]) * depth; | |
400 HeapProfileBucket* b; | |
401 if (recursive_insert) { // recursion: save in saved_buckets_ | |
402 const void** kcopy = saved_buckets_keys_[saved_buckets_count_]; | |
403 std::copy(key, key + depth, kcopy); | |
404 b = &saved_buckets_[saved_buckets_count_]; | |
405 memset(b, 0, sizeof(*b)); | |
406 ++saved_buckets_count_; | |
407 b->stack = kcopy; | |
408 b->next = NULL; | |
409 } else { | |
410 recursive_insert = true; | |
411 const void** kcopy = reinterpret_cast<const void**>( | |
willchan no longer on Chromium (2013/03/13 21:58:48):
Don't we want static_cast<> here? What does kcopy…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Replaced it to static_cast and renamed kcopy to ke…
412 MyAllocator::Allocate(key_size)); | |
413 recursive_insert = false; | |
414 std::copy(key, key + depth, kcopy); | |
415 recursive_insert = true; | |
416 b = reinterpret_cast<HeapProfileBucket*>( | |
417 MyAllocator::Allocate(sizeof(HeapProfileBucket))); | |
418 recursive_insert = false; | |
419 memset(b, 0, sizeof(*b)); | |
420 b->stack = kcopy; | |
421 b->next = bucket_table_[buck]; | |
422 } | |
423 b->hash = h; | |
424 b->depth = depth; | |
425 bucket_table_[buck] = b; | |
426 ++num_buckets_; | |
427 return b; | |
428 } | |
429 | |
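(Reviewer aside: the unnamed hash in GetBucket() above is a Jenkins-style one-at-a-time mix over the stack trace's return addresses, reduced modulo the table size to pick a chain. The standalone snippet below just restates that computation; kIllustrativeTableSize is a made-up constant, the real value is kHashTableSize from the header.)

```cpp
#include <cstdint>
#include <cstdio>

// Standalone copy of the mixing steps used by GetBucket() (illustration only).
static uintptr_t HashStackTrace(const void* const key[], int depth) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {  // per-frame mix
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;                       // final avalanche
  h ^= h >> 11;
  return h;
}

int main() {
  // Stand-ins for return addresses captured by the profiler.
  int frame_a = 0, frame_b = 0;
  const void* stack[] = { &frame_a, &frame_b };
  const unsigned int kIllustrativeTableSize = 179999;  // assumption, not the real constant
  const unsigned int slot =
      static_cast<unsigned int>(HashStackTrace(stack, 2)) % kIllustrativeTableSize;
  std::printf("stack trace hashes to chain %u\n", slot);
  return 0;
}
```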
339 MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() { | 430 MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() { |
340 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 431 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
341 RAW_CHECK(regions_ != NULL, ""); | 432 RAW_CHECK(regions_ != NULL, ""); |
342 return regions_->begin(); | 433 return regions_->begin(); |
343 } | 434 } |
344 | 435 |
345 MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() { | 436 MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() { |
346 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 437 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
347 RAW_CHECK(regions_ != NULL, ""); | 438 RAW_CHECK(regions_ != NULL, ""); |
348 return regions_->end(); | 439 return regions_->end(); |
(...skipping 48 matching lines...)
397 while (saved_regions_count > 0) { | 488 while (saved_regions_count > 0) { |
398 // Making a local-var copy of the region argument to insert_func | 489 // Making a local-var copy of the region argument to insert_func |
399 // including its stack (w/o doing any memory allocations) is important: | 490 // including its stack (w/o doing any memory allocations) is important: |
400 // in many cases the memory in saved_regions | 491 // in many cases the memory in saved_regions |
401 // will get written-to during the (*insert_func)(r) call below. | 492 // will get written-to during the (*insert_func)(r) call below. |
402 Region r = saved_regions[--saved_regions_count]; | 493 Region r = saved_regions[--saved_regions_count]; |
403 (*insert_func)(r); | 494 (*insert_func)(r); |
404 } | 495 } |
405 } | 496 } |
406 | 497 |
498 void MemoryRegionMap::RestoreSavedBucketsLocked() { | |
499 while (saved_buckets_count_ > 0) { | |
500 HeapProfileBucket b = saved_buckets_[--saved_buckets_count_]; | |
501 unsigned int buck = ((unsigned int) b.hash) % kHashTableSize; | |
willchan no longer on Chromium (2013/03/13 21:58:48):
static_cast<>. we discourage c-style casts in the…
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Replaced to static_cast (also in GetBucket).
Also…
502 bool is_found = false; | |
503 for (HeapProfileBucket* found = bucket_table_[buck]; | |
504 found != 0; | |
505 found = found->next) { | |
506 if ((found->hash == b.hash) && (found->depth == b.depth) && | |
507 std::equal(b.stack, b.stack + b.depth, found->stack)) { | |
508 found->allocs += b.allocs; | |
509 found->alloc_size += b.alloc_size; | |
510 found->frees += b.frees; | |
511 found->free_size += b.free_size; | |
512 is_found = true; | |
513 break; | |
514 } | |
515 } | |
516 if (is_found) continue; | |
517 | |
518 const size_t key_size = sizeof(b.stack[0]) * b.depth; | |
519 const void** kcopy = reinterpret_cast<const void**>( | |
willchan no longer on Chromium (2013/03/13 21:58:48):
static_cast
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Done.
520 MyAllocator::Allocate(key_size)); | |
521 std::copy(b.stack, b.stack + b.depth, kcopy); | |
522 HeapProfileBucket* new_b = reinterpret_cast<HeapProfileBucket*>( | |
willchan no longer on Chromium (2013/03/13 21:58:48):
s/new_b/new_bucket/
Dai Mikurube (NOT FULLTIME) (2013/03/14 09:33:47):
Done.
523 MyAllocator::Allocate(sizeof(HeapProfileBucket))); | |
524 memset(new_b, 0, sizeof(*new_b)); | |
525 new_b->hash = b.hash; | |
526 new_b->depth = b.depth; | |
527 new_b->stack = kcopy; | |
528 new_b->next = bucket_table_[buck]; | |
529 bucket_table_[buck] = new_b; | |
530 ++num_buckets_; | |
531 } | |
532 } | |
533 | |
407 inline void MemoryRegionMap::InsertRegionLocked(const Region& region) { | 534 inline void MemoryRegionMap::InsertRegionLocked(const Region& region) { |
408 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 535 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
409 // We can be called recursively, because RegionSet constructor | 536 // We can be called recursively, because RegionSet constructor |
410 // and DoInsertRegionLocked() (called below) can call the allocator. | 537 // and DoInsertRegionLocked() (called below) can call the allocator. |
411 // recursive_insert tells us if that's the case. When this happens, | 538 // recursive_insert tells us if that's the case. When this happens, |
412 // region insertion information is recorded in saved_regions[], | 539 // region insertion information is recorded in saved_regions[], |
413 // and taken into account when the recursion unwinds. | 540 // and taken into account when the recursion unwinds. |
414 // Do the insert: | 541 // Do the insert: |
415 if (recursive_insert) { // recursion: save in saved_regions | 542 if (recursive_insert) { // recursion: save in saved_regions |
416 RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p", | 543 RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p", |
(...skipping 44 matching lines...)
461 RAW_VLOG(10, "New global region %p..%p from %p", | 588 RAW_VLOG(10, "New global region %p..%p from %p", |
462 reinterpret_cast<void*>(region.start_addr), | 589 reinterpret_cast<void*>(region.start_addr), |
463 reinterpret_cast<void*>(region.end_addr), | 590 reinterpret_cast<void*>(region.end_addr), |
464 reinterpret_cast<void*>(region.caller())); | 591 reinterpret_cast<void*>(region.caller())); |
465 // Note: none of the above allocates memory. | 592 // Note: none of the above allocates memory. |
466 Lock(); // recursively lock | 593 Lock(); // recursively lock |
467 map_size_ += size; | 594 map_size_ += size; |
468 InsertRegionLocked(region); | 595 InsertRegionLocked(region); |
469 // This will (eventually) allocate storage for and copy over the stack data | 596 // This will (eventually) allocate storage for and copy over the stack data |
470 // from region.call_stack_data_ that is pointed by region.call_stack(). | 597 // from region.call_stack_data_ that is pointed by region.call_stack(). |
598 if (bucket_table_ != NULL) { | |
599 HeapProfileBucket* b = GetBucket(depth, region.call_stack); | |
600 ++b->allocs; | |
601 b->alloc_size += size; | |
602 if (!recursive_insert) { | |
603 recursive_insert = true; | |
604 RestoreSavedBucketsLocked(); | |
605 recursive_insert = false; | |
606 } | |
607 } | |
471 Unlock(); | 608 Unlock(); |
472 } | 609 } |
473 | 610 |
474 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { | 611 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { |
475 Lock(); | 612 Lock(); |
476 if (recursive_insert) { | 613 if (recursive_insert) { |
477 // First remove the removed region from saved_regions, if it's | 614 // First remove the removed region from saved_regions, if it's |
478 // there, to prevent overrunning saved_regions in recursive | 615 // there, to prevent overrunning saved_regions in recursive |
479 // map/unmap call sequences, and also from later inserting regions | 616 // map/unmap call sequences, and also from later inserting regions |
480 // which have already been unmapped. | 617 // which have already been unmapped. |
481 uintptr_t start_addr = reinterpret_cast<uintptr_t>(start); | 618 uintptr_t start_addr = reinterpret_cast<uintptr_t>(start); |
482 uintptr_t end_addr = start_addr + size; | 619 uintptr_t end_addr = start_addr + size; |
483 int put_pos = 0; | 620 int put_pos = 0; |
484 int old_count = saved_regions_count; | 621 int old_count = saved_regions_count; |
485 for (int i = 0; i < old_count; ++i, ++put_pos) { | 622 for (int i = 0; i < old_count; ++i, ++put_pos) { |
486 Region& r = saved_regions[i]; | 623 Region& r = saved_regions[i]; |
487 if (r.start_addr == start_addr && r.end_addr == end_addr) { | 624 if (r.start_addr == start_addr && r.end_addr == end_addr) { |
488 // An exact match, so it's safe to remove. | 625 // An exact match, so it's safe to remove. |
626 RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size); | |
489 --saved_regions_count; | 627 --saved_regions_count; |
490 --put_pos; | 628 --put_pos; |
491 RAW_VLOG(10, ("Insta-Removing saved region %p..%p; " | 629 RAW_VLOG(10, ("Insta-Removing saved region %p..%p; " |
492 "now have %d saved regions"), | 630 "now have %d saved regions"), |
493 reinterpret_cast<void*>(start_addr), | 631 reinterpret_cast<void*>(start_addr), |
494 reinterpret_cast<void*>(end_addr), | 632 reinterpret_cast<void*>(end_addr), |
495 saved_regions_count); | 633 saved_regions_count); |
496 } else { | 634 } else { |
497 if (put_pos < i) { | 635 if (put_pos < i) { |
498 saved_regions[put_pos] = saved_regions[i]; | 636 saved_regions[put_pos] = saved_regions[i]; |
(...skipping 24 matching lines...)
523 region != regions_->end() && region->start_addr < end_addr; | 661 region != regions_->end() && region->start_addr < end_addr; |
524 /*noop*/) { | 662 /*noop*/) { |
525 RAW_VLOG(13, "Looking at region %p..%p", | 663 RAW_VLOG(13, "Looking at region %p..%p", |
526 reinterpret_cast<void*>(region->start_addr), | 664 reinterpret_cast<void*>(region->start_addr), |
527 reinterpret_cast<void*>(region->end_addr)); | 665 reinterpret_cast<void*>(region->end_addr)); |
528 if (start_addr <= region->start_addr && | 666 if (start_addr <= region->start_addr && |
529 region->end_addr <= end_addr) { // full deletion | 667 region->end_addr <= end_addr) { // full deletion |
530 RAW_VLOG(12, "Deleting region %p..%p", | 668 RAW_VLOG(12, "Deleting region %p..%p", |
531 reinterpret_cast<void*>(region->start_addr), | 669 reinterpret_cast<void*>(region->start_addr), |
532 reinterpret_cast<void*>(region->end_addr)); | 670 reinterpret_cast<void*>(region->end_addr)); |
671 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, | |
672 region->end_addr - region->start_addr); | |
533 RegionSet::iterator d = region; | 673 RegionSet::iterator d = region; |
534 ++region; | 674 ++region; |
535 regions_->erase(d); | 675 regions_->erase(d); |
536 continue; | 676 continue; |
537 } else if (region->start_addr < start_addr && | 677 } else if (region->start_addr < start_addr && |
538 end_addr < region->end_addr) { // cutting-out split | 678 end_addr < region->end_addr) { // cutting-out split |
539 RAW_VLOG(12, "Splitting region %p..%p in two", | 679 RAW_VLOG(12, "Splitting region %p..%p in two", |
540 reinterpret_cast<void*>(region->start_addr), | 680 reinterpret_cast<void*>(region->start_addr), |
541 reinterpret_cast<void*>(region->end_addr)); | 681 reinterpret_cast<void*>(region->end_addr)); |
682 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, | |
683 end_addr - start_addr); | |
542 // Make another region for the start portion: | 684 // Make another region for the start portion: |
543 // The new region has to be the start portion because we can't | 685 // The new region has to be the start portion because we can't |
544 // just modify region->end_addr as it's the sorting key. | 686 // just modify region->end_addr as it's the sorting key. |
545 Region r = *region; | 687 Region r = *region; |
546 r.set_end_addr(start_addr); | 688 r.set_end_addr(start_addr); |
547 InsertRegionLocked(r); | 689 InsertRegionLocked(r); |
548 // cut *region from start: | 690 // cut *region from start: |
549 const_cast<Region&>(*region).set_start_addr(end_addr); | 691 const_cast<Region&>(*region).set_start_addr(end_addr); |
550 } else if (end_addr > region->start_addr && | 692 } else if (end_addr > region->start_addr && |
551 start_addr <= region->start_addr) { // cut from start | 693 start_addr <= region->start_addr) { // cut from start |
552 RAW_VLOG(12, "Start-chopping region %p..%p", | 694 RAW_VLOG(12, "Start-chopping region %p..%p", |
553 reinterpret_cast<void*>(region->start_addr), | 695 reinterpret_cast<void*>(region->start_addr), |
554 reinterpret_cast<void*>(region->end_addr)); | 696 reinterpret_cast<void*>(region->end_addr)); |
697 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, | |
698 end_addr - region->start_addr); | |
555 const_cast<Region&>(*region).set_start_addr(end_addr); | 699 const_cast<Region&>(*region).set_start_addr(end_addr); |
556 } else if (start_addr > region->start_addr && | 700 } else if (start_addr > region->start_addr && |
557 start_addr < region->end_addr) { // cut from end | 701 start_addr < region->end_addr) { // cut from end |
558 RAW_VLOG(12, "End-chopping region %p..%p", | 702 RAW_VLOG(12, "End-chopping region %p..%p", |
559 reinterpret_cast<void*>(region->start_addr), | 703 reinterpret_cast<void*>(region->start_addr), |
560 reinterpret_cast<void*>(region->end_addr)); | 704 reinterpret_cast<void*>(region->end_addr)); |
705 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, | |
706 region->end_addr - start_addr); | |
561 // Can't just modify region->end_addr (it's the sorting key): | 707 // Can't just modify region->end_addr (it's the sorting key): |
562 Region r = *region; | 708 Region r = *region; |
563 r.set_end_addr(start_addr); | 709 r.set_end_addr(start_addr); |
564 RegionSet::iterator d = region; | 710 RegionSet::iterator d = region; |
565 ++region; | 711 ++region; |
566 // It's safe to erase before inserting since r is independent of *d: | 712 // It's safe to erase before inserting since r is independent of *d: |
567 // r contains an own copy of the call stack: | 713 // r contains an own copy of the call stack: |
568 regions_->erase(d); | 714 regions_->erase(d); |
569 InsertRegionLocked(r); | 715 InsertRegionLocked(r); |
570 continue; | 716 continue; |
571 } | 717 } |
572 ++region; | 718 ++region; |
573 } | 719 } |
574 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", | 720 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", |
575 reinterpret_cast<void*>(start_addr), | 721 reinterpret_cast<void*>(start_addr), |
576 reinterpret_cast<void*>(end_addr), | 722 reinterpret_cast<void*>(end_addr), |
577 regions_->size()); | 723 regions_->size()); |
578 if (VLOG_IS_ON(12)) LogAllLocked(); | 724 if (VLOG_IS_ON(12)) LogAllLocked(); |
579 unmap_size_ += size; | 725 unmap_size_ += size; |
580 Unlock(); | 726 Unlock(); |
581 } | 727 } |
582 | 728 |
729 void MemoryRegionMap::RecordRegionRemovalInBucket(int depth, | |
730 const void* const stack[], | |
731 size_t size) { | |
732 if (bucket_table_ == NULL) return; | |
733 HeapProfileBucket* b = GetBucket(depth, stack); | |
734 ++b->frees; | |
735 b->free_size += size; | |
736 } | |
737 | |
583 void MemoryRegionMap::MmapHook(const void* result, | 738 void MemoryRegionMap::MmapHook(const void* result, |
584 const void* start, size_t size, | 739 const void* start, size_t size, |
585 int prot, int flags, | 740 int prot, int flags, |
586 int fd, off_t offset) { | 741 int fd, off_t offset) { |
587 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe | 742 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe |
588 // snprintf reimplementation that does not malloc to pretty-print NULL | 743 // snprintf reimplementation that does not malloc to pretty-print NULL |
589 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" " | 744 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" " |
590 "prot %d flags %d fd %d offs %"PRId64, | 745 "prot %d flags %d fd %d offs %"PRId64, |
591 reinterpret_cast<uintptr_t>(result), size, | 746 reinterpret_cast<uintptr_t>(result), size, |
592 reinterpret_cast<uint64>(start), prot, flags, fd, | 747 reinterpret_cast<uint64>(start), prot, flags, fd, |
(...skipping 50 matching lines...)
643 r != regions_->end(); ++r) { | 798 r != regions_->end(); ++r) { |
644 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " | 799 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " |
645 "from 0x%"PRIxPTR" stack=%d", | 800 "from 0x%"PRIxPTR" stack=%d", |
646 r->start_addr, r->end_addr, r->caller(), r->is_stack); | 801 r->start_addr, r->end_addr, r->caller(), r->is_stack); |
647 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); | 802 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); |
648 // this must be caused by uncontrolled recursive operations on regions_ | 803 // this must be caused by uncontrolled recursive operations on regions_ |
649 previous = r->end_addr; | 804 previous = r->end_addr; |
650 } | 805 } |
651 RAW_LOG(INFO, "End of regions list"); | 806 RAW_LOG(INFO, "End of regions list"); |
652 } | 807 } |