OLD | NEW |
1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
2 * All rights reserved. | 2 * All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 129 matching lines...)
140 int MemoryRegionMap::max_stack_depth_ = 0; | 140 int MemoryRegionMap::max_stack_depth_ = 0; |
141 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; | 141 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; |
142 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; | 142 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; |
143 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); | 143 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); |
144 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) | 144 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) |
145 SpinLock::LINKER_INITIALIZED); | 145 SpinLock::LINKER_INITIALIZED); |
146 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) | 146 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) |
147 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) | 147 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) |
148 int64 MemoryRegionMap::map_size_ = 0; | 148 int64 MemoryRegionMap::map_size_ = 0; |
149 int64 MemoryRegionMap::unmap_size_ = 0; | 149 int64 MemoryRegionMap::unmap_size_ = 0; |
| 150 MemoryRegionMap::Bucket** MemoryRegionMap::bucket_table_ = NULL; |
| 151 int MemoryRegionMap::num_buckets_ = 0; |
| 152 int MemoryRegionMap::saved_buckets_count_ = 0; |
| 153 MemoryRegionMap::Bucket MemoryRegionMap::saved_buckets_[20]; |
| 154 const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth]; |
150 | 155 |
151 // ========================================================================= // | 156 // ========================================================================= // |
152 | 157 |
153 // Simple hook into execution of global object constructors, | 158 // Simple hook into execution of global object constructors, |
154 // so that we do not call pthread_self() when it does not yet work. | 159 // so that we do not call pthread_self() when it does not yet work. |
155 static bool libpthread_initialized = false; | 160 static bool libpthread_initialized = false; |
156 static bool initializer = (libpthread_initialized = true, true); | 161 static bool initializer = (libpthread_initialized = true, true); |
157 | 162 |
158 static inline bool current_thread_is(pthread_t should_be) { | 163 static inline bool current_thread_is(pthread_t should_be) { |
159 // Before main() runs, there's only one thread, so we're always that thread | 164 // Before main() runs, there's only one thread, so we're always that thread |
(...skipping 15 matching lines...)
175 // We use RegionSetRep with noop c-tor so that global construction | 180 // We use RegionSetRep with noop c-tor so that global construction |
176 // does not interfere. | 181 // does not interfere. |
177 static MemoryRegionMap::RegionSetRep regions_rep; | 182 static MemoryRegionMap::RegionSetRep regions_rep; |
178 | 183 |
179 // ========================================================================= // | 184 // ========================================================================= // |
180 | 185 |
181 // Has InsertRegionLocked been called recursively | 186 // Has InsertRegionLocked been called recursively |
182 // (or rather should we *not* use regions_ to record a hooked mmap). | 187 // (or rather should we *not* use regions_ to record a hooked mmap). |
183 static bool recursive_insert = false; | 188 static bool recursive_insert = false; |
184 | 189 |
185 void MemoryRegionMap::Init(int max_stack_depth) { | 190 void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) { |
186 RAW_VLOG(10, "MemoryRegionMap Init"); | 191 RAW_VLOG(10, "MemoryRegionMap Init"); |
187 RAW_CHECK(max_stack_depth >= 0, ""); | 192 RAW_CHECK(max_stack_depth >= 0, ""); |
188 // Make sure we don't overflow the memory in region stacks: | 193 // Make sure we don't overflow the memory in region stacks: |
189 RAW_CHECK(max_stack_depth <= kMaxStackDepth, | 194 RAW_CHECK(max_stack_depth <= kMaxStackDepth, |
190 "need to increase kMaxStackDepth?"); | 195 "need to increase kMaxStackDepth?"); |
191 Lock(); | 196 Lock(); |
192 client_count_ += 1; | 197 client_count_ += 1; |
193 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); | 198 max_stack_depth_ = max(max_stack_depth_, max_stack_depth); |
194 if (client_count_ > 1) { | 199 if (client_count_ > 1) { |
195 // not first client: already did initialization-proper | 200 // not first client: already did initialization-proper |
(...skipping 11 matching lines...)
207 // recursive_insert allows us to buffer info about these mmap calls. | 212 // recursive_insert allows us to buffer info about these mmap calls. |
208 // Note that Init() can be (and is) sometimes called | 213 // Note that Init() can be (and is) sometimes called |
209 // already from within an mmap/sbrk hook. | 214 // already from within an mmap/sbrk hook. |
210 recursive_insert = true; | 215 recursive_insert = true; |
211 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); | 216 arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena()); |
212 recursive_insert = false; | 217 recursive_insert = false; |
213 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones | 218 HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones |
214 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before | 219 // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before |
215 // recursive_insert = false; as InsertRegionLocked will also construct | 220 // recursive_insert = false; as InsertRegionLocked will also construct |
216 // regions_ on demand for us. | 221 // regions_ on demand for us. |
| 222 if (use_buckets) { |
| 223 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); |
| 224 recursive_insert = true; |
| 225 bucket_table_ = reinterpret_cast<Bucket**>( |
| 226 MyAllocator::Allocate(table_bytes)); |
| 227 recursive_insert = false; |
| 228 memset(bucket_table_, 0, table_bytes); |
| 229 num_buckets_ = 0; |
| 230 } |
217 Unlock(); | 231 Unlock(); |
218 RAW_VLOG(10, "MemoryRegionMap Init done"); | 232 RAW_VLOG(10, "MemoryRegionMap Init done"); |
219 } | 233 } |
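Init() above is reference counted: only the first client installs the hooks, creates the arena, and (when use_buckets is set) allocates the bucket table, while later clients only bump client_count_ and possibly raise max_stack_depth_. A minimal standalone sketch of that first-in/last-out pattern, with hypothetical names; it does not use MemoryRegionMap itself:

// Standalone sketch (hypothetical names) of the reference-counted
// Init()/Shutdown() pattern used above: only the first client performs
// real setup, and only the last client to call Shutdown() tears down.
#include <cassert>

namespace sketch {

static int client_count = 0;
static bool resources_ready = false;

void Init() {
  client_count += 1;
  if (client_count > 1) return;  // not first client: setup already done
  resources_ready = true;        // stands in for hooks/arena/bucket table
}

bool Shutdown() {
  assert(client_count > 0);
  client_count -= 1;
  if (client_count != 0) return true;  // not last client: keep resources
  resources_ready = false;             // stands in for freeing buckets etc.
  return true;
}

}  // namespace sketch

int main() {
  sketch::Init();      // first client: real setup
  sketch::Init();      // second client: just bumps the count
  sketch::Shutdown();  // count drops to 1, resources stay
  sketch::Shutdown();  // last client: real teardown
  assert(!sketch::resources_ready);
  return 0;
}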
220 | 234 |
221 bool MemoryRegionMap::Shutdown() { | 235 bool MemoryRegionMap::Shutdown() { |
222 RAW_VLOG(10, "MemoryRegionMap Shutdown"); | 236 RAW_VLOG(10, "MemoryRegionMap Shutdown"); |
223 Lock(); | 237 Lock(); |
224 RAW_CHECK(client_count_ > 0, ""); | 238 RAW_CHECK(client_count_ > 0, ""); |
225 client_count_ -= 1; | 239 client_count_ -= 1; |
226 if (client_count_ != 0) { // not last client; need not really shutdown | 240 if (client_count_ != 0) { // not last client; need not really shutdown |
227 Unlock(); | 241 Unlock(); |
228 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); | 242 RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done"); |
229 return true; | 243 return true; |
230 } | 244 } |
| 245 if (bucket_table_ != NULL) { |
| 246 for (int i = 0; i < kHashTableSize; i++) { |
| 247 for (Bucket* x = bucket_table_[i]; x != 0; /**/) { |
| 248 Bucket* b = x; |
| 249 x = x->next; |
| 250 MyAllocator::Free(b->stack, 0); |
| 251 MyAllocator::Free(b, 0); |
| 252 } |
| 253 } |
| 254 MyAllocator::Free(bucket_table_, 0); |
| 255 num_buckets_ = 0; |
| 256 bucket_table_ = NULL; |
| 257 } |
231 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 258 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
232 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 259 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
233 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 260 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
234 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 261 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
235 if (regions_) regions_->~RegionSet(); | 262 if (regions_) regions_->~RegionSet(); |
236 regions_ = NULL; | 263 regions_ = NULL; |
237 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); | 264 bool deleted_arena = LowLevelAlloc::DeleteArena(arena_); |
238 if (deleted_arena) { | 265 if (deleted_arena) { |
239 arena_ = 0; | 266 arena_ = 0; |
240 } else { | 267 } else { |
241 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); | 268 RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used"); |
242 } | 269 } |
243 Unlock(); | 270 Unlock(); |
244 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); | 271 RAW_VLOG(10, "MemoryRegionMap Shutdown done"); |
245 return deleted_arena; | 272 return deleted_arena; |
246 } | 273 } |
247 | 274 |
| 275 bool MemoryRegionMap::IsWorking() { |
| 276 RAW_VLOG(10, "MemoryRegionMap IsWorking"); |
| 277 Lock(); |
| 278 bool is_working = (client_count_ > 0); |
| 279 Unlock(); |
| 280 RAW_VLOG(10, "MemoryRegionMap IsWorking done"); |
| 281 return is_working; |
| 282 } |
| 283 |
248 // Invariants (once libpthread_initialized is true): | 284 // Invariants (once libpthread_initialized is true): |
249 // * While lock_ is not held, recursion_count_ is 0 (and | 285 // * While lock_ is not held, recursion_count_ is 0 (and |
250 // lock_owner_tid_ is the previous owner, but we don't rely on | 286 // lock_owner_tid_ is the previous owner, but we don't rely on |
251 // that). | 287 // that). |
252 // * recursion_count_ and lock_owner_tid_ are only written while | 288 // * recursion_count_ and lock_owner_tid_ are only written while |
253 // both lock_ and owner_lock_ are held. They may be read under | 289 // both lock_ and owner_lock_ are held. They may be read under |
254 // just owner_lock_. | 290 // just owner_lock_. |
255 // * At entry and exit of Lock() and Unlock(), the current thread | 291 // * At entry and exit of Lock() and Unlock(), the current thread |
256 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) | 292 // owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self()) |
257 // && recursion_count_ > 0. | 293 // && recursion_count_ > 0. |
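The Lock()/Unlock() bodies that enforce these invariants sit in the lines elided just below. As a rough illustration only, here is a self-contained sketch of the same recursive-ownership scheme, using std::mutex and std::thread::id in place of SpinLock and pthread_t; it is not the class's actual implementation:

// Sketch of a recursive-ownership lock consistent with the invariants
// above: the owner thread and nesting depth are tracked under a small
// side lock, so a thread that already holds lock_ can re-enter Lock().
#include <cassert>
#include <mutex>
#include <thread>

class RecursiveOwnerLock {
 public:
  void Lock() {
    {
      std::lock_guard<std::mutex> g(owner_lock_);
      if (recursion_count_ > 0 &&
          lock_owner_tid_ == std::this_thread::get_id()) {
        ++recursion_count_;  // re-entry by the owner: no second acquire
        return;
      }
    }
    lock_.lock();  // first acquisition by this thread
    std::lock_guard<std::mutex> g(owner_lock_);
    lock_owner_tid_ = std::this_thread::get_id();
    recursion_count_ = 1;
  }

  void Unlock() {
    std::lock_guard<std::mutex> g(owner_lock_);
    assert(recursion_count_ > 0 &&
           lock_owner_tid_ == std::this_thread::get_id());
    --recursion_count_;
    if (recursion_count_ == 0) lock_.unlock();  // last unlock releases lock_
  }

 private:
  std::mutex lock_;                 // the "real" lock
  std::mutex owner_lock_;           // guards the two fields below
  int recursion_count_ = 0;         // GUARDED_BY(owner_lock_)
  std::thread::id lock_owner_tid_;  // GUARDED_BY(owner_lock_)
};

int main() {
  RecursiveOwnerLock m;
  m.Lock();
  m.Lock();    // recursive re-entry by the same thread is fine
  m.Unlock();
  m.Unlock();  // only now is lock_ actually released
  return 0;
}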
(...skipping 71 matching lines...)
329 reinterpret_cast<void*>(region->start_addr), | 365 reinterpret_cast<void*>(region->start_addr), |
330 reinterpret_cast<void*>(region->end_addr)); | 366 reinterpret_cast<void*>(region->end_addr)); |
331 const_cast<Region*>(region)->set_is_stack(); // now we know | 367 const_cast<Region*>(region)->set_is_stack(); // now we know |
332 // cast is safe (set_is_stack does not change the set ordering key) | 368 // cast is safe (set_is_stack does not change the set ordering key) |
333 *result = *region; // create *result as an independent copy | 369 *result = *region; // create *result as an independent copy |
334 } | 370 } |
335 Unlock(); | 371 Unlock(); |
336 return region != NULL; | 372 return region != NULL; |
337 } | 373 } |
338 | 374 |
| 375 MemoryRegionMap::Bucket* MemoryRegionMap::GetBucket(int depth, |
| 376 const void* const key[]) { |
| 377 // Make hash-value |
| 378 uintptr_t h = 0; |
| 379 for (int i = 0; i < depth; i++) { |
| 380 h += reinterpret_cast<uintptr_t>(key[i]); |
| 381 h += h << 10; |
| 382 h ^= h >> 6; |
| 383 } |
| 384 h += h << 3; |
| 385 h ^= h >> 11; |
| 386 |
| 387 // Lookup stack trace in table |
| 388 unsigned int buck = ((unsigned int) h) % kHashTableSize; |
| 389 for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) { |
| 390 if ((b->hash == h) && |
| 391 (b->depth == depth) && |
| 392 std::equal(key, key + depth, b->stack)) { |
| 393 return b; |
| 394 } |
| 395 } |
| 396 |
| 397 // Create new bucket |
| 398 const size_t key_size = sizeof(key[0]) * depth; |
| 399 Bucket* b; |
| 400 if (recursive_insert) { // recursion: save in saved_buckets_ |
| 401 const void** kcopy = saved_buckets_keys_[saved_buckets_count_]; |
| 402 std::copy(key, key + depth, kcopy); |
| 403 b = &saved_buckets_[saved_buckets_count_]; |
| 404 memset(b, 0, sizeof(*b)); |
| 405 ++saved_buckets_count_; |
| 406 b->stack = kcopy; |
| 407 b->next = NULL; |
| 408 } else { |
| 409 recursive_insert = true; |
| 410 const void** kcopy = reinterpret_cast<const void**>( |
| 411 MyAllocator::Allocate(key_size)); |
| 412 recursive_insert = false; |
| 413 std::copy(key, key + depth, kcopy); |
| 414 recursive_insert = true; |
| 415 b = reinterpret_cast<Bucket*>( |
| 416 MyAllocator::Allocate(sizeof(Bucket))); |
| 417 recursive_insert = false; |
| 418 memset(b, 0, sizeof(*b)); |
| 419 b->stack = kcopy; |
| 420 b->next = bucket_table_[buck]; |
| 421 } |
| 422 b->hash = h; |
| 423 b->depth = depth; |
| 424 bucket_table_[buck] = b; |
| 425 ++num_buckets_; |
| 426 return b; |
| 427 } |
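GetBucket() above derives the table slot from a Jenkins-style one-at-a-time mix over the captured return addresses. A standalone sketch of just that hashing step; kHashTableSize here is a placeholder constant, since the real value is defined in the header and is not visible in this diff:

// Sketch of the stack-trace hashing used by GetBucket(): mix each frame
// address into the accumulator, avalanche, then reduce modulo table size.
#include <cstdint>
#include <cstdio>

static const unsigned int kHashTableSize = 179999;  // placeholder value

unsigned int BucketIndex(int depth, const void* const key[],
                         uintptr_t* out_hash) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);  // fold in one frame
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;   // final avalanche
  h ^= h >> 11;
  *out_hash = h;
  return static_cast<unsigned int>(h) % kHashTableSize;
}

int main() {
  // Two made-up "return addresses" standing in for a captured call stack.
  const void* stack[] = { reinterpret_cast<const void*>(0x400123),
                          reinterpret_cast<const void*>(0x400456) };
  uintptr_t h = 0;
  unsigned int idx = BucketIndex(2, stack, &h);
  std::printf("hash=0x%llx bucket=%u\n",
              static_cast<unsigned long long>(h), idx);
  return 0;
}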
| 428 |
339 MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() { | 429 MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() { |
340 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 430 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
341 RAW_CHECK(regions_ != NULL, ""); | 431 RAW_CHECK(regions_ != NULL, ""); |
342 return regions_->begin(); | 432 return regions_->begin(); |
343 } | 433 } |
344 | 434 |
345 MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() { | 435 MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() { |
346 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 436 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
347 RAW_CHECK(regions_ != NULL, ""); | 437 RAW_CHECK(regions_ != NULL, ""); |
348 return regions_->end(); | 438 return regions_->end(); |
(...skipping 48 matching lines...)
397 while (saved_regions_count > 0) { | 487 while (saved_regions_count > 0) { |
398 // Making a local-var copy of the region argument to insert_func | 488 // Making a local-var copy of the region argument to insert_func |
399 // including its stack (w/o doing any memory allocations) is important: | 489 // including its stack (w/o doing any memory allocations) is important: |
400 // in many cases the memory in saved_regions | 490 // in many cases the memory in saved_regions |
401 // will get written-to during the (*insert_func)(r) call below. | 491 // will get written-to during the (*insert_func)(r) call below. |
402 Region r = saved_regions[--saved_regions_count]; | 492 Region r = saved_regions[--saved_regions_count]; |
403 (*insert_func)(r); | 493 (*insert_func)(r); |
404 } | 494 } |
405 } | 495 } |
406 | 496 |
| 497 inline void MemoryRegionMap::HandleSavedBucketsLocked() { |
| 498 while (saved_buckets_count_ > 0) { |
| 499 Bucket b = saved_buckets_[--saved_buckets_count_]; |
| 500 unsigned int buck = ((unsigned int) b.hash) % kHashTableSize; |
| 501 bool is_found = false; |
| 502 for (Bucket* found = bucket_table_[buck]; found != 0; found = found->next) { |
| 503 if ((found->hash == b.hash) && (found->depth == b.depth) && |
| 504 std::equal(b.stack, b.stack + b.depth, found->stack)) { |
| 505 found->allocs += b.allocs; |
| 506 found->alloc_size += b.alloc_size; |
| 507 found->frees += b.frees; |
| 508 found->free_size += b.free_size; |
| 509 is_found = true; |
| 510 break; |
| 511 } |
| 512 } |
| 513 if (is_found) continue; |
| 514 |
| 515 const size_t key_size = sizeof(b.stack[0]) * b.depth; |
| 516 const void** kcopy = reinterpret_cast<const void**>( |
| 517 MyAllocator::Allocate(key_size)); |
| 518 std::copy(b.stack, b.stack + b.depth, kcopy); |
| 519 Bucket* new_b = reinterpret_cast<Bucket*>( |
| 520 MyAllocator::Allocate(sizeof(Bucket))); |
| 521 memset(new_b, 0, sizeof(*new_b)); |
| 522 new_b->hash = b.hash; |
| 523 new_b->depth = b.depth; |
| 524 new_b->stack = kcopy; |
| 525 new_b->next = bucket_table_[buck]; |
| 526 bucket_table_[buck] = new_b; |
| 527 ++num_buckets_; |
| 528 } |
| 529 } |
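HandleSavedBucketsLocked() above is the flush half of a defer-and-flush pattern: while recursive_insert is set, GetBucket() records new buckets into the fixed-size saved_buckets_ array instead of allocating, and the saved entries are merged into the real table once allocation is safe again. A self-contained sketch of that pattern with illustrative names (a std::map stands in for the bucket hash table):

// Sketch: while re-entrant, updates go into a small static buffer;
// once allocation is safe again, the buffer is flushed into the table.
#include <cassert>
#include <map>
#include <utility>

static bool recursive_insert_sketch = false;
static int saved_count = 0;
static std::pair<int, long> saved[20];        // (key, bytes) buffered while re-entrant
static std::map<int, long>* table = nullptr;  // the "real" table (may allocate)

void Record(int key, long bytes) {
  if (recursive_insert_sketch) {  // allocation unsafe: buffer it
    assert(saved_count < 20);
    saved[saved_count++] = std::make_pair(key, bytes);
    return;
  }
  recursive_insert_sketch = true;  // the map insert may allocate
  (*table)[key] += bytes;
  recursive_insert_sketch = false;
  while (saved_count > 0) {        // flush anything buffered meanwhile
    std::pair<int, long> s = saved[--saved_count];
    (*table)[s.first] += s.second;
  }
}

int main() {
  table = new std::map<int, long>();
  recursive_insert_sketch = true;  // simulate being inside a hook
  Record(42, 4096);                // goes to the static buffer
  recursive_insert_sketch = false;
  Record(42, 8192);                // real insert, then flush of the buffer
  assert((*table)[42] == 4096 + 8192);
  delete table;
  return 0;
}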
| 530 |
407 inline void MemoryRegionMap::InsertRegionLocked(const Region& region) { | 531 inline void MemoryRegionMap::InsertRegionLocked(const Region& region) { |
408 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); | 532 RAW_CHECK(LockIsHeld(), "should be held (by this thread)"); |
409 // We can be called recursively, because RegionSet constructor | 533 // We can be called recursively, because RegionSet constructor |
410 // and DoInsertRegionLocked() (called below) can call the allocator. | 534 // and DoInsertRegionLocked() (called below) can call the allocator. |
411 // recursive_insert tells us if that's the case. When this happens, | 535 // recursive_insert tells us if that's the case. When this happens, |
412 // region insertion information is recorded in saved_regions[], | 536 // region insertion information is recorded in saved_regions[], |
413 // and taken into account when the recursion unwinds. | 537 // and taken into account when the recursion unwinds. |
414 // Do the insert: | 538 // Do the insert: |
415 if (recursive_insert) { // recursion: save in saved_regions | 539 if (recursive_insert) { // recursion: save in saved_regions |
416 RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p", | 540 RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p", |
(...skipping 44 matching lines...)
461 RAW_VLOG(10, "New global region %p..%p from %p", | 585 RAW_VLOG(10, "New global region %p..%p from %p", |
462 reinterpret_cast<void*>(region.start_addr), | 586 reinterpret_cast<void*>(region.start_addr), |
463 reinterpret_cast<void*>(region.end_addr), | 587 reinterpret_cast<void*>(region.end_addr), |
464 reinterpret_cast<void*>(region.caller())); | 588 reinterpret_cast<void*>(region.caller())); |
465 // Note: none of the above allocates memory. | 589 // Note: none of the above allocates memory. |
466 Lock(); // recursively lock | 590 Lock(); // recursively lock |
467 map_size_ += size; | 591 map_size_ += size; |
468 InsertRegionLocked(region); | 592 InsertRegionLocked(region); |
469 // This will (eventually) allocate storage for and copy over the stack data | 593 // This will (eventually) allocate storage for and copy over the stack data |
470 // from region.call_stack_data_ that is pointed by region.call_stack(). | 594 // from region.call_stack_data_ that is pointed by region.call_stack(). |
| 595 if (bucket_table_ != NULL) { |
| 596 Bucket* b = GetBucket(depth, region.call_stack); |
| 597 ++b->allocs; |
| 598 b->alloc_size += size; |
| 599 if (!recursive_insert) { |
| 600 recursive_insert = true; |
| 601 HandleSavedBucketsLocked(); |
| 602 recursive_insert = false; |
| 603 } |
| 604 } |
471 Unlock(); | 605 Unlock(); |
472 } | 606 } |
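RecordRegionAddition() charges each mapping to the bucket for its call stack by bumping allocs/alloc_size; RecordRegionRemoval() later bumps frees/free_size on the same bucket. A small sketch of that accounting; the field types are assumptions, since the real Bucket definition lives in the header and is not part of this diff:

// Sketch of per-call-stack accounting: a bucket accumulates mapped and
// unmapped totals, and live bytes for that stack are alloc_size - free_size.
#include <cstdint>
#include <cstdio>

struct BucketSketch {
  uintptr_t     hash;        // hash of the stack trace
  int           depth;       // number of frames in 'stack'
  const void**  stack;       // the captured return addresses
  BucketSketch* next;        // hash-table chaining
  int           allocs;      // mmap/sbrk count charged to this stack
  int64_t       alloc_size;  // bytes mapped
  int           frees;       // unmap count charged to this stack
  int64_t       free_size;   // bytes unmapped
};

int main() {
  BucketSketch b = {};  // zero everything, as memset() does above
  // One 64 KiB mapping and one 16 KiB partial unmap recorded against it:
  ++b.allocs;  b.alloc_size += 64 * 1024;
  ++b.frees;   b.free_size  += 16 * 1024;
  std::printf("live bytes for this stack: %lld\n",
              static_cast<long long>(b.alloc_size - b.free_size));
  return 0;
}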
473 | 607 |
474 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { | 608 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { |
475 Lock(); | 609 Lock(); |
476 if (recursive_insert) { | 610 if (recursive_insert) { |
477 // First remove the removed region from saved_regions, if it's | 611 // First remove the removed region from saved_regions, if it's |
478 // there, to prevent overrunning saved_regions in recursive | 612 // there, to prevent overrunning saved_regions in recursive |
479 // map/unmap call sequences, and also from later inserting regions | 613 // map/unmap call sequences, and also from later inserting regions |
480 // which have already been unmapped. | 614 // which have already been unmapped. |
481 uintptr_t start_addr = reinterpret_cast<uintptr_t>(start); | 615 uintptr_t start_addr = reinterpret_cast<uintptr_t>(start); |
482 uintptr_t end_addr = start_addr + size; | 616 uintptr_t end_addr = start_addr + size; |
483 int put_pos = 0; | 617 int put_pos = 0; |
484 int old_count = saved_regions_count; | 618 int old_count = saved_regions_count; |
485 for (int i = 0; i < old_count; ++i, ++put_pos) { | 619 for (int i = 0; i < old_count; ++i, ++put_pos) { |
486 Region& r = saved_regions[i]; | 620 Region& r = saved_regions[i]; |
487 if (r.start_addr == start_addr && r.end_addr == end_addr) { | 621 if (r.start_addr == start_addr && r.end_addr == end_addr) { |
488 // An exact match, so it's safe to remove. | 622 // An exact match, so it's safe to remove. |
| 623 RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size); |
489 --saved_regions_count; | 624 --saved_regions_count; |
490 --put_pos; | 625 --put_pos; |
491 RAW_VLOG(10, ("Insta-Removing saved region %p..%p; " | 626 RAW_VLOG(10, ("Insta-Removing saved region %p..%p; " |
492 "now have %d saved regions"), | 627 "now have %d saved regions"), |
493 reinterpret_cast<void*>(start_addr), | 628 reinterpret_cast<void*>(start_addr), |
494 reinterpret_cast<void*>(end_addr), | 629 reinterpret_cast<void*>(end_addr), |
495 saved_regions_count); | 630 saved_regions_count); |
496 } else { | 631 } else { |
497 if (put_pos < i) { | 632 if (put_pos < i) { |
498 saved_regions[put_pos] = saved_regions[i]; | 633 saved_regions[put_pos] = saved_regions[i]; |
(...skipping 24 matching lines...)
523 region != regions_->end() && region->start_addr < end_addr; | 658 region != regions_->end() && region->start_addr < end_addr; |
524 /*noop*/) { | 659 /*noop*/) { |
525 RAW_VLOG(13, "Looking at region %p..%p", | 660 RAW_VLOG(13, "Looking at region %p..%p", |
526 reinterpret_cast<void*>(region->start_addr), | 661 reinterpret_cast<void*>(region->start_addr), |
527 reinterpret_cast<void*>(region->end_addr)); | 662 reinterpret_cast<void*>(region->end_addr)); |
528 if (start_addr <= region->start_addr && | 663 if (start_addr <= region->start_addr && |
529 region->end_addr <= end_addr) { // full deletion | 664 region->end_addr <= end_addr) { // full deletion |
530 RAW_VLOG(12, "Deleting region %p..%p", | 665 RAW_VLOG(12, "Deleting region %p..%p", |
531 reinterpret_cast<void*>(region->start_addr), | 666 reinterpret_cast<void*>(region->start_addr), |
532 reinterpret_cast<void*>(region->end_addr)); | 667 reinterpret_cast<void*>(region->end_addr)); |
| 668 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, |
| 669 region->end_addr - region->start_addr); |
533 RegionSet::iterator d = region; | 670 RegionSet::iterator d = region; |
534 ++region; | 671 ++region; |
535 regions_->erase(d); | 672 regions_->erase(d); |
536 continue; | 673 continue; |
537 } else if (region->start_addr < start_addr && | 674 } else if (region->start_addr < start_addr && |
538 end_addr < region->end_addr) { // cutting-out split | 675 end_addr < region->end_addr) { // cutting-out split |
539 RAW_VLOG(12, "Splitting region %p..%p in two", | 676 RAW_VLOG(12, "Splitting region %p..%p in two", |
540 reinterpret_cast<void*>(region->start_addr), | 677 reinterpret_cast<void*>(region->start_addr), |
541 reinterpret_cast<void*>(region->end_addr)); | 678 reinterpret_cast<void*>(region->end_addr)); |
| 679 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, |
| 680 end_addr - start_addr); |
542 // Make another region for the start portion: | 681 // Make another region for the start portion: |
543 // The new region has to be the start portion because we can't | 682 // The new region has to be the start portion because we can't |
544 // just modify region->end_addr as it's the sorting key. | 683 // just modify region->end_addr as it's the sorting key. |
545 Region r = *region; | 684 Region r = *region; |
546 r.set_end_addr(start_addr); | 685 r.set_end_addr(start_addr); |
547 InsertRegionLocked(r); | 686 InsertRegionLocked(r); |
548 // cut *region from start: | 687 // cut *region from start: |
549 const_cast<Region&>(*region).set_start_addr(end_addr); | 688 const_cast<Region&>(*region).set_start_addr(end_addr); |
550 } else if (end_addr > region->start_addr && | 689 } else if (end_addr > region->start_addr && |
551 start_addr <= region->start_addr) { // cut from start | 690 start_addr <= region->start_addr) { // cut from start |
552 RAW_VLOG(12, "Start-chopping region %p..%p", | 691 RAW_VLOG(12, "Start-chopping region %p..%p", |
553 reinterpret_cast<void*>(region->start_addr), | 692 reinterpret_cast<void*>(region->start_addr), |
554 reinterpret_cast<void*>(region->end_addr)); | 693 reinterpret_cast<void*>(region->end_addr)); |
| 694 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, |
| 695 end_addr - region->start_addr); |
555 const_cast<Region&>(*region).set_start_addr(end_addr); | 696 const_cast<Region&>(*region).set_start_addr(end_addr); |
556 } else if (start_addr > region->start_addr && | 697 } else if (start_addr > region->start_addr && |
557 start_addr < region->end_addr) { // cut from end | 698 start_addr < region->end_addr) { // cut from end |
558 RAW_VLOG(12, "End-chopping region %p..%p", | 699 RAW_VLOG(12, "End-chopping region %p..%p", |
559 reinterpret_cast<void*>(region->start_addr), | 700 reinterpret_cast<void*>(region->start_addr), |
560 reinterpret_cast<void*>(region->end_addr)); | 701 reinterpret_cast<void*>(region->end_addr)); |
| 702 RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack, |
| 703 region->end_addr - start_addr); |
561 // Can't just modify region->end_addr (it's the sorting key): | 704 // Can't just modify region->end_addr (it's the sorting key): |
562 Region r = *region; | 705 Region r = *region; |
563 r.set_end_addr(start_addr); | 706 r.set_end_addr(start_addr); |
564 RegionSet::iterator d = region; | 707 RegionSet::iterator d = region; |
565 ++region; | 708 ++region; |
566 // It's safe to erase before inserting since r is independent of *d: | 709 // It's safe to erase before inserting since r is independent of *d: |
567 // r contains an own copy of the call stack: | 710 // r contains an own copy of the call stack: |
568 regions_->erase(d); | 711 regions_->erase(d); |
569 InsertRegionLocked(r); | 712 InsertRegionLocked(r); |
570 continue; | 713 continue; |
571 } | 714 } |
572 ++region; | 715 ++region; |
573 } | 716 } |
574 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", | 717 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", |
575 reinterpret_cast<void*>(start_addr), | 718 reinterpret_cast<void*>(start_addr), |
576 reinterpret_cast<void*>(end_addr), | 719 reinterpret_cast<void*>(end_addr), |
577 regions_->size()); | 720 regions_->size()); |
578 if (VLOG_IS_ON(12)) LogAllLocked(); | 721 if (VLOG_IS_ON(12)) LogAllLocked(); |
579 unmap_size_ += size; | 722 unmap_size_ += size; |
580 Unlock(); | 723 Unlock(); |
581 } | 724 } |
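The loop above distinguishes four ways an unmapped range can overlap an existing region, and RecordRegionRemovalInBucket() is charged a different byte count in each case. A standalone sketch of that case analysis on plain integers, purely illustrative:

// Bytes of region [r_start, r_end) removed by an unmap of [start, end),
// mirroring the four overlap cases handled in RecordRegionRemoval() above.
#include <cassert>
#include <cstdint>

uintptr_t RemovedBytes(uintptr_t r_start, uintptr_t r_end,
                       uintptr_t start, uintptr_t end) {
  if (start <= r_start && r_end <= end)  return r_end - r_start;  // full deletion
  if (r_start < start && end < r_end)    return end - start;      // cutting-out split
  if (end > r_start && start <= r_start) return end - r_start;    // cut from start
  if (start > r_start && start < r_end)  return r_end - start;    // cut from end
  return 0;                                                       // no overlap
}

int main() {
  // Region covers [100, 200).
  assert(RemovedBytes(100, 200,  50, 250) == 100);  // fully unmapped
  assert(RemovedBytes(100, 200, 120, 150) ==  30);  // middle cut out, region splits
  assert(RemovedBytes(100, 200,  80, 150) ==  50);  // start chopped
  assert(RemovedBytes(100, 200, 150, 260) ==  50);  // end chopped
  assert(RemovedBytes(100, 200, 200, 300) ==   0);  // no overlap
  return 0;
}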
582 | 725 |
| 726 void MemoryRegionMap::RecordRegionRemovalInBucket(int depth, |
| 727 const void* const stack[], |
| 728 size_t size) { |
| 729 if (bucket_table_ == NULL) return; |
| 730 Bucket* b = GetBucket(depth, stack); |
| 731 ++b->frees; |
| 732 b->free_size += size; |
| 733 } |
| 734 |
583 void MemoryRegionMap::MmapHook(const void* result, | 735 void MemoryRegionMap::MmapHook(const void* result, |
584 const void* start, size_t size, | 736 const void* start, size_t size, |
585 int prot, int flags, | 737 int prot, int flags, |
586 int fd, off_t offset) { | 738 int fd, off_t offset) { |
587 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe | 739 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe |
588 // snprintf reimplementation that does not malloc to pretty-print NULL | 740 // snprintf reimplementation that does not malloc to pretty-print NULL |
589 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" " | 741 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" " |
590 "prot %d flags %d fd %d offs %"PRId64, | 742 "prot %d flags %d fd %d offs %"PRId64, |
591 reinterpret_cast<uintptr_t>(result), size, | 743 reinterpret_cast<uintptr_t>(result), size, |
592 reinterpret_cast<uint64>(start), prot, flags, fd, | 744 reinterpret_cast<uint64>(start), prot, flags, fd, |
(...skipping 50 matching lines...)
643 r != regions_->end(); ++r) { | 795 r != regions_->end(); ++r) { |
644 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " | 796 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " |
645 "from 0x%"PRIxPTR" stack=%d", | 797 "from 0x%"PRIxPTR" stack=%d", |
646 r->start_addr, r->end_addr, r->caller(), r->is_stack); | 798 r->start_addr, r->end_addr, r->caller(), r->is_stack); |
647 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); | 799 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); |
648 // this must be caused by uncontrolled recursive operations on regions_ | 800 // this must be caused by uncontrolled recursive operations on regions_ |
649 previous = r->end_addr; | 801 previous = r->end_addr; |
650 } | 802 } |
651 RAW_LOG(INFO, "End of regions list"); | 803 RAW_LOG(INFO, "End of regions list"); |
652 } | 804 } |