| OLD | NEW |
| 1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
| 2 * All rights reserved. | 2 * All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 127 matching lines...) |
| 138 | 138 |
| 139 int MemoryRegionMap::client_count_ = 0; | 139 int MemoryRegionMap::client_count_ = 0; |
| 140 int MemoryRegionMap::max_stack_depth_ = 0; | 140 int MemoryRegionMap::max_stack_depth_ = 0; |
| 141 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; | 141 MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL; |
| 142 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; | 142 LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL; |
| 143 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); | 143 SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED); |
| 144 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) | 144 SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_) |
| 145 SpinLock::LINKER_INITIALIZED); | 145 SpinLock::LINKER_INITIALIZED); |
| 146 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) | 146 int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_) |
| 147 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) | 147 pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_) |
| | 148 int64 MemoryRegionMap::map_size_ = 0; |
| | 149 int64 MemoryRegionMap::unmap_size_ = 0; |
| 148 | 150 |
| 149 // ========================================================================= // | 151 // ========================================================================= // |
| 150 | 152 |
| 151 // Simple hook into execution of global object constructors, | 153 // Simple hook into execution of global object constructors, |
| 152 // so that we do not call pthread_self() when it does not yet work. | 154 // so that we do not call pthread_self() when it does not yet work. |
| 153 static bool libpthread_initialized = false; | 155 static bool libpthread_initialized = false; |
| 154 static bool initializer = (libpthread_initialized = true, true); | 156 static bool initializer = (libpthread_initialized = true, true); |
| 155 | 157 |
| 156 static inline bool current_thread_is(pthread_t should_be) { | 158 static inline bool current_thread_is(pthread_t should_be) { |
| 157 // Before main() runs, there's only one thread, so we're always that thread | 159 // Before main() runs, there's only one thread, so we're always that thread |
| (...skipping 297 matching lines...) |
| 455 ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack), | 457 ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack), |
| 456 max_stack_depth_, kStripFrames + 1) | 458 max_stack_depth_, kStripFrames + 1) |
| 457 : 0; | 459 : 0; |
| 458 region.set_call_stack_depth(depth); // record stack info fully | 460 region.set_call_stack_depth(depth); // record stack info fully |
| 459 RAW_VLOG(10, "New global region %p..%p from %p", | 461 RAW_VLOG(10, "New global region %p..%p from %p", |
| 460 reinterpret_cast<void*>(region.start_addr), | 462 reinterpret_cast<void*>(region.start_addr), |
| 461 reinterpret_cast<void*>(region.end_addr), | 463 reinterpret_cast<void*>(region.end_addr), |
| 462 reinterpret_cast<void*>(region.caller())); | 464 reinterpret_cast<void*>(region.caller())); |
| 463 // Note: none of the above allocates memory. | 465 // Note: none of the above allocates memory. |
| 464 Lock(); // recursively lock | 466 Lock(); // recursively lock |
| | 467 map_size_ += size; |
| 465 InsertRegionLocked(region); | 468 InsertRegionLocked(region); |
| 466 // This will (eventually) allocate storage for and copy over the stack data | 469 // This will (eventually) allocate storage for and copy over the stack data |
| 467 // from region.call_stack_data_ that is pointed to by region.call_stack(). | 470 // from region.call_stack_data_ that is pointed to by region.call_stack(). |
| 468 Unlock(); | 471 Unlock(); |
| 469 } | 472 } |
| 470 | 473 |
| 471 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { | 474 void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) { |
| 472 Lock(); | 475 Lock(); |
| 473 if (recursive_insert) { | 476 if (recursive_insert) { |
| 474 // First remove the removed region from saved_regions, if it's | 477 // First remove the removed region from saved_regions, if it's |
| (...skipping 91 matching lines...) |
| 566 InsertRegionLocked(r); | 569 InsertRegionLocked(r); |
| 567 continue; | 570 continue; |
| 568 } | 571 } |
| 569 ++region; | 572 ++region; |
| 570 } | 573 } |
| 571 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", | 574 RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions", |
| 572 reinterpret_cast<void*>(start_addr), | 575 reinterpret_cast<void*>(start_addr), |
| 573 reinterpret_cast<void*>(end_addr), | 576 reinterpret_cast<void*>(end_addr), |
| 574 regions_->size()); | 577 regions_->size()); |
| 575 if (VLOG_IS_ON(12)) LogAllLocked(); | 578 if (VLOG_IS_ON(12)) LogAllLocked(); |
| | 579 unmap_size_ += size; |
| 576 Unlock(); | 580 Unlock(); |
| 577 } | 581 } |
| 578 | 582 |
| 579 void MemoryRegionMap::MmapHook(const void* result, | 583 void MemoryRegionMap::MmapHook(const void* result, |
| 580 const void* start, size_t size, | 584 const void* start, size_t size, |
| 581 int prot, int flags, | 585 int prot, int flags, |
| 582 int fd, off_t offset) { | 586 int fd, off_t offset) { |
| 583 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe | 587 // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe |
| 584 // snprintf reimplementation that does not malloc to pretty-print NULL | 588 // snprintf reimplementation that does not malloc to pretty-print NULL |
| 585 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %llu " | 589 RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %llu " |
| (...skipping 53 matching lines...) |
| 639 r != regions_->end(); ++r) { | 643 r != regions_->end(); ++r) { |
| 640 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " | 644 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " |
| 641 "from 0x%"PRIxPTR" stack=%d", | 645 "from 0x%"PRIxPTR" stack=%d", |
| 642 r->start_addr, r->end_addr, r->caller(), r->is_stack); | 646 r->start_addr, r->end_addr, r->caller(), r->is_stack); |
| 643 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); | 647 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); |
| 644 // this must be caused by uncontrolled recursive operations on regions_ | 648 // this must be caused by uncontrolled recursive operations on regions_ |
| 645 previous = r->end_addr; | 649 previous = r->end_addr; |
| 646 } | 650 } |
| 647 RAW_LOG(INFO, "End of regions list"); | 651 RAW_LOG(INFO, "End of regions list"); |
| 648 } | 652 } |
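
A note on the `libpthread_initialized` / `initializer` pair visible above: it is a trick for detecting whether global constructors have started running, so that code executing earlier never calls `pthread_self()`. The sketch below is a self-contained reproduction of that pattern as it appears in the diff (not new behavior):

```cpp
#include <pthread.h>

static bool libpthread_initialized = false;
// Dynamic initialization of this dummy runs alongside the other global
// constructors; the comma expression flips the flag as a side effect.
static bool initializer = (libpthread_initialized = true, true);

static inline bool current_thread_is(pthread_t should_be) {
  // Before global constructors have run there is only one thread, so the
  // current thread is trivially "should_be"; afterwards pthread_self()
  // is safe to call and we can compare for real.
  return libpthread_initialized ? pthread_equal(pthread_self(), should_be)
                                : true;
}
```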
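The lock members declared near the top (`lock_`, `owner_lock_`, `recursion_count_`, `lock_owner_tid_`) implement recursive locking on top of a non-reentrant `SpinLock`; this is why `RecordRegionAddition` can say `Lock(); // recursively lock` even though inserting a region may re-enter through allocation. The real `Lock()`/`Unlock()` bodies are in the elided lines; here is a self-contained sketch of the technique with hypothetical names (`std::mutex` standing in for `SpinLock`):

```cpp
#include <mutex>
#include <thread>

// Hypothetical stand-in: mu_, owner_mu_, owner_ and count_ play the roles
// of lock_, owner_lock_, lock_owner_tid_ and recursion_count_ above.
class RecursiveLockSketch {
 public:
  void Lock() {
    {
      std::lock_guard<std::mutex> g(owner_mu_);
      if (count_ > 0 && owner_ == std::this_thread::get_id()) {
        ++count_;  // this thread already holds mu_: just deepen the nesting
        return;
      }
    }
    mu_.lock();  // not the owner: acquire the real lock
    std::lock_guard<std::mutex> g(owner_mu_);
    owner_ = std::this_thread::get_id();
    count_ = 1;
  }

  void Unlock() {
    std::lock_guard<std::mutex> g(owner_mu_);
    if (--count_ == 0) mu_.unlock();  // outermost level: release for real
  }

 private:
  std::mutex mu_;          // the real lock (lock_)
  std::mutex owner_mu_;    // guards owner bookkeeping (owner_lock_),
                           // acquired after mu_ per the ACQUIRED_AFTER note
  std::thread::id owner_;  // owning thread (lock_owner_tid_)
  int count_ = 0;          // nesting depth (recursion_count_)
};
```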
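The substance of this change is the pair of cumulative counters: `map_size_` accumulates the size of every region recorded in `RecordRegionAddition` (added under `Lock()`), and `unmap_size_` accumulates the size passed to `RecordRegionRemoval`. A minimal sketch of how they might be exposed and consumed, assuming accessors like `MapSize()`/`UnmapSize()` accompany this change in memory_region_map.h (they are not shown in this diff):

```cpp
// Hypothetical header-side accessors, assumed and not part of the diff
// shown. Both counters only ever grow, so the net number of bytes
// currently mapped is their difference.
static int64 MapSize() { return map_size_; }      // total bytes ever mmap'd
static int64 UnmapSize() { return unmap_size_; }  // total bytes ever munmap'd

// Example use: take the lock before reading, since writers update the
// counters while holding lock_:
//   MemoryRegionMap::Lock();
//   int64 net = MemoryRegionMap::MapSize() - MemoryRegionMap::UnmapSize();
//   MemoryRegionMap::Unlock();
```

One observation on the diff as written: `unmap_size_ += size;` runs unconditionally at the end of `RecordRegionRemoval`, so the counter grows by the full requested size even when only part of that range overlapped recorded regions.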