Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/memory_region_map.cc

Issue 12388070: Count m(un)map for each stacktrace in MemoryRegionMap instead of HeapProfileTable. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: addressed willchan's comments Created 7 years, 9 months ago
Index: third_party/tcmalloc/chromium/src/memory_region_map.cc
diff --git a/third_party/tcmalloc/chromium/src/memory_region_map.cc b/third_party/tcmalloc/chromium/src/memory_region_map.cc
index 1a8117248055988d996bd8110e9e18c3a66024db..dcc73c4ac6dc0307c38b1f094f426dfdf7c434a5 100644
--- a/third_party/tcmalloc/chromium/src/memory_region_map.cc
+++ b/third_party/tcmalloc/chromium/src/memory_region_map.cc
@@ -84,9 +84,10 @@
// which (sometimes) causes mmap, which calls our hook, and so on.
// We do this as follows: on a recursive call of MemoryRegionMap's
// mmap/sbrk/mremap hook we record the data about the allocation in a
-// static fixed-sized stack (saved_regions), when the recursion unwinds
-// but before returning from the outer hook call we unwind this stack and
-// move the data from saved_regions to its permanent place in the RegionSet,
+// static fixed-sized stack (saved_regions and saved_buckets). When the
+// recursion unwinds, but before returning from the outer hook call, we unwind
+// this stack and move the data from saved_regions and saved_buckets to their
+// permanent places in the RegionSet and "bucket_table" respectively,
// which can cause more allocations and mmap-s and recursion and unwinding,
// but the whole process ends eventually due to the fact that for the small
// allocations we are doing LowLevelAlloc reuses one mmap call and parcels out
@@ -147,6 +148,13 @@ int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_)
pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_)
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;
+HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL; // GUARDED_BY(lock_)
+int MemoryRegionMap::num_buckets_ = 0; // GUARDED_BY(lock_)
+int MemoryRegionMap::saved_buckets_count_ = 0; // GUARDED_BY(lock_)
+HeapProfileBucket MemoryRegionMap::saved_buckets_[20]; // GUARDED_BY(lock_)
+
+// GUARDED_BY(lock_)
+const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];
// ========================================================================= //
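The recursion-handling scheme described in the comment block above, built on
fixed-size scratch arrays like saved_buckets_, can be sketched in isolation.
Every name below (ScratchEntry, InsertIntoRealTable, and so on) is
illustrative, not taken from this patch:

// Minimal standalone sketch of the save-and-unwind pattern.
#include <stddef.h>

struct ScratchEntry { const void* start; size_t size; };

static const int kMaxSaved = 20;       // fixed-size scratch stack, as above
static ScratchEntry saved[kMaxSaved];
static int saved_count = 0;
static bool inside_hook = false;       // plays the role of recursive_insert

// Stand-in for the real insertion, which may allocate and re-enter the hook.
static void InsertIntoRealTable(const ScratchEntry& entry) {
  // (the real code would update the RegionSet / bucket_table_ here)
}

static void RecordAllocation(const void* start, size_t size) {
  if (inside_hook) {
    // Recursive call: park the data in the scratch stack and return.
    if (saved_count < kMaxSaved) {
      saved[saved_count].start = start;
      saved[saved_count].size = size;
      ++saved_count;
    }
    return;
  }
  inside_hook = true;
  ScratchEntry entry = { start, size };
  InsertIntoRealTable(entry);
  // Drain entries parked by recursive calls; draining can itself recurse
  // and push more entries, so loop until the scratch stack is empty.
  while (saved_count > 0) {
    InsertIntoRealTable(saved[--saved_count]);
  }
  inside_hook = false;
}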
@@ -182,7 +190,7 @@ static MemoryRegionMap::RegionSetRep regions_rep;
// (or rather should we *not* use regions_ to record a hooked mmap).
static bool recursive_insert = false;
-void MemoryRegionMap::Init(int max_stack_depth) {
+void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) {
RAW_VLOG(10, "MemoryRegionMap Init");
RAW_CHECK(max_stack_depth >= 0, "");
// Make sure we don't overflow the memory in region stacks:
@@ -214,6 +222,15 @@ void MemoryRegionMap::Init(int max_stack_depth) {
// Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
// recursive_insert = false; as InsertRegionLocked will also construct
// regions_ on demand for us.
+  if (use_buckets) {
+    const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
+    recursive_insert = true;
+    bucket_table_ = static_cast<HeapProfileBucket**>(
+        MyAllocator::Allocate(table_bytes));
+    recursive_insert = false;
+    memset(bucket_table_, 0, table_bytes);
+    num_buckets_ = 0;
+  }
Unlock();
RAW_VLOG(10, "MemoryRegionMap Init done");
}
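With the new parameter, a client that wants per-stack-trace mmap accounting
would initialize the map along these lines; this is a hypothetical call site
and the depth value is only an example:

// Hypothetical caller; 32 is an arbitrary example depth.
MemoryRegionMap::Init(/* max_stack_depth = */ 32, /* use_buckets = */ true);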
@@ -228,6 +245,19 @@ bool MemoryRegionMap::Shutdown() {
RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
return true;
}
+  if (bucket_table_ != NULL) {
+    for (int i = 0; i < kHashTableSize; i++) {
+      for (HeapProfileBucket* curr = bucket_table_[i]; curr != NULL; /**/) {
+        HeapProfileBucket* bucket = curr;
+        curr = curr->next;
+        MyAllocator::Free(bucket->stack, 0);
+        MyAllocator::Free(bucket, 0);
+      }
+    }
+    MyAllocator::Free(bucket_table_, 0);
+    num_buckets_ = 0;
+    bucket_table_ = NULL;
+  }
RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
@@ -245,6 +275,11 @@ bool MemoryRegionMap::Shutdown() {
return deleted_arena;
}
+bool MemoryRegionMap::IsRecordingLocked() {
+  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
+  return client_count_ > 0;
+}
+
// Invariants (once libpthread_initialized is true):
// * While lock_ is not held, recursion_count_ is 0 (and
// lock_owner_tid_ is the previous owner, but we don't rely on
@@ -336,6 +371,62 @@ bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
return region != NULL;
}
+HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
+                                              const void* const key[]) {
+  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
+  // Compute the hash value of the stack trace.
+  uintptr_t hash = 0;
+  for (int i = 0; i < depth; i++) {
+    hash += reinterpret_cast<uintptr_t>(key[i]);
+    hash += hash << 10;
+    hash ^= hash >> 6;
+  }
+  hash += hash << 3;
+  hash ^= hash >> 11;
+
+  // Look up the stack trace in the table.
+  unsigned int hash_index = (static_cast<unsigned int>(hash)) % kHashTableSize;
+  for (HeapProfileBucket* bucket = bucket_table_[hash_index];
+       bucket != NULL;
+       bucket = bucket->next) {
+    if ((bucket->hash == hash) && (bucket->depth == depth) &&
+        std::equal(key, key + depth, bucket->stack)) {
+      return bucket;
+    }
+  }
+
+  // Create a new bucket.
+  const size_t key_size = sizeof(key[0]) * depth;
+  HeapProfileBucket* bucket;
+  if (recursive_insert) {  // recursion: save in saved_buckets_
+    const void** key_copy = saved_buckets_keys_[saved_buckets_count_];
+    std::copy(key, key + depth, key_copy);
+    bucket = &saved_buckets_[saved_buckets_count_];
+    memset(bucket, 0, sizeof(*bucket));
+    ++saved_buckets_count_;
+    bucket->stack = key_copy;
+    bucket->next = NULL;
+  } else {
+    recursive_insert = true;
+    const void** key_copy = static_cast<const void**>(
+        MyAllocator::Allocate(key_size));
+    recursive_insert = false;
+    std::copy(key, key + depth, key_copy);
+    recursive_insert = true;
+    bucket = static_cast<HeapProfileBucket*>(
+        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
+    recursive_insert = false;
+    memset(bucket, 0, sizeof(*bucket));
+    bucket->stack = key_copy;
+    bucket->next = bucket_table_[hash_index];
+  }
+  bucket->hash = hash;
+  bucket->depth = depth;
+  bucket_table_[hash_index] = bucket;
+  ++num_buckets_;
+  return bucket;
+}
+
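The hash computed at the top of GetBucket is a Jenkins-style add/shift/xor
mix over the raw frame addresses. Pulled out on its own it looks like this;
the function name is ours, but the mixing steps match the patch:

#include <stdint.h>

// Standalone copy of GetBucket's stack-trace hash; the name is illustrative.
uintptr_t HashStackTrace(const void* const key[], int depth) {
  uintptr_t hash = 0;
  for (int i = 0; i < depth; i++) {
    hash += reinterpret_cast<uintptr_t>(key[i]);  // mix in each frame address
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;   // final avalanche steps
  hash ^= hash >> 11;
  return hash;
}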
MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
@@ -404,6 +495,44 @@ inline void MemoryRegionMap::HandleSavedRegionsLocked(
}
}
+void MemoryRegionMap::RestoreSavedBucketsLocked() {
+  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
+  while (saved_buckets_count_ > 0) {
+    HeapProfileBucket bucket = saved_buckets_[--saved_buckets_count_];
+    unsigned int hash_index =
+        static_cast<unsigned int>(bucket.hash) % kHashTableSize;
+    bool is_found = false;
+    for (HeapProfileBucket* curr = bucket_table_[hash_index];
+         curr != NULL;
+         curr = curr->next) {
+      if ((curr->hash == bucket.hash) && (curr->depth == bucket.depth) &&
+          std::equal(bucket.stack, bucket.stack + bucket.depth, curr->stack)) {
+        curr->allocs += bucket.allocs;
+        curr->alloc_size += bucket.alloc_size;
+        curr->frees += bucket.frees;
+        curr->free_size += bucket.free_size;
+        is_found = true;
+        break;
+      }
+    }
+    if (is_found) continue;
+
+    const size_t key_size = sizeof(bucket.stack[0]) * bucket.depth;
+    const void** key_copy = static_cast<const void**>(
+        MyAllocator::Allocate(key_size));
+    std::copy(bucket.stack, bucket.stack + bucket.depth, key_copy);
+    HeapProfileBucket* new_bucket = static_cast<HeapProfileBucket*>(
+        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
+    memset(new_bucket, 0, sizeof(*new_bucket));
+    new_bucket->hash = bucket.hash;
+    new_bucket->depth = bucket.depth;
+    new_bucket->stack = key_copy;
+    new_bucket->next = bucket_table_[hash_index];
+    bucket_table_[hash_index] = new_bucket;
+    ++num_buckets_;
+  }
+}
+
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
// We can be called recursively, because RegionSet constructor
@@ -468,6 +597,16 @@ void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
InsertRegionLocked(region);
// This will (eventually) allocate storage for and copy over the stack data
// from region.call_stack_data_ that is pointed by region.call_stack().
+  if (bucket_table_ != NULL) {
+    HeapProfileBucket* b = GetBucket(depth, region.call_stack);
+    ++b->allocs;
+    b->alloc_size += size;
+    if (!recursive_insert) {
+      recursive_insert = true;
+      RestoreSavedBucketsLocked();
+      recursive_insert = false;
+    }
+  }
Unlock();
}
@@ -486,6 +625,7 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
Region& r = saved_regions[i];
if (r.start_addr == start_addr && r.end_addr == end_addr) {
// An exact match, so it's safe to remove.
+        RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size);
--saved_regions_count;
--put_pos;
RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
@@ -530,6 +670,8 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
RAW_VLOG(12, "Deleting region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
+      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
+                                  region->end_addr - region->start_addr);
RegionSet::iterator d = region;
++region;
regions_->erase(d);
@@ -539,6 +681,8 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
RAW_VLOG(12, "Splitting region %p..%p in two",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
+      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
+                                  end_addr - start_addr);
// Make another region for the start portion:
// The new region has to be the start portion because we can't
// just modify region->end_addr as it's the sorting key.
@@ -552,12 +696,16 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
RAW_VLOG(12, "Start-chopping region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
+      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
+                                  end_addr - region->start_addr);
const_cast<Region&>(*region).set_start_addr(end_addr);
} else if (start_addr > region->start_addr &&
start_addr < region->end_addr) { // cut from end
RAW_VLOG(12, "End-chopping region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
+      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
+                                  region->end_addr - start_addr);
// Can't just modify region->end_addr (it's the sorting key):
Region r = *region;
r.set_end_addr(start_addr);
@@ -580,6 +728,16 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
Unlock();
}
+void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
+                                                  const void* const stack[],
+                                                  size_t size) {
+  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
+  if (bucket_table_ == NULL) return;
+  HeapProfileBucket* b = GetBucket(depth, stack);
+  ++b->frees;
+  b->free_size += size;
+}
+
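Since each bucket now carries matching allocation and free counters, a
consumer holding lock_ can compute the net live mmap bytes per stack trace.
A hypothetical walk over the table; this function is not part of the patch
and assumes the int64-style counter types used elsewhere in this file:

#include <stdint.h>

// Hypothetical consumer, not part of the patch: sums net live mmap bytes
// across all buckets. The caller must hold lock_.
static int64_t TotalLiveBytes(HeapProfileBucket** table, int table_size) {
  int64_t live = 0;
  for (int i = 0; i < table_size; i++) {
    for (HeapProfileBucket* b = table[i]; b != NULL; b = b->next) {
      live += b->alloc_size - b->free_size;  // bytes mapped minus unmapped
    }
  }
  return live;
}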
void MemoryRegionMap::MmapHook(const void* result,
const void* start, size_t size,
int prot, int flags,