Index: third_party/tcmalloc/chromium/src/heap-profile-table.cc
diff --git a/third_party/tcmalloc/chromium/src/heap-profile-table.cc b/third_party/tcmalloc/chromium/src/heap-profile-table.cc
index 0b02d4cd9304ed1f0306f61430189cf88d7b4942..68ec5735026f8feced50e6612c760ced6915b809 100644
--- a/third_party/tcmalloc/chromium/src/heap-profile-table.cc
+++ b/third_party/tcmalloc/chromium/src/heap-profile-table.cc
@@ -102,8 +102,7 @@ const char HeapProfileTable::kFileExt[] = ".heap";
 //----------------------------------------------------------------------
 
-// Size for alloc_table_ and mmap_table_.
-static const int kHashTableSize = 179999;
+static const int kHashTableSize = 179999;  // Size for bucket_table_.
 
 /*static*/ const int HeapProfileTable::kMaxStackDepth;
 
 //----------------------------------------------------------------------
@@ -125,61 +124,50 @@ static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
 
 //----------------------------------------------------------------------
 
-HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
+HeapProfileTable::HeapProfileTable(Allocator alloc,
+                                   DeAllocator dealloc,
+                                   bool profile_mmap)
     : alloc_(alloc),
       dealloc_(dealloc),
-      num_alloc_buckets_(0),
-      mmap_table_(NULL),
-      num_available_mmap_buckets_(0),
-      mmap_address_map_(NULL) {
-  // Initialize the overall profile stats.
-  memset(&total_, 0, sizeof(total_));
-
-  // Make the malloc table.
-  const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
-  alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
-  memset(alloc_table_, 0, alloc_table_bytes);
-
-  // Make malloc and mmap allocation maps.
-  alloc_address_map_ =
+      bucket_table_(NULL),
+      profile_mmap_(profile_mmap),
+      num_buckets_(0),
+      address_map_(NULL) {
+  // Make a hash table for buckets.
+  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
+  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
+  memset(bucket_table_, 0, table_bytes);
+
+  // Make an allocation map.
+  address_map_ =
       new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
-}
 
-HeapProfileTable::~HeapProfileTable() {
-  DeallocateBucketTable(alloc_table_);
-  alloc_table_ = NULL;
-  DeallocateBucketTable(mmap_table_);
-  mmap_table_ = NULL;
-  DeallocateAllocationMap(alloc_address_map_);
-  alloc_address_map_ = NULL;
-  DeallocateAllocationMap(mmap_address_map_);
-  mmap_address_map_ = NULL;
-}
-
-void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) {
-  if (allocation != NULL) {
-    alloc_address_map_->~AllocationMap();
-    dealloc_(allocation);
-  }
+  // Initialize.
+  memset(&total_, 0, sizeof(total_));
+  num_buckets_ = 0;
 }
 
-void HeapProfileTable::DeallocateBucketTable(Bucket** table) {
-  if (table != NULL) {
-    for (int b = 0; b < kHashTableSize; b++) {
-      for (Bucket* x = table[b]; x != 0; /**/) {
-        Bucket* b = x;
-        x = x->next;
-        dealloc_(b->stack);
-        dealloc_(b);
-      }
+HeapProfileTable::~HeapProfileTable() {
+  // Free the allocation map.
+  address_map_->~AllocationMap();
+  dealloc_(address_map_);
+  address_map_ = NULL;
+
+  // Free the hash table.
+  for (int i = 0; i < kHashTableSize; i++) {
+    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
+      Bucket* bucket = curr;
+      curr = curr->next;
+      dealloc_(bucket->stack);
+      dealloc_(bucket);
     }
-    dealloc_(table);
   }
+  dealloc_(bucket_table_);
+  bucket_table_ = NULL;
 }
 
-HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
-    int depth, const void* const key[], Bucket** table,
-    int* bucket_count) {
+HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
+                                                      const void* const key[]) {
   // Make hash-value
   uintptr_t h = 0;
   for (int i = 0; i < depth; i++) {
@@ -192,7 +180,7 @@ HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
 
   // Lookup stack trace in table
   unsigned int buck = ((unsigned int) h) % kHashTableSize;
-  for (Bucket* b = table[buck]; b != 0; b = b->next) {
+  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
     if ((b->hash == h) &&
         (b->depth == depth) &&
         equal(key, key + depth, b->stack)) {
@@ -209,11 +197,9 @@ HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
   b->hash = h;
   b->depth = depth;
   b->stack = kcopy;
-  b->next = table[buck];
-  table[buck] = b;
-  if (bucket_count != NULL) {
-    ++(*bucket_count);
-  }
+  b->next = bucket_table_[buck];
+  bucket_table_[buck] = b;
+  num_buckets_++;
   return b;
 }
 
@@ -226,8 +212,7 @@ int HeapProfileTable::GetCallerStackTrace(
 void HeapProfileTable::RecordAlloc(
     const void* ptr, size_t bytes, int stack_depth,
     const void* const call_stack[]) {
-  Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_,
-                        &num_alloc_buckets_);
+  Bucket* b = GetBucket(stack_depth, call_stack);
   b->allocs++;
   b->alloc_size += bytes;
   total_.allocs++;
@@ -236,12 +221,12 @@ void HeapProfileTable::RecordAlloc(
   AllocValue v;
   v.set_bucket(b);  // also did set_live(false); set_ignore(false)
   v.bytes = bytes;
-  alloc_address_map_->Insert(ptr, v);
+  address_map_->Insert(ptr, v);
 }
 
 void HeapProfileTable::RecordFree(const void* ptr) {
   AllocValue v;
-  if (alloc_address_map_->FindAndRemove(ptr, &v)) {
+  if (address_map_->FindAndRemove(ptr, &v)) {
     Bucket* b = v.bucket();
     b->frees++;
     b->free_size += v.bytes;
@@ -251,14 +236,14 @@ void HeapProfileTable::RecordFree(const void* ptr) {
 }
 
 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
-  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
+  const AllocValue* alloc_value = address_map_->Find(ptr);
   if (alloc_value != NULL) *object_size = alloc_value->bytes;
   return alloc_value != NULL;
 }
 
 bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                         AllocInfo* info) const {
-  const AllocValue* alloc_value = alloc_address_map_->Find(ptr);
+  const AllocValue* alloc_value = address_map_->Find(ptr);
   if (alloc_value != NULL) {
     info->object_size = alloc_value->bytes;
     info->call_stack = alloc_value->bucket()->stack;
@@ -272,13 +257,13 @@ bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                        const void** object_ptr,
                                        size_t* object_size) const {
   const AllocValue* alloc_value =
-    alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
+      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
   if (alloc_value != NULL) *object_size = alloc_value->bytes;
   return alloc_value != NULL;
 }
 
 bool HeapProfileTable::MarkAsLive(const void* ptr) {
-  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
+  AllocValue* alloc = address_map_->FindMutable(ptr);
   if (alloc && !alloc->live()) {
     alloc->set_live(true);
     return true;
@@ -287,7 +272,7 @@ bool HeapProfileTable::MarkAsLive(const void* ptr) {
 }
 
 void HeapProfileTable::MarkAsIgnored(const void* ptr) {
-  AllocValue* alloc = alloc_address_map_->FindMutable(ptr);
+  AllocValue* alloc = address_map_->FindMutable(ptr);
   if (alloc) {
     alloc->set_ignore(true);
   }
@@ -296,18 +281,18 @@ void HeapProfileTable::MarkAsIgnored(const void* ptr) {
 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                   void* data) {
   const AllocationAddressIteratorArgs args(f, data);
-  alloc_address_map_->Iterate<const AllocationAddressIteratorArgs&>(
+  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
       AllocationAddressesIterator, args);
 }
 
 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
   const MarkArgs args(mark, true);
-  alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
+  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
 }
 
 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
   const MarkArgs args(mark, true);
-  alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
+  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
 }
 
 // We'd be happier using snprintfer, but we don't to reduce dependencies.
@@ -345,77 +330,21 @@ int HeapProfileTable::UnparseBucket(const Bucket& b,
 
 HeapProfileTable::Bucket**
 HeapProfileTable::MakeSortedBucketList() const {
-  Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) *
-      (num_alloc_buckets_ + num_available_mmap_buckets_)));
-
-  RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, "");
-
-  int n = 0;
+  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_));
 
-  for (int b = 0; b < kHashTableSize; b++) {
-    for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) {
-      list[n++] = x;
+  int bucket_count = 0;
+  for (int i = 0; i < kHashTableSize; i++) {
+    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
+      list[bucket_count++] = curr;
     }
   }
-  RAW_DCHECK(n == num_alloc_buckets_, "");
+  RAW_DCHECK(bucket_count == num_buckets_, "");
 
-  if (mmap_table_ != NULL) {
-    for (int b = 0; b < kHashTableSize; b++) {
-      for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) {
-        list[n++] = x;
-      }
-    }
-  }
-  RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");
-
-  sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
-       ByAllocatedSpace);
+  sort(list, list + num_buckets_, ByAllocatedSpace);
 
   return list;
 }
 
-void HeapProfileTable::RefreshMMapData(Allocator mmap_alloc,
-                                       DeAllocator mmap_dealloc) {
-  // Make the table
-  static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
-  if (mmap_table_ == NULL) {
-    mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
-    memset(mmap_table_, 0, mmap_table_bytes);
-  }
-  num_available_mmap_buckets_ = 0;
-
-  ClearMMapData();
-  mmap_address_map_ = new(alloc_(sizeof(AllocationMap)))
-      AllocationMap(mmap_alloc, mmap_dealloc);
-
-  MemoryRegionMap::LockHolder l;
-  for (MemoryRegionMap::RegionIterator r =
-           MemoryRegionMap::BeginRegionLocked();
-       r != MemoryRegionMap::EndRegionLocked(); ++r) {
-    Bucket* b =
-        GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
-    if (b->alloc_size == 0) {
-      num_available_mmap_buckets_ += 1;
-    }
-    b->allocs += 1;
-    b->alloc_size += r->end_addr - r->start_addr;
-
-    AllocValue v;
-    v.set_bucket(b);
-    v.bytes = r->end_addr - r->start_addr;
-    mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
-  }
-}
-
-void HeapProfileTable::ClearMMapData() {
-  if (mmap_address_map_ == NULL) return;
-
-  mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
-  mmap_address_map_->~AllocationMap();
-  dealloc_(mmap_address_map_);
-  mmap_address_map_ = NULL;
-}
-
 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                          const char* file_name) {
   RawFD fd = RawOpenForWriting(file_name);
@@ -424,7 +353,7 @@ void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
     return;
   }
   const DumpMarkedArgs args(fd, mark);
-  alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
+  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
   RawClose(fd);
 }
@@ -439,7 +368,7 @@ void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
   AddressMap<TypeCount>* type_size_map;
   type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
-  alloc_address_map_->Iterate(TallyTypesItererator, type_size_map);
+  address_map_->Iterate(TallyTypesItererator, type_size_map);
 
   RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
   const DumpArgs args(fd, NULL);
@@ -455,7 +384,7 @@ void HeapProfileTable::IterateOrderedAllocContexts(
     AllocContextIterator callback) const {
   Bucket** list = MakeSortedBucketList();
   AllocContextInfo info;
-  for (int i = 0; i < num_alloc_buckets_; ++i) {
+  for (int i = 0; i < num_buckets_; ++i) {
     *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
     info.stack_depth = list[i]->depth;
     info.call_stack = list[i]->stack;
@@ -487,14 +416,17 @@ int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
   memset(&stats, 0, sizeof(stats));
   int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
   if (bucket_length < 0 || bucket_length >= size) return 0;
-  Bucket total_with_mmap(total_);
-  if (mmap_table_ != NULL) {
-    total_with_mmap.alloc_size += MemoryRegionMap::MapSize();
-    total_with_mmap.free_size += MemoryRegionMap::UnmapSize();
-  }
-  bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size,
+  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                 " heapprofile", &stats);
-  for (int i = 0; i < num_alloc_buckets_; i++) {
+
+  // Dump the mmap list first.
+  if (profile_mmap_) {
+    BufferArgs buffer(buf, bucket_length, size);
+    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
+    bucket_length = buffer.buflen;
+  }
+
+  for (int i = 0; i < num_buckets_; i++) {
     bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                   &stats);
   }
@@ -508,6 +440,13 @@ int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
   return bucket_length + map_length;
 }
 
+// static
+void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
+                                          BufferArgs* args) {
+  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
+                               "", NULL);
+}
+
 #if defined(TYPE_PROFILING)
 // static
 void HeapProfileTable::TallyTypesItererator(
@@ -598,17 +537,6 @@ void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
   v->set_mark(args.mark);
 }
 
-inline void HeapProfileTable::ZeroBucketCountsIterator(
-    const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) {
-  Bucket* b = v->bucket();
-  if (b != NULL) {
-    b->allocs = 0;
-    b->alloc_size = 0;
-    b->free_size = 0;
-    b->frees = 0;
-  }
-}
-
 // Callback from NonLiveSnapshot; adds entry to arg->dest
 // if not the entry is not live and is not present in arg->base.
 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
@@ -675,7 +603,7 @@ void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
 
 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
   Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
-  alloc_address_map_->Iterate(AddToSnapshot, s);
+  address_map_->Iterate(AddToSnapshot, s);
   return s;
 }
 
@@ -700,7 +628,7 @@ HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
   AddNonLiveArgs args;
   args.dest = s;
   args.base = base;
-  alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
+  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
   RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
            int(s->total_.allocs - s->total_.frees),
            int(s->total_.alloc_size - s->total_.free_size));
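
Note for reviewers: the core of this change is that GetBucket() now probes a single
chained hash table (bucket_table_) and bumps one counter (num_buckets_), instead of
taking a table and counter as parameters for separate malloc and mmap tables. The
standalone sketch below is NOT part of the patch; it re-renders that lookup with
simplified stand-in types, plain malloc/calloc instead of the profiler's raw
allocator, and a hypothetical main(). The hash mixing follows the style of the
original file.

// bucket_sketch.cc - illustrative only; simplified from heap-profile-table.cc.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const int kHashTableSize = 179999;  // Same table size as the patch.

struct Bucket {
  uintptr_t hash;      // Hash of the call stack, to cheapen chain compares.
  int depth;           // Number of PCs in the stack.
  const void** stack;  // Copied call stack, owned by the bucket.
  Bucket* next;        // Next bucket in this hash chain.
};

static Bucket* bucket_table[kHashTableSize];  // Zero-initialized chains.
static int num_buckets = 0;                   // One counter for all buckets.

static Bucket* GetBucket(int depth, const void* const key[]) {
  // Mix the stack addresses into a hash value (Jenkins-style, as upstream).
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Look the stack trace up in the table.
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table[buck]; b != 0; b = b->next) {
    if ((b->hash == h) && (b->depth == depth) &&
        std::equal(key, key + depth, b->stack)) {
      return b;  // Seen this stack before.
    }
  }

  // Miss: copy the stack and prepend a new bucket to the chain.
  const void** kcopy =
      static_cast<const void**>(malloc(depth * sizeof(const void*)));
  std::copy(key, key + depth, kcopy);
  Bucket* b = static_cast<Bucket*>(calloc(1, sizeof(Bucket)));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table[buck];
  bucket_table[buck] = b;
  num_buckets++;  // Single count now that malloc and mmap share one table.
  return b;
}

int main() {
  const void* stack[] = {reinterpret_cast<void*>(0x1234),
                         reinterpret_cast<void*>(0x5678)};
  Bucket* a = GetBucket(2, stack);
  Bucket* b = GetBucket(2, stack);  // Identical stack finds the same bucket.
  std::printf("same bucket: %s, num_buckets: %d\n",
              a == b ? "yes" : "no", num_buckets);
  return 0;
}

Keeping one table and one counter is also what lets MakeSortedBucketList() size its
array with a single multiplication, while mmap buckets are now owned and iterated by
MemoryRegionMap (see DumpBucketIterator and the profile_mmap_ branch in
FillOrderedProfile above).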