Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 59 #include "heap-profile-table.h" | 59 #include "heap-profile-table.h" |
| 60 | 60 |
| 61 #include "base/logging.h" | 61 #include "base/logging.h" |
| 62 #include "raw_printer.h" | 62 #include "raw_printer.h" |
| 63 #include "symbolize.h" | 63 #include "symbolize.h" |
| 64 #include <gperftools/stacktrace.h> | 64 #include <gperftools/stacktrace.h> |
| 65 #include <gperftools/malloc_hook.h> | 65 #include <gperftools/malloc_hook.h> |
| 66 #include "memory_region_map.h" | 66 #include "memory_region_map.h" |
| 67 #include "base/commandlineflags.h" | 67 #include "base/commandlineflags.h" |
| 68 #include "base/logging.h" // for the RawFD I/O commands | 68 #include "base/logging.h" // for the RawFD I/O commands |
| 69 #include "base/low_level_alloc.h" | |
| 69 #include "base/sysinfo.h" | 70 #include "base/sysinfo.h" |
| 70 | 71 |
| 71 using std::sort; | 72 using std::sort; |
| 72 using std::equal; | 73 using std::equal; |
| 73 using std::copy; | 74 using std::copy; |
| 74 using std::string; | 75 using std::string; |
| 75 using std::map; | 76 using std::map; |
| 76 | 77 |
| 77 using tcmalloc::FillProcSelfMaps; // from sysinfo.h | 78 using tcmalloc::FillProcSelfMaps; // from sysinfo.h |
| 78 using tcmalloc::DumpProcSelfMaps; // from sysinfo.h | 79 using tcmalloc::DumpProcSelfMaps; // from sysinfo.h |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 116 // For sorting Stats or Buckets by in-use space | 117 // For sorting Stats or Buckets by in-use space |
| 117 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 118 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, |
| 118 HeapProfileTable::Stats* b) { | 119 HeapProfileTable::Stats* b) { |
| 119 // Return true iff "a" has more allocated space than "b" | 120 // Return true iff "a" has more allocated space than "b" |
| 120 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 121 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); |
| 121 } | 122 } |
| 122 | 123 |
| 123 //---------------------------------------------------------------------- | 124 //---------------------------------------------------------------------- |
| 124 | 125 |
| 125 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 126 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) |
| 126 : alloc_(alloc), dealloc_(dealloc) { | 127 : alloc_(alloc), dealloc_(dealloc) { |
|
jar (doing other things)
2012/04/04 17:38:40
nit: One initializer per line (once you've wrapped
Dai Mikurube (NOT FULLTIME)
2012/04/05 05:19:26
Done.
| |
| 127 // Initialize the overall profile stats. | 128 // Initialize the overall profile stats. |
| 128 memset(&total_, 0, sizeof(total_)); | 129 memset(&total_, 0, sizeof(total_)); |
| 129 | 130 |
| 130 // Make the malloc table. | 131 // Make the malloc table. |
| 131 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); | 132 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); |
| 132 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); | 133 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); |
| 133 memset(alloc_table_, 0, alloc_table_bytes); | 134 memset(alloc_table_, 0, alloc_table_bytes); |
| 134 num_alloc_buckets_ = 0; | 135 num_alloc_buckets_ = 0; |
|
jar (doing other things)
2012/04/04 17:38:40
nit: these slots initialized with 0 and NULL shoul
Dai Mikurube (NOT FULLTIME)
2012/04/05 05:19:26
Done.
| |
| 135 | 136 |
| 136 // Initialize the mmap table. | 137 // Initialize the mmap table. |
| 137 mmap_table_ = NULL; | 138 mmap_table_ = NULL; |
| 138 num_available_mmap_buckets_ = 0; | 139 num_available_mmap_buckets_ = 0; |
| 139 | 140 |
| 140 // Make malloc and mmap allocation maps. | 141 // Make malloc and mmap allocation maps. |
| 141 alloc_address_map_ = | 142 alloc_address_map_ = |
| 142 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | 143 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |
| 143 mmap_address_map_ = NULL; | 144 mmap_address_map_ = NULL; |
| 144 } | 145 } |
| 145 | 146 |
| 146 HeapProfileTable::~HeapProfileTable() { | 147 HeapProfileTable::~HeapProfileTable() { |
| 147 DeallocateBucketTable(alloc_table_); | 148 DeallocateBucketTable(alloc_table_); |
| 148 alloc_table_ = NULL; | 149 alloc_table_ = NULL; |
| 149 DeallocateBucketTable(mmap_table_); | 150 DeallocateBucketTable(mmap_table_); |
| 150 mmap_table_ = NULL; | 151 mmap_table_ = NULL; |
| 151 DeallocateAllocationMap(alloc_address_map_); | 152 DeallocateAllocationMap(alloc_address_map_); |
| 152 alloc_address_map_ = NULL; | 153 alloc_address_map_ = NULL; |
| 153 DeallocateAllocationMap(mmap_address_map_); | 154 DeallocateAllocationMap(mmap_address_map_); |
|
jar (doing other things)
2012/04/04 17:38:40
This call will still use the default deallocator,
Dai Mikurube (NOT FULLTIME)
2012/04/05 05:19:26
Changed that mmap_address_map_ itself is allocated
| |
| 154 mmap_address_map_ = NULL; | 155 mmap_address_map_ = NULL; |
| 155 } | 156 } |
| 156 | 157 |
| 157 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { | 158 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { |
| 158 if (allocation != NULL) { | 159 if (allocation != NULL) { |
| 159 alloc_address_map_->~AllocationMap(); | 160 alloc_address_map_->~AllocationMap(); |
| 160 dealloc_(allocation); | 161 dealloc_(allocation); |
| 161 } | 162 } |
| 162 } | 163 } |
| 163 | 164 |
| (...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 348 } | 349 } |
| 349 } | 350 } |
| 350 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); | 351 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); |
| 351 | 352 |
| 352 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, | 353 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, |
| 353 ByAllocatedSpace); | 354 ByAllocatedSpace); |
| 354 | 355 |
| 355 return list; | 356 return list; |
| 356 } | 357 } |
| 357 | 358 |
| 358 void HeapProfileTable::RefreshMMapData() { | 359 void HeapProfileTable::RefreshMMapData(Allocator mmap_alloc, |
| 360 DeAllocator mmap_dealloc) { | |
| 359 // Make the table | 361 // Make the table |
| 360 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); | 362 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); |
| 361 if (mmap_table_ == NULL) { | 363 if (mmap_table_ == NULL) { |
| 362 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); | 364 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); |
| 363 memset(mmap_table_, 0, mmap_table_bytes); | 365 memset(mmap_table_, 0, mmap_table_bytes); |
| 364 } | 366 } |
| 365 num_available_mmap_buckets_ = 0; | 367 num_available_mmap_buckets_ = 0; |
| 366 | 368 |
| 367 ClearMMapData(); | 369 ClearMMapData(mmap_dealloc); |
| 368 mmap_address_map_ = | 370 mmap_address_map_ = new(mmap_alloc(sizeof(AllocationMap))) |
| 369 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | 371 AllocationMap(mmap_alloc, mmap_dealloc); |
| 370 | 372 |
| 371 MemoryRegionMap::LockHolder l; | 373 MemoryRegionMap::LockHolder l; |
| 372 for (MemoryRegionMap::RegionIterator r = | 374 for (MemoryRegionMap::RegionIterator r = |
| 373 MemoryRegionMap::BeginRegionLocked(); | 375 MemoryRegionMap::BeginRegionLocked(); |
| 374 r != MemoryRegionMap::EndRegionLocked(); ++r) { | 376 r != MemoryRegionMap::EndRegionLocked(); ++r) { |
| 375 Bucket* b = | 377 Bucket* b = |
| 376 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); | 378 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); |
| 377 if (b->alloc_size == 0) { | 379 if (b->alloc_size == 0) { |
| 378 num_available_mmap_buckets_ += 1; | 380 num_available_mmap_buckets_ += 1; |
| 379 } | 381 } |
| 380 b->allocs += 1; | 382 b->allocs += 1; |
| 381 b->alloc_size += r->end_addr - r->start_addr; | 383 b->alloc_size += r->end_addr - r->start_addr; |
| 382 | 384 |
| 383 AllocValue v; | 385 AllocValue v; |
| 384 v.set_bucket(b); | 386 v.set_bucket(b); |
| 385 v.bytes = r->end_addr - r->start_addr; | 387 v.bytes = r->end_addr - r->start_addr; |
| 386 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); | 388 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); |
| 387 } | 389 } |
| 388 } | 390 } |
| 389 | 391 |
| 390 void HeapProfileTable::ClearMMapData() { | 392 void HeapProfileTable::ClearMMapData(DeAllocator mmap_dealloc) { |
| 391 if (mmap_address_map_ != NULL) { | 393 if (mmap_address_map_ != NULL) { |
|
jar (doing other things)
2012/04/04 17:38:40
nit: early return on "== NULL" will mean you won't
Dai Mikurube (NOT FULLTIME)
2012/04/05 05:19:26
Done.
| |
| 392 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | 394 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); |
| 393 mmap_address_map_->~AllocationMap(); | 395 mmap_address_map_->~AllocationMap(); |
| 394 dealloc_(mmap_address_map_); | 396 mmap_dealloc(mmap_address_map_); |
| 395 mmap_address_map_ = NULL; | 397 mmap_address_map_ = NULL; |
| 396 } | 398 } |
| 397 } | 399 } |
| 398 | 400 |
| 399 void HeapProfileTable::IterateOrderedAllocContexts( | 401 void HeapProfileTable::IterateOrderedAllocContexts( |
| 400 AllocContextIterator callback) const { | 402 AllocContextIterator callback) const { |
| 401 Bucket** list = MakeSortedBucketList(); | 403 Bucket** list = MakeSortedBucketList(); |
| 402 AllocContextInfo info; | 404 AllocContextInfo info; |
| 403 for (int i = 0; i < num_alloc_buckets_; ++i) { | 405 for (int i = 0; i < num_alloc_buckets_; ++i) { |
| 404 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 406 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
| (...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 509 RawWrite(fd, kProfileHeader, strlen(kProfileHeader)); | 511 RawWrite(fd, kProfileHeader, strlen(kProfileHeader)); |
| 510 char buf[512]; | 512 char buf[512]; |
| 511 int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile", | 513 int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile", |
| 512 NULL); | 514 NULL); |
| 513 RawWrite(fd, buf, len); | 515 RawWrite(fd, buf, len); |
| 514 const DumpArgs args(fd, NULL); | 516 const DumpArgs args(fd, NULL); |
| 515 allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args); | 517 allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args); |
| 516 RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader)); | 518 RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader)); |
| 517 DumpProcSelfMaps(fd); | 519 DumpProcSelfMaps(fd); |
| 518 RawClose(fd); | 520 RawClose(fd); |
| 519 return true; | 521 return true; |
|
jar (doing other things)
2012/04/04 17:38:40
Since both clauses have a return, there is no need
Dai Mikurube (NOT FULLTIME)
2012/04/05 05:19:26
Done.
| |
| 520 } else { | 522 } else { |
| 521 RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name); | 523 RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name); |
| 522 return false; | 524 return false; |
| 523 } | 525 } |
| 524 } | 526 } |
| 525 | 527 |
| 526 void HeapProfileTable::CleanupOldProfiles(const char* prefix) { | 528 void HeapProfileTable::CleanupOldProfiles(const char* prefix) { |
| 527 if (!FLAGS_cleanup_old_heap_profiles) | 529 if (!FLAGS_cleanup_old_heap_profiles) |
| 528 return; | 530 return; |
| 529 char buf[1000]; | 531 char buf[1000]; |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 690 char* unused) { | 692 char* unused) { |
| 691 // Perhaps also log the allocation stack trace (unsymbolized) | 693 // Perhaps also log the allocation stack trace (unsymbolized) |
| 692 // on this line in case somebody finds it useful. | 694 // on this line in case somebody finds it useful. |
| 693 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 695 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
| 694 } | 696 } |
| 695 | 697 |
| 696 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 698 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
| 697 char unused; | 699 char unused; |
| 698 map_.Iterate(ReportObject, &unused); | 700 map_.Iterate(ReportObject, &unused); |
| 699 } | 701 } |
| OLD | NEW |