
Unified Diff: third_party/tcmalloc/chromium/src/heap-profile-table.cc

Issue 9963095: Reserve a dedicated arena for every construction of mmap_address_map. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: just rebased. Created 8 years, 8 months ago
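The patch threads a caller-supplied allocator/deallocator pair through RefreshMMapData() (see the hunks below), so each rebuild of mmap_address_map_ can draw its memory from a dedicated arena rather than the profiler's shared one. A minimal sketch of what such a pair could look like, assuming gperftools' LowLevelAlloc arena API (base/low_level_alloc.h) and the Allocator/DeAllocator typedefs from heap-profile-table.h; mmap_map_arena, MMapMapAlloc and MMapMapFree are illustrative names, not code from this CL:

// Illustrative only -- a dedicated arena plus the two callbacks matching
// HeapProfileTable::Allocator / HeapProfileTable::DeAllocator.
#include "base/low_level_alloc.h"

static LowLevelAlloc::Arena* mmap_map_arena = NULL;

// Allocator callback: carve every block out of the dedicated arena.
static void* MMapMapAlloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, mmap_map_arena);
}

// DeAllocator callback: LowLevelAlloc::Free locates the owning arena itself.
static void MMapMapFree(void* ptr) {
  LowLevelAlloc::Free(ptr);
}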
 // Copyright (c) 2006, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 105 matching lines...)
 // For sorting Stats or Buckets by in-use space
 static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                              HeapProfileTable::Stats* b) {
   // Return true iff "a" has more allocated space than "b"
   return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
 }
 
 //----------------------------------------------------------------------
 
 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc)
-    : alloc_(alloc), dealloc_(dealloc) {
+    : alloc_(alloc),
+      dealloc_(dealloc),
+      num_alloc_buckets_(0),
+      mmap_table_(NULL),
+      num_available_mmap_buckets_(0),
+      mmap_address_map_(NULL) {
   // Initialize the overall profile stats.
   memset(&total_, 0, sizeof(total_));
 
   // Make the malloc table.
   const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_);
   alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes));
   memset(alloc_table_, 0, alloc_table_bytes);
-  num_alloc_buckets_ = 0;
-
-  // Initialize the mmap table.
-  mmap_table_ = NULL;
-  num_available_mmap_buckets_ = 0;
 
   // Make malloc and mmap allocation maps.
   alloc_address_map_ =
       new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
-  mmap_address_map_ = NULL;
 }
 
 HeapProfileTable::~HeapProfileTable() {
   DeallocateBucketTable(alloc_table_);
   alloc_table_ = NULL;
   DeallocateBucketTable(mmap_table_);
   mmap_table_ = NULL;
   DeallocateAllocationMap(alloc_address_map_);
   alloc_address_map_ = NULL;
   DeallocateAllocationMap(mmap_address_map_);
(...skipping 194 matching lines...)
     }
   }
   RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, "");
 
   sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_,
        ByAllocatedSpace);
 
   return list;
 }
 
-void HeapProfileTable::RefreshMMapData() {
+void HeapProfileTable::RefreshMMapData(Allocator mmap_alloc,
+                                       DeAllocator mmap_dealloc) {
   // Make the table
   static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_);
   if (mmap_table_ == NULL) {
     mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes));
     memset(mmap_table_, 0, mmap_table_bytes);
   }
   num_available_mmap_buckets_ = 0;
 
   ClearMMapData();
-  mmap_address_map_ =
-      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
+  mmap_address_map_ = new(alloc_(sizeof(AllocationMap)))
+      AllocationMap(mmap_alloc, mmap_dealloc);
 
   MemoryRegionMap::LockHolder l;
   for (MemoryRegionMap::RegionIterator r =
            MemoryRegionMap::BeginRegionLocked();
        r != MemoryRegionMap::EndRegionLocked(); ++r) {
     Bucket* b =
         GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL);
     if (b->alloc_size == 0) {
       num_available_mmap_buckets_ += 1;
     }
     b->allocs += 1;
     b->alloc_size += r->end_addr - r->start_addr;
 
     AllocValue v;
     v.set_bucket(b);
     v.bytes = r->end_addr - r->start_addr;
     mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v);
   }
 }
 
 void HeapProfileTable::ClearMMapData() {
-  if (mmap_address_map_ != NULL) {
-    mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
-    mmap_address_map_->~AllocationMap();
-    dealloc_(mmap_address_map_);
-    mmap_address_map_ = NULL;
-  }
+  if (mmap_address_map_ == NULL) return;
+
+  mmap_address_map_->Iterate(ZeroBucketCountsIterator, this);
+  mmap_address_map_->~AllocationMap();
+  dealloc_(mmap_address_map_);
+  mmap_address_map_ = NULL;
 }
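Since ClearMMapData() runs the map's destructor (returning its internal allocations through the deallocator it was constructed with) before releasing the map object itself, a caller that owns the dedicated arena can release that memory in one step once the map is cleared. A rough sketch of that lifecycle, continuing the hypothetical helpers above (DumpMMapProfile is likewise an illustrative name, not part of this CL):

// Illustrative only -- a possible caller-side lifecycle for the new API.
void DumpMMapProfile(HeapProfileTable* table) {
  // Fresh arena for this rebuild of mmap_address_map_.
  mmap_map_arena = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Rebuild the map; its internal nodes now come from mmap_map_arena.
  table->RefreshMMapData(MMapMapAlloc, MMapMapFree);

  // ... dump or inspect the mmap profile here ...

  // Drop the map (its blocks go back to the arena's free list), then return
  // the arena's pages to the system in one shot.
  table->ClearMMapData();
  LowLevelAlloc::DeleteArena(mmap_map_arena);
  mmap_map_arena = NULL;
}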
 
 void HeapProfileTable::IterateOrderedAllocContexts(
     AllocContextIterator callback) const {
   Bucket** list = MakeSortedBucketList();
   AllocContextInfo info;
   for (int i = 0; i < num_alloc_buckets_; ++i) {
     *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
     info.stack_depth = list[i]->depth;
     info.call_stack = list[i]->stack;
(...skipping 91 matching lines...)
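IterateOrderedAllocContexts() above walks the sorted bucket list and hands each allocation context to a plain function pointer (AllocContextIterator). A small hypothetical callback, to show what a consumer receives; LogAllocContext is an illustrative name, not code from this file:

// Illustrative only -- a callback for IterateOrderedAllocContexts().
// AllocContextInfo extends Stats, so each context arrives with its bucket
// totals and call stack, ordered by allocated space.
static void LogAllocContext(const HeapProfileTable::AllocContextInfo& info) {
  RAW_LOG(INFO, "%d live objects, %lld live bytes, stack depth %d",
          info.allocs - info.frees,
          static_cast<long long>(info.alloc_size - info.free_size),
          info.stack_depth);
}
// Usage: table->IterateOrderedAllocContexts(LogAllocContext);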
       arg->dest->Add(ptr, *v);
     }
   }
 }
 
 bool HeapProfileTable::WriteProfile(const char* file_name,
                                     const Bucket& total,
                                     AllocationMap* allocations) {
   RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
   RawFD fd = RawOpenForWriting(file_name);
-  if (fd != kIllegalRawFD) {
-    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
-    char buf[512];
-    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
-                            NULL);
-    RawWrite(fd, buf, len);
-    const DumpArgs args(fd, NULL);
-    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
-    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
-    DumpProcSelfMaps(fd);
-    RawClose(fd);
-    return true;
-  } else {
+  if (fd == kIllegalRawFD) {
     RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
     return false;
   }
+  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
+  char buf[512];
+  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
+                          NULL);
+  RawWrite(fd, buf, len);
+  const DumpArgs args(fd, NULL);
+  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
+  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
+  DumpProcSelfMaps(fd);
+  RawClose(fd);
+  return true;
 }
 
 void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
   if (!FLAGS_cleanup_old_heap_profiles)
     return;
   char buf[1000];
   snprintf(buf, 1000,"%s.%05d.", prefix, getpid());
   string pattern = string(buf) + ".*" + kFileExt;
 
 #if defined(HAVE_GLOB_H)
(...skipping 156 matching lines...)
                                               char* unused) {
   // Perhaps also log the allocation stack trace (unsymbolized)
   // on this line in case somebody finds it useful.
   RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr);
 }
 
 void HeapProfileTable::Snapshot::ReportIndividualObjects() {
   char unused;
   map_.Iterate(ReportObject, &unused);
 }