Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/deep-heap-profile.cc

Issue 9812010: Break down nonprofiled memory regions (f.k.a. 'unknown'), and add new policy files. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: ready for review Created 8 years, 8 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // ---
 // Author: Sainbayar Sukhbaatar
 //         Dai Mikurube
 //

 #include "deep-heap-profile.h"

 #ifdef DEEP_HEAP_PROFILE
+#include <algorithm>
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #ifdef HAVE_UNISTD_H
 #include <unistd.h>  // for getpagesize and getpid
 #endif  // HAVE_UNISTD_H

 #include "base/cycleclock.h"
 #include "base/sysinfo.h"

 static const int kProfilerBufferSize = 1 << 20;
 static const int kHashTableSize = 179999;  // The same as heap-profile-table.cc.

 static const int PAGEMAP_BYTES = 8;
 static const uint64 MAX_ADDRESS = kuint64max;

 // Header strings of the dumped heap profile.
 static const char kProfileHeader[] = "heap profile: ";
-static const char kProfileVersion[] = "DUMP_DEEP_3";
+static const char kProfileVersion[] = "DUMP_DEEP_4";
 static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
 static const char kMMapStacktraceHeader[] = "MMAP_STACKTRACES:\n";
 static const char kAllocStacktraceHeader[] = "MALLOC_STACKTRACES:\n";
 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";

 static const char kVirtualLabel[] = "virtual";
 static const char kCommittedLabel[] = "committed";

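[Note] For orientation, the header constants above imply a dump that begins roughly as follows. This is an illustrative sketch only: the section order is inferred from the constants and from FillOrderedProfile (largely elided in this diff), and "..." stands for the section bodies.

heap profile: DUMP_DEEP_4
GLOBAL_STATS:
...
MMAP_STACKTRACES:
...
MALLOC_STACKTRACES:
...

MAPPED_LIBRARIES:
...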
 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                  const char* prefix)
(...skipping 40 matching lines...)
     deep_bucket_map_->Iterate(ClearIsLogged, this);

     // Write maps into a .maps file using the global buffer.
     WriteMapsToFile(filename_prefix_, kProfilerBufferSize, profiler_buffer_);
   }

   // Reset committed sizes of buckets.
   ResetCommittedSize(heap_profile_->alloc_table_);
   ResetCommittedSize(heap_profile_->mmap_table_);

-  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
-  size_t anonymous_committed = stats_.anonymous.committed_bytes();
+  // Allocate a list for mmap'ed regions.
+  num_mmap_allocations_ = 0;
+  heap_profile_->mmap_address_map_->Iterate(CountMMap, this);
+  mmap_list_length_ = 0;
+  mmap_list_ = reinterpret_cast<MMapListEntry*>(heap_profile_->alloc_(
+      sizeof(MMapListEntry) * num_mmap_allocations_));
+
+  // Touch all the allocated pages.  Touching is required to avoid new page
+  // commitment while filling the list in SnapshotGlobalStatsWithoutMalloc.
+  for (int i = 0;
+       i < num_mmap_allocations_;
+       i += getpagesize() / 2 / sizeof(MMapListEntry))
+    mmap_list_[i].first_address = 0;
+  mmap_list_[num_mmap_allocations_ - 1].last_address = 0;
+
+  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_, NULL, 0);
+  size_t anonymous_committed = stats_.all[ANONYMOUS].committed_bytes();

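[Note] Writing one byte to a page is enough to make the kernel commit the whole page, and the half-page stride above guarantees that at least one write lands in every page of the array even if it is not page-aligned. A standalone sketch of the same trick, using a hypothetical helper that is not part of this patch:

// Hypothetical helper (not in this patch): commit a buffer's pages up front
// by writing one byte per page, so the kernel does not commit them later,
// in the middle of a measurement window.
#include <stddef.h>
#include <unistd.h>

static void TouchEveryPage(char* buffer, size_t size) {
  const size_t page_size = static_cast<size_t>(getpagesize());
  for (size_t offset = 0; offset < size; offset += page_size)
    buffer[offset] = 0;    // One write per page is enough to commit it.
  if (size > 0)
    buffer[size - 1] = 0;  // Also touch the tail, as the code above does.
}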
   // Note: Try to minimize the number of calls to malloc in the following
   // region, up until we call WriteBucketsToBucketFile(), near the end of this
   // function.  Calling malloc in the region may make a gap between the
   // observed size and actual memory allocation.  The gap is less than or equal
   // to the size of allocated memory in the region.  Calls to malloc won't
   // break anything, but can add some noise to the recorded information.
   // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
   // glibc's snprintf internally allocates memory by alloca normally, but it
   // allocates memory by malloc if large memory is required.

   // Record committed sizes.
   SnapshotAllAllocsWithoutMalloc();

   // Check if committed bytes changed during SnapshotAllAllocsWithoutMalloc.
-  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
+  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_,
+                                   mmap_list_, mmap_list_length_);
 #ifndef NDEBUG
   size_t committed_difference =
-      stats_.anonymous.committed_bytes() - anonymous_committed;
+      stats_.all[ANONYMOUS].committed_bytes() - anonymous_committed;
   if (committed_difference != 0) {
     RAW_LOG(0, "Difference in committed size: %ld", committed_difference);
   }
 #endif

   // Start filling buffer with the ordered profile.
   int printed = snprintf(buffer, buffer_size,
                          "%s%s\n", kProfileHeader, kProfileVersion);
   if (IsPrintedStringValid(printed, buffer_size, 0)) {
     return 0;
(...skipping 49 matching lines...)
   // Fill buffer with stack trace buckets of allocated regions.
   used_in_buffer = SnapshotBucketTableWithoutMalloc(heap_profile_->alloc_table_,
                                                     used_in_buffer,
                                                     buffer_size,
                                                     buffer);

   RAW_DCHECK(used_in_buffer < buffer_size, "");

   // Note: Memory snapshots are complete, and malloc may again be used freely.

+  heap_profile_->dealloc_(mmap_list_);
+  mmap_list_ = NULL;
+
   // Write the bucket listing into a .bucket file.
   WriteBucketsToBucketFile();

 #ifndef NDEBUG
   int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
   double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
   RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
 #endif

   return used_in_buffer;
(...skipping 77 matching lines...)
   int page_size = getpagesize();
   uint64 page_address = (first_address / page_size) * page_size;
   size_t committed_size = 0;

   SeekProcPagemap(pagemap_fd, first_address);

   // Check every page on which the allocation resides.
   while (page_address <= last_address) {
     // Read corresponding physical page.
     PageState state;
+    // TODO(dmikurube): Read pagemap in bulk for speed.
     if (ReadProcPagemap(pagemap_fd, &state) == false) {
       // We can't read the last region (e.g. vsyscall).
 #ifndef NDEBUG
       RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes",
               first_address, last_address - first_address + 1);
 #endif
       return 0;
     }

     if (state.is_committed) {
(...skipping 38 matching lines...)
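[Note] SeekProcPagemap and ReadProcPagemap are elided in this diff, but the pagemap mechanics are standard: /proc/<pid>/pagemap holds one little-endian 64-bit entry per virtual page, where bit 63 means present in RAM and bit 62 means swapped out, and the seek presumably goes to (first_address / page_size) * PAGEMAP_BYTES. A minimal sketch of such a read, assuming a simplified PageState with only the is_committed flag used above:

// Sketch only; the real PageState and helpers live in deep-heap-profile.h.
#include <stdint.h>
#include <unistd.h>

struct PageStateSketch {
  bool is_committed;  // Present in RAM or swapped: backed by real storage.
};

static bool ReadPagemapEntrySketch(int pagemap_fd, PageStateSketch* state) {
  uint64_t entry;
  if (read(pagemap_fd, &entry, PAGEMAP_BYTES) != PAGEMAP_BYTES)
    return false;  // Fails for unreadable regions such as [vsyscall].
  state->is_committed = ((entry >> 63) & 1) ||  // Bit 63: present in RAM.
                        ((entry >> 62) & 1);    // Bit 62: swapped out.
  return true;
}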
   RAW_DCHECK(wrote_all, "");
   RAW_DCHECK(map_length <= buffer_size, "");
   RawWrite(maps_fd, buffer, map_length);
   RawClose(maps_fd);
 }

 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
 // ProcMapsIterator uses snprintf internally in construction.
 // static
 void DeepHeapProfile::SnapshotGlobalStatsWithoutMalloc(int pagemap_fd,
-                                                       GlobalStats* stats) {
+                                                       GlobalStats* stats,
+                                                       MMapListEntry* mmap_list,
+                                                       int mmap_list_length) {
   ProcMapsIterator::Buffer iterator_buffer;
   ProcMapsIterator iterator(0, &iterator_buffer);
   uint64 first_address, last_address, offset;
   int64 unused_inode;
   char* flags;
   char* filename;
+  int mmap_list_index = 0;
+  enum MapsRegionType type;

-  stats->total.Initialize();
-  stats->file_mapped.Initialize();
-  stats->anonymous.Initialize();
-  stats->other.Initialize();
+  for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
+    stats->all[i].Initialize();
+    stats->nonprofiled[i].Initialize();
+  }

   while (iterator.Next(&first_address, &last_address,
                        &flags, &offset, &unused_inode, &filename)) {
     // 'last_address' should be the last inclusive address of the region.
     last_address -= 1;
     if (strcmp("[vsyscall]", filename) == 0) {
       continue;  // Reading pagemap will fail in [vsyscall].
     }

-    stats->total.Record(pagemap_fd, first_address, last_address);
-
-    if (filename[0] == '/') {
-      stats->file_mapped.Record(pagemap_fd, first_address, last_address);
-    } else if (filename[0] == '\0' || filename[0] == '\n') {
-      stats->anonymous.Record(pagemap_fd, first_address, last_address);
-    } else {
-      stats->other.Record(pagemap_fd, first_address, last_address);
+    type = ABSENT;
+    if (filename[0] == '/') {
+      if (flags[2] == 'x')
+        type = FILE_EXEC;
+      else
+        type = FILE_NONEXEC;
+    } else if (filename[0] == '\0' || filename[0] == '\n') {
+      type = ANONYMOUS;
+    } else if (strcmp(filename, "[stack]") == 0) {
+      type = STACK;
+    } else {
+      type = OTHER;
+    }
+    stats->all[type].Record(pagemap_fd, first_address, last_address);
+
+    // TODO(dmikurube): Avoid double-counting of pagemap.
+    // Counts nonprofiled memory regions in /proc/<pid>/maps.
+    if (mmap_list != NULL) {
+      // It assumes that every mmap'ed region is included in one maps line.
+      uint64 cursor = first_address;
+      bool first = true;
+
+      do {
+        if (!first) {
+          mmap_list[mmap_list_index].type = type;
+          cursor = mmap_list[mmap_list_index].last_address + 1;
+          ++mmap_list_index;
+        }
+        first = false;
+
+        uint64 last_address_of_nonprofiled;
+        // If the next mmap entry is away from the current maps line.
+        if (mmap_list_index >= mmap_list_length ||
+            mmap_list[mmap_list_index].first_address > last_address) {
+          last_address_of_nonprofiled = last_address;
+        } else {
+          last_address_of_nonprofiled =
+              mmap_list[mmap_list_index].first_address - 1;
+        }
+
+        if (last_address_of_nonprofiled + 1 > cursor) {
+          stats->nonprofiled[type].Record(
+              pagemap_fd, cursor, last_address_of_nonprofiled);
+          cursor = last_address_of_nonprofiled + 1;
+        }
+      } while (mmap_list_index < mmap_list_length &&
+               mmap_list[mmap_list_index].last_address <= last_address);
+    }
   }
 }
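[Note] The do/while loop above walks each maps line once, carving out the sub-ranges that no profiled mmap region covers; this relies on mmap_list being sorted by first_address (see SnapshotAllAllocsWithoutMalloc below). The same interval-subtraction idea in isolation, with hypothetical types and names that are not part of this patch:

// Standalone sketch: given a maps line [first, last] and a list of profiled
// regions sorted by first_address, report every uncovered gap.  Assumes, as
// the code above does, that each profiled region lies within one maps line.
#include <stdint.h>

struct Region { uint64_t first_address, last_address; };

template <typename Reporter>
void ReportNonProfiledGaps(uint64_t first, uint64_t last,
                           const Region* profiled, int count,
                           Reporter report) {
  uint64_t cursor = first;
  int i = 0;
  while (i < count && profiled[i].last_address < first)
    ++i;  // Skip regions that end before this maps line.
  for (; i < count && profiled[i].first_address <= last; ++i) {
    if (profiled[i].first_address > cursor)
      report(cursor, profiled[i].first_address - 1);  // Gap before region i.
    cursor = profiled[i].last_address + 1;
  }
  if (cursor <= last)
    report(cursor, last);  // Tail gap after the last covered region.
}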

 DeepHeapProfile::DeepBucket* DeepHeapProfile::GetDeepBucket(Bucket* bucket) {
   DeepBucket* found = deep_bucket_map_->FindMutable(bucket);
   if (found != NULL)
     return found;

   DeepBucket created;
(...skipping 29 matching lines...)
         continue;  // Skip empty buckets.
       }
       const DeepBucket* deep_bucket = GetDeepBucket(bucket);
       used_in_buffer = UnparseBucket(
           *deep_bucket, "", used_in_buffer, buffer_size, buffer, NULL);
     }
   }
   return used_in_buffer;
 }

+// static
+bool DeepHeapProfile::ByFirstAddress(const MMapListEntry& a,
+                                     const MMapListEntry& b) {
+  return a.first_address < b.first_address;
+}
+
+// static
+void DeepHeapProfile::CountMMap(const void* pointer,
+                                AllocValue* alloc_value,
+                                DeepHeapProfile* deep_profile) {
+  ++deep_profile->num_mmap_allocations_;
+}
+
 void DeepHeapProfile::RecordAlloc(const void* pointer,
                                   AllocValue* alloc_value,
                                   DeepHeapProfile* deep_profile) {
   uint64 address = reinterpret_cast<uintptr_t>(pointer);
   size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
       address, address + alloc_value->bytes - 1);

   DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
   deep_bucket->committed_size += committed;
-  deep_profile->stats_.record_malloc.AddToVirtualBytes(alloc_value->bytes);
-  deep_profile->stats_.record_malloc.AddToCommittedBytes(committed);
+  deep_profile->stats_.profiled_malloc.AddToVirtualBytes(alloc_value->bytes);
+  deep_profile->stats_.profiled_malloc.AddToCommittedBytes(committed);
 }

 void DeepHeapProfile::RecordMMap(const void* pointer,
                                  AllocValue* alloc_value,
                                  DeepHeapProfile* deep_profile) {
   uint64 address = reinterpret_cast<uintptr_t>(pointer);
   size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
       address, address + alloc_value->bytes - 1);

   DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
   deep_bucket->committed_size += committed;
-  deep_profile->stats_.record_mmap.AddToVirtualBytes(alloc_value->bytes);
-  deep_profile->stats_.record_mmap.AddToCommittedBytes(committed);
+  deep_profile->stats_.profiled_mmap.AddToVirtualBytes(alloc_value->bytes);
+  deep_profile->stats_.profiled_mmap.AddToCommittedBytes(committed);
+
+  if (deep_profile->mmap_list_length_ < deep_profile->num_mmap_allocations_) {
+    deep_profile->mmap_list_[deep_profile->mmap_list_length_].first_address =
+        address;
+    deep_profile->mmap_list_[deep_profile->mmap_list_length_].last_address =
+        address - 1 + alloc_value->bytes;
+    deep_profile->mmap_list_[deep_profile->mmap_list_length_].type = ABSENT;
+    ++deep_profile->mmap_list_length_;
+  } else {
+    RAW_LOG(0, "Unexpected number of mmap entries: %d/%d",
+            deep_profile->mmap_list_length_,
+            deep_profile->num_mmap_allocations_);
+  }
 }

 void DeepHeapProfile::SnapshotAllAllocsWithoutMalloc() {
-  stats_.record_mmap.Initialize();
-  stats_.record_malloc.Initialize();
+  stats_.profiled_mmap.Initialize();
+  stats_.profiled_malloc.Initialize();

   // malloc allocations.
   heap_profile_->alloc_address_map_->Iterate(RecordAlloc, this);

   // mmap allocations.
   heap_profile_->mmap_address_map_->Iterate(RecordMMap, this);
+  std::sort(mmap_list_, mmap_list_ + mmap_list_length_, ByFirstAddress);
 }

 int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket,
                                              int buffer_size,
                                              char buffer[]) {
   const Bucket* bucket = deep_bucket->bucket;
   int printed = snprintf(buffer, buffer_size, "%05d", deep_bucket->id);
   if (IsPrintedStringValid(printed, buffer_size, 0)) {
     return 0;
   }
(...skipping 92 matching lines...)

   return used_in_buffer;
 }

 int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats,
                                         const char* name,
                                         int used_in_buffer,
                                         int buffer_size,
                                         char* buffer) {
   int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
-                         "%15s %10ld %10ld\n",
+                         "%25s %12ld %12ld\n",
                          name, stats->virtual_bytes(),
                          stats->committed_bytes());
   if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
     return used_in_buffer;
   }
   used_in_buffer += printed;

   return used_in_buffer;
 }

 int DeepHeapProfile::UnparseGlobalStats(int used_in_buffer,
                                         int buffer_size,
                                         char* buffer) {
-  int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
-                         "%15s %10s %10s\n", "",
+  RegionStats all_total;
+  RegionStats nonprofiled_total;
+  for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
+    all_total.AddAnotherRegionStat(stats_.all[i]);
+    nonprofiled_total.AddAnotherRegionStat(stats_.nonprofiled[i]);
+  }
+  int printed = snprintf(
+      buffer + used_in_buffer, buffer_size - used_in_buffer,
+      "# total (%lu) %c= profiled-mmap (%lu) + nonprofiled-* (%lu)\n",
+      all_total.committed_bytes(),
+      all_total.committed_bytes() ==
+          stats_.profiled_mmap.committed_bytes() +
+          nonprofiled_total.committed_bytes() ? '=' : '!',
+      stats_.profiled_mmap.committed_bytes(),
+      nonprofiled_total.committed_bytes());
+  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
+    return used_in_buffer;
+  }
+  used_in_buffer += printed;
+
+  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
+                     "%25s %12s %12s\n", "",
                      kVirtualLabel, kCommittedLabel);
   if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
     return used_in_buffer;
   }
   used_in_buffer += printed;

-  used_in_buffer = UnparseRegionStats(&(stats_.total), "total",
-                                      used_in_buffer, buffer_size, buffer);
-  used_in_buffer = UnparseRegionStats(&(stats_.file_mapped), "file mapped",
-                                      used_in_buffer, buffer_size, buffer);
-  used_in_buffer = UnparseRegionStats(&(stats_.anonymous), "anonymous",
-                                      used_in_buffer, buffer_size, buffer);
-  used_in_buffer = UnparseRegionStats(&(stats_.other), "other",
-                                      used_in_buffer, buffer_size, buffer);
-  used_in_buffer = UnparseRegionStats(&(stats_.record_mmap), "mmap",
-                                      used_in_buffer, buffer_size, buffer);
-  used_in_buffer = UnparseRegionStats(&(stats_.record_malloc), "tcmalloc",
-                                      used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(all_total),
+      "total", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.all[FILE_EXEC]),
+      "file-exec", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.all[FILE_NONEXEC]),
+      "file-nonexec", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.all[ANONYMOUS]),
+      "anonymous", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.all[STACK]),
+      "stack", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.all[OTHER]),
+      "other", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(nonprofiled_total),
+      "nonprofiled-total", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[ABSENT]),
+      "nonprofiled-absent", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[ANONYMOUS]),
+      "nonprofiled-anonymous", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[FILE_EXEC]),
+      "nonprofiled-file-exec", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[FILE_NONEXEC]),
+      "nonprofiled-file-nonexec", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[STACK]),
+      "nonprofiled-stack", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.nonprofiled[OTHER]),
+      "nonprofiled-other", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.profiled_mmap),
+      "profiled-mmap", used_in_buffer, buffer_size, buffer);
+  used_in_buffer = UnparseRegionStats(&(stats_.profiled_malloc),
+      "profiled-malloc", used_in_buffer, buffer_size, buffer);
Dai Mikurube (NOT FULLTIME) 2012/04/12 12:20:24: This part could probably be simplified.
   return used_in_buffer;
 }
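[Note] Given the format strings above ("%25s %12ld %12ld" rows and the "# total ..." check line), the GLOBAL_STATS section comes out looking roughly like this. The byte counts are invented for illustration, the leading "GLOBAL_STATS:" presumably comes from the kGlobalStatsHeader constant printed by the caller, the '!' appears when the identity does not hold (e.g. because of the double-counting noted in the TODO above), and the remaining nonprofiled-* and profiled-* rows follow the same pattern:

GLOBAL_STATS:
# total (413724672) != profiled-mmap (297385984) + nonprofiled-* (116203520)
                               virtual    committed
                    total   1258291200    413724672
                file-exec    104857600     73400320
             file-nonexec    209715200     52428800
                anonymous    931135488    286261248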
 #else  // DEEP_HEAP_PROFILE

 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                  const char* prefix)
     : heap_profile_(heap_profile) {
 }

 DeepHeapProfile::~DeepHeapProfile() {
 }

 int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
   return heap_profile_->FillOrderedProfile(buffer, buffer_size);
 }

 #endif  // DEEP_HEAP_PROFILE