| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // --- | 5 // --- |
| 6 // Author: Sainbayar Sukhbaatar | 6 // Author: Sainbayar Sukhbaatar |
| 7 // Dai Mikurube | 7 // Dai Mikurube |
| 8 // | 8 // |
| 9 | 9 |
| 10 #include "deep-heap-profile.h" | 10 #include "deep-heap-profile.h" |
| 11 | 11 |
| 12 #ifdef DEEP_HEAP_PROFILE | 12 #ifdef DEEP_HEAP_PROFILE |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <fcntl.h> | 14 #include <fcntl.h> |
| 15 #include <sys/stat.h> | 15 #include <sys/stat.h> |
| 16 #include <sys/types.h> | 16 #include <sys/types.h> |
| 17 #ifdef HAVE_UNISTD_H | 17 #ifdef HAVE_UNISTD_H |
| 18 #include <unistd.h> // for getpagesize and getpid | 18 #include <unistd.h> // for getpagesize and getpid |
| 19 #endif // HAVE_UNISTD_H | 19 #endif // HAVE_UNISTD_H |
| 20 | 20 |
| 21 #include "base/cycleclock.h" | 21 #include "base/cycleclock.h" |
| 22 #include "base/sysinfo.h" | 22 #include "base/sysinfo.h" |
| 23 #include "internal_logging.h" // for ASSERT, etc | 23 #include "internal_logging.h" // for ASSERT, etc |
| 24 #include "memory_region_map.h" |
| 24 | 25 |
| 25 static const int kProfilerBufferSize = 1 << 20; | 26 static const int kProfilerBufferSize = 1 << 20; |
| 26 static const int kHashTableSize = 179999; // Same as heap-profile-table.cc. | 27 static const int kHashTableSize = 179999; // Same as heap-profile-table.cc. |
| 27 | 28 |
| 28 static const int PAGEMAP_BYTES = 8; | 29 static const int PAGEMAP_BYTES = 8; |
| 29 static const uint64 MAX_ADDRESS = kuint64max; | 30 static const uint64 MAX_ADDRESS = kuint64max; |
| 30 | 31 |
| 31 // Tag strings in heap profile dumps. | 32 // Tag strings in heap profile dumps. |
| 32 static const char kProfileHeader[] = "heap profile: "; | 33 static const char kProfileHeader[] = "heap profile: "; |
| 33 static const char kProfileVersion[] = "DUMP_DEEP_6"; | 34 static const char kProfileVersion[] = "DUMP_DEEP_6"; |
| (...skipping 69 matching lines...) |
| 103 uint64 page_address = (first_address / page_size) * page_size; | 104 uint64 page_address = (first_address / page_size) * page_size; |
| 104 size_t committed_size = 0; | 105 size_t committed_size = 0; |
| 105 | 106 |
| 106 Seek(first_address); | 107 Seek(first_address); |
| 107 | 108 |
| 108 // Check every page on which the allocation resides. | 109 // Check every page on which the allocation resides. |
| 109 while (page_address <= last_address) { | 110 while (page_address <= last_address) { |
| 110 // Read corresponding physical page. | 111 // Read corresponding physical page. |
| 111 State state; | 112 State state; |
| 112 // TODO(dmikurube): Read pagemap in bulk for speed. | 113 // TODO(dmikurube): Read pagemap in bulk for speed. |
| 114 // TODO(dmikurube): Consider using mincore(2). |
| 113 if (Read(&state) == false) { | 115 if (Read(&state) == false) { |
| 114 // We can't read the last region (e.g. vsyscall). | 116 // We can't read the last region (e.g. vsyscall). |
| 115 #ifndef NDEBUG | 117 #ifndef NDEBUG |
| 116 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", | 118 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", |
| 117 first_address, last_address - first_address + 1); | 119 first_address, last_address - first_address + 1); |
| 118 #endif | 120 #endif |
| 119 return 0; | 121 return 0; |
| 120 } | 122 } |
| 121 | 123 |
| 122 if (state.is_committed) { | 124 if (state.is_committed) { |
| (...skipping 119 matching lines...) |
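The two TODOs in the pagemap loop above point in the same direction. A bulk read could look roughly like the sketch below; it is an illustration only, not part of this patch, and the CommittedSizeBulk name and 512-entry batch size are made up here. Note that mincore(2) is not a drop-in replacement: its vector reports one byte per page of in-core residency only, so swapped-out pages, which pagemap counts via bit 62, would be missed.

// A sketch of the "read pagemap in bulk" TODO: one read(2) fetches up to
// 512 pagemap entries (8 bytes per page) instead of one read per page.
// Illustrative only; the function name and batch size are not from this patch.
#include <stdint.h>
#include <unistd.h>

static const int kPagemapEntryBytes = 8;  // One 64-bit entry per virtual page.

// Returns the committed byte count within [first_address, last_address]
// (inclusive), or 0 if the pagemap cannot be read (e.g. [vsyscall]).
size_t CommittedSizeBulk(int pagemap_fd,
                         uint64_t first_address,
                         uint64_t last_address) {
  const uint64_t page_size = static_cast<uint64_t>(getpagesize());
  uint64_t page = first_address / page_size;
  const uint64_t last_page = last_address / page_size;
  if (lseek(pagemap_fd, page * kPagemapEntryBytes, SEEK_SET) < 0)
    return 0;

  size_t committed = 0;
  uint64_t entries[512];
  while (page <= last_page) {
    uint64_t batch = last_page - page + 1;
    if (batch > 512)
      batch = 512;
    ssize_t got = read(pagemap_fd, entries, batch * kPagemapEntryBytes);
    if (got <= 0 || got % kPagemapEntryBytes != 0)
      return 0;  // Mirror the per-page failure path above.
    for (ssize_t i = 0; i < got / kPagemapEntryBytes; ++i, ++page) {
      // Bit 63: present in RAM.  Bit 62: swapped.  Both count as committed.
      if ((entries[i] & (3ULL << 62)) == 0)
        continue;
      uint64_t page_first = page * page_size;
      uint64_t page_last = page_first + page_size - 1;  // Inclusive.
      // Prorate pages that straddle the allocation's boundaries.
      if (page_first < first_address) page_first = first_address;
      if (page_last > last_address) page_last = last_address;
      committed += page_last - page_first + 1;
    }
  }
  return committed;
}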
| 242 memory_residence_info_getter_->Initialize(); | 244 memory_residence_info_getter_->Initialize(); |
| 243 deep_table_.ResetIsLogged(); | 245 deep_table_.ResetIsLogged(); |
| 244 | 246 |
| 245 // Write maps into "|filename_prefix_|.<pid>.maps". | 247 // Write maps into "|filename_prefix_|.<pid>.maps". |
| 246 WriteProcMaps(filename_prefix_, kProfilerBufferSize, profiler_buffer_); | 248 WriteProcMaps(filename_prefix_, kProfilerBufferSize, profiler_buffer_); |
| 247 } | 249 } |
| 248 | 250 |
| 249 // Reset committed sizes of buckets. | 251 // Reset committed sizes of buckets. |
| 250 deep_table_.ResetCommittedSize(); | 252 deep_table_.ResetCommittedSize(); |
| 251 | 253 |
| 252 // Allocate a list for mmap'ed regions. | 254 // Record committed sizes. |
| 253 num_mmap_allocations_ = 0; | 255 stats_.SnapshotAllocations(this); |
| 254 if (heap_profile_->mmap_address_map_) { | |
| 255 heap_profile_->mmap_address_map_->Iterate(CountMMap, this); | |
| 256 | |
| 257 mmap_list_length_ = 0; | |
| 258 mmap_list_ = reinterpret_cast<MMapListEntry*>(heap_profile_->alloc_( | |
| 259 sizeof(MMapListEntry) * num_mmap_allocations_)); | |
| 260 | |
| 261 // Touch all the allocated pages. Touching is required to avoid new page | |
| 262 // commitment while filling the list in SnapshotProcMaps. | |
| 263 for (int i = 0; | |
| 264 i < num_mmap_allocations_; | |
| 265 i += getpagesize() / 2 / sizeof(MMapListEntry)) | |
| 266 mmap_list_[i].first_address = 0; | |
| 267 mmap_list_[num_mmap_allocations_ - 1].last_address = 0; | |
| 268 } | |
| 269 | |
| 270 stats_.SnapshotProcMaps(memory_residence_info_getter_, NULL, 0, NULL); | |
| 271 | 256 |
| 272 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf. | 257 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf. |
| 273 // glibc's snprintf normally allocates its working memory with alloca, but | 258 // glibc's snprintf normally allocates its working memory with alloca, but |
| 274 // it falls back to malloc when a large amount of memory is required. | 259 // it falls back to malloc when a large amount of memory is required. |
| 275 | 260 |
| 276 // Record committed sizes. | |
| 277 stats_.SnapshotAllocations(this); | |
| 278 | |
| 279 buffer.AppendString(kProfileHeader, 0); | 261 buffer.AppendString(kProfileHeader, 0); |
| 280 buffer.AppendString(kProfileVersion, 0); | 262 buffer.AppendString(kProfileVersion, 0); |
| 281 buffer.AppendString("\n", 0); | 263 buffer.AppendString("\n", 0); |
| 282 | 264 |
| 283 // Fill buffer with the mmap list. | 265 // Fill buffer with the mmap list. |
| 284 buffer.AppendString(kMMapListHeader, 0); | 266 buffer.AppendString(kMMapListHeader, 0); |
| 285 | 267 |
| 286 // Check if committed bytes changed during SnapshotAllocations. | 268 stats_.SnapshotMaps(memory_residence_info_getter_, this, &buffer); |
| 287 stats_.SnapshotProcMaps(memory_residence_info_getter_, | |
| 288 mmap_list_, | |
| 289 mmap_list_length_, | |
| 290 &buffer); | |
| 291 | 269 |
| 292 // Fill buffer with the global stats. | 270 // Fill buffer with the global stats. |
| 293 buffer.AppendString(kGlobalStatsHeader, 0); | 271 buffer.AppendString(kGlobalStatsHeader, 0); |
| 294 | 272 |
| 295 stats_.Unparse(&buffer); | 273 stats_.Unparse(&buffer); |
| 296 | 274 |
| 297 buffer.AppendString(kStacktraceHeader, 0); | 275 buffer.AppendString(kStacktraceHeader, 0); |
| 298 buffer.AppendString(kVirtualLabel, 10); | 276 buffer.AppendString(kVirtualLabel, 10); |
| 299 buffer.AppendChar(' '); | 277 buffer.AppendChar(' '); |
| 300 buffer.AppendString(kCommittedLabel, 10); | 278 buffer.AppendString(kCommittedLabel, 10); |
| 301 buffer.AppendString("\n", 0); | 279 buffer.AppendString("\n", 0); |
| 302 | 280 |
| 303 // Fill buffer with the per-bucket stats. | 281 // Fill buffer with the per-bucket stats. |
| 304 deep_table_.UnparseForStats(&buffer); | 282 deep_table_.UnparseForStats(&buffer); |
| 305 | 283 |
| 306 RAW_DCHECK(buffer.FilledBytes() < buffer_size, ""); | 284 RAW_DCHECK(buffer.FilledBytes() < buffer_size, ""); |
| 307 | 285 |
| 308 heap_profile_->dealloc_(mmap_list_); | |
| 309 mmap_list_ = NULL; | |
| 310 | |
| 311 // Write the bucket listing into a .bucket file. | 286 // Write the bucket listing into a .bucket file. |
| 312 deep_table_.WriteForBucketFile(filename_prefix_, dump_count_, &global_buffer); | 287 deep_table_.WriteForBucketFile(filename_prefix_, dump_count_, &global_buffer); |
| 313 | 288 |
| 314 #ifndef NDEBUG | 289 #ifndef NDEBUG |
| 315 int64 elapsed_cycles = CycleClock::Now() - starting_cycles; | 290 int64 elapsed_cycles = CycleClock::Now() - starting_cycles; |
| 316 double elapsed_seconds = elapsed_cycles / CyclesPerSecond(); | 291 double elapsed_seconds = elapsed_cycles / CyclesPerSecond(); |
| 317 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds); | 292 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds); |
| 318 #endif | 293 #endif |
| 319 | 294 |
| 320 return buffer.FilledBytes(); | 295 return buffer.FilledBytes(); |
| (...skipping 300 matching lines...) |
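On the snprintf TODO above: one way to guarantee that a dump never allocates is to format integers by hand into the caller's buffer. A minimal sketch of that idea follows; FormatLong is a hypothetical helper, not this patch's TextBuffer::AppendLong, whose actual implementation is outside this hunk.

// Hand-rolled decimal formatting avoids snprintf's possible malloc fallback.
// Hypothetical helper for illustration; not code from this patch.
#include <stddef.h>
#include <stdint.h>

// Writes |value| in decimal into |buf| (capacity |size|), right-aligned with
// spaces to at least |width| characters.  Returns the number of bytes
// written, or 0 if the result would not fit.
size_t FormatLong(char* buf, size_t size, int64_t value, int width) {
  char digits[21];  // Up to 20 digits for a 64-bit value, plus a sign.
  int n = 0;
  uint64_t v = value < 0 ? ~static_cast<uint64_t>(value) + 1  // |value|; safe
                         : static_cast<uint64_t>(value);      // for INT64_MIN.
  do {
    digits[n++] = static_cast<char>('0' + v % 10);  // Least significant first.
    v /= 10;
  } while (v > 0);
  if (value < 0)
    digits[n++] = '-';
  int total = n > width ? n : width;
  if (static_cast<size_t>(total) > size)
    return 0;
  for (int i = 0; i < total - n; ++i)
    buf[i] = ' ';  // Left padding.
  for (int i = 0; i < n; ++i)
    buf[total - 1 - i] = digits[i];  // Reverse the digits into place.
  return static_cast<size_t>(total);
}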
| 621 TextBuffer* buffer) { | 596 TextBuffer* buffer) { |
| 622 buffer->AppendString(name, 25); | 597 buffer->AppendString(name, 25); |
| 623 buffer->AppendChar(' '); | 598 buffer->AppendChar(' '); |
| 624 buffer->AppendLong(virtual_bytes_, 12); | 599 buffer->AppendLong(virtual_bytes_, 12); |
| 625 buffer->AppendChar(' '); | 600 buffer->AppendChar(' '); |
| 626 buffer->AppendLong(committed_bytes_, 12); | 601 buffer->AppendLong(committed_bytes_, 12); |
| 627 buffer->AppendString("\n", 0); | 602 buffer->AppendString("\n", 0); |
| 628 } | 603 } |
| 629 | 604 |
| 630 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf. | 605 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf. |
| 631 void DeepHeapProfile::GlobalStats::SnapshotProcMaps( | 606 void DeepHeapProfile::GlobalStats::SnapshotMaps( |
| 632 const MemoryResidenceInfoGetterInterface* memory_residence_info_getter, | 607 const MemoryResidenceInfoGetterInterface* memory_residence_info_getter, |
| 633 MMapListEntry* mmap_list, | 608 DeepHeapProfile* deep_profile, |
| 634 int mmap_list_length, | |
| 635 TextBuffer* mmap_dump_buffer) { | 609 TextBuffer* mmap_dump_buffer) { |
| 636 ProcMapsIterator::Buffer iterator_buffer; | 610 MemoryRegionMap::LockHolder lock_holder; |
| 637 ProcMapsIterator iterator(0, &iterator_buffer); | 611 ProcMapsIterator::Buffer procmaps_iter_buffer; |
| 612 ProcMapsIterator procmaps_iter(0, &procmaps_iter_buffer); |
| 638 uint64 first_address, last_address, offset; | 613 uint64 first_address, last_address, offset; |
| 639 int64 inode; | 614 int64 inode; |
| 640 char* flags; | 615 char* flags; |
| 641 char* filename; | 616 char* filename; |
| 642 int mmap_list_index = 0; | |
| 643 enum MapsRegionType type; | 617 enum MapsRegionType type; |
| 644 | |
| 645 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) { | 618 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) { |
| 646 all_[i].Initialize(); | 619 all_[i].Initialize(); |
| 647 unhooked_[i].Initialize(); | 620 unhooked_[i].Initialize(); |
| 648 } | 621 } |
| 622 profiled_mmap_.Initialize(); |
| 649 | 623 |
| 650 while (iterator.Next(&first_address, &last_address, | 624 MemoryRegionMap::RegionIterator mmap_iter = |
| 651 &flags, &offset, &inode, &filename)) { | 625 MemoryRegionMap::BeginRegionLocked(); |
| 626 |
| 627 while (procmaps_iter.Next(&first_address, &last_address, |
| 628 &flags, &offset, &inode, &filename)) { |
| 652 if (mmap_dump_buffer) { | 629 if (mmap_dump_buffer) { |
| 653 char buffer[1024]; | 630 char buffer[1024]; |
| 654 int written = iterator.FormatLine(buffer, sizeof(buffer), | 631 int written = procmaps_iter.FormatLine(buffer, sizeof(buffer), |
| 655 first_address, last_address, flags, | 632 first_address, last_address, flags, |
| 656 offset, inode, filename, 0); | 633 offset, inode, filename, 0); |
| 657 mmap_dump_buffer->AppendString(buffer, 0); | 634 mmap_dump_buffer->AppendString(buffer, 0); |
| 658 } | 635 } |
| 659 | 636 |
| 660 // 'last_address' should be the last inclusive address of the region. | 637 // 'last_address' should be the last inclusive address of the region. |
| 661 last_address -= 1; | 638 last_address -= 1; |
| 662 if (strcmp("[vsyscall]", filename) == 0) { | 639 if (strcmp("[vsyscall]", filename) == 0) { |
| 663 continue; // Reading pagemap will fail in [vsyscall]. | 640 continue; // Reading pagemap will fail in [vsyscall]. |
| 664 } | 641 } |
| 665 | 642 |
| 666 type = ABSENT; | 643 type = ABSENT; |
| 667 if (filename[0] == '/') { | 644 if (filename[0] == '/') { |
| 668 if (flags[2] == 'x') | 645 if (flags[2] == 'x') |
| 669 type = FILE_EXEC; | 646 type = FILE_EXEC; |
| 670 else | 647 else |
| 671 type = FILE_NONEXEC; | 648 type = FILE_NONEXEC; |
| 672 } else if (filename[0] == '\0' || filename[0] == '\n') { | 649 } else if (filename[0] == '\0' || filename[0] == '\n') { |
| 673 type = ANONYMOUS; | 650 type = ANONYMOUS; |
| 674 } else if (strcmp(filename, "[stack]") == 0) { | 651 } else if (strcmp(filename, "[stack]") == 0) { |
| 675 type = STACK; | 652 type = STACK; |
| 676 } else { | 653 } else { |
| 677 type = OTHER; | 654 type = OTHER; |
| 678 } | 655 } |
| 679 all_[type].Record( | 656 all_[type].Record( |
| 680 memory_residence_info_getter, first_address, last_address); | 657 memory_residence_info_getter, first_address, last_address); |
| 681 | 658 |
| 682 // TODO(dmikurube): Stop double-counting pagemap. | 659 // TODO(dmikurube): Stop double-counting pagemap. |
| 683 // Counts unhooked memory regions in /proc/<pid>/maps. | 660 // Counts unhooked memory regions in /proc/<pid>/maps. |
| 684 if (mmap_list != NULL) { | 661 if (MemoryRegionMap::IsRecordingLocked()) { |
| 685 // This assumes that every mmap'ed region is contained in one maps line. | 662 // This assumes that every mmap'ed region is contained in one maps line. |
| 686 uint64 cursor = first_address; | 663 uint64 cursor = first_address; |
| 687 bool first = true; | 664 bool first = true; |
| 688 | 665 |
| 689 do { | 666 do { |
| 667 Bucket* bucket = NULL; |
| 668 DeepBucket* deep_bucket = NULL; |
| 690 if (!first) { | 669 if (!first) { |
| 691 mmap_list[mmap_list_index].type = type; | 670 size_t committed = deep_profile->memory_residence_info_getter_-> |
| 692 cursor = mmap_list[mmap_list_index].last_address + 1; | 671 CommittedSize(mmap_iter->start_addr, mmap_iter->end_addr - 1); |
| 693 ++mmap_list_index; | 672 // TODO(dmikurube): Store a reference to the bucket in region. |
| 673 bucket = MemoryRegionMap::GetBucket( |
| 674 mmap_iter->call_stack_depth, mmap_iter->call_stack); |
| 676 if (bucket != NULL) { |
| 677 deep_bucket = deep_profile->deep_table_.Lookup( |
| 678 bucket, |
| 679 #if defined(TYPE_PROFILING) |
| 680 NULL, // No type information for mmap'ed memory regions. |
| 681 #endif |
| 682 /* is_mmap */ true); |
| 683 } |
| 684 |
| 685 if (deep_bucket != NULL) |
| 686 deep_bucket->committed_size += committed; |
| 687 profiled_mmap_.AddToVirtualBytes( |
| 688 mmap_iter->end_addr - mmap_iter->start_addr); |
| 689 profiled_mmap_.AddToCommittedBytes(committed); |
| 690 |
| 691 cursor = mmap_iter->end_addr; |
| 692 ++mmap_iter; |
| 693 // Don't break here even if mmap_iter == EndRegionLocked(). |
| 694 } | 694 } |
| 695 first = false; | 695 first = false; |
| 696 | 696 |
| 697 uint64 last_address_of_unhooked; | 697 uint64 last_address_of_unhooked; |
| 698 // If the next mmap entry lies beyond the current maps line. | 698 // If the next mmap entry lies beyond the current maps line. |
| 699 if (mmap_list_index >= mmap_list_length || | 699 if (mmap_iter == MemoryRegionMap::EndRegionLocked() || |
| 700 mmap_list[mmap_list_index].first_address > last_address) { | 700 mmap_iter->start_addr > last_address) { |
| 701 last_address_of_unhooked = last_address; | 701 last_address_of_unhooked = last_address; |
| 702 } else { | 702 } else { |
| 703 last_address_of_unhooked = | 703 last_address_of_unhooked = mmap_iter->start_addr - 1; |
| 704 mmap_list[mmap_list_index].first_address - 1; | |
| 705 } | 704 } |
| 706 | 705 |
| 707 if (last_address_of_unhooked + 1 > cursor) { | 706 if (last_address_of_unhooked + 1 > cursor) { |
| 708 uint64 committed_size = unhooked_[type].Record( | 707 uint64 committed_size = unhooked_[type].Record( |
| 709 memory_residence_info_getter, | 708 memory_residence_info_getter, |
| 710 cursor, | 709 cursor, |
| 711 last_address_of_unhooked); | 710 last_address_of_unhooked); |
| 712 if (mmap_dump_buffer) { | 711 if (mmap_dump_buffer) { |
| 713 mmap_dump_buffer->AppendString(" ", 0); | 712 mmap_dump_buffer->AppendString(" ", 0); |
| 714 mmap_dump_buffer->AppendPtr(cursor, 0); | 713 mmap_dump_buffer->AppendPtr(cursor, 0); |
| 715 mmap_dump_buffer->AppendString(" - ", 0); | 714 mmap_dump_buffer->AppendString(" - ", 0); |
| 716 mmap_dump_buffer->AppendPtr(last_address_of_unhooked + 1, 0); | 715 mmap_dump_buffer->AppendPtr(last_address_of_unhooked + 1, 0); |
| 717 mmap_dump_buffer->AppendString(" unhooked ", 0); | 716 mmap_dump_buffer->AppendString(" unhooked ", 0); |
| 718 mmap_dump_buffer->AppendString(kMapsRegionTypeDict[type], 0); | 717 mmap_dump_buffer->AppendString(kMapsRegionTypeDict[type], 0); |
| 719 mmap_dump_buffer->AppendString(" ", 0); | 718 mmap_dump_buffer->AppendString(" ", 0); |
| 720 mmap_dump_buffer->AppendInt64(committed_size, 0); | 719 mmap_dump_buffer->AppendInt64(committed_size, 0); |
| 721 mmap_dump_buffer->AppendString("\n", 0); | 720 mmap_dump_buffer->AppendString("\n", 0); |
| 722 } | 721 } |
| 723 cursor = last_address_of_unhooked + 1; | 722 cursor = last_address_of_unhooked + 1; |
| 724 } | 723 } |
| 725 | 724 |
| 726 if (mmap_list_index < mmap_list_length && | 725 if (mmap_iter != MemoryRegionMap::EndRegionLocked() && |
| 727 mmap_list[mmap_list_index].first_address <= last_address && | 726 mmap_iter->start_addr <= last_address && |
| 728 mmap_dump_buffer) { | 727 mmap_dump_buffer) { |
| 729 bool trailing = | 728 bool trailing = mmap_iter->start_addr < first_address; |
| 730 mmap_list[mmap_list_index].first_address < first_address; | 729 bool continued = mmap_iter->end_addr - 1 > last_address; |
| 731 bool continued = | |
| 732 mmap_list[mmap_list_index].last_address > last_address; | |
| 733 mmap_dump_buffer->AppendString(trailing ? " (" : " ", 0); | 730 mmap_dump_buffer->AppendString(trailing ? " (" : " ", 0); |
| 734 mmap_dump_buffer->AppendPtr( | 731 mmap_dump_buffer->AppendPtr(mmap_iter->start_addr, 0); |
| 735 mmap_list[mmap_list_index].first_address, 0); | |
| 736 mmap_dump_buffer->AppendString(trailing ? ")" : " ", 0); | 732 mmap_dump_buffer->AppendString(trailing ? ")" : " ", 0); |
| 737 mmap_dump_buffer->AppendString("-", 0); | 733 mmap_dump_buffer->AppendString("-", 0); |
| 738 mmap_dump_buffer->AppendString(continued ? "(" : " ", 0); | 734 mmap_dump_buffer->AppendString(continued ? "(" : " ", 0); |
| 739 mmap_dump_buffer->AppendPtr( | 735 mmap_dump_buffer->AppendPtr(mmap_iter->end_addr, 0); |
| 740 mmap_list[mmap_list_index].last_address + 1, 0); | |
| 741 mmap_dump_buffer->AppendString(continued ? ")" : " ", 0); | 736 mmap_dump_buffer->AppendString(continued ? ")" : " ", 0); |
| 742 mmap_dump_buffer->AppendString(" hooked ", 0); | 737 mmap_dump_buffer->AppendString(" hooked ", 0); |
| 743 mmap_dump_buffer->AppendString(kMapsRegionTypeDict[type], 0); | 738 mmap_dump_buffer->AppendString(kMapsRegionTypeDict[type], 0); |
| 744 mmap_dump_buffer->AppendString(" @ ", 0); | 739 mmap_dump_buffer->AppendString(" @ ", 0); |
| 745 mmap_dump_buffer->AppendInt( | 740 if (deep_bucket != NULL) { |
| 746 mmap_list[mmap_list_index].deep_bucket->id, 0); | 741 mmap_dump_buffer->AppendInt(deep_bucket->id, 0); |
| 742 } else { |
| 743 mmap_dump_buffer->AppendInt(0, 0); |
| 744 } |
| 747 mmap_dump_buffer->AppendString("\n", 0); | 745 mmap_dump_buffer->AppendString("\n", 0); |
| 748 } | 746 } |
| 749 } while (mmap_list_index < mmap_list_length && | 747 } while (mmap_iter != MemoryRegionMap::EndRegionLocked() && |
| 750 mmap_list[mmap_list_index].last_address <= last_address); | 748 mmap_iter->end_addr - 1 <= last_address); |
| 751 } | 749 } |
| 752 } | 750 } |
| 753 } | 751 } |
| 754 | 752 |
| 755 void DeepHeapProfile::GlobalStats::SnapshotAllocations( | 753 void DeepHeapProfile::GlobalStats::SnapshotAllocations( |
| 756 DeepHeapProfile* deep_profile) { | 754 DeepHeapProfile* deep_profile) { |
| 757 profiled_mmap_.Initialize(); | |
| 758 profiled_malloc_.Initialize(); | 755 profiled_malloc_.Initialize(); |
| 759 | 756 |
| 760 // malloc allocations. | 757 deep_profile->heap_profile_->address_map_->Iterate(RecordAlloc, deep_profile); |
| 761 deep_profile->heap_profile_->alloc_address_map_->Iterate(RecordAlloc, | |
| 762 deep_profile); | |
| 763 | |
| 764 // mmap allocations. | |
| 765 if (deep_profile->heap_profile_->mmap_address_map_) { | |
| 766 deep_profile->heap_profile_->mmap_address_map_->Iterate(RecordMMap, | |
| 767 deep_profile); | |
| 768 std::sort(deep_profile->mmap_list_, | |
| 769 deep_profile->mmap_list_ + deep_profile->mmap_list_length_, | |
| 770 ByFirstAddress); | |
| 771 } | |
| 772 } | 758 } |
| 773 | 759 |
| 774 void DeepHeapProfile::GlobalStats::Unparse(TextBuffer* buffer) { | 760 void DeepHeapProfile::GlobalStats::Unparse(TextBuffer* buffer) { |
| 775 RegionStats all_total; | 761 RegionStats all_total; |
| 776 RegionStats unhooked_total; | 762 RegionStats unhooked_total; |
| 777 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) { | 763 for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) { |
| 778 all_total.AddAnotherRegionStat(all_[i]); | 764 all_total.AddAnotherRegionStat(all_[i]); |
| 779 unhooked_total.AddAnotherRegionStat(unhooked_[i]); | 765 unhooked_total.AddAnotherRegionStat(unhooked_[i]); |
| 780 } | 766 } |
| 781 | 767 |
| (...skipping 28 matching lines...) |
| 810 unhooked_[ANONYMOUS].Unparse("nonprofiled-anonymous", buffer); | 796 unhooked_[ANONYMOUS].Unparse("nonprofiled-anonymous", buffer); |
| 811 unhooked_[FILE_EXEC].Unparse("nonprofiled-file-exec", buffer); | 797 unhooked_[FILE_EXEC].Unparse("nonprofiled-file-exec", buffer); |
| 812 unhooked_[FILE_NONEXEC].Unparse("nonprofiled-file-nonexec", buffer); | 798 unhooked_[FILE_NONEXEC].Unparse("nonprofiled-file-nonexec", buffer); |
| 813 unhooked_[STACK].Unparse("nonprofiled-stack", buffer); | 799 unhooked_[STACK].Unparse("nonprofiled-stack", buffer); |
| 814 unhooked_[OTHER].Unparse("nonprofiled-other", buffer); | 800 unhooked_[OTHER].Unparse("nonprofiled-other", buffer); |
| 815 profiled_mmap_.Unparse("profiled-mmap", buffer); | 801 profiled_mmap_.Unparse("profiled-mmap", buffer); |
| 816 profiled_malloc_.Unparse("profiled-malloc", buffer); | 802 profiled_malloc_.Unparse("profiled-malloc", buffer); |
| 817 } | 803 } |
| 818 | 804 |
| 819 // static | 805 // static |
| 820 bool DeepHeapProfile::GlobalStats::ByFirstAddress(const MMapListEntry& a, | |
| 821 const MMapListEntry& b) { | |
| 822 return a.first_address < b.first_address; | |
| 823 } | |
| 824 | |
| 825 // static | |
| 826 void DeepHeapProfile::GlobalStats::RecordAlloc(const void* pointer, | 806 void DeepHeapProfile::GlobalStats::RecordAlloc(const void* pointer, |
| 827 AllocValue* alloc_value, | 807 AllocValue* alloc_value, |
| 828 DeepHeapProfile* deep_profile) { | 808 DeepHeapProfile* deep_profile) { |
| 829 uint64 address = reinterpret_cast<uintptr_t>(pointer); | 809 uint64 address = reinterpret_cast<uintptr_t>(pointer); |
| 830 size_t committed = deep_profile->memory_residence_info_getter_->CommittedSize( | 810 size_t committed = deep_profile->memory_residence_info_getter_->CommittedSize( |
| 831 address, address + alloc_value->bytes - 1); | 811 address, address + alloc_value->bytes - 1); |
| 832 | 812 |
| 833 DeepBucket* deep_bucket = deep_profile->deep_table_.Lookup( | 813 DeepBucket* deep_bucket = deep_profile->deep_table_.Lookup( |
| 834 alloc_value->bucket(), | 814 alloc_value->bucket(), |
| 835 #if defined(TYPE_PROFILING) | 815 #if defined(TYPE_PROFILING) |
| 836 LookupType(pointer), | 816 LookupType(pointer), |
| 837 #endif | 817 #endif |
| 838 /* is_mmap */ false); | 818 /* is_mmap */ false); |
| 839 deep_bucket->committed_size += committed; | 819 deep_bucket->committed_size += committed; |
| 840 deep_profile->stats_.profiled_malloc_.AddToVirtualBytes(alloc_value->bytes); | 820 deep_profile->stats_.profiled_malloc_.AddToVirtualBytes(alloc_value->bytes); |
| 841 deep_profile->stats_.profiled_malloc_.AddToCommittedBytes(committed); | 821 deep_profile->stats_.profiled_malloc_.AddToCommittedBytes(committed); |
| 842 } | 822 } |
| 843 | 823 |
| 844 // static | 824 // static |
| 845 void DeepHeapProfile::GlobalStats::RecordMMap(const void* pointer, | |
| 846 AllocValue* alloc_value, | |
| 847 DeepHeapProfile* deep_profile) { | |
| 848 uint64 address = reinterpret_cast<uintptr_t>(pointer); | |
| 849 size_t committed = deep_profile->memory_residence_info_getter_->CommittedSize( | |
| 850 address, address + alloc_value->bytes - 1); | |
| 851 | |
| 852 DeepBucket* deep_bucket = deep_profile->deep_table_.Lookup( | |
| 853 alloc_value->bucket(), | |
| 854 #if defined(TYPE_PROFILING) | |
| 855 NULL, | |
| 856 #endif | |
| 857 /* is_mmap */ true); | |
| 858 deep_bucket->committed_size += committed; | |
| 859 deep_profile->stats_.profiled_mmap_.AddToVirtualBytes(alloc_value->bytes); | |
| 860 deep_profile->stats_.profiled_mmap_.AddToCommittedBytes(committed); | |
| 861 | |
| 862 if (deep_profile->mmap_list_length_ < deep_profile->num_mmap_allocations_) { | |
| 863 deep_profile->mmap_list_[deep_profile->mmap_list_length_].first_address = | |
| 864 address; | |
| 865 deep_profile->mmap_list_[deep_profile->mmap_list_length_].last_address = | |
| 866 address - 1 + alloc_value->bytes; | |
| 867 deep_profile->mmap_list_[deep_profile->mmap_list_length_].type = ABSENT; | |
| 868 deep_profile->mmap_list_[deep_profile->mmap_list_length_].deep_bucket = | |
| 869 deep_bucket; | |
| 870 ++deep_profile->mmap_list_length_; | |
| 871 } else { | |
| 872 RAW_LOG(0, "Unexpected number of mmap entries: %d/%d", | |
| 873 deep_profile->mmap_list_length_, | |
| 874 deep_profile->num_mmap_allocations_); | |
| 875 } | |
| 876 } | |
| 877 | |
| 878 // static | |
| 879 void DeepHeapProfile::WriteProcMaps(const char* prefix, | 825 void DeepHeapProfile::WriteProcMaps(const char* prefix, |
| 880 int buffer_size, | 826 int buffer_size, |
| 881 char raw_buffer[]) { | 827 char raw_buffer[]) { |
| 882 char filename[100]; | 828 char filename[100]; |
| 883 snprintf(filename, sizeof(filename), | 829 snprintf(filename, sizeof(filename), |
| 884 "%s.%05d.maps", prefix, static_cast<int>(getpid())); | 830 "%s.%05d.maps", prefix, static_cast<int>(getpid())); |
| 885 | 831 |
| 886 RawFD fd = RawOpenForWriting(filename); | 832 RawFD fd = RawOpenForWriting(filename); |
| 887 RAW_DCHECK(fd != kIllegalRawFD, ""); | 833 RAW_DCHECK(fd != kIllegalRawFD, ""); |
| 888 | 834 |
| 889 int length; | 835 int length; |
| 890 bool wrote_all; | 836 bool wrote_all; |
| 891 length = tcmalloc::FillProcSelfMaps(raw_buffer, buffer_size, &wrote_all); | 837 length = tcmalloc::FillProcSelfMaps(raw_buffer, buffer_size, &wrote_all); |
| 892 RAW_DCHECK(wrote_all, ""); | 838 RAW_DCHECK(wrote_all, ""); |
| 893 RAW_DCHECK(length <= buffer_size, ""); | 839 RAW_DCHECK(length <= buffer_size, ""); |
| 894 RawWrite(fd, raw_buffer, length); | 840 RawWrite(fd, raw_buffer, length); |
| 895 RawClose(fd); | 841 RawClose(fd); |
| 896 } | 842 } |
| 897 | |
| 898 // static | |
| 899 void DeepHeapProfile::CountMMap(const void* pointer, | |
| 900 AllocValue* alloc_value, | |
| 901 DeepHeapProfile* deep_profile) { | |
| 902 ++deep_profile->num_mmap_allocations_; | |
| 903 } | |
| 904 #else // DEEP_HEAP_PROFILE | 843 #else // DEEP_HEAP_PROFILE |
| 905 | 844 |
| 906 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile, | 845 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile, |
| 907 const char* prefix) | 846 const char* prefix) |
| 908 : heap_profile_(heap_profile) { | 847 : heap_profile_(heap_profile) { |
| 909 } | 848 } |
| 910 | 849 |
| 911 DeepHeapProfile::~DeepHeapProfile() { | 850 DeepHeapProfile::~DeepHeapProfile() { |
| 912 } | 851 } |
| 913 | 852 |
| 914 int DeepHeapProfile::FillOrderedProfile(char raw_buffer[], int buffer_size) { | 853 int DeepHeapProfile::FillOrderedProfile(char raw_buffer[], int buffer_size) { |
| 915 return heap_profile_->FillOrderedProfile(raw_buffer, buffer_size); | 854 return heap_profile_->FillOrderedProfile(raw_buffer, buffer_size); |
| 916 } | 855 } |
| 917 | 856 |
| 918 #endif // DEEP_HEAP_PROFILE | 857 #endif // DEEP_HEAP_PROFILE |