| Index: third_party/tcmalloc/chromium/src/deep-heap-profile.cc
|
| diff --git a/third_party/tcmalloc/chromium/src/deep-heap-profile.cc b/third_party/tcmalloc/chromium/src/deep-heap-profile.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..d379e6ab775e32a69752b41ae180e264d45f7918
|
| --- /dev/null
|
| +++ b/third_party/tcmalloc/chromium/src/deep-heap-profile.cc
|
| @@ -0,0 +1,559 @@
|
| +// Copyright (c) 2011 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +// ---
|
| +// Author: Sainbayar Sukhbaatar
|
| +// Dai Mikurube
|
| +//
|
| +
|
| +#include "deep-heap-profile.h"
|
| +
|
| +#ifdef DEEP_HEAP_PROFILE
|
| +#include <fcntl.h>
|
| +#include <sys/stat.h>
|
| +#include <sys/types.h>
|
| +#ifdef HAVE_UNISTD_H
|
| +#include <unistd.h> // for getpid()
|
| +#endif
|
| +
|
| +#include "base/cycleclock.h"
|
| +#include "base/sysinfo.h"
|
| +
|
| +static const int kProfilerBufferSize = 1 << 20;
|
| +static const int kHashTableSize = 179999; // The same as heap-profile-table.cc.
|
| +
|
| +static const int PAGE_SIZE = 4096;
|
| +static const int PAGEMAP_BYTES = 8;
|
| +static const uint64 TOP_ADDRESS = kuint64max;
|
| +
|
| +// Header strings of the dumped heap profile.
|
| +static const char kProfileHeader[] = "Deep Heap Profile";
|
| +static const int kProfileVersion = 1;
|
| +static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
|
| +static const char kStacktraceHeader[] = "STACKTRACES:\n";
|
| +static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
|
| +
|
// Constructs a deep profiler decorating |heap_profile|.  All bookkeeping
// memory is obtained from the HeapProfileTable's own allocator
// (alloc_/dealloc_) so the profiler's overhead is not attributed to the
// profiled program.  |prefix| is copied and used to name the .maps and
// .buckets files written during dumps.
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : heap_profile_(heap_profile),
      pagemap_fd_(-1),
      most_recent_pid_(-1),
      stats_(),
      dump_count_(0),
      filename_prefix_(NULL),
      profiler_buffer_(NULL),
      bucket_id_(0) {
  // Placement-new the bucket map into storage from the profile's
  // allocator; the destructor runs ~DeepBucketMap() explicitly to match.
  deep_bucket_map_ = new(heap_profile_->alloc_(sizeof(DeepBucketMap)))
      DeepBucketMap(heap_profile_->alloc_, heap_profile_->dealloc_);

  // Copy filename prefix.
  const int prefix_length = strlen(prefix);
  filename_prefix_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
  memcpy(filename_prefix_, prefix, prefix_length);
  filename_prefix_[prefix_length] = '\0';

  // Scratch buffer reused for writing the .maps and .buckets files.
  profiler_buffer_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize));
}
|
| +
|
// Releases everything allocated in the constructor.  deep_bucket_map_
// was placement-new'ed, so its destructor must be invoked explicitly
// before the raw storage is handed back to dealloc_.
DeepHeapProfile::~DeepHeapProfile() {
  heap_profile_->dealloc_(profiler_buffer_);
  heap_profile_->dealloc_(filename_prefix_);
  deep_bucket_map_->~DeepBucketMap();
  heap_profile_->dealloc_(deep_bucket_map_);
}
|
| +
|
// Fills |buffer| with the deep heap profile: a version header, the
// GLOBAL_STATS section, and one line per non-empty stack-trace bucket
// (virtual and committed bytes).  Returns the number of bytes used in
// |buffer|.  As side effects it writes "<prefix>.<pid>.maps" the first
// time it runs in a given process, and a
// "<prefix>.<pid>.<dump count>.buckets" file listing new stack traces
// on every call.
int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
#ifndef NDEBUG
  int64 starting_cycles = CycleClock::Now();
#endif
  ++dump_count_;

  // Re-open files in /proc/pid/ if the process is newly forked one.
  // (pagemap is per-process; a stale fd would describe the parent.)
  if (most_recent_pid_ != getpid()) {
    most_recent_pid_ = getpid();
    pagemap_fd_ = OpenProcPagemap();

    // Write maps into a .maps file with using the global buffer.
    WriteMapsToFile(profiler_buffer_, kProfilerBufferSize, filename_prefix_);
  }

  // Reset committed sizes of buckets.
  ResetCommittedSize(heap_profile_->alloc_table_);
  ResetCommittedSize(heap_profile_->mmap_table_);

  // Snapshot of process-wide stats, taken before walking allocations so
  // we can detect (in debug builds) whether measurement perturbed them.
  GetGlobalStats(pagemap_fd_, &stats_);
  size_t anonymous_committed = stats_.anonymous.committed_bytes;

  // Note: Don't allocate any memory from here.
  // (Allocating would mutate the very tables being measured.)

  // Record committed sizes.
  RecordAllAllocs();

  // Check if committed bytes changed during RecordAllAllocs.
  GetGlobalStats(pagemap_fd_, &stats_);
#ifndef NDEBUG
  size_t committed_difference =
      stats_.anonymous.committed_bytes - anonymous_committed;
  if (committed_difference != 0) {
    RAW_LOG(0, "Difference in committed size: %ld", committed_difference);
  }
#endif

  HeapProfileTable::Stats stats;
  memset(&stats, 0, sizeof(stats));

  // Start filling buffer with the ordered profile.
  int printed = snprintf(buffer, buffer_size,
                         "%s:%d\n", kProfileHeader, kProfileVersion);
  if (printed < 0 || printed >= buffer_size) {
    return 0;
  }
  int used_in_buffer = printed;

  // Fill buffer with the global stats.
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     kGlobalStatsHeader);
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  used_in_buffer = UnparseGlobalStats(buffer, used_in_buffer, buffer_size);

  // Fill buffer with the header for buckets.
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     kStacktraceHeader);
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     "%10s %10s\n", "virtual", "committed");
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  // Fill buffer with stack trace buckets.
  used_in_buffer = FillBucketTable(heap_profile_->alloc_table_, buffer,
                                   buffer_size, used_in_buffer, &stats);
  used_in_buffer = FillBucketTable(heap_profile_->mmap_table_, buffer,
                                   buffer_size, used_in_buffer, &stats);

  RAW_DCHECK(used_in_buffer < buffer_size, "");

  // Note: Don't allocate any memory until here.

  // Write the bucket listing into a .bucket file.
  WriteBucketsToBucketFile();

#ifndef NDEBUG
  int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
  double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
  RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
#endif

  return used_in_buffer;
}
|
| +
|
| +void DeepHeapProfile::RegionStats::Initialize() {
|
| + virtual_bytes = 0;
|
| + committed_bytes = 0;
|
| +}
|
| +
|
// Accumulates the inclusive address range [first_address, last_address]
// into this region's counters: the whole span counts toward
// virtual_bytes, while only pages backed by RAM or swap (as reported by
// /proc/<pid>/pagemap via GetCommittedSize) count toward committed_bytes.
void DeepHeapProfile::RegionStats::Record(
    int pagemap_fd, uint64 first_address, uint64 last_address) {
  virtual_bytes += static_cast<size_t>(last_address - first_address + 1);
  committed_bytes += GetCommittedSize(pagemap_fd, first_address, last_address);
}
|
| +
|
| +// static
|
| +int DeepHeapProfile::OpenProcPagemap() {
|
| + char filename[100];
|
| + sprintf(filename, "/proc/%d/pagemap", getpid());
|
| + int pagemap_fd = open(filename, O_RDONLY);
|
| + RAW_DCHECK(pagemap_fd != -1, "Failed to open /proc/self/pagemap");
|
| + return pagemap_fd;
|
| +}
|
| +
|
| +// static
|
| +bool DeepHeapProfile::SeekProcPagemap(int pagemap_fd, uint64 address) {
|
| + int64 index = (address / PAGE_SIZE) * PAGEMAP_BYTES;
|
| + int64 offset = lseek64(pagemap_fd, index, SEEK_SET);
|
| + RAW_DCHECK(offset == index, "");
|
| + if (offset < 0) {
|
| + return false;
|
| + }
|
| + return true;
|
| +}
|
| +
|
// static
// Reads one 8-byte entry from the (already positioned) pagemap fd and
// decodes it into |state|.  Returns false when a full entry cannot be
// read, which happens for special regions such as [vsyscall].
bool DeepHeapProfile::ReadProcPagemap(int pagemap_fd, PageState* state) {
  static const uint64 U64_1 = 1;
  // Bit layout of a /proc/<pid>/pagemap entry (Linux pagemap interface).
  static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1;  // bits 0-54: PFN.
  static const uint64 PAGE_PRESENT = U64_1 << 63;  // page is in RAM.
  static const uint64 PAGE_SWAP = U64_1 << 62;     // page is swapped out.
  static const uint64 PAGE_RESERVED = U64_1 << 61;
  // NOTE(review): PFN_FILTER, PAGE_RESERVED and the FLAG_* constants
  // below are not used in this function (FLAG_* look like
  // /proc/kpageflags bits rather than pagemap bits) -- kept for
  // reference; confirm before relying on them.
  static const uint64 FLAG_NOPAGE = U64_1 << 20;
  static const uint64 FLAG_KSM = U64_1 << 21;
  static const uint64 FLAG_MMAP = U64_1 << 11;

  uint64 pagemap_value;
  int result = read(pagemap_fd, &pagemap_value, PAGEMAP_BYTES);
  if (result != PAGEMAP_BYTES) {
    return false;
  }

  // Check if the page is committed: present in RAM or swapped out both
  // count as committed memory.
  state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP));

  state->is_present = (pagemap_value & PAGE_PRESENT);
  state->is_swapped = (pagemap_value & PAGE_SWAP);
  state->is_shared = false;  // Sharing is not derived from pagemap here.

  return true;
}
|
| +
|
// static
// Returns how many bytes of the inclusive range
// [first_address, last_address] are committed (backed by RAM or swap)
// according to /proc/<pid>/pagemap.  Partial first/last pages contribute
// only the bytes actually inside the range.  Returns 0 if the pagemap
// cannot be read for the range (e.g. [vsyscall]).
size_t DeepHeapProfile::GetCommittedSize(
    int pagemap_fd, uint64 first_address, uint64 last_address) {
  // Align down to the start of the first page touched by the range.
  uint64 page_address = (first_address / PAGE_SIZE) * PAGE_SIZE;
  size_t committed_size = 0;

  SeekProcPagemap(pagemap_fd, first_address);

  // Check every page on which the allocation resides.
  while (page_address <= last_address) {
    // Read corresponding physical page.
    PageState state;
    if (ReadProcPagemap(pagemap_fd, &state) == false) {
      // We can't read the last region (e.g vsyscall).
#ifndef NDEBUG
      RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes",
              first_address, last_address - first_address + 1);
#endif
      return 0;
    }

    if (state.is_committed) {
      // Calculate the size of the allocation part in this page.
      size_t bytes = PAGE_SIZE;

      // If looking at the last page in a given region, count only up to
      // last_address.  (page_address - 1 + PAGE_SIZE is the page's last
      // byte, written to avoid overflowing past the top of the space.)
      if (last_address <= page_address - 1 + PAGE_SIZE) {
        bytes = last_address - page_address + 1;
      }

      // If looking at the first page in a given region, discount the
      // bytes before first_address.
      if (page_address < first_address) {
        bytes -= first_address - page_address;
      }

      committed_size += bytes;
    }
    // Stop before "page_address += PAGE_SIZE" could wrap around the top
    // of the 64-bit address space.
    if (page_address > TOP_ADDRESS - PAGE_SIZE) {
      break;
    }
    page_address += PAGE_SIZE;
  }

  return committed_size;
}
|
| +
|
// static
// Dumps the process's /proc/self/maps content into
// "<filename_prefix>.<pid>.maps", using |buffer| (the shared profiler
// buffer) as scratch space.
void DeepHeapProfile::WriteMapsToFile(char buffer[], int buffer_size,
                                      char* filename_prefix) {
  char filename[100];
  snprintf(filename, sizeof(filename),
           "%s.%05d.maps", filename_prefix, getpid());

  RawFD maps_fd = RawOpenForWriting(filename);
  RAW_DCHECK(maps_fd != kIllegalRawFD, "");

  int map_length;
  bool wrote_all;
  map_length = tcmalloc::FillProcSelfMaps(buffer, buffer_size, &wrote_all);
  RAW_DCHECK(wrote_all, "");  // The buffer must hold the whole maps dump.
  RAW_DCHECK(map_length <= buffer_size, "");
  RawWrite(maps_fd, buffer, map_length);
  RawClose(maps_fd);
}
|
| +
|
| +// static
|
| +void DeepHeapProfile::GetGlobalStats(int pagemap_fd, GlobalStats* stats) {
|
| + ProcMapsIterator::Buffer iterator_buffer;
|
| + ProcMapsIterator it(0, &iterator_buffer);
|
| + uint64 first_address, last_address, offset;
|
| + int64 inode;
|
| + char *flags, *filename;
|
| +
|
| + stats->total.Initialize();
|
| + stats->file_mapped.Initialize();
|
| + stats->anonymous.Initialize();
|
| + stats->other.Initialize();
|
| +
|
| + while (it.Next(&first_address, &last_address,
|
| + &flags, &offset, &inode, &filename)) {
|
| + // 'last_address' should be the last inclusive address of the region.
|
| + last_address -= 1;
|
| + if (strcmp("[vsyscall]", filename) == 0) {
|
| + continue; // Reading pagemap will fail in [vsyscall].
|
| + }
|
| +
|
| + int64 committed_bytes = stats->total.committed_bytes;
|
| + stats->total.Record(pagemap_fd, first_address, last_address);
|
| + committed_bytes = stats->total.committed_bytes - committed_bytes;
|
| +
|
| + if (filename[0] == '/') {
|
| + stats->file_mapped.Record(pagemap_fd, first_address, last_address);
|
| + } else if (filename[0] == '\0' || filename[0] == '\n') {
|
| + stats->anonymous.Record(pagemap_fd, first_address, last_address);
|
| + } else {
|
| + stats->other.Record(pagemap_fd, first_address, last_address);
|
| + }
|
| + }
|
| +}
|
| +
|
| +DeepHeapProfile::DeepBucket*
|
| +DeepHeapProfile::GetDeepBucket(Bucket* bucket) {
|
| + DeepBucket* found = deep_bucket_map_->FindMutable(bucket);
|
| + if (found == NULL) {
|
| + DeepBucket created;
|
| + created.bucket = bucket;
|
| + created.committed_size = 0;
|
| + created.id = (bucket_id_++);
|
| + created.is_logged = false;
|
| + deep_bucket_map_->Insert(bucket, created);
|
| + return deep_bucket_map_->FindMutable(bucket);
|
| + } else {
|
| + return found;
|
| + }
|
| +}
|
| +
|
| +void DeepHeapProfile::ResetCommittedSize(Bucket** bucket_table) {
|
| + for (int i = 0; i < kHashTableSize; i++) {
|
| + for (Bucket* b = bucket_table[i]; b != 0; b = b->next) {
|
| + DeepBucket* db = GetDeepBucket(b);
|
| + db->committed_size = 0;
|
| + }
|
| + }
|
| +}
|
| +
|
| +int DeepHeapProfile::FillBucketTable(Bucket** bucket_table,
|
| + char buffer[],
|
| + int buffer_size,
|
| + int used_in_buffer,
|
| + HeapProfileTable::Stats* stats) {
|
| + for (int i = 0; i < kHashTableSize; i++) {
|
| + for (Bucket* b = bucket_table[i]; b != 0; b = b->next) {
|
| + if (b->alloc_size - b->free_size == 0) {
|
| + continue; // Skip empty buckets.
|
| + }
|
| + const DeepBucket& db = *GetDeepBucket(b);
|
| + used_in_buffer =
|
| + UnparseBucket(db, buffer, used_in_buffer, buffer_size, "", stats);
|
| + }
|
| + }
|
| + return used_in_buffer;
|
| +}
|
| +
|
// Iterate() callback for the malloc address map.  Measures how much of
// the allocation at |pointer| is committed, then charges it both to the
// allocation's stack-trace bucket and to the global record_malloc stats.
void DeepHeapProfile::RecordAlloc(const void* pointer,
                                  AllocValue* alloc_value,
                                  DeepHeapProfile* deep_profile) {
  uint64 address = reinterpret_cast<uintptr_t>(pointer);
  // Inclusive range covering the allocation's bytes.
  size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
      address, address + alloc_value->bytes - 1);

  DeepBucket* db = deep_profile->GetDeepBucket(alloc_value->bucket());
  db->committed_size += committed;
  deep_profile->stats_.record_malloc.virtual_bytes += alloc_value->bytes;
  deep_profile->stats_.record_malloc.committed_bytes += committed;
}
|
| +
|
// Iterate() callback for the mmap address map.  Same measurement as
// RecordAlloc, but the totals are charged to record_mmap instead.
void DeepHeapProfile::RecordMMap(const void* pointer,
                                 AllocValue* alloc_value,
                                 DeepHeapProfile* deep_profile) {
  uint64 address = reinterpret_cast<uintptr_t>(pointer);
  // Inclusive range covering the mapping's bytes.
  size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
      address, address + alloc_value->bytes - 1);

  DeepBucket* db = deep_profile->GetDeepBucket(alloc_value->bucket());
  db->committed_size += committed;
  deep_profile->stats_.record_mmap.virtual_bytes += alloc_value->bytes;
  deep_profile->stats_.record_mmap.committed_bytes += committed;
}
|
| +
|
| +void DeepHeapProfile::RecordAllAllocs() {
|
| + stats_.record_mmap.virtual_bytes = 0;
|
| + stats_.record_mmap.committed_bytes = 0;
|
| + stats_.record_malloc.virtual_bytes = 0;
|
| + stats_.record_malloc.committed_bytes = 0;
|
| +
|
| + // malloc allocations.
|
| + heap_profile_->alloc_address_map_->Iterate(RecordAlloc, this);
|
| +
|
| + // mmap allocations.
|
| + heap_profile_->mmap_address_map_->Iterate(RecordMMap, this);
|
| +}
|
| +
|
// Formats the bucket-file record for |deep_bucket| into |buffer|:
//   "<5-digit id> 0x<frame addr> 0x<frame addr> ...\n"
// Returns the number of bytes written; on truncation it returns however
// many bytes fit (possibly 0), leaving a partial record in the buffer.
int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket,
                                             char buffer[],
                                             int buffer_size) {
  const Bucket* bucket = deep_bucket->bucket;
  int printed = snprintf(buffer, buffer_size, "%05d", deep_bucket->id);
  if (printed < 0 || printed >= buffer_size) {
    return 0;
  }
  int used_in_buffer = printed;

  // One " 0x<address>" token per stack frame of the bucket's trace.
  for (int d = 0; d < bucket->depth; d++) {
    printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                       " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(bucket->stack[d]));
    if (printed < 0 || printed >= buffer_size - used_in_buffer) {
      return used_in_buffer;
    }
    used_in_buffer += printed;
  }
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     "\n");
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  return used_in_buffer;
}
|
| +
|
// Appends every not-yet-logged bucket of |bucket_table| to |bucket_fd|
// in FillBucketForBucketFile format, batching writes through the shared
// profiler buffer.
void DeepHeapProfile::WriteBucketsTableToBucketFile(Bucket** bucket_table,
                                                    RawFD bucket_fd) {
  // We will use the global buffer here.
  char* buffer = profiler_buffer_;
  int buffer_size = kProfilerBufferSize;
  int used_in_buffer = 0;

  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* b = bucket_table[i]; b != 0; b = b->next) {
      DeepBucket* db = GetDeepBucket(b);
      if (db->is_logged) {
        continue;  // Skip the bucket if it is already logged.
      }
      if (b->alloc_size - b->free_size <= 64) {
        continue;  // Skip small buckets.
      }

      used_in_buffer += FillBucketForBucketFile(
          db, buffer + used_in_buffer, buffer_size - used_in_buffer);
      // NOTE(review): is_logged is set even when the record above was
      // truncated for lack of buffer space, so a truncated bucket is
      // never re-emitted -- confirm this is intended.
      db->is_logged = true;

      // Write to file if buffer 80% full.
      if (used_in_buffer > buffer_size * 0.8) {
        RawWrite(bucket_fd, buffer, used_in_buffer);
        used_in_buffer = 0;
      }
    }
  }

  // Flush whatever remains buffered.
  RawWrite(bucket_fd, buffer, used_in_buffer);
}
|
| +
|
// Writes all newly seen stack-trace buckets (from both the malloc and
// mmap tables) into "<prefix>.<pid>.<dump count>.buckets".
void DeepHeapProfile::WriteBucketsToBucketFile() {
  char filename[100];
  snprintf(filename, sizeof(filename),
           "%s.%05d.%04d.buckets", filename_prefix_, getpid(), dump_count_);
  RawFD bucket_fd = RawOpenForWriting(filename);
  RAW_DCHECK(bucket_fd != kIllegalRawFD, "");

  WriteBucketsTableToBucketFile(heap_profile_->alloc_table_, bucket_fd);
  WriteBucketsTableToBucketFile(heap_profile_->mmap_table_, bucket_fd);

  RawClose(bucket_fd);
}
|
| +
|
// Appends one profile line for |deep_bucket| to |buffer|:
//   "<virtual bytes> <committed bytes> <allocs> <frees> @<extra> <id>"
// and, when |profile_stats| is non-NULL, accumulates the bucket's totals
// into it.  Returns the new buffer usage; on snprintf failure or
// truncation the usage is returned unchanged (the stats accumulation
// above still happened).
int DeepHeapProfile::UnparseBucket(const DeepBucket& deep_bucket,
                                   char* buffer,
                                   int used_in_buffer,
                                   int buffer_size,
                                   const char* extra,
                                   Stats* profile_stats) {
  const Bucket& bucket = *deep_bucket.bucket;
  if (profile_stats != NULL) {
    profile_stats->allocs += bucket.allocs;
    profile_stats->alloc_size += bucket.alloc_size;
    profile_stats->frees += bucket.frees;
    profile_stats->free_size += bucket.free_size;
  }

  int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                         "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n",
                         bucket.alloc_size - bucket.free_size,
                         deep_bucket.committed_size,
                         bucket.allocs, bucket.frees, extra, deep_bucket.id);
  // If it looks like the snprintf failed, ignore the fact we printed anything.
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  return used_in_buffer;
}
|
| +
|
| +int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats,
|
| + const char* name,
|
| + char* buffer,
|
| + int used_in_buffer,
|
| + int buffer_size) {
|
| + int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
|
| + "%15s %10ld %10ld\n",
|
| + name, stats->virtual_bytes, stats->committed_bytes);
|
| + if (printed < 0 || printed >= buffer_size - used_in_buffer) {
|
| + return used_in_buffer;
|
| + }
|
| + used_in_buffer += printed;
|
| +
|
| + return used_in_buffer;
|
| +}
|
| +
|
| +int DeepHeapProfile::UnparseGlobalStats(char* buffer,
|
| + int used_in_buffer,
|
| + int buffer_size) {
|
| + int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
|
| + "%15s %10s %10s\n", "", "virtual", "committed");
|
| + if (printed < 0 || printed >= buffer_size - used_in_buffer) {
|
| + return used_in_buffer;
|
| + }
|
| + used_in_buffer += printed;
|
| +
|
| + used_in_buffer = UnparseRegionStats(&(stats_.total), "total",
|
| + buffer, used_in_buffer, buffer_size);
|
| + used_in_buffer = UnparseRegionStats(&(stats_.file_mapped), "file mapped",
|
| + buffer, used_in_buffer, buffer_size);
|
| + used_in_buffer = UnparseRegionStats(&(stats_.anonymous), "anonymous",
|
| + buffer, used_in_buffer, buffer_size);
|
| + used_in_buffer = UnparseRegionStats(&(stats_.other), "other",
|
| + buffer, used_in_buffer, buffer_size);
|
| + used_in_buffer = UnparseRegionStats(&(stats_.record_mmap), "mmap",
|
| + buffer, used_in_buffer, buffer_size);
|
| + used_in_buffer = UnparseRegionStats(&(stats_.record_malloc), "tcmalloc",
|
| + buffer, used_in_buffer, buffer_size);
|
| + return used_in_buffer;
|
| +}
|
| +#else // DEEP_HEAP_PROFILE
|
| +
|
// Build without DEEP_HEAP_PROFILE: the profiler is a pass-through shim
// around the underlying HeapProfileTable.  |prefix| is unused here.
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : heap_profile_(heap_profile) {
}
|
| +
|
// Pass-through build owns no resources, so there is nothing to release.
DeepHeapProfile::~DeepHeapProfile() {
}
|
| +
|
// With deep profiling compiled out, delegate straight to the ordinary
// heap-profile dump.
int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
  return heap_profile_->FillOrderedProfile(buffer, buffer_size);
}
|
| +
|
| +#endif // DEEP_HEAP_PROFILE
|
|
|