| Index: third_party/tcmalloc/chromium/src/deep-memory-profiler.cc
|
| diff --git a/third_party/tcmalloc/chromium/src/deep-memory-profiler.cc b/third_party/tcmalloc/chromium/src/deep-memory-profiler.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..05e34cc64527ef7e42c0295a881bde0e3c138ef4
|
| --- /dev/null
|
| +++ b/third_party/tcmalloc/chromium/src/deep-memory-profiler.cc
|
| @@ -0,0 +1,552 @@
|
| +// Copyright (c) 2011 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include <sys/types.h>
|
| +#include <sys/stat.h>
|
| +#include <fcntl.h>
|
| +#include <unistd.h>
|
| +
|
| +#include "deep-memory-profiler.h"
|
| +#include "base/sysinfo.h"
|
| +#include "base/cycleclock.h"
|
| +
|
| +using tcmalloc::FillProcSelfMaps; // from sysinfo.h
|
| +using tcmalloc::DumpProcSelfMaps; // from sysinfo.h
|
| +
|
| +// These constants are used when parsing /proc/<pid>/pagemap
|
| +#define PAGE_SIZE 4096
|
| +#define U64_1 ((uint64)1)
|
| +#define PFN_FILTER ((U64_1 << 55) - U64_1)
|
| +#define PAGE_PRESENT U64_1 << 63
|
| +#define PAGE_SWAP U64_1 << 62
|
| +#define PAGE_RESERVED U64_1 << 61
|
| +#define FLAG_NOPAGE U64_1 << 20
|
| +#define FLAG_KSM U64_1 << 21
|
| +#define FLAG_MMAP U64_1 << 11
|
| +#define PAGEMAP_BYTES 8
|
| +
|
| +static const int kProfilerBufferSize = 1 << 20;
|
| +static const int kHashTableSize = 179999; // Must be the same as kHashTableSize in heap-profile-table.cc
|
| +
|
| +// header of the dumped heap profile
|
| +static const char kProfileHeader[] = "Deep Memory Profile\n";
|
| +static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
|
| +static const char kStacktraceHeader[] = "STACKTRACES:\n";
|
| +static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
|
| +
|
| +DeepMemoryProfiler::DeepMemoryProfiler(HeapProfileTable* heap_profile, const char* prefix) {
|
| + heap_profile_ = heap_profile;
|
| +
|
| + //kpageflags_fd_ = open("/proc/kpageflags", O_RDONLY);
|
| + //RAW_DCHECK(kpageflags_fd_ != -1, "Failed to open /proc/kpageflags");
|
| + kpageflags_fd_ = -1; // Not currently using this
|
| +
|
| + page_map_ = new(heap_profile_->alloc_(sizeof(PageStateMap)))
|
| + PageStateMap(heap_profile_->alloc_, heap_profile_->dealloc_);
|
| +
|
| + // Copy filename prefix
|
| + RAW_DCHECK(filename_prefix_ == NULL, "");
|
| + const int prefix_length = strlen(prefix);
|
| + filename_prefix_ = reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
|
| + memcpy(filename_prefix_, prefix, prefix_length);
|
| + filename_prefix_[prefix_length] = '\0';
|
| +
|
| + profiler_buffer_ = reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize));
|
| + dump_count_ = 0;
|
| +}
|
| +
|
| +DeepMemoryProfiler::~DeepMemoryProfiler() {
|
| + page_map_->~PageStateMap();
|
| + heap_profile_->dealloc_(page_map_);
|
| + heap_profile_->dealloc_(filename_prefix_);
|
| + heap_profile_->dealloc_(profiler_buffer_);
|
| +}
|
| +
|
| +// This function needs to be called again after each fork
|
| +void DeepMemoryProfiler::OpenPageMap() {
|
| + char filename[100];
|
| + sprintf(filename, "/proc/%d/pagemap", getpid());
|
| + pagemap_fd_ = open(filename, O_RDONLY);
|
| + RAW_DCHECK(pagemap_fd_ != -1, "Failed to open /proc/self/pagemap");
|
| +}
|
| +
|
| +bool DeepMemoryProfiler::PageMapSeek(uint64 addr) {
|
| + uint64 index = (addr / PAGE_SIZE) * PAGEMAP_BYTES;
|
| + uint64 o = lseek64(pagemap_fd_, index, SEEK_SET);
|
| + RAW_DCHECK(o == index, "");
|
| + return true;
|
| +}
|
| +
|
| +bool DeepMemoryProfiler::PageMapRead(PageState* state) {
|
| + uint64 pa;
|
| + int t = read(pagemap_fd_, &pa, PAGEMAP_BYTES);
|
| + if(t != PAGEMAP_BYTES)
|
| + return false;
|
| +
|
| + // Check if the page is committed
|
| + state->is_committed = (pa & (PAGE_PRESENT | PAGE_SWAP));
|
| +
|
| + state->is_present = (pa & PAGE_PRESENT);
|
| + state->is_swapped = (pa & PAGE_SWAP);
|
| +
|
| +  // We can get more detailed stats from /proc/kpageflags
|
| + if(state->is_present && kpageflags_fd_ != -1) {
|
| + uint64 pfn = pa & PFN_FILTER;
|
| + int64 index = pfn * sizeof(uint64);
|
| + if (lseek64(kpageflags_fd_, index, SEEK_SET) != index) {
|
| + RAW_LOG(ERROR, "kpageflags seek failed. errno %d",errno);
|
| + return false;
|
| + }
|
| + uint64 flags;
|
| + if (read(kpageflags_fd_, &flags, sizeof(uint64)) < 0) {
|
| + RAW_LOG(ERROR, "kpageflags read failed. errno %d",errno);
|
| + return false;
|
| + }
|
| + if(flags & FLAG_NOPAGE) RAW_LOG(ERROR,"NOPAGE at present page frame %"PRId64"", pfn);
|
| + state->is_shared = (flags & FLAG_KSM);
|
| + state->is_mmap = (flags & FLAG_MMAP);
|
| + }else{
|
| + state->is_shared = false;
|
| + }
|
| +
|
| + return true;
|
| +}
|
| +
|
| +uint64 DeepMemoryProfiler::GetCommittedSize(uint64 addr, uint64 size) {
|
| + uint64 page_addr = (addr / PAGE_SIZE) * PAGE_SIZE;
|
| + uint64 committed_size = 0;
|
| +
|
| + PageMapSeek(addr);
|
| +  // Check every page on which the allocation resides
|
| + while(page_addr < addr + size) {
|
| + // Read corresponding physical page
|
| + PageState state;
|
| + if(PageMapRead(&state) == false){
|
| + // We can't read the last region (e.g vsyscall)
|
| + RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", addr, size);
|
| + return 0;
|
| + }
|
| +
|
| + if(state.is_committed){
|
| + // Calculate the size of the allocation part in this page
|
| + uint64 bytes = PAGE_SIZE;
|
| + if(page_addr < addr)
|
| + bytes -= addr - page_addr;
|
| + if(addr + size < page_addr + PAGE_SIZE)
|
| + bytes -= PAGE_SIZE - (addr + size - page_addr);
|
| +
|
| + committed_size += bytes;
|
| + }
|
| + page_addr += PAGE_SIZE;
|
| + }
|
| +
|
| + return committed_size;
|
| +}
|
| +
|
| +void DeepMemoryProfiler::InitRegionStats(RegionStats* stats) {
|
| + stats->virtual_bytes = 0;
|
| + stats->committed_bytes = 0;
|
| +}
|
| +
|
| +void DeepMemoryProfiler::RecordRegionStats(uint64 start, uint64 end,
|
| + RegionStats* stats) {
|
| + stats->virtual_bytes += end - start;
|
| + stats->committed_bytes += GetCommittedSize(start, end - start);
|
| +}
|
| +
|
| +void DeepMemoryProfiler::GetGlobalStats() {
|
| + ProcMapsIterator::Buffer iterbuf;
|
| + ProcMapsIterator it(0, &iterbuf);
|
| + uint64 start, end, offset;
|
| + int64 inode;
|
| + char *flags, *filename;
|
| +
|
| + InitRegionStats(&(stats_.total));
|
| + InitRegionStats(&(stats_.file_mapped));
|
| + InitRegionStats(&(stats_.anonymous));
|
| + InitRegionStats(&(stats_.other));
|
| +
|
| + /*
|
| + char file_name[1000];
|
| + snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.maps",
|
| + filename_prefix_, getpid(), dump_count_);
|
| +
|
| + RawFD maps_fd = RawOpenForWriting(file_name);
|
| + RAW_DCHECK(maps_fd != kIllegalRawFD, "");
|
| +
|
| + // We use global buffer here
|
| + char* buf = profiler_buffer_;
|
| + int bufsize = kProfilerBufferSize;
|
| + int buflen = 0;
|
| + int64 mmap_end = 0;
|
| + const AllocValue* v = NULL;
|
| + */
|
| +
|
| + while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
|
| + if(strcmp("[vsyscall]", filename) == 0) continue; // pagemap read fails in this region
|
| +
|
| + int64 committed_bytes = stats_.total.committed_bytes;
|
| + RecordRegionStats(start, end, &(stats_.total));
|
| + committed_bytes = stats_.total.committed_bytes - committed_bytes;
|
| +
|
| + if(filename[0] == '/') {
|
| + RecordRegionStats(start, end, &(stats_.file_mapped));
|
| + }else if(filename[0] == '\0' || filename[0] == '\n' || filename[0] == EOF){
|
| + RecordRegionStats(start, end, &(stats_.anonymous));
|
| + }else{
|
| + RecordRegionStats(start, end, &(stats_.other));
|
| + }
|
| + /*
|
| + RegionValue rv;
|
| + rv.start = start;
|
| + rv.end = end;
|
| + rv.size = end - start;
|
| + rv.committed_size = committed_bytes;
|
| + rv.recorded_size = 0;
|
| + rv.recorded_committed_size = 0;
|
| + memcpy(rv.permissions, flags, 4);
|
| + rv.permissions[4] = '\0';
|
| + memcpy(rv.filename, filename, strlen(filename) + 1);
|
| + regions_->Insert(reinterpret_cast<void*>(start), rv);
|
| +
|
| + if(rv.size > max_region_size_)
|
| + max_region_size_ = rv.size;
|
| + */
|
| +
|
| + /*
|
| + uint64 recorded_bytes = 0;
|
| + if(mmap_end > start && v != NULL){
|
| + // This region is a part of the previous mmap allocation
|
| + if(mmap_end >= end)
|
| + recorded_bytes = end - start;
|
| + else
|
| + recorded_bytes = mmap_end - start;
|
| + }else{
|
| + v = heap_profile_->allocation_mmap_->Find(reinterpret_cast<void*>(start));
|
| + if(v != NULL){
|
| + mmap_end = start + v->bytes;
|
| + if(mmap_end >= end)
|
| + recorded_bytes = end - start;
|
| + else
|
| + recorded_bytes = mmap_end - start;
|
| + while(mmap_end < end){
|
| + // There might be multiple mmap allocations in this region
|
| + v = heap_profile_->allocation_mmap_->Find(reinterpret_cast<void*>(mmap_end));
|
| + if(v == NULL)
|
| + break;
|
| + else{
|
| + mmap_end += v->bytes;
|
| + recorded_bytes += v->bytes;
|
| + }
|
| + }
|
| + }
|
| + }
|
| +
|
| + buflen += snprintf(buf + buflen, bufsize - buflen,
|
| + "%#llx-%#llx %10"PRId64" %10"PRId64" %10"PRId64" %s\n",
|
| + start, end, (end - start), committed_bytes, recorded_bytes,
|
| + filename);
|
| + */
|
| + }
|
| +
|
| + //RawWrite(maps_fd, buf, buflen);
|
| + //RawClose(maps_fd);
|
| +}
|
| +
|
| +void DeepMemoryProfiler::RecordAllocInRegions(uint64 addr, uint64 size) {
|
| + const void* regions_start;
|
| + const RegionValue* rv_const = regions_->FindInside(&GetRegionSize,
|
| + (size_t)max_region_size_,
|
| + (void*)addr, ®ions_start);
|
| +
|
| + if(rv_const == NULL) {
|
| + RAW_LOG(0, "Could't find a region for allocation!");
|
| + return;
|
| + }
|
| + RegionValue* rv = regions_->FindMutable(regions_start);
|
| + RAW_DCHECK(rv->start <= addr, "");
|
| + RAW_DCHECK(rv->end > addr, "");
|
| +
|
| + if(rv->end >= addr + size) {
|
| + // This region includes the whole allocation
|
| + rv->recorded_size += size;
|
| + rv->recorded_committed_size += GetCommittedSize(addr, size);
|
| + }else{
|
| + // This region includes only a part of the allocation.
|
| + // Need to find the other regions that include this allocation.
|
| + rv->recorded_size += rv->end - addr;
|
| + rv->recorded_committed_size += GetCommittedSize(addr, rv->end - addr);
|
| + RecordAllocInRegions(rv->end, addr + size - rv->end);
|
| + }
|
| +}
|
| +
|
| +void DeepMemoryProfiler::RecordAlloc(const void* ptr, AllocValue* v,
|
| + DeepMemoryProfiler* deep_profiler) {
|
| + uint64 alloc_addr = (uint64)ptr;
|
| + uint64 committed = deep_profiler->GetCommittedSize(alloc_addr, v->bytes);
|
| +
|
| + v->bucket()->committed_size += committed;
|
| + if(deep_profiler->recording_mmap_){
|
| + deep_profiler->stats_.record_mmap.virtual_bytes += v->bytes;
|
| + deep_profiler->stats_.record_mmap.committed_bytes += committed;
|
| + // Record this allocation in the region map
|
| + //deep_profiler->RecordAllocInRegions(alloc_addr, v->bytes);
|
| + }else{
|
| + deep_profiler->stats_.record_tcmalloc.virtual_bytes += v->bytes;
|
| + deep_profiler->stats_.record_tcmalloc.committed_bytes += committed;
|
| + }
|
| +}
|
| +
|
| +void DeepMemoryProfiler::RecordAllAllocs() {
|
| + stats_.record_mmap.virtual_bytes = 0;
|
| + stats_.record_mmap.committed_bytes = 0;
|
| + stats_.record_tcmalloc.virtual_bytes = 0;
|
| + stats_.record_tcmalloc.committed_bytes = 0;
|
| +
|
| + // Tcmalloc allocs
|
| + recording_mmap_ = false;
|
| + heap_profile_->allocation_->Iterate(RecordAlloc, this);
|
| +
|
| + // Mmap allocs
|
| + recording_mmap_ = true;
|
| + heap_profile_->allocation_mmap_->Iterate(RecordAlloc, this);
|
| +}
|
| +
|
| +
|
| +void DeepMemoryProfiler::WriteLeakyRegion(const void* ptr,
|
| + RegionValue* rv,
|
| + BufferArgs* buffer) {
|
| + if(rv->committed_size > rv->recorded_committed_size){
|
| + //if(rv->filename[0] != '\0')
|
| + // return;
|
| + if(buffer->len >= buffer->size)
|
| + return;
|
| +
|
| + int printed = snprintf(buffer->buf + buffer->len, buffer->size - buffer->len,
|
| + "%#llx-%#llx %s %10"PRId64" %10"PRId64""
|
| + " %10"PRId64" %10"PRId64" %s\n",
|
| + rv->start, rv->end, rv->permissions,
|
| + rv->size - rv->recorded_size, rv->size,
|
| + rv->committed_size - rv->recorded_committed_size,
|
| + rv->committed_size,
|
| + rv->filename);
|
| +
|
| + if(printed >= buffer->size - buffer->len)
|
| + return;
|
| + buffer->len += printed;
|
| + }
|
| +}
|
| +
|
| +// Write leaked regions to a file
|
| +void DeepMemoryProfiler::WriteAllLeakyRegions() {
|
| + BufferArgs buffer;
|
| + buffer.buf = profiler_buffer_;
|
| + buffer.size = kProfilerBufferSize;
|
| + buffer.len = 0;
|
| + regions_->Iterate(WriteLeakyRegion, &buffer);
|
| +
|
| + char leaks_file_name[1000];
|
| + snprintf(leaks_file_name, sizeof(leaks_file_name), "%s.%05d.%04d.leaks",
|
| + filename_prefix_, getpid(), dump_count_);
|
| +
|
| + RawFD leaks_fd = RawOpenForWriting(leaks_file_name);
|
| + RAW_DCHECK(leaks_fd != kIllegalRawFD, "");
|
| + RawWrite(leaks_fd, buffer.buf, buffer.len);
|
| + RawClose(leaks_fd);
|
| +}
|
| +
|
| +void DeepMemoryProfiler::WriteMapsToFile(char buf[], int size) {
|
| + char file_name[100];
|
| + snprintf(file_name, sizeof(file_name), "%s.%05d.maps", filename_prefix_, getpid());
|
| +
|
| + RawFD maps_fd = RawOpenForWriting(file_name);
|
| + RAW_DCHECK(maps_fd != kIllegalRawFD, "");
|
| +
|
| + int map_length;
|
| + bool dummy; // "wrote_all" -- did /proc/self/maps fit in its entirety?
|
| + map_length = FillProcSelfMaps(profiler_buffer_, kProfilerBufferSize, &dummy);
|
| + RAW_DCHECK(map_length <= kProfilerBufferSize, "");
|
| + RawWrite(maps_fd, profiler_buffer_, map_length);
|
| + RawClose(maps_fd);
|
| +}
|
| +
|
| +int DeepMemoryProfiler::WriteBucket(const Bucket* b, char buf[], int bufsize) {
|
| + int buflen = 0;
|
| + buflen += snprintf(buf + buflen, bufsize - buflen, "%05d", b->id);
|
| + for (int d = 0; d < b->depth; d++) {
|
| + buflen += snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
|
| + reinterpret_cast<uintptr_t>(b->stack[d]));
|
| + }
|
| + buflen += snprintf(buf + buflen, bufsize - buflen, "\n");
|
| + return buflen;
|
| +}
|
| +
|
| +void DeepMemoryProfiler::WriteBucketsToFile() {
|
| + char file_name[100];
|
| + snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.buckets",
|
| + filename_prefix_, getpid(), dump_count_);
|
| + RawFD bucket_fd = RawOpenForWriting(file_name);
|
| + RAW_DCHECK(bucket_fd != kIllegalRawFD, "");
|
| +
|
| + // We will use the global buffer here
|
| + char* buf = profiler_buffer_;
|
| + int size = kProfilerBufferSize;
|
| + int buflen = 0;
|
| + for (int b = 0; b < kHashTableSize; b++) {
|
| + for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) {
|
| + if(x->is_logged)
|
| + continue; // Skip the bucket if it is already logged
|
| + if(x->alloc_size - x->free_size <= 64)
|
| + continue; // Skip small buckets
|
| +
|
| + buflen += WriteBucket(x, buf + buflen, size - buflen);
|
| + x->is_logged = true;
|
| +
|
| +      // Flush to file once the buffer is 80% full
|
| + if(buflen > size * 0.8){
|
| + RawWrite(bucket_fd, buf, buflen);
|
| + buflen = 0;
|
| + }
|
| + }
|
| + }
|
| + RawWrite(bucket_fd, buf, buflen);
|
| + RawClose(bucket_fd);
|
| +}
|
| +
|
| +int DeepMemoryProfiler::UnparseBucket(const Bucket& b,
|
| + char* buf, int buflen, int bufsize,
|
| + const char* extra,
|
| + Stats* profile_stats) {
|
| + if (profile_stats != NULL) {
|
| + profile_stats->allocs += b.allocs;
|
| + profile_stats->alloc_size += b.alloc_size;
|
| + profile_stats->frees += b.frees;
|
| + profile_stats->free_size += b.free_size;
|
| + }
|
| + int printed =
|
| + snprintf(buf + buflen, bufsize - buflen,
|
| + "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n",
|
| + b.alloc_size - b.free_size,
|
| + b.committed_size,
|
| + b.allocs, b.frees,
|
| + extra,
|
| + b.id);
|
| + // If it looks like the snprintf failed, ignore the fact we printed anything
|
| + if (printed < 0 || printed >= bufsize - buflen) return buflen;
|
| + buflen += printed;
|
| +
|
| + return buflen;
|
| +}
|
| +
|
| +int DeepMemoryProfiler::UnparseRegionStats(const RegionStats* stats, const char* name,
|
| + char* buf, int buflen, int bufsize) {
|
| + int printed = snprintf(buf + buflen, bufsize - buflen,
|
| + "%15s %10"PRId64" %10"PRId64"\n",
|
| + name,
|
| + stats->virtual_bytes,
|
| + stats->committed_bytes);
|
| +
|
| + return buflen + printed;
|
| +}
|
| +
|
| +int DeepMemoryProfiler::UnparseGlobalStats(char* buf, int buflen, int bufsize) {
|
| + buflen += snprintf(buf + buflen, bufsize - buflen,
|
| + "%15s %10s %10s\n",
|
| + "", "virtual", "committed");
|
| +
|
| + buflen = UnparseRegionStats(&(stats_.total), "total", buf, buflen, bufsize);
|
| + buflen = UnparseRegionStats(&(stats_.file_mapped), "file mapped", buf, buflen, bufsize);
|
| + buflen = UnparseRegionStats(&(stats_.anonymous), "anonymous", buf, buflen, bufsize);
|
| + buflen = UnparseRegionStats(&(stats_.other), "other", buf, buflen, bufsize);
|
| + buflen = UnparseRegionStats(&(stats_.record_mmap), "mmap", buf, buflen, bufsize);
|
| + buflen = UnparseRegionStats(&(stats_.record_tcmalloc), "tcmalloc", buf, buflen, bufsize);
|
| + return buflen;
|
| +}
|
| +
|
| +// Takes snapshot of current memory usage.
|
| +// We avoid any memory allocations during snapshots.
|
| +//void DeepMemoryProfiler::TakeMemorySnapshot() {
|
| +//}
|
| +
|
| +int DeepMemoryProfiler::FillOrderedProfile(char buf[], int size) {
|
| + int64 start_time = CycleClock::Now();
|
| + dump_count_++;
|
| +
|
| +  // We need to re-open the files under /proc/<pid>/ if the process ID has changed (e.g. after a fork)
|
| + if(most_recent_pid_ != getpid()) {
|
| + most_recent_pid_ = getpid();
|
| + OpenPageMap();
|
| + WriteMapsToFile(profiler_buffer_, kProfilerBufferSize); // Using the global buffer
|
| + }
|
| +
|
| +
|
| + //regions_ = new(heap_profile_->alloc_(sizeof(RegionMap)))
|
| + // RegionMap(heap_profile_->alloc_, heap_profile_->dealloc_);
|
| + //max_region_size_ = 0;
|
| + GetGlobalStats();
|
| + uint64 anonymous_committed = stats_.anonymous.committed_bytes;
|
| +
|
| + // Reset committed size of buckets
|
| + for (int b = 0; b < kHashTableSize; b++) {
|
| + for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) {
|
| + x->committed_size = 0;
|
| + }
|
| + }
|
| +
|
| + //Bucket** list = heap_profile_->MakeBucketList();
|
| + //for (int i = 0; i < heap_profile_->num_buckets_; i++) {
|
| + // list[i]->committed_size = 0;
|
| + //}
|
| +
|
| + // No allocation zone starts -----------------------------------
|
| +
|
| + // Record committed sizes
|
| + RecordAllAllocs();
|
| +
|
| + // Check if committed bytes changed during RecordAllAllocs.
|
| + GetGlobalStats();
|
| + uint64 comm_diff = stats_.anonymous.committed_bytes - anonymous_committed;
|
| + if(comm_diff != 0)
|
| + RAW_LOG(0, "committed diff: %"PRId64"", comm_diff);
|
| +
|
| + HeapProfileTable::Stats stats;
|
| + memset(&stats, 0, sizeof(stats));
|
| +
|
| + int bucket_length = snprintf(buf, size, kProfileHeader);
|
| + if (bucket_length < 0 || bucket_length >= size) return 0;
|
| +
|
| + // Printing Global Stats
|
| + bucket_length += snprintf(buf + bucket_length, size - bucket_length, kGlobalStatsHeader);
|
| + bucket_length = UnparseGlobalStats(buf, bucket_length, size);
|
| +
|
| + // Printing Stacktraces
|
| + bucket_length += snprintf(buf + bucket_length, size - bucket_length, kStacktraceHeader);
|
| + bucket_length += snprintf(buf + bucket_length, size - bucket_length,
|
| + "%10s %10s\n", "virtual", "committed");
|
| +
|
| + for (int b = 0; b < kHashTableSize; b++) {
|
| + for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) {
|
| + if(x->alloc_size - x->free_size == 0)
|
| + continue; // Skip empty buckets
|
| + bucket_length = UnparseBucket(*x, buf, bucket_length, size, "",
|
| + &stats);
|
| + }
|
| + }
|
| +
|
| + RAW_DCHECK(bucket_length < size, "");
|
| + // No allocation zone ends -----------------------------------
|
| +
|
| + //WriteAllLeakyRegions();
|
| + // Write stacktraces
|
| + WriteBucketsToFile();
|
| +
|
| + //regions_->~RegionMap();
|
| + //heap_profile_->dealloc_(regions_);
|
| +
|
| + int64 dt = CycleClock::Now() - start_time;
|
| + double dtf = dt / CyclesPerSecond();
|
| + RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", dtf);
|
| +
|
| + return bucket_length;
|
| +}
|
|
|