OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include <sys/types.h> |
| 6 #include <sys/stat.h> |
| 7 #include <fcntl.h> |
| 8 #include <unistd.h> |
| 9 |
| 10 #include "deep-memory-profiler.h" |
| 11 #include "base/sysinfo.h" |
| 12 #include "base/cycleclock.h" |
| 13 |
| 14 using tcmalloc::FillProcSelfMaps; // from sysinfo.h |
| 15 using tcmalloc::DumpProcSelfMaps; // from sysinfo.h |
| 16 |
// Constants and bit masks used when parsing /proc/<pid>/pagemap and
// /proc/kpageflags.  Bit positions follow the Linux kernel's pagemap ABI.
// BUG FIX: every mask is now fully parenthesized, so the macros expand
// safely inside larger expressions (previously e.g. "PAGE_PRESENT * 2"
// would have expanded to "U64_1 << 63 * 2" and misgrouped).
// NOTE(review): PAGE_SIZE is hard-coded to 4096 and may clash with a
// system-header macro of the same name -- TODO confirm on all targets.
#define PAGE_SIZE 4096
#define U64_1 ((uint64)1)
#define PFN_FILTER ((U64_1 << 55) - U64_1)
#define PAGE_PRESENT (U64_1 << 63)
#define PAGE_SWAP (U64_1 << 62)
#define PAGE_RESERVED (U64_1 << 61)
#define FLAG_NOPAGE (U64_1 << 20)
#define FLAG_KSM (U64_1 << 21)
#define FLAG_MMAP (U64_1 << 11)
#define PAGEMAP_BYTES 8
| 28 |
// Size of the scratch buffer shared by all dump routines in this file.
static const int kProfilerBufferSize = 1 << 20;
// Number of hash buckets walked when iterating heap_profile_->table_.
static const int kHashTableSize = 179999;  // Must be the same as in heap-profile-table.cc.

// Section headers of the dumped heap profile.
static const char kProfileHeader[] = "Deep Memory Profile\n";
static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
static const char kStacktraceHeader[] = "STACKTRACES:\n";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
| 37 |
| 38 DeepMemoryProfiler::DeepMemoryProfiler(HeapProfileTable* heap_profile, const cha
r* prefix) { |
| 39 heap_profile_ = heap_profile; |
| 40 |
| 41 //kpageflags_fd_ = open("/proc/kpageflags", O_RDONLY); |
| 42 //RAW_DCHECK(kpageflags_fd_ != -1, "Failed to open /proc/kpageflags"); |
| 43 kpageflags_fd_ = -1; // Not currently using this |
| 44 |
| 45 page_map_ = new(heap_profile_->alloc_(sizeof(PageStateMap))) |
| 46 PageStateMap(heap_profile_->alloc_, heap_profile_->dealloc_); |
| 47 |
| 48 // Copy filename prefix |
| 49 RAW_DCHECK(filename_prefix_ == NULL, ""); |
| 50 const int prefix_length = strlen(prefix); |
| 51 filename_prefix_ = reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length
+ 1)); |
| 52 memcpy(filename_prefix_, prefix, prefix_length); |
| 53 filename_prefix_[prefix_length] = '\0'; |
| 54 |
| 55 profiler_buffer_ = reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBuff
erSize)); |
| 56 dump_count_ = 0; |
| 57 } |
| 58 |
| 59 DeepMemoryProfiler::~DeepMemoryProfiler() { |
| 60 page_map_->~PageStateMap(); |
| 61 heap_profile_->dealloc_(page_map_); |
| 62 heap_profile_->dealloc_(filename_prefix_); |
| 63 heap_profile_->dealloc_(profiler_buffer_); |
| 64 } |
| 65 |
| 66 // This function need to be called after each fork |
| 67 void DeepMemoryProfiler::OpenPageMap() { |
| 68 char filename[100]; |
| 69 sprintf(filename, "/proc/%d/pagemap", getpid()); |
| 70 pagemap_fd_ = open(filename, O_RDONLY); |
| 71 RAW_DCHECK(pagemap_fd_ != -1, "Failed to open /proc/self/pagemap"); |
| 72 } |
| 73 |
| 74 bool DeepMemoryProfiler::PageMapSeek(uint64 addr) { |
| 75 uint64 index = (addr / PAGE_SIZE) * PAGEMAP_BYTES; |
| 76 uint64 o = lseek64(pagemap_fd_, index, SEEK_SET); |
| 77 RAW_DCHECK(o == index, ""); |
| 78 return true; |
| 79 } |
| 80 |
| 81 bool DeepMemoryProfiler::PageMapRead(PageState* state) { |
| 82 uint64 pa; |
| 83 int t = read(pagemap_fd_, &pa, PAGEMAP_BYTES); |
| 84 if(t != PAGEMAP_BYTES) |
| 85 return false; |
| 86 |
| 87 // Check if the page is committed |
| 88 state->is_committed = (pa & (PAGE_PRESENT | PAGE_SWAP)); |
| 89 |
| 90 state->is_present = (pa & PAGE_PRESENT); |
| 91 state->is_swapped = (pa & PAGE_SWAP); |
| 92 |
| 93 // We can get more detailed stats from kPageflags |
| 94 if(state->is_present && kpageflags_fd_ != -1) { |
| 95 uint64 pfn = pa & PFN_FILTER; |
| 96 int64 index = pfn * sizeof(uint64); |
| 97 if (lseek64(kpageflags_fd_, index, SEEK_SET) != index) { |
| 98 RAW_LOG(ERROR, "kpageflags seek failed. errno %d",errno); |
| 99 return false; |
| 100 } |
| 101 uint64 flags; |
| 102 if (read(kpageflags_fd_, &flags, sizeof(uint64)) < 0) { |
| 103 RAW_LOG(ERROR, "kpageflags read failed. errno %d",errno); |
| 104 return false; |
| 105 } |
| 106 if(flags & FLAG_NOPAGE) RAW_LOG(ERROR,"NOPAGE at present page frame %"PRId64
"", pfn); |
| 107 state->is_shared = (flags & FLAG_KSM); |
| 108 state->is_mmap = (flags & FLAG_MMAP); |
| 109 }else{ |
| 110 state->is_shared = false; |
| 111 } |
| 112 |
| 113 return true; |
| 114 } |
| 115 |
// Returns how many bytes of [addr, addr + size) are backed by committed
// (present or swapped) pages.  Walks the pagemap entry of every page the
// range touches, trimming the first and last page to the part of the
// allocation that actually lies on them.  Returns 0 when the pagemap
// cannot be read for this range (e.g. the [vsyscall] region).
uint64 DeepMemoryProfiler::GetCommittedSize(uint64 addr, uint64 size) {
  uint64 page_addr = (addr / PAGE_SIZE) * PAGE_SIZE;  // start of first page
  uint64 committed_size = 0;

  // Position pagemap_fd_ at the entry for the first page; PageMapRead()
  // below then consumes consecutive entries, one per iteration.
  PageMapSeek(addr);
  // Check every page on which the allocation resides.
  while(page_addr < addr + size) {
    // Read the state of the corresponding physical page.
    PageState state;
    if(PageMapRead(&state) == false){
      // We can't read the last region (e.g vsyscall)
      RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", addr, size);
      return 0;
    }

    if(state.is_committed){
      // Calculate the size of the allocation's part within this page.
      uint64 bytes = PAGE_SIZE;
      // Trim the head of the first page that precedes the allocation.
      if(page_addr < addr)
        bytes -= addr - page_addr;
      // Trim the tail of the last page that follows the allocation.
      if(addr + size < page_addr + PAGE_SIZE)
        bytes -= PAGE_SIZE - (addr + size - page_addr);

      committed_size += bytes;
    }
    page_addr += PAGE_SIZE;
  }

  return committed_size;
}
| 146 |
| 147 void DeepMemoryProfiler::InitRegionStats(RegionStats* stats) { |
| 148 stats->virtual_bytes = 0; |
| 149 stats->committed_bytes = 0; |
| 150 } |
| 151 |
| 152 void DeepMemoryProfiler::RecordRegionStats(uint64 start, uint64 end, |
| 153 RegionStats* stats) { |
| 154 stats->virtual_bytes += end - start; |
| 155 stats->committed_bytes += GetCommittedSize(start, end - start); |
| 156 } |
| 157 |
// Walks /proc/self/maps and accumulates per-category (total / file-mapped /
// anonymous / other) virtual and committed byte counts into stats_.
// [vsyscall] is skipped because its pagemap entries cannot be read.
// The large commented-out sections below are the disabled per-region
// bookkeeping (regions_ map and a .maps dump file).
void DeepMemoryProfiler::GetGlobalStats() {
  ProcMapsIterator::Buffer iterbuf;
  ProcMapsIterator it(0, &iterbuf);
  uint64 start, end, offset;
  int64 inode;
  char *flags, *filename;

  InitRegionStats(&(stats_.total));
  InitRegionStats(&(stats_.file_mapped));
  InitRegionStats(&(stats_.anonymous));
  InitRegionStats(&(stats_.other));

  /*
  char file_name[1000];
  snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.maps",
           filename_prefix_, getpid(), dump_count_);

  RawFD maps_fd = RawOpenForWriting(file_name);
  RAW_DCHECK(maps_fd != kIllegalRawFD, "");

  // We use global buffer here
  char* buf = profiler_buffer_;
  int bufsize = kProfilerBufferSize;
  int buflen = 0;
  int64 mmap_end = 0;
  const AllocValue* v = NULL;
  */

  while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
    if(strcmp("[vsyscall]", filename) == 0) continue;  // pagemap read fails in this region

    // committed_bytes captures how much this single region added to the
    // total; it is only consumed by the commented-out dump code below.
    int64 committed_bytes = stats_.total.committed_bytes;
    RecordRegionStats(start, end, &(stats_.total));
    committed_bytes = stats_.total.committed_bytes - committed_bytes;

    // Classify the region by its mapped file name.
    // NOTE(review): comparing a char to EOF (an int, usually -1) is
    // suspicious -- it matches byte 0xFF only on signed-char platforms;
    // presumably the intent was just "no filename".  TODO confirm.
    if(filename[0] == '/') {
      RecordRegionStats(start, end, &(stats_.file_mapped));
    }else if(filename[0] == '\0' || filename[0] == '\n' || filename[0] == EOF){
      RecordRegionStats(start, end, &(stats_.anonymous));
    }else{
      RecordRegionStats(start, end, &(stats_.other));
    }
    /*
    RegionValue rv;
    rv.start = start;
    rv.end = end;
    rv.size = end - start;
    rv.committed_size = committed_bytes;
    rv.recorded_size = 0;
    rv.recorded_committed_size = 0;
    memcpy(rv.permissions, flags, 4);
    rv.permissions[4] = '\0';
    memcpy(rv.filename, filename, strlen(filename) + 1);
    regions_->Insert(reinterpret_cast<void*>(start), rv);

    if(rv.size > max_region_size_)
      max_region_size_ = rv.size;
    */

    /*
    uint64 recorded_bytes = 0;
    if(mmap_end > start && v != NULL){
      // This region is a part of the previous mmap allocation
      if(mmap_end >= end)
        recorded_bytes = end - start;
      else
        recorded_bytes = mmap_end - start;
    }else{
      v = heap_profile_->allocation_mmap_->Find(reinterpret_cast<void*>(start));
      if(v != NULL){
        mmap_end = start + v->bytes;
        if(mmap_end >= end)
          recorded_bytes = end - start;
        else
          recorded_bytes = mmap_end - start;
        while(mmap_end < end){
          // There might be multiple mmap allocations in this region
          v = heap_profile_->allocation_mmap_->Find(reinterpret_cast<void*>(mmap_end));
          if(v == NULL)
            break;
          else{
            mmap_end += v->bytes;
            recorded_bytes += v->bytes;
          }
        }
      }
    }

    buflen += snprintf(buf + buflen, bufsize - buflen,
                       "%#llx-%#llx %10"PRId64" %10"PRId64" %10"PRId64" %s\n",
                       start, end, (end - start), committed_bytes, recorded_bytes,
                       filename);
    */
  }

  //RawWrite(maps_fd, buf, buflen);
  //RawClose(maps_fd);
}
| 256 |
// Charges the allocation [addr, addr + size) to the /proc/self/maps
// region(s) it falls into, updating each region's recorded (attributed)
// sizes.  When the allocation spans a region boundary, the remainder is
// handled by recursing on the tail.
// NOTE(review): this is only reachable from a commented-out call in
// RecordAlloc(), and regions_ is only populated by code that is also
// commented out in GetGlobalStats() -- effectively dead at the moment.
void DeepMemoryProfiler::RecordAllocInRegions(uint64 addr, uint64 size) {
  const void* regions_start;
  // Find the region whose [start, start + size) interval contains addr.
  const RegionValue* rv_const = regions_->FindInside(&GetRegionSize,
                                                     (size_t)max_region_size_,
                                                     (void*)addr, &regions_start);

  if(rv_const == NULL) {
    RAW_LOG(0, "Could't find a region for allocation!");
    return;
  }
  // Re-look up a mutable entry so the recorded_* fields can be updated.
  RegionValue* rv = regions_->FindMutable(regions_start);
  RAW_DCHECK(rv->start <= addr, "");
  RAW_DCHECK(rv->end > addr, "");

  if(rv->end >= addr + size) {
    // This region includes the whole allocation.
    rv->recorded_size += size;
    rv->recorded_committed_size += GetCommittedSize(addr, size);
  }else{
    // This region includes only a part of the allocation.
    // Need to find the other regions that include this allocation.
    rv->recorded_size += rv->end - addr;
    rv->recorded_committed_size += GetCommittedSize(addr, rv->end - addr);
    RecordAllocInRegions(rv->end, addr + size - rv->end);
  }
}
| 283 |
| 284 void DeepMemoryProfiler::RecordAlloc(const void* ptr, AllocValue* v, |
| 285 DeepMemoryProfiler* deep_profiler) { |
| 286 uint64 alloc_addr = (uint64)ptr; |
| 287 uint64 committed = deep_profiler->GetCommittedSize(alloc_addr, v->bytes); |
| 288 |
| 289 v->bucket()->committed_size += committed; |
| 290 if(deep_profiler->recording_mmap_){ |
| 291 deep_profiler->stats_.record_mmap.virtual_bytes += v->bytes; |
| 292 deep_profiler->stats_.record_mmap.committed_bytes += committed; |
| 293 // Record this allocation in the region map |
| 294 //deep_profiler->RecordAllocInRegions(alloc_addr, v->bytes); |
| 295 }else{ |
| 296 deep_profiler->stats_.record_tcmalloc.virtual_bytes += v->bytes; |
| 297 deep_profiler->stats_.record_tcmalloc.committed_bytes += committed; |
| 298 } |
| 299 } |
| 300 |
| 301 void DeepMemoryProfiler::RecordAllAllocs() { |
| 302 stats_.record_mmap.virtual_bytes = 0; |
| 303 stats_.record_mmap.committed_bytes = 0; |
| 304 stats_.record_tcmalloc.virtual_bytes = 0; |
| 305 stats_.record_tcmalloc.committed_bytes = 0; |
| 306 |
| 307 // Tcmalloc allocs |
| 308 recording_mmap_ = false; |
| 309 heap_profile_->allocation_->Iterate(RecordAlloc, this); |
| 310 |
| 311 // Mmap allocs |
| 312 recording_mmap_ = true; |
| 313 heap_profile_->allocation_mmap_->Iterate(RecordAlloc, this); |
| 314 } |
| 315 |
| 316 |
| 317 void DeepMemoryProfiler::WriteLeakyRegion(const void* ptr, |
| 318 RegionValue* rv, |
| 319 BufferArgs* buffer) { |
| 320 if(rv->committed_size > rv->recorded_committed_size){ |
| 321 //if(rv->filename[0] != '\0') |
| 322 // return; |
| 323 if(buffer->len >= buffer->size) |
| 324 return; |
| 325 |
| 326 int printed = snprintf(buffer->buf + buffer->len, buffer->size - buffer->len
, |
| 327 "%#llx-%#llx %s %10"PRId64" %10"PRId64"" |
| 328 " %10"PRId64" %10"PRId64" %s\n", |
| 329 rv->start, rv->end, rv->permissions, |
| 330 rv->size - rv->recorded_size, rv->size, |
| 331 rv->committed_size - rv->recorded_committed_size, |
| 332 rv->committed_size, |
| 333 rv->filename); |
| 334 |
| 335 if(printed >= buffer->size - buffer->len) |
| 336 return; |
| 337 buffer->len += printed; |
| 338 } |
| 339 } |
| 340 |
| 341 // Write leaked regions to a file |
| 342 void DeepMemoryProfiler::WriteAllLeakyRegions() { |
| 343 BufferArgs buffer; |
| 344 buffer.buf = profiler_buffer_; |
| 345 buffer.size = kProfilerBufferSize; |
| 346 buffer.len = 0; |
| 347 regions_->Iterate(WriteLeakyRegion, &buffer); |
| 348 |
| 349 char leaks_file_name[1000]; |
| 350 snprintf(leaks_file_name, sizeof(leaks_file_name), "%s.%05d.%04d.leaks", |
| 351 filename_prefix_, getpid(), dump_count_); |
| 352 |
| 353 RawFD leaks_fd = RawOpenForWriting(leaks_file_name); |
| 354 RAW_DCHECK(leaks_fd != kIllegalRawFD, ""); |
| 355 RawWrite(leaks_fd, buffer.buf, buffer.len); |
| 356 RawClose(leaks_fd); |
| 357 } |
| 358 |
| 359 void DeepMemoryProfiler::WriteMapsToFile(char buf[], int size) { |
| 360 char file_name[100]; |
| 361 snprintf(file_name, sizeof(file_name), "%s.%05d.maps", filename_prefix_, getpi
d()); |
| 362 |
| 363 RawFD maps_fd = RawOpenForWriting(file_name); |
| 364 RAW_DCHECK(maps_fd != kIllegalRawFD, ""); |
| 365 |
| 366 int map_length; |
| 367 bool dummy; // "wrote_all" -- did /proc/self/maps fit in its entirety? |
| 368 map_length = FillProcSelfMaps(profiler_buffer_, kProfilerBufferSize, &dummy); |
| 369 RAW_DCHECK(map_length <= kProfilerBufferSize, ""); |
| 370 RawWrite(maps_fd, profiler_buffer_, map_length); |
| 371 RawClose(maps_fd); |
| 372 } |
| 373 |
| 374 int DeepMemoryProfiler::WriteBucket(const Bucket* b, char buf[], int bufsize) { |
| 375 int buflen = 0; |
| 376 buflen += snprintf(buf + buflen, bufsize - buflen, "%05d", b->id); |
| 377 for (int d = 0; d < b->depth; d++) { |
| 378 buflen += snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR, |
| 379 reinterpret_cast<uintptr_t>(b->stack[d])); |
| 380 } |
| 381 buflen += snprintf(buf + buflen, bufsize - buflen, "\n"); |
| 382 return buflen; |
| 383 } |
| 384 |
| 385 void DeepMemoryProfiler::WriteBucketsToFile() { |
| 386 char file_name[100]; |
| 387 snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.buckets", |
| 388 filename_prefix_, getpid(), dump_count_); |
| 389 RawFD bucket_fd = RawOpenForWriting(file_name); |
| 390 RAW_DCHECK(bucket_fd != kIllegalRawFD, ""); |
| 391 |
| 392 // We will use the global buffer here |
| 393 char* buf = profiler_buffer_; |
| 394 int size = kProfilerBufferSize; |
| 395 int buflen = 0; |
| 396 for (int b = 0; b < kHashTableSize; b++) { |
| 397 for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) { |
| 398 if(x->is_logged) |
| 399 continue; // Skip the bucket if it is already logged |
| 400 if(x->alloc_size - x->free_size <= 64) |
| 401 continue; // Skip small buckets |
| 402 |
| 403 buflen += WriteBucket(x, buf + buflen, size - buflen); |
| 404 x->is_logged = true; |
| 405 |
| 406 // Write to file if buffer 80% full |
| 407 if(buflen > size * 0.8){ |
| 408 RawWrite(bucket_fd, buf, buflen); |
| 409 buflen = 0; |
| 410 } |
| 411 } |
| 412 } |
| 413 RawWrite(bucket_fd, buf, buflen); |
| 414 RawClose(bucket_fd); |
| 415 } |
| 416 |
| 417 int DeepMemoryProfiler::UnparseBucket(const Bucket& b, |
| 418 char* buf, int buflen, int bufsize, |
| 419 const char* extra, |
| 420 Stats* profile_stats) { |
| 421 if (profile_stats != NULL) { |
| 422 profile_stats->allocs += b.allocs; |
| 423 profile_stats->alloc_size += b.alloc_size; |
| 424 profile_stats->frees += b.frees; |
| 425 profile_stats->free_size += b.free_size; |
| 426 } |
| 427 int printed = |
| 428 snprintf(buf + buflen, bufsize - buflen, |
| 429 "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n", |
| 430 b.alloc_size - b.free_size, |
| 431 b.committed_size, |
| 432 b.allocs, b.frees, |
| 433 extra, |
| 434 b.id); |
| 435 // If it looks like the snprintf failed, ignore the fact we printed anything |
| 436 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
| 437 buflen += printed; |
| 438 |
| 439 return buflen; |
| 440 } |
| 441 |
| 442 int DeepMemoryProfiler::UnparseRegionStats(const RegionStats* stats, const char*
name, |
| 443 char* buf, int buflen, int bufsize) { |
| 444 int printed = snprintf(buf + buflen, bufsize - buflen, |
| 445 "%15s %10"PRId64" %10"PRId64"\n", |
| 446 name, |
| 447 stats->virtual_bytes, |
| 448 stats->committed_bytes); |
| 449 |
| 450 return buflen + printed; |
| 451 } |
| 452 |
| 453 int DeepMemoryProfiler::UnparseGlobalStats(char* buf, int buflen, int bufsize) { |
| 454 buflen += snprintf(buf + buflen, bufsize - buflen, |
| 455 "%15s %10s %10s\n", |
| 456 "", "virtual", "committed"); |
| 457 |
| 458 buflen = UnparseRegionStats(&(stats_.total), "total", buf, buflen, bufsize); |
| 459 buflen = UnparseRegionStats(&(stats_.file_mapped), "file mapped", buf, buflen,
bufsize); |
| 460 buflen = UnparseRegionStats(&(stats_.anonymous), "anonymous", buf, buflen, buf
size); |
| 461 buflen = UnparseRegionStats(&(stats_.other), "other", buf, buflen, bufsize); |
| 462 buflen = UnparseRegionStats(&(stats_.record_mmap), "mmap", buf, buflen, bufsiz
e); |
| 463 buflen = UnparseRegionStats(&(stats_.record_tcmalloc), "tcmalloc", buf, buflen
, bufsize); |
| 464 return buflen; |
| 465 } |
| 466 |
| 467 // Takes snapshot of current memory usage. |
| 468 // We avoid any memory allocations during snapshots. |
| 469 //void DeepMemoryProfiler::TakeMemorySnapshot() { |
| 470 //} |
| 471 |
// Produces one complete deep-memory-profile dump into |buf| (capacity
// |size|) and returns the number of bytes written.  Phases:
//   1. Re-open /proc/<pid>/pagemap and dump the maps file if the pid
//      changed (i.e. after a fork).
//   2. Collect global per-category region stats.
//   3. Zero and then recompute per-bucket committed sizes (no heap
//      allocations may happen between the "No allocation zone" markers,
//      since that would invalidate the measurements).
//   4. Serialize the header, global stats, and per-bucket stack traces
//      into |buf|, and write new buckets to their own file.
int DeepMemoryProfiler::FillOrderedProfile(char buf[], int size) {
  int64 start_time = CycleClock::Now();  // for the timing log at the end
  dump_count_++;

  // We need to re-open files in /proc/pid/ if a new pid was created.
  // NOTE(review): most_recent_pid_ is read here before any visible
  // initialization -- presumably it starts at a non-pid value; verify.
  if(most_recent_pid_ != getpid()) {
    most_recent_pid_ = getpid();
    OpenPageMap();
    WriteMapsToFile(profiler_buffer_, kProfilerBufferSize);  // Using the global buffer
  }


  //regions_ = new(heap_profile_->alloc_(sizeof(RegionMap)))
  //    RegionMap(heap_profile_->alloc_, heap_profile_->dealloc_);
  //max_region_size_ = 0;
  GetGlobalStats();
  // Remember the anonymous committed total so we can detect whether it
  // drifted while RecordAllAllocs() was running.
  uint64 anonymous_committed = stats_.anonymous.committed_bytes;

  // Reset the committed size of every bucket before remeasuring.
  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) {
      x->committed_size = 0;
    }
  }

  //Bucket** list = heap_profile_->MakeBucketList();
  //for (int i = 0; i < heap_profile_->num_buckets_; i++) {
  //  list[i]->committed_size = 0;
  //}

  // No allocation zone starts -----------------------------------

  // Record committed sizes of all live allocations into their buckets.
  RecordAllAllocs();

  // Check if committed bytes changed during RecordAllAllocs.
  GetGlobalStats();
  uint64 comm_diff = stats_.anonymous.committed_bytes - anonymous_committed;
  if(comm_diff != 0)
    RAW_LOG(0, "committed diff: %"PRId64"", comm_diff);

  HeapProfileTable::Stats stats;
  memset(&stats, 0, sizeof(stats));

  int bucket_length = snprintf(buf, size, kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;

  // Print the global stats section.
  bucket_length += snprintf(buf + bucket_length, size - bucket_length, kGlobalStatsHeader);
  bucket_length = UnparseGlobalStats(buf, bucket_length, size);

  // Print the stacktrace section (one line per non-empty bucket).
  bucket_length += snprintf(buf + bucket_length, size - bucket_length, kStacktraceHeader);
  bucket_length += snprintf(buf + bucket_length, size - bucket_length,
                            "%10s %10s\n", "virtual", "committed");

  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = heap_profile_->table_[b]; x != 0; x = x->next) {
      if(x->alloc_size - x->free_size == 0)
        continue;  // Skip empty buckets.
      bucket_length = UnparseBucket(*x, buf, bucket_length, size, "",
                                    &stats);
    }
  }

  RAW_DCHECK(bucket_length < size, "");
  // No allocation zone ends -----------------------------------

  //WriteAllLeakyRegions();
  // Write the stack traces of newly seen buckets to a separate file.
  WriteBucketsToFile();

  //regions_->~RegionMap();
  //heap_profile_->dealloc_(regions_);

  // Log how long this dump took (cycle counter converted to seconds).
  int64 dt = CycleClock::Now() - start_time;
  double dtf = dt / CyclesPerSecond();
  RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", dtf);

  return bucket_length;
}
OLD | NEW |