OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 // --- |
| 6 // Author: Sainbayar Sukhbaatar |
| 7 // Dai Mikurube |
| 8 // |
| 9 |
| 10 #include "deep-heap-profile.h" |
| 11 |
| 12 #ifdef DEEP_HEAP_PROFILE |
| 13 #include <fcntl.h> |
| 14 #include <sys/stat.h> |
| 15 #include <sys/types.h> |
| 16 #ifdef HAVE_UNISTD_H |
| 17 #include <unistd.h> // for getpagesize and getpid |
| 18 #endif // HAVE_UNISTD_H |
| 19 |
| 20 #include "base/cycleclock.h" |
| 21 #include "base/sysinfo.h" |
| 22 |
| 23 static const int kProfilerBufferSize = 1 << 20; |
| 24 static const int kHashTableSize = 179999; // The same as heap-profile-table.cc. |
| 25 |
| 26 static const int PAGEMAP_BYTES = 8; |
| 27 static const uint64 MAX_ADDRESS = kuint64max; |
| 28 |
| 29 // Header strings of the dumped heap profile. |
| 30 static const char kProfileHeader[] = "heap profile: "; |
| 31 static const char kProfileVersion[] = "DUMP_DEEP_3"; |
| 32 static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n"; |
| 33 static const char kMMapStacktraceHeader[] = "MMAP_STACKTRACES:\n"; |
| 34 static const char kAllocStacktraceHeader[] = "MALLOC_STACKTRACES:\n"; |
| 35 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; |
| 36 |
| 37 static const char kVirtualLabel[] = "virtual"; |
| 38 static const char kCommittedLabel[] = "committed"; |
| 39 |
// Constructs a deep profiler bound to |heap_profile|.  All internal memory
// (the DeepBucket map, the copied filename prefix, and the global dump
// buffer) is obtained through heap_profile's own alloc_/dealloc_ hooks
// rather than plain new/delete.  |prefix| is the path prefix for every
// file this profiler writes (.maps and .buckets files).
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : pagemap_fd_(-1),       // Opened lazily on the first dump.
      most_recent_pid_(-1),  // -1 forces per-pid setup on the first dump.
      stats_(),
      dump_count_(0),
      filename_prefix_(NULL),
      profiler_buffer_(NULL),
      bucket_id_(0),
      heap_profile_(heap_profile) {
  // Placement-new the bucket map into storage from the profile's allocator;
  // the destructor tears it down in the matching explicit way.
  deep_bucket_map_ = new(heap_profile_->alloc_(sizeof(DeepBucketMap)))
      DeepBucketMap(heap_profile_->alloc_, heap_profile_->dealloc_);

  // Copy filename prefix.
  const int prefix_length = strlen(prefix);
  filename_prefix_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
  memcpy(filename_prefix_, prefix, prefix_length);
  filename_prefix_[prefix_length] = '\0';

  profiler_buffer_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize));
}
| 63 |
// Releases the constructor's allocations in reverse order.  The bucket map
// was placement-new'ed, so it gets an explicit destructor call before its
// storage is handed back to dealloc_.
DeepHeapProfile::~DeepHeapProfile() {
  heap_profile_->dealloc_(profiler_buffer_);
  heap_profile_->dealloc_(filename_prefix_);
  deep_bucket_map_->~DeepBucketMap();
  heap_profile_->dealloc_(deep_bucket_map_);
}
| 70 |
// Dumps the deep heap profile into |buffer| (of |buffer_size| bytes) and
// returns the number of bytes used.  As side effects it may write a
// "<prefix>.<pid>.maps" file (on the first dump after a fork) and a
// "<prefix>.<pid>.<dump count>.buckets" file with newly seen stack traces.
// NOTE: IsPrintedStringValid() returns true when the preceding snprintf
// did NOT fit; each early return below reports how much was validly
// written so far.
int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
#ifndef NDEBUG
  int64 starting_cycles = CycleClock::Now();
#endif
  ++dump_count_;

  // Re-open files in /proc/pid/ if the process is newly forked one.
  if (most_recent_pid_ != getpid()) {
    most_recent_pid_ = getpid();
    pagemap_fd_ = OpenProcPagemap();

    // Buckets logged by the parent process must be re-logged for this pid.
    deep_bucket_map_->Iterate(ClearIsLogged, this);

    // Write maps into a .maps file with using the global buffer.
    WriteMapsToFile(filename_prefix_, kProfilerBufferSize, profiler_buffer_);
  }

  // Reset committed sizes of buckets.
  ResetCommittedSize(heap_profile_->alloc_table_);
  ResetCommittedSize(heap_profile_->mmap_table_);

  // First global snapshot; used below to detect concurrent drift of the
  // committed size while allocations are being walked.
  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
  size_t anonymous_committed = stats_.anonymous.committed_bytes();

  // Note: Try to minimize the number of calls to malloc in the following
  // region, up until we call WriteBucketsToBucketFile(), near the end of this
  // function.  Calling malloc in the region may make a gap between the
  // observed size and actual memory allocation.  The gap is less than or equal
  // to the size of allocated memory in the region.  Calls to malloc won't
  // break anything, but can add some noise to the recorded information.
  // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
  // glibc's snprintf internally allocates memory by alloca normally, but it
  // allocates memory by malloc if large memory is required.

  // Record committed sizes.
  SnapshotAllAllocsWithoutMalloc();

  // Check if committed bytes changed during SnapshotAllAllocsWithoutMalloc.
  SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
#ifndef NDEBUG
  size_t committed_difference =
      stats_.anonymous.committed_bytes() - anonymous_committed;
  if (committed_difference != 0) {
    RAW_LOG(0, "Difference in committed size: %ld", committed_difference);
  }
#endif

  // Start filling buffer with the ordered profile.
  int printed = snprintf(buffer, buffer_size,
                         "%s%s\n", kProfileHeader, kProfileVersion);
  if (IsPrintedStringValid(printed, buffer_size, 0)) {
    return 0;
  }
  int used_in_buffer = printed;

  // Fill buffer with the global stats.
  // (kGlobalStatsHeader contains no '%', so passing it as the format is safe.)
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     kGlobalStatsHeader);
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  used_in_buffer = UnparseGlobalStats(used_in_buffer, buffer_size, buffer);

  // Fill buffer with the header for buckets of mmap'ed regions.
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     kMMapStacktraceHeader);
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     "%10s %10s\n", kVirtualLabel, kCommittedLabel);
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  // Fill buffer with stack trace buckets of mmap'ed regions.
  used_in_buffer = SnapshotBucketTableWithoutMalloc(heap_profile_->mmap_table_,
                                                    used_in_buffer,
                                                    buffer_size,
                                                    buffer);

  // Fill buffer with the header for buckets of allocated regions.
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     kAllocStacktraceHeader);
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     "%10s %10s\n", kVirtualLabel, kCommittedLabel);
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  // Fill buffer with stack trace buckets of allocated regions.
  used_in_buffer = SnapshotBucketTableWithoutMalloc(heap_profile_->alloc_table_,
                                                    used_in_buffer,
                                                    buffer_size,
                                                    buffer);

  RAW_DCHECK(used_in_buffer < buffer_size, "");

  // Note: Memory snapshots are complete, and malloc may again be used freely.

  // Write the bucket listing into a .bucket file.
  WriteBucketsToBucketFile();

#ifndef NDEBUG
  int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
  double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
  RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
#endif

  return used_in_buffer;
}
| 193 |
| 194 void DeepHeapProfile::RegionStats::Initialize() { |
| 195 virtual_bytes_ = 0; |
| 196 committed_bytes_ = 0; |
| 197 } |
| 198 |
// Accumulates the region [first_address, last_address] (inclusive bounds)
// into this stats object: the whole span counts toward virtual bytes, and
// the portion backed by resident or swapped pages (per /proc/<pid>/pagemap)
// toward committed bytes.
void DeepHeapProfile::RegionStats::Record(
    int pagemap_fd, uint64 first_address, uint64 last_address) {
  virtual_bytes_ += static_cast<size_t>(last_address - first_address + 1);
  committed_bytes_ += GetCommittedSize(pagemap_fd, first_address, last_address);
}
| 204 |
| 205 // static |
| 206 bool DeepHeapProfile::IsPrintedStringValid(int printed, |
| 207 int buffer_size, |
| 208 int used_in_buffer) { |
| 209 return printed < 0 || printed >= buffer_size - used_in_buffer; |
| 210 } |
| 211 |
// TODO(dmikurube): Avoid calling ClearIsLogged to rewrite buckets by add a
// reference to a previous file in a .heap file.
// static
// Callback for deep_bucket_map_->Iterate: marks |deep_bucket| as not yet
// logged so it is written again into the next .buckets file.  Used by
// FillOrderedProfile after a fork, when bucket files restart for a new pid.
void DeepHeapProfile::ClearIsLogged(const void* pointer,
                                    DeepHeapProfile::DeepBucket* deep_bucket,
                                    DeepHeapProfile* deep_profile) {
  deep_bucket->is_logged = false;
}
| 220 |
| 221 // static |
| 222 int DeepHeapProfile::OpenProcPagemap() { |
| 223 char filename[100]; |
| 224 snprintf(filename, sizeof(filename), "/proc/%d/pagemap", |
| 225 static_cast<int>(getpid())); |
| 226 int pagemap_fd = open(filename, O_RDONLY); |
| 227 RAW_DCHECK(pagemap_fd != -1, "Failed to open /proc/self/pagemap"); |
| 228 return pagemap_fd; |
| 229 } |
| 230 |
| 231 // static |
| 232 bool DeepHeapProfile::SeekProcPagemap(int pagemap_fd, uint64 address) { |
| 233 int64 index = (address / getpagesize()) * PAGEMAP_BYTES; |
| 234 int64 offset = lseek64(pagemap_fd, index, SEEK_SET); |
| 235 RAW_DCHECK(offset == index, "Failed in seeking."); |
| 236 return offset >= 0; |
| 237 } |
| 238 |
| 239 // static |
| 240 bool DeepHeapProfile::ReadProcPagemap(int pagemap_fd, PageState* state) { |
| 241 static const uint64 U64_1 = 1; |
| 242 static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1; |
| 243 static const uint64 PAGE_PRESENT = U64_1 << 63; |
| 244 static const uint64 PAGE_SWAP = U64_1 << 62; |
| 245 static const uint64 PAGE_RESERVED = U64_1 << 61; |
| 246 static const uint64 FLAG_NOPAGE = U64_1 << 20; |
| 247 static const uint64 FLAG_KSM = U64_1 << 21; |
| 248 static const uint64 FLAG_MMAP = U64_1 << 11; |
| 249 |
| 250 uint64 pagemap_value; |
| 251 int result = read(pagemap_fd, &pagemap_value, PAGEMAP_BYTES); |
| 252 if (result != PAGEMAP_BYTES) { |
| 253 return false; |
| 254 } |
| 255 |
| 256 // Check if the page is committed. |
| 257 state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP)); |
| 258 |
| 259 state->is_present = (pagemap_value & PAGE_PRESENT); |
| 260 state->is_swapped = (pagemap_value & PAGE_SWAP); |
| 261 state->is_shared = false; |
| 262 |
| 263 return true; |
| 264 } |
| 265 |
// static
// Returns the number of committed (resident or swapped) bytes within the
// inclusive address range [first_address, last_address], by walking the
// per-page entries of /proc/<pid>/pagemap.  Returns 0 if any pagemap entry
// in the range cannot be read (e.g. [vsyscall]).
size_t DeepHeapProfile::GetCommittedSize(
    int pagemap_fd, uint64 first_address, uint64 last_address) {
  int page_size = getpagesize();
  // Page-aligned address of the page containing first_address.
  uint64 page_address = (first_address / page_size) * page_size;
  size_t committed_size = 0;

  SeekProcPagemap(pagemap_fd, first_address);

  // Check every page on which the allocation resides.
  while (page_address <= last_address) {
    // Read corresponding physical page.
    PageState state;
    if (ReadProcPagemap(pagemap_fd, &state) == false) {
      // We can't read the last region (e.g vsyscall).
#ifndef NDEBUG
      RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes",
              first_address, last_address - first_address + 1);
#endif
      return 0;
    }

    if (state.is_committed) {
      // Calculate the size of the allocation part in this page.
      size_t bytes = page_size;

      // If looking at the last page in a given region.
      if (last_address <= page_address - 1 + page_size) {
        bytes = last_address - page_address + 1;
      }

      // If looking at the first page in a given region.
      if (page_address < first_address) {
        bytes -= first_address - page_address;
      }

      committed_size += bytes;
    }
    // Stop before page_address + page_size wraps around the 64-bit space.
    if (page_address > MAX_ADDRESS - page_size) {
      break;
    }
    page_address += page_size;
  }

  return committed_size;
}
| 312 |
// static
// Dumps /proc/self/maps into a newly created "<prefix>.<pid>.maps" file,
// formatting through the caller-supplied |buffer| of |buffer_size| bytes
// (the global profiler buffer in practice).
void DeepHeapProfile::WriteMapsToFile(const char* filename_prefix,
                                      int buffer_size,
                                      char buffer[]) {
  char filename[100];
  snprintf(filename, sizeof(filename),
           "%s.%05d.maps", filename_prefix, static_cast<int>(getpid()));

  RawFD maps_fd = RawOpenForWriting(filename);
  RAW_DCHECK(maps_fd != kIllegalRawFD, "");

  int map_length;
  bool wrote_all;
  // The buffer is expected to be large enough to hold the whole maps dump.
  map_length = tcmalloc::FillProcSelfMaps(buffer, buffer_size, &wrote_all);
  RAW_DCHECK(wrote_all, "");
  RAW_DCHECK(map_length <= buffer_size, "");
  RawWrite(maps_fd, buffer, map_length);
  RawClose(maps_fd);
}
| 332 |
// TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
// ProcMapsIterator uses snprintf internally in construction.
// static
// Fills |stats| with virtual/committed byte counts for every region in
// /proc/<pid>/maps, categorized into: total (all regions), file_mapped
// (backed by a path starting with '/'), anonymous (no name), and other
// (named special regions such as [stack]).
void DeepHeapProfile::SnapshotGlobalStatsWithoutMalloc(int pagemap_fd,
                                                       GlobalStats* stats) {
  ProcMapsIterator::Buffer iterator_buffer;
  ProcMapsIterator iterator(0, &iterator_buffer);
  uint64 first_address, last_address, offset;
  int64 unused_inode;
  char* flags;
  char* filename;

  stats->total.Initialize();
  stats->file_mapped.Initialize();
  stats->anonymous.Initialize();
  stats->other.Initialize();

  while (iterator.Next(&first_address, &last_address,
                       &flags, &offset, &unused_inode, &filename)) {
    // 'last_address' should be the last inclusive address of the region.
    last_address -= 1;
    if (strcmp("[vsyscall]", filename) == 0) {
      continue;  // Reading pagemap will fail in [vsyscall].
    }

    stats->total.Record(pagemap_fd, first_address, last_address);

    if (filename[0] == '/') {
      stats->file_mapped.Record(pagemap_fd, first_address, last_address);
    } else if (filename[0] == '\0' || filename[0] == '\n') {
      stats->anonymous.Record(pagemap_fd, first_address, last_address);
    } else {
      stats->other.Record(pagemap_fd, first_address, last_address);
    }
  }
}
| 369 |
// Returns the DeepBucket corresponding to |bucket|, creating and
// registering a zero-initialized one on first sight.  The returned pointer
// refers to the map's own copy, so callers may mutate it in place.
DeepHeapProfile::DeepBucket* DeepHeapProfile::GetDeepBucket(Bucket* bucket) {
  DeepBucket* found = deep_bucket_map_->FindMutable(bucket);
  if (found != NULL)
    return found;

  DeepBucket created;
  created.bucket = bucket;
  created.committed_size = 0;
  created.id = (bucket_id_++);  // Sequential id, also used in .buckets files.
  created.is_logged = false;
  deep_bucket_map_->Insert(bucket, created);
  // Re-find so the caller gets a pointer into the map, not the local copy.
  return deep_bucket_map_->FindMutable(bucket);
}
| 383 |
| 384 void DeepHeapProfile::ResetCommittedSize(Bucket** bucket_table) { |
| 385 for (int i = 0; i < kHashTableSize; i++) { |
| 386 for (Bucket* bucket = bucket_table[i]; |
| 387 bucket != NULL; |
| 388 bucket = bucket->next) { |
| 389 DeepBucket* deep_bucket = GetDeepBucket(bucket); |
| 390 deep_bucket->committed_size = 0; |
| 391 } |
| 392 } |
| 393 } |
| 394 |
| 395 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf. |
| 396 int DeepHeapProfile::SnapshotBucketTableWithoutMalloc(Bucket** bucket_table, |
| 397 int used_in_buffer, |
| 398 int buffer_size, |
| 399 char buffer[]) { |
| 400 for (int i = 0; i < kHashTableSize; i++) { |
| 401 for (Bucket* bucket = bucket_table[i]; |
| 402 bucket != NULL; |
| 403 bucket = bucket->next) { |
| 404 if (bucket->alloc_size - bucket->free_size == 0) { |
| 405 continue; // Skip empty buckets. |
| 406 } |
| 407 const DeepBucket* deep_bucket = GetDeepBucket(bucket); |
| 408 used_in_buffer = UnparseBucket( |
| 409 *deep_bucket, "", used_in_buffer, buffer_size, buffer, NULL); |
| 410 } |
| 411 } |
| 412 return used_in_buffer; |
| 413 } |
| 414 |
// Callback for alloc_address_map_->Iterate: accumulates the committed size
// of one malloc'ed allocation (starting at |pointer|, alloc_value->bytes
// long) into its DeepBucket and into the global record_malloc stats.
void DeepHeapProfile::RecordAlloc(const void* pointer,
                                  AllocValue* alloc_value,
                                  DeepHeapProfile* deep_profile) {
  uint64 address = reinterpret_cast<uintptr_t>(pointer);
  // [address, address + bytes - 1] is the allocation's inclusive range.
  size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
      address, address + alloc_value->bytes - 1);

  DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
  deep_bucket->committed_size += committed;
  deep_profile->stats_.record_malloc.AddToVirtualBytes(alloc_value->bytes);
  deep_profile->stats_.record_malloc.AddToCommittedBytes(committed);
}
| 427 |
// Callback for mmap_address_map_->Iterate: accumulates the committed size
// of one mmap'ed region (starting at |pointer|, alloc_value->bytes long)
// into its DeepBucket and into the global record_mmap stats.  Mirrors
// RecordAlloc for the mmap profile.
void DeepHeapProfile::RecordMMap(const void* pointer,
                                 AllocValue* alloc_value,
                                 DeepHeapProfile* deep_profile) {
  uint64 address = reinterpret_cast<uintptr_t>(pointer);
  // [address, address + bytes - 1] is the region's inclusive range.
  size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
      address, address + alloc_value->bytes - 1);

  DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
  deep_bucket->committed_size += committed;
  deep_profile->stats_.record_mmap.AddToVirtualBytes(alloc_value->bytes);
  deep_profile->stats_.record_mmap.AddToCommittedBytes(committed);
}
| 440 |
| 441 void DeepHeapProfile::SnapshotAllAllocsWithoutMalloc() { |
| 442 stats_.record_mmap.Initialize(); |
| 443 stats_.record_malloc.Initialize(); |
| 444 |
| 445 // malloc allocations. |
| 446 heap_profile_->alloc_address_map_->Iterate(RecordAlloc, this); |
| 447 |
| 448 // mmap allocations. |
| 449 heap_profile_->mmap_address_map_->Iterate(RecordMMap, this); |
| 450 } |
| 451 |
// Formats a single bucket line for the .buckets file into |buffer|:
// "<5-digit id> 0x<frame addr> 0x<frame addr> ...\n", one hexadecimal
// address per stack frame.  Returns the number of bytes written (0 if even
// the id did not fit; a truncated line may be returned if a later frame or
// the newline did not fit).
int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket,
                                             int buffer_size,
                                             char buffer[]) {
  const Bucket* bucket = deep_bucket->bucket;
  int printed = snprintf(buffer, buffer_size, "%05d", deep_bucket->id);
  if (IsPrintedStringValid(printed, buffer_size, 0)) {
    return 0;
  }
  int used_in_buffer = printed;

  for (int depth = 0; depth < bucket->depth; depth++) {
    printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                       " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(bucket->stack[depth]));
    if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
      return used_in_buffer;
    }
    used_in_buffer += printed;
  }
  printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                     "\n");
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  return used_in_buffer;
}
| 480 |
// Writes all not-yet-logged buckets of |bucket_table| into |bucket_fd|,
// staging the formatted lines in the global profiler buffer and flushing
// whenever it passes ~80% full.  Buckets with at most 64 live bytes are
// skipped (and left unmarked, so they can still be written by a later dump
// once they grow).
void DeepHeapProfile::WriteBucketsTableToBucketFile(Bucket** bucket_table,
                                                    RawFD bucket_fd) {
  // We will use the global buffer here.
  char* buffer = profiler_buffer_;
  int buffer_size = kProfilerBufferSize;
  int used_in_buffer = 0;

  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* bucket = bucket_table[i];
         bucket != NULL;
         bucket = bucket->next) {
      DeepBucket* deep_bucket = GetDeepBucket(bucket);
      if (deep_bucket->is_logged) {
        continue;  // Skip the bucket if it is already logged.
      }
      if (bucket->alloc_size - bucket->free_size <= 64) {
        continue;  // Skip small buckets.
      }

      used_in_buffer += FillBucketForBucketFile(
          deep_bucket, buffer_size - used_in_buffer, buffer + used_in_buffer);
      deep_bucket->is_logged = true;

      // Write to file if buffer 80% full.
      if (used_in_buffer > buffer_size * 0.8) {
        RawWrite(bucket_fd, buffer, used_in_buffer);
        used_in_buffer = 0;
      }
    }
  }

  // Flush whatever remains after the last bucket.
  RawWrite(bucket_fd, buffer, used_in_buffer);
}
| 514 |
| 515 void DeepHeapProfile::WriteBucketsToBucketFile() { |
| 516 char filename[100]; |
| 517 snprintf(filename, sizeof(filename), |
| 518 "%s.%05d.%04d.buckets", filename_prefix_, getpid(), dump_count_); |
| 519 RawFD bucket_fd = RawOpenForWriting(filename); |
| 520 RAW_DCHECK(bucket_fd != kIllegalRawFD, ""); |
| 521 |
| 522 WriteBucketsTableToBucketFile(heap_profile_->alloc_table_, bucket_fd); |
| 523 WriteBucketsTableToBucketFile(heap_profile_->mmap_table_, bucket_fd); |
| 524 |
| 525 RawClose(bucket_fd); |
| 526 } |
| 527 |
// Formats one profile line for |deep_bucket| as
// "<virtual> <committed> <allocs> <frees> @<extra> <bucket id>\n" and
// appends it at |buffer| + |used_in_buffer|.  If |profile_stats| is
// non-NULL, the bucket's counters are accumulated into it regardless of
// whether the line fits.  Returns the updated byte count (unchanged when
// the line did not fit).
int DeepHeapProfile::UnparseBucket(const DeepBucket& deep_bucket,
                                   const char* extra,
                                   int used_in_buffer,
                                   int buffer_size,
                                   char* buffer,
                                   Stats* profile_stats) {
  const Bucket& bucket = *deep_bucket.bucket;
  if (profile_stats != NULL) {
    profile_stats->allocs += bucket.allocs;
    profile_stats->alloc_size += bucket.alloc_size;
    profile_stats->frees += bucket.frees;
    profile_stats->free_size += bucket.free_size;
  }

  // NOTE(review): committed_size is accumulated as size_t but printed with
  // PRId64 — this assumes a 64-bit size_t; confirm on 32-bit builds.
  int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                         "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n",
                         bucket.alloc_size - bucket.free_size,
                         deep_bucket.committed_size,
                         bucket.allocs, bucket.frees, extra, deep_bucket.id);
  // If it looks like the snprintf failed, ignore the fact we printed anything.
  if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  return used_in_buffer;
}
| 555 |
| 556 int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats, |
| 557 const char* name, |
| 558 int used_in_buffer, |
| 559 int buffer_size, |
| 560 char* buffer) { |
| 561 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, |
| 562 "%15s %10ld %10ld\n", |
| 563 name, stats->virtual_bytes(), |
| 564 stats->committed_bytes()); |
| 565 if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) { |
| 566 return used_in_buffer; |
| 567 } |
| 568 used_in_buffer += printed; |
| 569 |
| 570 return used_in_buffer; |
| 571 } |
| 572 |
| 573 int DeepHeapProfile::UnparseGlobalStats(int used_in_buffer, |
| 574 int buffer_size, |
| 575 char* buffer) { |
| 576 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, |
| 577 "%15s %10s %10s\n", "", |
| 578 kVirtualLabel, kCommittedLabel); |
| 579 if (IsPrintedStringValid(printed, buffer_size, used_in_buffer)) { |
| 580 return used_in_buffer; |
| 581 } |
| 582 used_in_buffer += printed; |
| 583 |
| 584 used_in_buffer = UnparseRegionStats(&(stats_.total), "total", |
| 585 used_in_buffer, buffer_size, buffer); |
| 586 used_in_buffer = UnparseRegionStats(&(stats_.file_mapped), "file mapped", |
| 587 used_in_buffer, buffer_size, buffer); |
| 588 used_in_buffer = UnparseRegionStats(&(stats_.anonymous), "anonymous", |
| 589 used_in_buffer, buffer_size, buffer); |
| 590 used_in_buffer = UnparseRegionStats(&(stats_.other), "other", |
| 591 used_in_buffer, buffer_size, buffer); |
| 592 used_in_buffer = UnparseRegionStats(&(stats_.record_mmap), "mmap", |
| 593 used_in_buffer, buffer_size, buffer); |
| 594 used_in_buffer = UnparseRegionStats(&(stats_.record_malloc), "tcmalloc", |
| 595 used_in_buffer, buffer_size, buffer); |
| 596 return used_in_buffer; |
| 597 } |
| 598 #else // DEEP_HEAP_PROFILE |
| 599 |
// Stub: with DEEP_HEAP_PROFILE disabled, only the underlying
// HeapProfileTable is kept so FillOrderedProfile can delegate to it;
// |prefix| is unused.
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : heap_profile_(heap_profile) {
}
| 604 |
// Stub destructor: nothing to release when deep profiling is disabled.
DeepHeapProfile::~DeepHeapProfile() {
}
| 607 |
// Stub: with DEEP_HEAP_PROFILE disabled, fall back to the ordinary
// (non-deep) heap profile dump of the underlying table.
int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
  return heap_profile_->FillOrderedProfile(buffer, buffer_size);
}
| 611 |
| 612 #endif // DEEP_HEAP_PROFILE |
OLD | NEW |