Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(11)

Side by Side Diff: third_party/tcmalloc/chromium/src/deep-heap-profile.cc

Issue 8632007: A deeper heap profile dumper in third_party/tcmalloc/chromium. (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Reflected the comments. Created 8 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // ---
6 // Author: Sainbayar Sukhbaatar
7 // Dai Mikurube
8 //
9
10 #include "deep-heap-profile.h"
11
12 #ifdef DEEP_HEAP_PROFILE
13 #include <fcntl.h>
14 #include <sys/stat.h>
15 #include <sys/types.h>
16 #ifdef HAVE_UNISTD_H
17 #include <unistd.h> // for getpagesize and getpid
18 #endif // HAVE_UNISTD_H
19
20 #include "base/cycleclock.h"
21 #include "base/sysinfo.h"
22
23 static const int kProfilerBufferSize = 1 << 20;
24 static const int kHashTableSize = 179999; // The same as heap-profile-table.cc.
25
26 static const int PAGEMAP_BYTES = 8;
27 static const uint64 TOP_ADDRESS = kuint64max;
28
29 // Header strings of the dumped heap profile.
30 static const char kProfileHeader[] = "heap profile: ";
31 static const char kProfileVersion[] = "DUMP_DEEP_3";
32 static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
33 static const char kMMapStacktraceHeader[] = "MMAP_STACKTRACES:\n";
34 static const char kAllocStacktraceHeader[] = "MALLOC_STACKTRACES:\n";
35 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
36
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : pagemap_fd_(-1),
      most_recent_pid_(-1),
      stats_(),
      dump_count_(0),
      filename_prefix_(NULL),
      profiler_buffer_(NULL),
      bucket_id_(0),
      heap_profile_(heap_profile) {
  // All profiler-internal memory comes from the profile table's own
  // allocator (alloc_/dealloc_) so the profiler does not recurse into or
  // account for its own allocations.  Placement new here means the
  // destructor must run ~DeepBucketMap() and dealloc_ explicitly.
  deep_bucket_map_ = new(heap_profile_->alloc_(sizeof(DeepBucketMap)))
      DeepBucketMap(heap_profile_->alloc_, heap_profile_->dealloc_);

  // Copy filename prefix.
  const int prefix_length = strlen(prefix);
  filename_prefix_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
  memcpy(filename_prefix_, prefix, prefix_length);
  filename_prefix_[prefix_length] = '\0';

  // Scratch buffer (kProfilerBufferSize bytes) shared by the dump routines.
  profiler_buffer_ =
      reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize));
}
60
DeepHeapProfile::~DeepHeapProfile() {
  // Free everything through the profile table's deallocator.
  // deep_bucket_map_ was built with placement new, so its destructor must be
  // invoked explicitly before releasing the raw storage.
  heap_profile_->dealloc_(profiler_buffer_);
  heap_profile_->dealloc_(filename_prefix_);
  deep_bucket_map_->~DeepBucketMap();
  heap_profile_->dealloc_(deep_bucket_map_);
}
67
68 int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
69 #ifndef NDEBUG
70 int64 starting_cycles = CycleClock::Now();
71 #endif
72 ++dump_count_;
73
74 // Re-open files in /proc/pid/ if the process is newly forked one.
75 if (most_recent_pid_ != getpid()) {
76 most_recent_pid_ = getpid();
77 pagemap_fd_ = OpenProcPagemap();
78
79 deep_bucket_map_->Iterate(ClearIsLogged, this);
80
81 // Write maps into a .maps file with using the global buffer.
82 WriteMapsToFile(filename_prefix_, kProfilerBufferSize, profiler_buffer_);
83 }
84
85 // Reset committed sizes of buckets.
86 ResetCommittedSize(heap_profile_->alloc_table_);
87 ResetCommittedSize(heap_profile_->mmap_table_);
88
89 SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
90 size_t anonymous_committed = stats_.anonymous.committed_bytes();
91
92 // Note: Least malloc from here. malloc here may cause a gap in the observed
jar (doing other things) 2012/03/16 01:20:02 nit: English: Change: "Least malloc from here" -->
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done.
93 // size from actual memory allocation. The size of the gap is the size of
94 // allocated memory at maximum. It doesn't cause violation.
jar (doing other things) 2012/03/16 01:20:02 nit: English: I would have proposed a better sente
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done. Thank you for good suggestion in English.
95 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
96 // glibc's snprintf internally allocates memory by alloca normally, but it
97 // allocates memory by malloc if large memory is required.
98
99 // Record committed sizes.
100 SnapshotAllAllocsWithoutMalloc();
101
102 // Check if committed bytes changed during SnapshotAllAllocsWithoutMalloc.
103 SnapshotGlobalStatsWithoutMalloc(pagemap_fd_, &stats_);
104 #ifndef NDEBUG
105 size_t committed_difference =
106 stats_.anonymous.committed_bytes() - anonymous_committed;
107 if (committed_difference != 0) {
108 RAW_LOG(0, "Difference in committed size: %ld", committed_difference);
109 }
110 #endif
111
112 // Start filling buffer with the ordered profile.
113 int printed = snprintf(buffer, buffer_size,
114 "%s%s\n", kProfileHeader, kProfileVersion);
115 if (printed < 0 || printed >= buffer_size) {
116 return 0;
117 }
118 int used_in_buffer = printed;
119
120 // Fill buffer with the global stats.
121 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
122 kGlobalStatsHeader);
123 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
124 return used_in_buffer;
125 }
126 used_in_buffer += printed;
127
128 used_in_buffer = UnparseGlobalStats(used_in_buffer, buffer_size, buffer);
129
130 // Fill buffer with the header for buckets of mmap'ed regions.
131 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
132 kMMapStacktraceHeader);
133 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
134 return used_in_buffer;
135 }
136 used_in_buffer += printed;
137
138 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
139 "%10s %10s\n", "virtual", "committed");
140 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
141 return used_in_buffer;
142 }
143 used_in_buffer += printed;
144
145 // Fill buffer with stack trace buckets of mmap'ed regions.
146 used_in_buffer = SnapshotBucketTableWithoutMalloc(heap_profile_->mmap_table_,
147 used_in_buffer,
148 buffer_size,
149 buffer);
150
151 // Fill buffer with the header for buckets of allocated regions.
152 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
153 kAllocStacktraceHeader);
154 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
155 return used_in_buffer;
156 }
157 used_in_buffer += printed;
158
159 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
160 "%10s %10s\n", "virtual", "committed");
161 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
162 return used_in_buffer;
163 }
164 used_in_buffer += printed;
165
166 // Fill buffer with stack trace buckets of allocated regions.
167 used_in_buffer = SnapshotBucketTableWithoutMalloc(heap_profile_->alloc_table_,
168 used_in_buffer,
169 buffer_size,
170 buffer);
171
172 RAW_DCHECK(used_in_buffer < buffer_size, "");
173
174 // Note: Least malloc until here.
jar (doing other things) 2012/03/16 01:20:02 nit: English: Suggest changing: "Note: Least mall
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done.
175
176 // Write the bucket listing into a .bucket file.
177 WriteBucketsToBucketFile();
178
179 #ifndef NDEBUG
180 int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
181 double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
182 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
183 #endif
184
185 return used_in_buffer;
186 }
187
188 void DeepHeapProfile::RegionStats::Initialize() {
189 virtual_bytes_ = 0;
190 committed_bytes_ = 0;
191 }
192
193 void DeepHeapProfile::RegionStats::Record(
194 int pagemap_fd, uint64 first_address, uint64 last_address) {
195 virtual_bytes_ += static_cast<size_t>(last_address - first_address + 1);
196 committed_bytes_ += GetCommittedSize(pagemap_fd, first_address, last_address);
197 }
198
// TODO(dmikurube): Avoid calling ClearIsLogged to rewrite buckets by add a
// reference to a previous file in a .heap file.
// static
// Iteration callback for deep_bucket_map_: marks the bucket as not yet
// written to a .buckets file, so it will be re-emitted after a fork.
void DeepHeapProfile::ClearIsLogged(const void* pointer,
                                    DeepHeapProfile::DeepBucket* deep_bucket,
                                    DeepHeapProfile* deep_profile) {
  deep_bucket->is_logged = false;
}
207
208 // static
209 int DeepHeapProfile::OpenProcPagemap() {
210 char filename[100];
211 snprintf(filename, sizeof(filename), "/proc/%d/pagemap", getpid());
jar (doing other things) 2012/03/16 01:20:02 nit: Please add static cast of getpid() to type in
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done.
212 int pagemap_fd = open(filename, O_RDONLY);
213 RAW_DCHECK(pagemap_fd != -1, "Failed to open /proc/self/pagemap");
214 return pagemap_fd;
215 }
216
217 // static
218 bool DeepHeapProfile::SeekProcPagemap(int pagemap_fd, uint64 address) {
219 static int page_size = 0;
jar (doing other things) 2012/03/16 01:20:02 nit: Why bother to cache the page_size in a static
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Exactly. Done.
220 if (!page_size) page_size = getpagesize();
221 int64 index = (address / page_size) * PAGEMAP_BYTES;
222 int64 offset = lseek64(pagemap_fd, index, SEEK_SET);
223 RAW_DCHECK(offset == index, "");
224 return offset >= 0;
225 }
226
227 // static
228 bool DeepHeapProfile::ReadProcPagemap(int pagemap_fd, PageState* state) {
229 static const uint64 U64_1 = 1;
230 static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1;
231 static const uint64 PAGE_PRESENT = U64_1 << 63;
232 static const uint64 PAGE_SWAP = U64_1 << 62;
233 static const uint64 PAGE_RESERVED = U64_1 << 61;
234 static const uint64 FLAG_NOPAGE = U64_1 << 20;
235 static const uint64 FLAG_KSM = U64_1 << 21;
236 static const uint64 FLAG_MMAP = U64_1 << 11;
237
238 uint64 pagemap_value;
239 int result = read(pagemap_fd, &pagemap_value, PAGEMAP_BYTES);
240 if (result != PAGEMAP_BYTES) {
241 return false;
242 }
243
244 // Check if the page is committed.
245 state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP));
246
247 state->is_present = (pagemap_value & PAGE_PRESENT);
248 state->is_swapped = (pagemap_value & PAGE_SWAP);
249 state->is_shared = false;
250
251 return true;
252 }
253
254 // static
255 size_t DeepHeapProfile::GetCommittedSize(
256 int pagemap_fd, uint64 first_address, uint64 last_address) {
257 static int page_size = 0;
jar (doing other things) 2012/03/16 01:20:02 nit: don't bother with static cache.
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done.
258 if (!page_size) page_size = getpagesize();
259 uint64 page_address = (first_address / page_size) * page_size;
260 size_t committed_size = 0;
261
262 SeekProcPagemap(pagemap_fd, first_address);
263
264 // Check every page on which the allocation resides.
265 while (page_address <= last_address) {
266 // Read corresponding physical page.
267 PageState state;
268 if (ReadProcPagemap(pagemap_fd, &state) == false) {
269 // We can't read the last region (e.g vsyscall).
270 #ifndef NDEBUG
271 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes",
272 first_address, last_address - first_address + 1);
273 #endif
274 return 0;
275 }
276
277 if (state.is_committed) {
278 // Calculate the size of the allocation part in this page.
279 size_t bytes = page_size;
280
281 // If looking at the last page in a given region.
282 if (last_address <= page_address - 1 + page_size) {
283 bytes = last_address - page_address + 1;
284 }
285
286 // If looking at the first page in a given region.
287 if (page_address < first_address) {
288 bytes -= first_address - page_address;
289 }
290
291 committed_size += bytes;
292 }
293 if (page_address > TOP_ADDRESS - page_size) {
294 break;
295 }
296 page_address += page_size;
297 }
298
299 return committed_size;
300 }
301
302 // static
303 void DeepHeapProfile::WriteMapsToFile(const char* filename_prefix,
304 int buffer_size,
305 char buffer[]) {
306 char filename[100];
307 snprintf(filename, sizeof(filename),
308 "%s.%05d.maps", filename_prefix, getpid());
jar (doing other things) 2012/03/16 01:20:02 nit: cast getpid() to int
Dai Mikurube (NOT FULLTIME) 2012/03/16 17:48:11 Done.
309
310 RawFD maps_fd = RawOpenForWriting(filename);
311 RAW_DCHECK(maps_fd != kIllegalRawFD, "");
312
313 int map_length;
314 bool wrote_all;
315 map_length = tcmalloc::FillProcSelfMaps(buffer, buffer_size, &wrote_all);
316 RAW_DCHECK(wrote_all, "");
317 RAW_DCHECK(map_length <= buffer_size, "");
318 RawWrite(maps_fd, buffer, map_length);
319 RawClose(maps_fd);
320 }
321
322 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
323 // ProcMapsIterator uses snprintf internally in construction.
324 // static
325 void DeepHeapProfile::SnapshotGlobalStatsWithoutMalloc(int pagemap_fd,
326 GlobalStats* stats) {
327 ProcMapsIterator::Buffer iterator_buffer;
328 ProcMapsIterator iterator(0, &iterator_buffer);
329 uint64 first_address, last_address, offset;
330 int64 inode;
331 char* flags;
332 char* filename;
333
334 stats->total.Initialize();
335 stats->file_mapped.Initialize();
336 stats->anonymous.Initialize();
337 stats->other.Initialize();
338
339 while (iterator.Next(&first_address, &last_address,
340 &flags, &offset, &inode, &filename)) {
341 // 'last_address' should be the last inclusive address of the region.
342 last_address -= 1;
343 if (strcmp("[vsyscall]", filename) == 0) {
344 continue; // Reading pagemap will fail in [vsyscall].
345 }
346
347 stats->total.Record(pagemap_fd, first_address, last_address);
348
349 if (filename[0] == '/') {
350 stats->file_mapped.Record(pagemap_fd, first_address, last_address);
351 } else if (filename[0] == '\0' || filename[0] == '\n') {
352 stats->anonymous.Record(pagemap_fd, first_address, last_address);
353 } else {
354 stats->other.Record(pagemap_fd, first_address, last_address);
355 }
356 }
357 }
358
359 DeepHeapProfile::DeepBucket* DeepHeapProfile::GetDeepBucket(Bucket* bucket) {
360 DeepBucket* found = deep_bucket_map_->FindMutable(bucket);
361 if (found != NULL)
362 return found;
363
364 DeepBucket created;
365 created.bucket = bucket;
366 created.committed_size = 0;
367 created.id = (bucket_id_++);
368 created.is_logged = false;
369 deep_bucket_map_->Insert(bucket, created);
370 return deep_bucket_map_->FindMutable(bucket);
371 }
372
373 void DeepHeapProfile::ResetCommittedSize(Bucket** bucket_table) {
374 for (int i = 0; i < kHashTableSize; i++) {
375 for (Bucket* bucket = bucket_table[i];
376 bucket != NULL;
377 bucket = bucket->next) {
378 DeepBucket* deep_bucket = GetDeepBucket(bucket);
379 deep_bucket->committed_size = 0;
380 }
381 }
382 }
383
384 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
385 int DeepHeapProfile::SnapshotBucketTableWithoutMalloc(Bucket** bucket_table,
386 int used_in_buffer,
387 int buffer_size,
388 char buffer[]) {
389 for (int i = 0; i < kHashTableSize; i++) {
390 for (Bucket* bucket = bucket_table[i];
391 bucket != NULL;
392 bucket = bucket->next) {
393 if (bucket->alloc_size - bucket->free_size == 0) {
394 continue; // Skip empty buckets.
395 }
396 const DeepBucket* deep_bucket = GetDeepBucket(bucket);
397 used_in_buffer = UnparseBucket(
398 *deep_bucket, "", used_in_buffer, buffer_size, buffer, NULL);
399 }
400 }
401 return used_in_buffer;
402 }
403
404 void DeepHeapProfile::RecordAlloc(const void* pointer,
405 AllocValue* alloc_value,
406 DeepHeapProfile* deep_profile) {
407 uint64 address = reinterpret_cast<uintptr_t>(pointer);
408 size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
409 address, address + alloc_value->bytes - 1);
410
411 DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
412 deep_bucket->committed_size += committed;
413 deep_profile->stats_.record_malloc.AddToVirtualBytes(alloc_value->bytes);
414 deep_profile->stats_.record_malloc.AddToCommittedBytes(committed);
415 }
416
417 void DeepHeapProfile::RecordMMap(const void* pointer,
418 AllocValue* alloc_value,
419 DeepHeapProfile* deep_profile) {
420 uint64 address = reinterpret_cast<uintptr_t>(pointer);
421 size_t committed = GetCommittedSize(deep_profile->pagemap_fd_,
422 address, address + alloc_value->bytes - 1);
423
424 DeepBucket* deep_bucket = deep_profile->GetDeepBucket(alloc_value->bucket());
425 deep_bucket->committed_size += committed;
426 deep_profile->stats_.record_mmap.AddToVirtualBytes(alloc_value->bytes);
427 deep_profile->stats_.record_mmap.AddToCommittedBytes(committed);
428 }
429
// Resets the per-call totals and walks every live malloc and mmap allocation,
// letting RecordAlloc/RecordMMap accumulate per-bucket committed sizes.
// Named "WithoutMalloc" because the caller requires no allocation here.
void DeepHeapProfile::SnapshotAllAllocsWithoutMalloc() {
  stats_.record_mmap.Initialize();
  stats_.record_malloc.Initialize();

  // malloc allocations.
  heap_profile_->alloc_address_map_->Iterate(RecordAlloc, this);

  // mmap allocations.
  heap_profile_->mmap_address_map_->Iterate(RecordMMap, this);
}
440
441 int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket,
442 int buffer_size,
443 char buffer[]) {
444 const Bucket* bucket = deep_bucket->bucket;
445 int printed = snprintf(buffer, buffer_size, "%05d", deep_bucket->id);
446 if (printed < 0 || printed >= buffer_size) {
447 return 0;
448 }
449 int used_in_buffer = printed;
450
451 for (int depth = 0; depth < bucket->depth; depth++) {
452 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
453 " 0x%08" PRIxPTR,
454 reinterpret_cast<uintptr_t>(bucket->stack[depth]));
455 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
456 return used_in_buffer;
457 }
458 used_in_buffer += printed;
459 }
460 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
461 "\n");
462 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
463 return used_in_buffer;
464 }
465 used_in_buffer += printed;
466
467 return used_in_buffer;
468 }
469
470 void DeepHeapProfile::WriteBucketsTableToBucketFile(Bucket** bucket_table,
471 RawFD bucket_fd) {
472 // We will use the global buffer here.
473 char* buffer = profiler_buffer_;
474 int buffer_size = kProfilerBufferSize;
475 int used_in_buffer = 0;
476
477 for (int i = 0; i < kHashTableSize; i++) {
478 for (Bucket* bucket = bucket_table[i];
479 bucket != NULL;
480 bucket = bucket->next) {
481 DeepBucket* deep_bucket = GetDeepBucket(bucket);
482 if (deep_bucket->is_logged) {
483 continue; // Skip the bucket if it is already logged.
484 }
485 if (bucket->alloc_size - bucket->free_size <= 64) {
486 continue; // Skip small buckets.
487 }
488
489 used_in_buffer += FillBucketForBucketFile(
490 deep_bucket, buffer_size - used_in_buffer, buffer + used_in_buffer);
491 deep_bucket->is_logged = true;
492
493 // Write to file if buffer 80% full.
494 if (used_in_buffer > buffer_size * 0.8) {
495 RawWrite(bucket_fd, buffer, used_in_buffer);
496 used_in_buffer = 0;
497 }
498 }
499 }
500
501 RawWrite(bucket_fd, buffer, used_in_buffer);
502 }
503
504 void DeepHeapProfile::WriteBucketsToBucketFile() {
505 char filename[100];
506 snprintf(filename, sizeof(filename),
507 "%s.%05d.%04d.buckets", filename_prefix_, getpid(), dump_count_);
508 RawFD bucket_fd = RawOpenForWriting(filename);
509 RAW_DCHECK(bucket_fd != kIllegalRawFD, "");
510
511 WriteBucketsTableToBucketFile(heap_profile_->alloc_table_, bucket_fd);
512 WriteBucketsTableToBucketFile(heap_profile_->mmap_table_, bucket_fd);
513
514 RawClose(bucket_fd);
515 }
516
// Formats one bucket as a profile line:
//   "<virtual bytes> <committed bytes> <allocs> <frees> @<extra> <bucket id>"
// If 'profile_stats' is non-NULL, the bucket's counters are also accumulated
// into it.  Returns the updated byte count used in 'buffer'; on truncation
// the offset is left unchanged (the partial write is simply not counted).
int DeepHeapProfile::UnparseBucket(const DeepBucket& deep_bucket,
                                   const char* extra,
                                   int used_in_buffer,
                                   int buffer_size,
                                   char* buffer,
                                   Stats* profile_stats) {
  const Bucket& bucket = *deep_bucket.bucket;
  if (profile_stats != NULL) {
    profile_stats->allocs += bucket.allocs;
    profile_stats->alloc_size += bucket.alloc_size;
    profile_stats->frees += bucket.frees;
    profile_stats->free_size += bucket.free_size;
  }

  // NOTE(review): "%10"PRId64 assumes both printed values are 64-bit signed;
  // committed_size appears to be size_t -- confirm this holds on 32-bit
  // builds before relying on the output.
  int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
                         "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n",
                         bucket.alloc_size - bucket.free_size,
                         deep_bucket.committed_size,
                         bucket.allocs, bucket.frees, extra, deep_bucket.id);
  // If it looks like the snprintf failed, ignore the fact we printed anything.
  if (printed < 0 || printed >= buffer_size - used_in_buffer) {
    return used_in_buffer;
  }
  used_in_buffer += printed;

  return used_in_buffer;
}
544
545 int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats,
546 const char* name,
547 int used_in_buffer,
548 int buffer_size,
549 char* buffer) {
550 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
551 "%15s %10ld %10ld\n",
552 name, stats->virtual_bytes(),
553 stats->committed_bytes());
554 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
555 return used_in_buffer;
556 }
557 used_in_buffer += printed;
558
559 return used_in_buffer;
560 }
561
562 int DeepHeapProfile::UnparseGlobalStats(int used_in_buffer,
563 int buffer_size,
564 char* buffer) {
565 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer,
566 "%15s %10s %10s\n", "", "virtual", "committed");
567 if (printed < 0 || printed >= buffer_size - used_in_buffer) {
568 return used_in_buffer;
569 }
570 used_in_buffer += printed;
571
572 used_in_buffer = UnparseRegionStats(&(stats_.total), "total",
573 used_in_buffer, buffer_size, buffer);
574 used_in_buffer = UnparseRegionStats(&(stats_.file_mapped), "file mapped",
575 used_in_buffer, buffer_size, buffer);
576 used_in_buffer = UnparseRegionStats(&(stats_.anonymous), "anonymous",
577 used_in_buffer, buffer_size, buffer);
578 used_in_buffer = UnparseRegionStats(&(stats_.other), "other",
579 used_in_buffer, buffer_size, buffer);
580 used_in_buffer = UnparseRegionStats(&(stats_.record_mmap), "mmap",
581 used_in_buffer, buffer_size, buffer);
582 used_in_buffer = UnparseRegionStats(&(stats_.record_malloc), "tcmalloc",
583 used_in_buffer, buffer_size, buffer);
584 return used_in_buffer;
585 }
586 #else // DEEP_HEAP_PROFILE
587
// Fallback build (DEEP_HEAP_PROFILE undefined): just remember the underlying
// profile table; 'prefix' is unused here.
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : heap_profile_(heap_profile) {
}
592
// Fallback build: nothing was allocated, so nothing to release.
DeepHeapProfile::~DeepHeapProfile() {
}
595
// Fallback build: delegate directly to the regular (non-deep) heap profile
// dumper.
int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) {
  return heap_profile_->FillOrderedProfile(buffer, buffer_size);
}
599
600 #endif // DEEP_HEAP_PROFILE
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698