OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 // --- | |
6 // Author: Sainbayar Sukhbaatar | |
7 // Dai Mikurube | |
8 // | |
9 | |
10 #include "deep-heap-profile.h" | |
11 | |
12 #ifdef DEEP_HEAP_PROFILE | |
13 #include <fcntl.h> | |
14 #include <sys/stat.h> | |
15 #include <sys/types.h> | |
16 #ifdef HAVE_UNISTD_H | |
17 #include <unistd.h> // for getpid() | |
18 #endif | |
jar (doing other things)
2012/01/27 00:46:58
For endif, please add comment showing what is term
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
19 | |
20 #include "base/cycleclock.h" | |
21 #include "base/sysinfo.h" | |
22 | |
23 static const int kProfilerBufferSize = 1 << 20; | |
24 static const int kHashTableSize = 179999; // The same as heap-profile-table.cc. | |
25 | |
26 static const int PAGE_SIZE = 4096; | |
jar (doing other things)
2012/01/27 00:46:58
It surprises me that we don't have the moral equiv
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
getpagesize() is better. Fixed.
| |
27 static const int PAGEMAP_BYTES = 8; | |
28 static const uint64 TOP_ADDRESS = kuint64max; | |
jar (doing other things)
2012/01/27 00:46:58
Why do you use uint64 for addresses, when size_t i
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Simply, it's because ProcMapsIterator::Next(Ext)
| |
29 | |
// Header strings of the dumped heap profile.
static const char kProfileHeader[] = "heap profile: ";          // file magic
static const char kProfileVersion[] = "DUMP_DEEP_3";            // format version
static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";     // section: totals
static const char kMMapStacktraceHeader[] = "MMAP_STACKTRACES:\n";
static const char kAllocStacktraceHeader[] = "MALLOC_STACKTRACES:\n";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
37 | |
38 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile, | |
39 const char* prefix) | |
40 : heap_profile_(heap_profile), | |
41 pagemap_fd_(-1), | |
42 most_recent_pid_(-1), | |
43 stats_(), | |
44 dump_count_(0), | |
45 filename_prefix_(NULL), | |
46 profiler_buffer_(NULL), | |
47 bucket_id_(0) { | |
48 deep_bucket_map_ = new(heap_profile_->alloc_(sizeof(DeepBucketMap))) | |
49 DeepBucketMap(heap_profile_->alloc_, heap_profile_->dealloc_); | |
50 | |
51 // Copy filename prefix. | |
52 const int prefix_length = strlen(prefix); | |
53 filename_prefix_ = | |
54 reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1)); | |
55 memcpy(filename_prefix_, prefix, prefix_length); | |
jar (doing other things)
2012/01/27 00:46:58
Why not just strcpy(), given that you base this al
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
I guess we thought that memcpy is faster in that c
| |
56 filename_prefix_[prefix_length] = '\0'; | |
57 | |
58 profiler_buffer_ = | |
59 reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize)); | |
60 } | |
61 | |
62 DeepHeapProfile::~DeepHeapProfile() { | |
63 heap_profile_->dealloc_(profiler_buffer_); | |
64 heap_profile_->dealloc_(filename_prefix_); | |
65 deep_bucket_map_->~DeepBucketMap(); | |
66 heap_profile_->dealloc_(deep_bucket_map_); | |
67 } | |
68 | |
69 int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) { | |
70 #ifndef NDEBUG | |
71 int64 starting_cycles = CycleClock::Now(); | |
72 #endif | |
73 ++dump_count_; | |
74 | |
75 // Re-open files in /proc/pid/ if the process is newly forked one. | |
76 if (most_recent_pid_ != getpid()) { | |
77 most_recent_pid_ = getpid(); | |
78 pagemap_fd_ = OpenProcPagemap(); | |
79 | |
80 deep_bucket_map_->Iterate(ClearIsLogged, this); | |
81 | |
82 // Write maps into a .maps file with using the global buffer. | |
83 WriteMapsToFile(profiler_buffer_, kProfilerBufferSize, filename_prefix_); | |
84 } | |
85 | |
86 // Reset committed sizes of buckets. | |
87 ResetCommittedSize(heap_profile_->alloc_table_); | |
88 ResetCommittedSize(heap_profile_->mmap_table_); | |
89 | |
90 GetGlobalStats(pagemap_fd_, &stats_); | |
91 size_t anonymous_committed = stats_.anonymous.committed_bytes; | |
92 | |
93 // Note: Don't allocate any memory from here. | |
jar (doing other things)
2012/01/27 00:46:58
Could you motivate the comment? I'm guessing you'
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Renamed snapshotting functions here into Snapshot.
| |
94 | |
95 // Record committed sizes. | |
96 RecordAllAllocs(); | |
97 | |
98 // Check if committed bytes changed during RecordAllAllocs. | |
99 GetGlobalStats(pagemap_fd_, &stats_); | |
100 #ifndef NDEBUG | |
101 size_t committed_difference = | |
102 stats_.anonymous.committed_bytes - anonymous_committed; | |
103 if (committed_difference != 0) { | |
104 RAW_LOG(0, "Difference in committed size: %ld", committed_difference); | |
105 } | |
106 #endif | |
107 | |
108 HeapProfileTable::Stats stats; | |
jar (doing other things)
2012/01/27 00:46:58
Please move closer to first (real) use.... around
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
109 memset(&stats, 0, sizeof(stats)); | |
110 | |
111 // Start filling buffer with the ordered profile. | |
112 int printed = snprintf(buffer, buffer_size, | |
jar (doing other things)
2012/01/27 00:46:58
How are you sure than snprintf() doesn't allocate
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Please look at the above reply.
| |
113 "%s%s\n", kProfileHeader, kProfileVersion); | |
114 if (printed < 0 || printed >= buffer_size) { | |
115 return 0; | |
116 } | |
117 int used_in_buffer = printed; | |
118 | |
119 // Fill buffer with the global stats. | |
120 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
121 kGlobalStatsHeader); | |
122 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
123 return used_in_buffer; | |
124 } | |
125 used_in_buffer += printed; | |
126 | |
127 used_in_buffer = UnparseGlobalStats(buffer, used_in_buffer, buffer_size); | |
128 | |
129 // Fill buffer with the header for buckets of mmap'ed regions. | |
130 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
131 kMMapStacktraceHeader); | |
132 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
133 return used_in_buffer; | |
134 } | |
135 used_in_buffer += printed; | |
136 | |
137 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
138 "%10s %10s\n", "virtual", "committed"); | |
139 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
140 return used_in_buffer; | |
141 } | |
142 used_in_buffer += printed; | |
143 | |
144 // Fill buffer with stack trace buckets of mmap'ed regions. | |
145 used_in_buffer = FillBucketTable(heap_profile_->mmap_table_, buffer, | |
146 buffer_size, used_in_buffer, &stats); | |
jar (doing other things)
2012/01/27 00:46:58
This sure looks like a method on stats, but perhap
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
HeapProfileTable::Stats is just a public struct (n
| |
147 | |
148 // Fill buffer with the header for buckets of allocated regions. | |
149 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
150 kAllocStacktraceHeader); | |
151 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
152 return used_in_buffer; | |
153 } | |
154 used_in_buffer += printed; | |
155 | |
156 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
157 "%10s %10s\n", "virtual", "committed"); | |
158 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
159 return used_in_buffer; | |
160 } | |
161 used_in_buffer += printed; | |
162 | |
163 // Fill buffer with stack trace buckets of allocated regions. | |
164 used_in_buffer = FillBucketTable(heap_profile_->alloc_table_, buffer, | |
165 buffer_size, used_in_buffer, &stats); | |
166 | |
167 RAW_DCHECK(used_in_buffer < buffer_size, ""); | |
168 | |
169 // Note: Don't allocate any memory until here. | |
170 | |
171 // Write the bucket listing into a .bucket file. | |
172 WriteBucketsToBucketFile(); | |
173 | |
174 #ifndef NDEBUG | |
175 int64 elapsed_cycles = CycleClock::Now() - starting_cycles; | |
176 double elapsed_seconds = elapsed_cycles / CyclesPerSecond(); | |
177 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds); | |
178 #endif | |
179 | |
180 return used_in_buffer; | |
181 } | |
182 | |
183 void DeepHeapProfile::RegionStats::Initialize() { | |
184 virtual_bytes = 0; | |
185 committed_bytes = 0; | |
186 } | |
187 | |
188 void DeepHeapProfile::RegionStats::Record( | |
189 int pagemap_fd, uint64 first_address, uint64 last_address) { | |
190 virtual_bytes += static_cast<size_t>(last_address - first_address + 1); | |
191 committed_bytes += GetCommittedSize(pagemap_fd, first_address, last_address); | |
192 } | |
193 | |
194 // TODO(dmikurube): Avoid calling ClearIsLogged to rewrite buckets by add a | |
195 // reference to a previous file in a .heap file. | |
196 // static | |
197 void DeepHeapProfile::ClearIsLogged(const void* pointer, | |
198 DeepHeapProfile::DeepBucket* db, | |
199 DeepHeapProfile* deep_profile) { | |
200 db->is_logged = false; | |
201 } | |
202 | |
203 // static | |
204 int DeepHeapProfile::OpenProcPagemap() { | |
205 char filename[100]; | |
206 sprintf(filename, "/proc/%d/pagemap", getpid()); | |
jar (doing other things)
2012/01/27 00:46:58
use snprintf
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
207 int pagemap_fd = open(filename, O_RDONLY); | |
208 RAW_DCHECK(pagemap_fd != -1, "Failed to open /proc/self/pagemap"); | |
209 return pagemap_fd; | |
210 } | |
211 | |
212 // static | |
213 bool DeepHeapProfile::SeekProcPagemap(int pagemap_fd, uint64 address) { | |
214 int64 index = (address / PAGE_SIZE) * PAGEMAP_BYTES; | |
215 int64 offset = lseek64(pagemap_fd, index, SEEK_SET); | |
216 RAW_DCHECK(offset == index, ""); | |
217 if (offset < 0) { | |
jar (doing other things)
2012/01/27 00:46:58
nit: return offset >= 0;
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
218 return false; | |
219 } | |
220 return true; | |
221 } | |
222 | |
223 // static | |
224 bool DeepHeapProfile::ReadProcPagemap(int pagemap_fd, PageState* state) { | |
225 static const uint64 U64_1 = 1; | |
226 static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1; | |
227 static const uint64 PAGE_PRESENT = U64_1 << 63; | |
228 static const uint64 PAGE_SWAP = U64_1 << 62; | |
229 static const uint64 PAGE_RESERVED = U64_1 << 61; | |
230 static const uint64 FLAG_NOPAGE = U64_1 << 20; | |
231 static const uint64 FLAG_KSM = U64_1 << 21; | |
232 static const uint64 FLAG_MMAP = U64_1 << 11; | |
233 | |
234 uint64 pagemap_value; | |
235 int result = read(pagemap_fd, &pagemap_value, PAGEMAP_BYTES); | |
236 if (result != PAGEMAP_BYTES) { | |
237 return false; | |
238 } | |
239 | |
240 // Check if the page is committed. | |
241 state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP)); | |
242 | |
243 state->is_present = (pagemap_value & PAGE_PRESENT); | |
244 state->is_swapped = (pagemap_value & PAGE_SWAP); | |
245 state->is_shared = false; | |
246 | |
247 return true; | |
248 } | |
249 | |
250 // static | |
251 size_t DeepHeapProfile::GetCommittedSize( | |
252 int pagemap_fd, uint64 first_address, uint64 last_address) { | |
253 uint64 page_address = (first_address / PAGE_SIZE) * PAGE_SIZE; | |
254 size_t committed_size = 0; | |
255 | |
256 SeekProcPagemap(pagemap_fd, first_address); | |
257 | |
258 // Check every page on which the allocation resides. | |
259 while (page_address <= last_address) { | |
260 // Read corresponding physical page. | |
261 PageState state; | |
262 if (ReadProcPagemap(pagemap_fd, &state) == false) { | |
263 // We can't read the last region (e.g vsyscall). | |
264 #ifndef NDEBUG | |
265 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", | |
266 first_address, last_address - first_address + 1); | |
267 #endif | |
268 return 0; | |
269 } | |
270 | |
271 if (state.is_committed) { | |
272 // Calculate the size of the allocation part in this page. | |
273 size_t bytes = PAGE_SIZE; | |
274 | |
275 // If looking at the last page in a given region. | |
276 if (last_address <= page_address - 1 + PAGE_SIZE) { | |
277 bytes = last_address - page_address + 1; | |
278 } | |
279 | |
280 // If looking at the first page in a given region. | |
281 if (page_address < first_address) { | |
282 bytes -= first_address - page_address; | |
283 } | |
284 | |
285 committed_size += bytes; | |
286 } | |
287 if (page_address > TOP_ADDRESS - PAGE_SIZE) { | |
288 break; | |
289 } | |
290 page_address += PAGE_SIZE; | |
291 } | |
292 | |
293 return committed_size; | |
294 } | |
295 | |
296 // static | |
297 void DeepHeapProfile::WriteMapsToFile(char buffer[], int buffer_size, | |
298 char* filename_prefix) { | |
299 char filename[100]; | |
300 snprintf(filename, sizeof(filename), | |
301 "%s.%05d.maps", filename_prefix, getpid()); | |
302 | |
303 RawFD maps_fd = RawOpenForWriting(filename); | |
304 RAW_DCHECK(maps_fd != kIllegalRawFD, ""); | |
305 | |
306 int map_length; | |
307 bool wrote_all; | |
308 map_length = tcmalloc::FillProcSelfMaps(buffer, buffer_size, &wrote_all); | |
309 RAW_DCHECK(wrote_all, ""); | |
310 RAW_DCHECK(map_length <= buffer_size, ""); | |
311 RawWrite(maps_fd, buffer, map_length); | |
312 RawClose(maps_fd); | |
313 } | |
314 | |
315 // static | |
316 void DeepHeapProfile::GetGlobalStats(int pagemap_fd, GlobalStats* stats) { | |
317 ProcMapsIterator::Buffer iterator_buffer; | |
318 ProcMapsIterator it(0, &iterator_buffer); | |
319 uint64 first_address, last_address, offset; | |
320 int64 inode; | |
321 char *flags, *filename; | |
jar (doing other things)
2012/01/27 00:46:58
nit: Chrome style puts the "*" next to the type.
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
322 | |
323 stats->total.Initialize(); | |
324 stats->file_mapped.Initialize(); | |
325 stats->anonymous.Initialize(); | |
326 stats->other.Initialize(); | |
327 | |
328 while (it.Next(&first_address, &last_address, | |
329 &flags, &offset, &inode, &filename)) { | |
330 // 'last_address' should be the last inclusive address of the region. | |
331 last_address -= 1; | |
332 if (strcmp("[vsyscall]", filename) == 0) { | |
333 continue; // Reading pagemap will fail in [vsyscall]. | |
334 } | |
335 | |
336 int64 committed_bytes = stats->total.committed_bytes; | |
337 stats->total.Record(pagemap_fd, first_address, last_address); | |
338 committed_bytes = stats->total.committed_bytes - committed_bytes; | |
jar (doing other things)
2012/01/27 00:46:58
Why isn't this merely committed_bytes = 0
In addi
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Removed. It was a code to be used in a planned ex
| |
339 | |
340 if (filename[0] == '/') { | |
341 stats->file_mapped.Record(pagemap_fd, first_address, last_address); | |
342 } else if (filename[0] == '\0' || filename[0] == '\n') { | |
343 stats->anonymous.Record(pagemap_fd, first_address, last_address); | |
344 } else { | |
345 stats->other.Record(pagemap_fd, first_address, last_address); | |
346 } | |
347 } | |
348 } | |
349 | |
350 DeepHeapProfile::DeepBucket* | |
351 DeepHeapProfile::GetDeepBucket(Bucket* bucket) { | |
jar (doing other things)
2012/01/27 00:46:58
nit: Try to put function definition on one line.
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
352 DeepBucket* found = deep_bucket_map_->FindMutable(bucket); | |
353 if (found == NULL) { | |
354 DeepBucket created; | |
355 created.bucket = bucket; | |
356 created.committed_size = 0; | |
357 created.id = (bucket_id_++); | |
358 created.is_logged = false; | |
359 deep_bucket_map_->Insert(bucket, created); | |
360 return deep_bucket_map_->FindMutable(bucket); | |
361 } else { | |
jar (doing other things)
2012/01/27 00:46:58
style nit: both sides of the if have a return... s
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
362 return found; | |
363 } | |
364 } | |
365 | |
366 void DeepHeapProfile::ResetCommittedSize(Bucket** bucket_table) { | |
367 for (int i = 0; i < kHashTableSize; i++) { | |
368 for (Bucket* b = bucket_table[i]; b != 0; b = b->next) { | |
jar (doing other things)
2012/01/27 00:46:58
nit: use NULL for pointers:
b != NULL
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
369 DeepBucket* db = GetDeepBucket(b); | |
370 db->committed_size = 0; | |
371 } | |
372 } | |
373 } | |
374 | |
375 int DeepHeapProfile::FillBucketTable(Bucket** bucket_table, | |
376 char buffer[], | |
377 int buffer_size, | |
378 int used_in_buffer, | |
379 HeapProfileTable::Stats* stats) { | |
380 for (int i = 0; i < kHashTableSize; i++) { | |
381 for (Bucket* b = bucket_table[i]; b != 0; b = b->next) { | |
jar (doing other things)
2012/01/27 00:46:58
nit: NULL instead of 0
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Done.
| |
382 if (b->alloc_size - b->free_size == 0) { | |
383 continue; // Skip empty buckets. | |
384 } | |
385 const DeepBucket& db = *GetDeepBucket(b); | |
jar (doing other things)
2012/01/27 00:46:58
nit: suggest:
const DeepBucket db = GetDeepBucket(
Dai Mikurube (NOT FULLTIME)
2012/01/30 12:54:53
Did you mean:
const DeepBucket *db = GetDeepBuck
jar (doing other things)
2012/03/16 01:20:02
Yes... thanks... the point was to avoid using refe
| |
386 used_in_buffer = | |
387 UnparseBucket(db, buffer, used_in_buffer, buffer_size, "", stats); | |
388 } | |
389 } | |
390 return used_in_buffer; | |
391 } | |
392 | |
393 void DeepHeapProfile::RecordAlloc(const void* pointer, | |
394 AllocValue* alloc_value, | |
395 DeepHeapProfile* deep_profile) { | |
396 uint64 address = reinterpret_cast<uintptr_t>(pointer); | |
397 size_t committed = GetCommittedSize(deep_profile->pagemap_fd_, | |
398 address, address + alloc_value->bytes - 1); | |
399 | |
400 DeepBucket* db = deep_profile->GetDeepBucket(alloc_value->bucket()); | |
401 db->committed_size += committed; | |
402 deep_profile->stats_.record_malloc.virtual_bytes += alloc_value->bytes; | |
403 deep_profile->stats_.record_malloc.committed_bytes += committed; | |
404 } | |
405 | |
406 void DeepHeapProfile::RecordMMap(const void* pointer, | |
407 AllocValue* alloc_value, | |
408 DeepHeapProfile* deep_profile) { | |
409 uint64 address = reinterpret_cast<uintptr_t>(pointer); | |
410 size_t committed = GetCommittedSize(deep_profile->pagemap_fd_, | |
411 address, address + alloc_value->bytes - 1); | |
412 | |
413 DeepBucket* db = deep_profile->GetDeepBucket(alloc_value->bucket()); | |
414 db->committed_size += committed; | |
415 deep_profile->stats_.record_mmap.virtual_bytes += alloc_value->bytes; | |
416 deep_profile->stats_.record_mmap.committed_bytes += committed; | |
417 } | |
418 | |
419 void DeepHeapProfile::RecordAllAllocs() { | |
420 stats_.record_mmap.virtual_bytes = 0; | |
421 stats_.record_mmap.committed_bytes = 0; | |
422 stats_.record_malloc.virtual_bytes = 0; | |
423 stats_.record_malloc.committed_bytes = 0; | |
424 | |
425 // malloc allocations. | |
426 heap_profile_->alloc_address_map_->Iterate(RecordAlloc, this); | |
427 | |
428 // mmap allocations. | |
429 heap_profile_->mmap_address_map_->Iterate(RecordMMap, this); | |
430 } | |
431 | |
432 int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket, | |
433 char buffer[], | |
434 int buffer_size) { | |
435 const Bucket* bucket = deep_bucket->bucket; | |
436 int printed = snprintf(buffer, buffer_size, "%05d", deep_bucket->id); | |
437 if (printed < 0 || printed >= buffer_size) { | |
438 return 0; | |
439 } | |
440 int used_in_buffer = printed; | |
441 | |
442 for (int d = 0; d < bucket->depth; d++) { | |
443 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
444 " 0x%08" PRIxPTR, | |
445 reinterpret_cast<uintptr_t>(bucket->stack[d])); | |
446 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
447 return used_in_buffer; | |
448 } | |
449 used_in_buffer += printed; | |
450 } | |
451 printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
452 "\n"); | |
453 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
454 return used_in_buffer; | |
455 } | |
456 used_in_buffer += printed; | |
457 | |
458 return used_in_buffer; | |
459 } | |
460 | |
461 void DeepHeapProfile::WriteBucketsTableToBucketFile(Bucket** bucket_table, | |
462 RawFD bucket_fd) { | |
463 // We will use the global buffer here. | |
464 char* buffer = profiler_buffer_; | |
465 int buffer_size = kProfilerBufferSize; | |
466 int used_in_buffer = 0; | |
467 | |
468 for (int i = 0; i < kHashTableSize; i++) { | |
469 for (Bucket* b = bucket_table[i]; b != 0; b = b->next) { | |
470 DeepBucket* db = GetDeepBucket(b); | |
471 if (db->is_logged) { | |
472 continue; // Skip the bucket if it is already logged. | |
473 } | |
474 if (b->alloc_size - b->free_size <= 64) { | |
475 continue; // Skip small buckets. | |
476 } | |
477 | |
478 used_in_buffer += FillBucketForBucketFile( | |
479 db, buffer + used_in_buffer, buffer_size - used_in_buffer); | |
480 db->is_logged = true; | |
481 | |
482 // Write to file if buffer 80% full. | |
483 if (used_in_buffer > buffer_size * 0.8) { | |
484 RawWrite(bucket_fd, buffer, used_in_buffer); | |
485 used_in_buffer = 0; | |
486 } | |
487 } | |
488 } | |
489 | |
490 RawWrite(bucket_fd, buffer, used_in_buffer); | |
491 } | |
492 | |
493 void DeepHeapProfile::WriteBucketsToBucketFile() { | |
494 char filename[100]; | |
495 snprintf(filename, sizeof(filename), | |
496 "%s.%05d.%04d.buckets", filename_prefix_, getpid(), dump_count_); | |
497 RawFD bucket_fd = RawOpenForWriting(filename); | |
498 RAW_DCHECK(bucket_fd != kIllegalRawFD, ""); | |
499 | |
500 WriteBucketsTableToBucketFile(heap_profile_->alloc_table_, bucket_fd); | |
501 WriteBucketsTableToBucketFile(heap_profile_->mmap_table_, bucket_fd); | |
502 | |
503 RawClose(bucket_fd); | |
504 } | |
505 | |
506 int DeepHeapProfile::UnparseBucket(const DeepBucket& deep_bucket, | |
507 char* buffer, | |
508 int used_in_buffer, | |
509 int buffer_size, | |
510 const char* extra, | |
511 Stats* profile_stats) { | |
512 const Bucket& bucket = *deep_bucket.bucket; | |
513 if (profile_stats != NULL) { | |
514 profile_stats->allocs += bucket.allocs; | |
515 profile_stats->alloc_size += bucket.alloc_size; | |
516 profile_stats->frees += bucket.frees; | |
517 profile_stats->free_size += bucket.free_size; | |
518 } | |
519 | |
520 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
521 "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n", | |
522 bucket.alloc_size - bucket.free_size, | |
523 deep_bucket.committed_size, | |
524 bucket.allocs, bucket.frees, extra, deep_bucket.id); | |
525 // If it looks like the snprintf failed, ignore the fact we printed anything. | |
526 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
527 return used_in_buffer; | |
528 } | |
529 used_in_buffer += printed; | |
530 | |
531 return used_in_buffer; | |
532 } | |
533 | |
534 int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats, | |
535 const char* name, | |
536 char* buffer, | |
537 int used_in_buffer, | |
538 int buffer_size) { | |
539 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
540 "%15s %10ld %10ld\n", | |
541 name, stats->virtual_bytes, stats->committed_bytes); | |
542 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
543 return used_in_buffer; | |
544 } | |
545 used_in_buffer += printed; | |
546 | |
547 return used_in_buffer; | |
548 } | |
549 | |
550 int DeepHeapProfile::UnparseGlobalStats(char* buffer, | |
551 int used_in_buffer, | |
552 int buffer_size) { | |
553 int printed = snprintf(buffer + used_in_buffer, buffer_size - used_in_buffer, | |
554 "%15s %10s %10s\n", "", "virtual", "committed"); | |
555 if (printed < 0 || printed >= buffer_size - used_in_buffer) { | |
556 return used_in_buffer; | |
557 } | |
558 used_in_buffer += printed; | |
559 | |
560 used_in_buffer = UnparseRegionStats(&(stats_.total), "total", | |
561 buffer, used_in_buffer, buffer_size); | |
562 used_in_buffer = UnparseRegionStats(&(stats_.file_mapped), "file mapped", | |
563 buffer, used_in_buffer, buffer_size); | |
564 used_in_buffer = UnparseRegionStats(&(stats_.anonymous), "anonymous", | |
565 buffer, used_in_buffer, buffer_size); | |
566 used_in_buffer = UnparseRegionStats(&(stats_.other), "other", | |
567 buffer, used_in_buffer, buffer_size); | |
568 used_in_buffer = UnparseRegionStats(&(stats_.record_mmap), "mmap", | |
569 buffer, used_in_buffer, buffer_size); | |
570 used_in_buffer = UnparseRegionStats(&(stats_.record_malloc), "tcmalloc", | |
571 buffer, used_in_buffer, buffer_size); | |
572 return used_in_buffer; | |
573 } | |
574 #else // DEEP_HEAP_PROFILE | |
575 | |
576 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile, | |
577 const char* prefix) | |
578 : heap_profile_(heap_profile) { | |
579 } | |
580 | |
581 DeepHeapProfile::~DeepHeapProfile() { | |
582 } | |
583 | |
584 int DeepHeapProfile::FillOrderedProfile(char buffer[], int buffer_size) { | |
585 return heap_profile_->FillOrderedProfile(buffer, buffer_size); | |
586 } | |
587 | |
588 #endif // DEEP_HEAP_PROFILE | |
OLD | NEW |