OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 // --- | |
6 // Author: Sainbayar Sukhbaatar | |
7 // Dai Mikurube | |
8 // | |
9 | |
10 #include "deep-heap-profile.h" | |
11 | |
12 #ifdef DEEP_HEAP_PROFILE | |
13 #include <fcntl.h> | |
14 #include <sys/stat.h> | |
15 #include <sys/types.h> | |
16 #ifdef HAVE_UNISTD_H | |
17 #include <unistd.h> // for getpid() | |
18 #endif | |
19 | |
20 #include "base/cycleclock.h" | |
21 #include "base/sysinfo.h" | |
22 | |
// Size of the scratch buffer shared by the maps/buckets dump routines.
static const int kProfilerBufferSize = 1 << 20;
// Number of hash-table slots; the same as heap-profile-table.cc.
static const int kHashTableSize = 179999;

// Assumed page size and the size of one /proc/<pid>/pagemap entry.
static const int PAGE_SIZE = 4096;
static const int PAGEMAP_BYTES = 8;

// Section headers of the dumped heap profile.
static const char kProfileHeader[] = "Deep Memory Profile\n";
static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
static const char kStacktraceHeader[] = "STACKTRACES:\n";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
34 | |
35 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile, | |
36 const char* prefix) | |
37 : heap_profile_(heap_profile), | |
38 pagemap_fd_(-1), | |
39 most_recent_pid_(-1), | |
40 stats_(), | |
41 dump_count_(0), | |
42 filename_prefix_(NULL), | |
43 profiler_buffer_(NULL), | |
44 bucket_id_(0) { | |
45 deep_bucket_map_ = new(heap_profile_->alloc_(sizeof(DeepBucketMap))) | |
46 DeepBucketMap(heap_profile_->alloc_, heap_profile_->dealloc_); | |
47 | |
48 // Copy filename prefix | |
49 RAW_DCHECK(filename_prefix_ == NULL, ""); | |
jar (doing other things)
2011/12/28 08:30:17
I don't understand the need for this DCHECK. The
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Good catch. In the previous version, filename_pre
| |
50 const int prefix_length = strlen(prefix); | |
51 filename_prefix_ = | |
52 reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1)); | |
53 memcpy(filename_prefix_, prefix, prefix_length); | |
54 filename_prefix_[prefix_length] = '\0'; | |
55 | |
56 profiler_buffer_ = | |
57 reinterpret_cast<char*>(heap_profile_->alloc_(kProfilerBufferSize)); | |
58 } | |
59 | |
DeepHeapProfile::~DeepHeapProfile() {
  // Release everything acquired in the constructor.  |deep_bucket_map_| was
  // created with placement new, so its destructor must be called explicitly
  // before its storage is handed back to the profiler's deallocator.
  heap_profile_->dealloc_(profiler_buffer_);
  heap_profile_->dealloc_(filename_prefix_);
  deep_bucket_map_->~DeepBucketMap();
  heap_profile_->dealloc_(deep_bucket_map_);
}
66 | |
67 int DeepHeapProfile::FillOrderedProfile(char buf[], int size) { | |
68 int64 start_time = CycleClock::Now(); | |
69 ++dump_count_; | |
70 | |
71 // Re-open files in /proc/pid/ if the process is newly forked one. | |
72 if (most_recent_pid_ != getpid()) { | |
73 most_recent_pid_ = getpid(); | |
74 OpenProcPagemap(); | |
75 | |
76 // Write maps into a .maps file with using the global buffer. | |
77 WriteMapsToFile(profiler_buffer_, kProfilerBufferSize); | |
78 } | |
79 | |
80 // Reset committed sizes of buckets. | |
81 ResetCommittedSize(heap_profile_->malloc_table_); | |
82 ResetCommittedSize(heap_profile_->mmap_table_); | |
83 | |
84 GetGlobalStats(); | |
85 size_t anonymous_committed = stats_.anonymous.committed_bytes; | |
86 | |
87 // Note: Don't allocate any memory from here. | |
88 | |
89 // Record committed sizes. | |
90 RecordAllAllocs(); | |
91 | |
92 // Check if committed bytes changed during RecordAllAllocs. | |
93 GetGlobalStats(); | |
94 size_t committed_difference = | |
95 stats_.anonymous.committed_bytes - anonymous_committed; | |
96 if (committed_difference != 0) | |
97 RAW_LOG(0, "Difference in committed size: %ld", committed_difference); | |
98 | |
99 HeapProfileTable::Stats stats; | |
100 memset(&stats, 0, sizeof(stats)); | |
101 | |
102 // Start filling buf with the ordered profile. | |
103 int bucket_length = snprintf(buf, size, kProfileHeader); | |
104 if (bucket_length < 0 || bucket_length >= size) return 0; | |
105 | |
106 // Fill buf with the global stats. | |
107 bucket_length += | |
108 snprintf(buf + bucket_length, size - bucket_length, kGlobalStatsHeader); | |
jar (doing other things)
2011/12/28 08:30:17
If we would have overrun the buffer, then the buck
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
Thanks for the point. I was completely out of tou
| |
109 bucket_length = UnparseGlobalStats(buf, bucket_length, size); | |
110 | |
111 // Fill buf with the header for buckets. | |
112 bucket_length += | |
113 snprintf(buf + bucket_length, size - bucket_length, kStacktraceHeader); | |
114 bucket_length += snprintf(buf + bucket_length, size - bucket_length, | |
jar (doing other things)
2011/12/28 08:30:17
Here again you'd get into trouble if bucket_length
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
Fixed in the same way above.
On 2011/12/28 08:30:
| |
115 "%10s %10s\n", "virtual", "committed"); | |
116 | |
117 // Fill buf with stack trace buckets. | |
118 bucket_length = FillBucketTable(heap_profile_->malloc_table_, | |
119 buf, size, bucket_length, &stats); | |
120 bucket_length = FillBucketTable(heap_profile_->mmap_table_, | |
121 buf, size, bucket_length, &stats); | |
122 | |
123 RAW_DCHECK(bucket_length < size, ""); | |
jar (doing other things)
2011/12/28 08:30:17
Between truncations and possible errors along the
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
It detects buffer overrun now by the fixes above.
| |
124 | |
125 // Note: Don't allocate any memory until here. | |
126 | |
127 // Write the bucket listing into a .bucket file. | |
128 WriteBucketsToBucketFile(); | |
129 | |
130 int64 dt = CycleClock::Now() - start_time; | |
jar (doing other things)
2011/12/28 08:30:17
nit: avoid abreviations like "dt"
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Done.
| |
131 double dtf = dt / CyclesPerSecond(); | |
132 RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", dtf); | |
jar (doing other things)
2011/12/28 08:30:17
There has been a move away from logging data than
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
We don't have DLOG in third_party/tcmalloc. It's
| |
133 | |
134 return bucket_length; | |
135 } | |
136 | |
137 DeepHeapProfile::DeepBucket* | |
138 DeepHeapProfile::GetDeepBucket(Bucket* bucket) { | |
139 DeepBucket* found = deep_bucket_map_->FindMutable(bucket); | |
140 if (found == NULL) { | |
141 DeepBucket created; | |
142 created.bucket = bucket; | |
143 created.committed_size = 0; | |
144 created.id = (bucket_id_++); | |
145 created.is_logged = false; | |
146 deep_bucket_map_->Insert(bucket, created); | |
147 return deep_bucket_map_->FindMutable(bucket); | |
148 } else { | |
149 return found; | |
150 } | |
151 } | |
152 | |
153 void DeepHeapProfile::ResetCommittedSize(Bucket** table) { | |
154 for (int i = 0; i < kHashTableSize; i++) { | |
155 for (Bucket* b = table[i]; b != 0; b = b->next) { | |
156 DeepBucket* db = GetDeepBucket(b); | |
157 db->committed_size = 0; | |
158 } | |
159 } | |
160 } | |
161 | |
162 int DeepHeapProfile::FillBucketTable(Bucket** table, | |
163 char buf[], int size, int bucket_length, | |
164 HeapProfileTable::Stats* stats) { | |
165 for (int i = 0; i < kHashTableSize; i++) { | |
166 for (Bucket* b = table[i]; b != 0; b = b->next) { | |
167 if (b->alloc_size - b->free_size == 0) | |
168 continue; // Skip empty buckets | |
169 const DeepBucket& db = *GetDeepBucket(b); | |
170 bucket_length = UnparseBucket(db, buf, bucket_length, size, "", stats); | |
171 } | |
172 } | |
173 return bucket_length; | |
174 } | |
175 | |
176 // This function need to be called after each fork. | |
177 void DeepHeapProfile::OpenProcPagemap() { | |
178 char filename[100]; | |
179 sprintf(filename, "/proc/%d/pagemap", getpid()); | |
180 pagemap_fd_ = open(filename, O_RDONLY); | |
181 RAW_DCHECK(pagemap_fd_ != -1, "Failed to open /proc/self/pagemap"); | |
182 } | |
183 | |
184 bool DeepHeapProfile::SeekProcPagemap(uint64 address) { | |
185 uint64 index = (address / PAGE_SIZE) * PAGEMAP_BYTES; | |
186 uint64 o = lseek64(pagemap_fd_, index, SEEK_SET); | |
187 RAW_DCHECK(o == index, ""); | |
188 return true; | |
189 } | |
190 | |
191 bool DeepHeapProfile::ReadProcPagemap(PageState* state) { | |
192 static const uint64 U64_1 = 1; | |
193 static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1; | |
194 static const uint64 PAGE_PRESENT = U64_1 << 63; | |
195 static const uint64 PAGE_SWAP = U64_1 << 62; | |
196 static const uint64 PAGE_RESERVED = U64_1 << 61; | |
197 static const uint64 FLAG_NOPAGE = U64_1 << 20; | |
198 static const uint64 FLAG_KSM = U64_1 << 21; | |
199 static const uint64 FLAG_MMAP = U64_1 << 11; | |
200 | |
201 uint64 pagemap_value; | |
202 int result = read(pagemap_fd_, &pagemap_value, PAGEMAP_BYTES); | |
203 if (result != PAGEMAP_BYTES) | |
204 return false; | |
205 | |
206 // Check if the page is committed. | |
207 state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP)); | |
208 | |
209 state->is_present = (pagemap_value & PAGE_PRESENT); | |
210 state->is_swapped = (pagemap_value & PAGE_SWAP); | |
211 state->is_shared = false; | |
212 | |
213 return true; | |
214 } | |
215 | |
216 size_t DeepHeapProfile::GetCommittedSize(uint64 address, size_t size) { | |
217 uint64 page_address = (address / PAGE_SIZE) * PAGE_SIZE; | |
218 size_t committed_size = 0; | |
219 | |
220 SeekProcPagemap(address); | |
221 | |
222 // Check every pages on which the allocation reside. | |
jar (doing other things)
2011/12/28 08:30:17
nit: pages--> page
reside-->resides
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Done.
| |
223 while (page_address < address + size) { | |
jar (doing other things)
2011/12/28 08:30:17
Does this infinite loop when allocation is made in
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
I guess the page 0xffffffffffff0000- is not mapped
| |
224 // Read corresponding physical page. | |
225 PageState state; | |
226 if (ReadProcPagemap(&state) == false) { | |
227 // We can't read the last region (e.g vsyscall). | |
228 RAW_LOG(0, "pagemap read failed @ %#llx %"PRId64" bytes", address, size); | |
jar (doing other things)
2011/12/28 08:30:17
DLOG??
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
The same as above.
On 2011/12/28 08:30:17, jar wr
| |
229 return 0; | |
230 } | |
231 | |
232 if (state.is_committed) { | |
233 // Calculate the size of the allocation part in this page. | |
234 size_t bytes = PAGE_SIZE; | |
235 if (page_address < address) | |
236 bytes -= address - page_address; | |
237 if (address + size < page_address + PAGE_SIZE) | |
jar (doing other things)
2011/12/28 08:30:17
Can page_address + PAGE_SIZE == 0? (this is agai
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Changed the condition: address + size <= page_addr
| |
238 bytes -= PAGE_SIZE - (address + size - page_address); | |
jar (doing other things)
2011/12/28 08:30:17
I had to read this a bit to be sure it made sense.
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Cool, agreed. Moved and modified it.
On 2011/12/
| |
239 | |
240 committed_size += bytes; | |
241 } | |
242 page_address += PAGE_SIZE; | |
243 } | |
244 | |
245 return committed_size; | |
246 } | |
247 | |
248 void DeepHeapProfile::InitRegionStats(RegionStats* stats) { | |
249 stats->virtual_bytes = 0; | |
250 stats->committed_bytes = 0; | |
251 } | |
252 | |
253 void DeepHeapProfile::RecordRegionStats(uint64 start, | |
254 uint64 end, | |
255 RegionStats* stats) { | |
256 size_t size = static_cast<size_t>(end - start); | |
257 stats->virtual_bytes += size; | |
258 stats->committed_bytes += GetCommittedSize(start, size); | |
259 } | |
260 | |
261 void DeepHeapProfile::GetGlobalStats() { | |
262 ProcMapsIterator::Buffer iterator_buffer; | |
263 ProcMapsIterator it(0, &iterator_buffer); | |
264 uint64 start, end, offset; | |
265 int64 inode; | |
266 char *flags, *filename; | |
267 | |
268 InitRegionStats(&(stats_.total)); | |
269 InitRegionStats(&(stats_.file_mapped)); | |
270 InitRegionStats(&(stats_.anonymous)); | |
271 InitRegionStats(&(stats_.other)); | |
272 | |
273 while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) { | |
274 if (strcmp("[vsyscall]", filename) == 0) | |
275 continue; // pagemap read fails in this region | |
276 | |
277 int64 committed_bytes = stats_.total.committed_bytes; | |
278 RecordRegionStats(start, end, &(stats_.total)); | |
279 committed_bytes = stats_.total.committed_bytes - committed_bytes; | |
280 | |
281 if (filename[0] == '/') { | |
282 RecordRegionStats(start, end, &(stats_.file_mapped)); | |
283 } else if (filename[0] == '\0' || | |
284 filename[0] == '\n' || | |
285 filename[0] == EOF) { | |
jar (doing other things)
2011/12/28 08:30:17
I'm used to seeing EOF as an int (wider than a cha
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Thanks for the good catch. It's wrong (though /pr
| |
286 RecordRegionStats(start, end, &(stats_.anonymous)); | |
287 } else { | |
288 RecordRegionStats(start, end, &(stats_.other)); | |
289 } | |
290 } | |
291 } | |
292 | |
293 void DeepHeapProfile::RecordAlloc(const void* pointer, | |
294 AllocValue* v, | |
295 DeepHeapProfile* deep_profile) { | |
296 uint64 alloc_address = (uint64) pointer; | |
jar (doing other things)
2011/12/28 08:30:17
nit: use a reinterpret_cast<>()
There may also be
Dai Mikurube (NOT FULLTIME)
2012/01/06 01:25:23
Exactly. Replaced.
uint64 comes from ProcMapsIte
| |
297 size_t committed = deep_profile->GetCommittedSize(alloc_address, v->bytes); | |
298 | |
299 (deep_profile->GetDeepBucket(v->bucket()))->committed_size += committed; | |
300 deep_profile->stats_.record_tcmalloc.virtual_bytes += v->bytes; | |
301 deep_profile->stats_.record_tcmalloc.committed_bytes += committed; | |
302 } | |
303 | |
304 void DeepHeapProfile::RecordMMap(const void* pointer, | |
305 AllocValue* v, | |
306 DeepHeapProfile* deep_profile) { | |
307 uint64 alloc_address = (uint64) pointer; | |
308 size_t committed = deep_profile->GetCommittedSize(alloc_address, v->bytes); | |
309 | |
310 (deep_profile->GetDeepBucket(v->bucket()))->committed_size += committed; | |
311 deep_profile->stats_.record_mmap.virtual_bytes += v->bytes; | |
312 deep_profile->stats_.record_mmap.committed_bytes += committed; | |
313 } | |
314 | |
315 void DeepHeapProfile::RecordAllAllocs() { | |
316 stats_.record_mmap.virtual_bytes = 0; | |
317 stats_.record_mmap.committed_bytes = 0; | |
318 stats_.record_tcmalloc.virtual_bytes = 0; | |
319 stats_.record_tcmalloc.committed_bytes = 0; | |
320 | |
321 // Tcmalloc allocs | |
322 heap_profile_->allocation_->Iterate(RecordAlloc, this); | |
323 | |
324 // Mmap allocs | |
325 heap_profile_->mmap_allocation_->Iterate(RecordMMap, this); | |
326 } | |
327 | |
328 void DeepHeapProfile::WriteMapsToFile(char buf[], int size) { | |
329 char file_name[100]; | |
330 snprintf(file_name, sizeof(file_name), | |
331 "%s.%05d.maps", filename_prefix_, getpid()); | |
332 | |
333 RawFD maps_fd = RawOpenForWriting(file_name); | |
334 RAW_DCHECK(maps_fd != kIllegalRawFD, ""); | |
335 | |
336 int map_length; | |
337 bool wrote_all; | |
338 map_length = tcmalloc::FillProcSelfMaps( | |
339 profiler_buffer_, kProfilerBufferSize, &wrote_all); | |
340 RAW_DCHECK(wrote_all, ""); | |
341 RAW_DCHECK(map_length <= kProfilerBufferSize, ""); | |
342 RawWrite(maps_fd, profiler_buffer_, map_length); | |
343 RawClose(maps_fd); | |
344 } | |
345 | |
346 int DeepHeapProfile::FillBucketForBucketFile(const DeepBucket* deep_bucket, | |
347 char buf[], int bufsize) { | |
348 const Bucket* bucket = deep_bucket->bucket; | |
349 int buflen = 0; | |
350 buflen += snprintf(buf + buflen, bufsize - buflen, "%05d", | |
351 deep_bucket->id); | |
352 for (int d = 0; d < bucket->depth; d++) { | |
353 buflen += snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR, | |
jar (doing other things)
2011/12/28 08:30:17
Here again, I think there is a problem when buflen
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
Done.
| |
354 reinterpret_cast<uintptr_t>(bucket->stack[d])); | |
355 } | |
356 buflen += snprintf(buf + buflen, bufsize - buflen, "\n"); | |
357 return buflen; | |
358 } | |
359 | |
360 void DeepHeapProfile::WriteBucketsTableToBucketFile(Bucket** table, | |
361 RawFD bucket_fd) { | |
362 // We will use the global buffer here. | |
363 char* buf = profiler_buffer_; | |
364 int size = kProfilerBufferSize; | |
365 int buflen = 0; | |
366 | |
367 for (int i = 0; i < kHashTableSize; i++) { | |
368 for (Bucket* b = table[i]; b != 0; b = b->next) { | |
369 DeepBucket* db = GetDeepBucket(b); | |
370 if (db->is_logged) continue; // Skip the bucket if it is already logged | |
371 if (b->alloc_size - b->free_size <= 64) continue; // Skip small buckets | |
372 | |
373 buflen += FillBucketForBucketFile(db, buf + buflen, size - buflen); | |
374 db->is_logged = true; | |
375 | |
376 // Write to file if buffer 80% full. | |
377 if (buflen > size * 0.8) { | |
378 RawWrite(bucket_fd, buf, buflen); | |
379 buflen = 0; | |
380 } | |
381 } | |
382 } | |
383 | |
384 RawWrite(bucket_fd, buf, buflen); | |
385 } | |
386 | |
// Writes the bucket listing of both allocation tables into a file named
// "<prefix>.<pid>.<dump count>.buckets".
void DeepHeapProfile::WriteBucketsToBucketFile() {
  char file_name[100];
  snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.buckets",
           filename_prefix_, getpid(), dump_count_);
  RawFD bucket_fd = RawOpenForWriting(file_name);
  RAW_DCHECK(bucket_fd != kIllegalRawFD, "");

  WriteBucketsTableToBucketFile(heap_profile_->malloc_table_, bucket_fd);
  WriteBucketsTableToBucketFile(heap_profile_->mmap_table_, bucket_fd);

  RawClose(bucket_fd);
}
399 | |
// Appends one profile line for |deep_bucket| ("<live bytes> <committed
// bytes> <allocs> <frees> @<extra> <bucket id>") to |buf| at |buflen|,
// also accumulating the bucket's counters into |profile_stats| when it is
// non-NULL.  Returns the new length of the data in |buf|; on snprintf
// failure or truncation the length is returned unchanged.
int DeepHeapProfile::UnparseBucket(const DeepBucket& deep_bucket,
                                   char* buf, int buflen, int bufsize,
                                   const char* extra,
                                   Stats* profile_stats) {
  const Bucket& bucket = *deep_bucket.bucket;
  if (profile_stats != NULL) {
    profile_stats->allocs += bucket.allocs;
    profile_stats->alloc_size += bucket.alloc_size;
    profile_stats->frees += bucket.frees;
    profile_stats->free_size += bucket.free_size;
  }

  int printed = snprintf(buf + buflen, bufsize - buflen,
      "%10"PRId64" %10"PRId64" %6d %6d @%s %d\n",
      bucket.alloc_size - bucket.free_size, deep_bucket.committed_size,
      bucket.allocs, bucket.frees, extra, deep_bucket.id);
  // If it looks like the snprintf failed, ignore the fact we printed anything.
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;

  return buflen;
}
422 | |
423 int DeepHeapProfile::UnparseRegionStats(const RegionStats* stats, | |
424 const char* name, | |
425 char* buf, | |
426 int buflen, | |
427 int bufsize) { | |
428 int printed = snprintf(buf + buflen, bufsize - buflen, | |
429 "%15s %10ld %10ld\n", | |
430 name, | |
431 stats->virtual_bytes, | |
432 stats->committed_bytes); | |
433 | |
434 return buflen + printed; | |
jar (doing other things)
2011/12/28 08:30:17
This again may cause problems when line 426 trunca
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
Done.
| |
435 } | |
436 | |
437 int DeepHeapProfile::UnparseGlobalStats(char* buf, int buflen, int bufsize) { | |
438 buflen += snprintf(buf + buflen, bufsize - buflen, | |
jar (doing other things)
2011/12/28 08:30:17
When buflen is larger than bufsize, we'll get a ne
Dai Mikurube (NOT FULLTIME)
2012/01/06 02:18:23
Done.
| |
439 "%15s %10s %10s\n", | |
440 "", "virtual", "committed"); | |
441 | |
442 buflen = UnparseRegionStats( | |
443 &(stats_.total), "total", buf, buflen, bufsize); | |
444 buflen = UnparseRegionStats( | |
445 &(stats_.file_mapped), "file mapped", buf, buflen, bufsize); | |
446 buflen = UnparseRegionStats( | |
447 &(stats_.anonymous), "anonymous", buf, buflen, bufsize); | |
448 buflen = UnparseRegionStats( | |
449 &(stats_.other), "other", buf, buflen, bufsize); | |
450 buflen = UnparseRegionStats( | |
451 &(stats_.record_mmap), "mmap", buf, buflen, bufsize); | |
452 buflen = UnparseRegionStats( | |
453 &(stats_.record_tcmalloc), "tcmalloc", buf, buflen, bufsize); | |
454 return buflen; | |
455 } | |
456 #else // DEEP_HEAP_PROFILE | |
457 | |
// Stub constructor used when DEEP_HEAP_PROFILE is compiled out: only the
// wrapped heap profiler is retained; |prefix| is unused.
DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
                                 const char* prefix)
    : heap_profile_(heap_profile) {
}
462 | |
// Stub destructor: nothing was allocated, so nothing to release.
DeepHeapProfile::~DeepHeapProfile() {
}
465 | |
// Without deep profiling, delegate directly to the ordinary heap profiler.
int DeepHeapProfile::FillOrderedProfile(char buf[], int size) {
  return heap_profile_->FillOrderedProfile(buf, size);
}
469 | |
470 #endif // DEEP_HEAP_PROFILE | |
OLD | NEW |