OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 41 matching lines...) |
52 #endif | 52 #endif |
53 #include <errno.h> | 53 #include <errno.h> |
54 #include <stdarg.h> | 54 #include <stdarg.h> |
55 #include <string> | 55 #include <string> |
56 #include <map> | 56 #include <map> |
57 #include <algorithm> // for sort(), equal(), and copy() | 57 #include <algorithm> // for sort(), equal(), and copy() |
58 | 58 |
59 #include "heap-profile-table.h" | 59 #include "heap-profile-table.h" |
60 | 60 |
61 #include "base/logging.h" | 61 #include "base/logging.h" |
| 62 #include "memory_region_map.h" |
62 #include "raw_printer.h" | 63 #include "raw_printer.h" |
63 #include "symbolize.h" | 64 #include "symbolize.h" |
64 #include <google/stacktrace.h> | 65 #include <google/stacktrace.h> |
65 #include <google/malloc_hook.h> | 66 #include <google/malloc_hook.h> |
66 #include "base/commandlineflags.h" | 67 #include "base/commandlineflags.h" |
67 #include "base/logging.h" // for the RawFD I/O commands | 68 #include "base/logging.h" // for the RawFD I/O commands |
68 #include "base/sysinfo.h" | 69 #include "base/sysinfo.h" |
69 | 70 |
70 using std::sort; | 71 using std::sort; |
71 using std::equal; | 72 using std::equal; |
(...skipping 19 matching lines...) |
91 // header of the dumped heap profile | 92 // header of the dumped heap profile |
92 static const char kProfileHeader[] = "heap profile: "; | 93 static const char kProfileHeader[] = "heap profile: "; |
93 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; | 94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; |
94 | 95 |
95 //---------------------------------------------------------------------- | 96 //---------------------------------------------------------------------- |
96 | 97 |
97 const char HeapProfileTable::kFileExt[] = ".heap"; | 98 const char HeapProfileTable::kFileExt[] = ".heap"; |
98 | 99 |
99 //---------------------------------------------------------------------- | 100 //---------------------------------------------------------------------- |
100 | 101 |
101 static const int kHashTableSize = 179999; // Size for table_. | 102 // Size for alloc_table_ and mmap_table_. |
| 103 static const int kHashTableSize = 179999; |
102 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 104 /*static*/ const int HeapProfileTable::kMaxStackDepth; |
103 | 105 |
104 //---------------------------------------------------------------------- | 106 //---------------------------------------------------------------------- |
105 | 107 |
106 // We strip out a different number of stack frames in debug mode | 108 // We strip out a different number of stack frames in debug mode |
107 // because less inlining happens in that case | 109 // because less inlining happens in that case |
108 #ifdef NDEBUG | 110 #ifdef NDEBUG |
109 static const int kStripFrames = 2; | 111 static const int kStripFrames = 2; |
110 #else | 112 #else |
111 static const int kStripFrames = 3; | 113 static const int kStripFrames = 3; |
112 #endif | 114 #endif |
113 | 115 |
114 // For sorting Stats or Buckets by in-use space | 116 // For sorting Stats or Buckets by in-use space |
115 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 117 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, |
116 HeapProfileTable::Stats* b) { | 118 HeapProfileTable::Stats* b) { |
117 // Return true iff "a" has more allocated space than "b" | 119 // Return true iff "a" has more allocated space than "b" |
118 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 120 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); |
119 } | 121 } |
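
Since the comparator encodes in-use space as cumulative allocations minus cumulative frees, it plugs straight into std::sort; a minimal sketch with invented Stats values:

// Illustration only: order three Stats records by in-use space.
HeapProfileTable::Stats s1, s2, s3;
memset(&s1, 0, sizeof(s1));
memset(&s2, 0, sizeof(s2));
memset(&s3, 0, sizeof(s3));
s1.alloc_size = 400; s1.free_size = 100;  // 300 bytes in use
s2.alloc_size = 900; s2.free_size = 100;  // 800 bytes in use
s3.alloc_size = 200;                      // 200 bytes in use
HeapProfileTable::Stats* list[] = { &s1, &s2, &s3 };
sort(list, list + 3, ByAllocatedSpace);   // yields s2, s1, s3
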
120 | 122 |
121 //---------------------------------------------------------------------- | 123 //---------------------------------------------------------------------- |
122 | 124 |
123 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 125 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) |
124 : alloc_(alloc), dealloc_(dealloc) { | 126 : alloc_(alloc), dealloc_(dealloc) { |
125 // Make the table | 127 // Initialize the overall profile stats. |
126 const int table_bytes = kHashTableSize * sizeof(*table_); | |
127 table_ = reinterpret_cast<Bucket**>(alloc_(table_bytes)); | |
128 memset(table_, 0, table_bytes); | |
129 // Make allocation map | |
130 allocation_ = | |
131 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
132 // init the rest: | |
133 memset(&total_, 0, sizeof(total_)); | 128 memset(&total_, 0, sizeof(total_)); |
134 num_buckets_ = 0; | 129 |
| 130 // Make the malloc table. |
| 131 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); |
| 132 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); |
| 133 memset(alloc_table_, 0, alloc_table_bytes); |
| 134 num_alloc_buckets_ = 0; |
| 135 |
| 136 // Initialize the mmap table. |
| 137 mmap_table_ = NULL; |
| 138 num_available_mmap_buckets_ = 0; |
| 139 |
| 140 // Make malloc and mmap allocation maps. |
| 141 alloc_address_map_ = |
| 142 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |
| 143 mmap_address_map_ = NULL; |
135 } | 144 } |
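
For orientation: each slot of these tables heads a chain of Bucket records. The Bucket declaration in heap-profile-table.h is roughly the following; treat this as a sketch of the header, not part of this diff:

// Per-call-site record: allocation stats plus the stack trace that
// identifies the call site, chained for hash-collision handling.
struct Bucket : public Stats {
  uintptr_t    hash;   // hash of the stack trace below
  int          depth;  // number of frames in 'stack'
  const void** stack;  // allocation call stack, 'depth' frames
  Bucket*      next;   // next bucket in this hash chain
};
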
136 | 145 |
137 HeapProfileTable::~HeapProfileTable() { | 146 HeapProfileTable::~HeapProfileTable() { |
138 // free allocation map | 147 DeallocateBucketTable(alloc_table_); |
139 allocation_->~AllocationMap(); | 148 alloc_table_ = NULL; |
140 dealloc_(allocation_); | 149 DeallocateBucketTable(mmap_table_); |
141 allocation_ = NULL; | 150 mmap_table_ = NULL; |
142 // free hash table | 151 DeallocateAllocationMap(alloc_address_map_); |
143 for (int b = 0; b < kHashTableSize; b++) { | 152 alloc_address_map_ = NULL; |
144 for (Bucket* x = table_[b]; x != 0; /**/) { | 153 DeallocateAllocationMap(mmap_address_map_); |
145 Bucket* b = x; | 154 mmap_address_map_ = NULL; |
146 x = x->next; | |
147 dealloc_(b->stack); | |
148 dealloc_(b); | |
149 } | |
150 } | |
151 dealloc_(table_); | |
152 table_ = NULL; | |
153 } | 155 } |
154 | 156 |
155 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth, | 157 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { |
156 const void* const key[]) { | 158 if (allocation != NULL) { |
 | 159 allocation->~AllocationMap(); |
| 160 dealloc_(allocation); |
| 161 } |
| 162 } |
| 163 |
| 164 void HeapProfileTable::DeallocateBucketTable(Bucket** table) { |
| 165 if (table != NULL) { |
| 166 for (int b = 0; b < kHashTableSize; b++) { |
| 167 for (Bucket* x = table[b]; x != 0; /**/) { |
 | 168 Bucket* bucket = x; |
 | 169 x = x->next; |
 | 170 dealloc_(bucket->stack); |
 | 171 dealloc_(bucket); |
| 172 } |
| 173 } |
| 174 dealloc_(table); |
| 175 } |
| 176 } |
| 177 |
| 178 HeapProfileTable::Bucket* HeapProfileTable::GetBucket( |
| 179 int depth, const void* const key[], Bucket** table, |
| 180 int* bucket_count) { |
157 // Compute the hash value | 181 // Compute the hash value |
158 uintptr_t h = 0; | 182 uintptr_t h = 0; |
159 for (int i = 0; i < depth; i++) { | 183 for (int i = 0; i < depth; i++) { |
160 h += reinterpret_cast<uintptr_t>(key[i]); | 184 h += reinterpret_cast<uintptr_t>(key[i]); |
161 h += h << 10; | 185 h += h << 10; |
162 h ^= h >> 6; | 186 h ^= h >> 6; |
163 } | 187 } |
164 h += h << 3; | 188 h += h << 3; |
165 h ^= h >> 11; | 189 h ^= h >> 11; |
166 | 190 |
167 // Lookup stack trace in table | 191 // Lookup stack trace in table |
168 unsigned int buck = ((unsigned int) h) % kHashTableSize; | 192 unsigned int buck = ((unsigned int) h) % kHashTableSize; |
169 for (Bucket* b = table_[buck]; b != 0; b = b->next) { | 193 for (Bucket* b = table[buck]; b != 0; b = b->next) { |
170 if ((b->hash == h) && | 194 if ((b->hash == h) && |
171 (b->depth == depth) && | 195 (b->depth == depth) && |
172 equal(key, key + depth, b->stack)) { | 196 equal(key, key + depth, b->stack)) { |
173 return b; | 197 return b; |
174 } | 198 } |
175 } | 199 } |
176 | 200 |
177 // Create new bucket | 201 // Create new bucket |
178 const size_t key_size = sizeof(key[0]) * depth; | 202 const size_t key_size = sizeof(key[0]) * depth; |
179 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 203 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); |
180 copy(key, key + depth, kcopy); | 204 copy(key, key + depth, kcopy); |
181 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 205 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); |
182 memset(b, 0, sizeof(*b)); | 206 memset(b, 0, sizeof(*b)); |
183 b->hash = h; | 207 b->hash = h; |
184 b->depth = depth; | 208 b->depth = depth; |
185 b->stack = kcopy; | 209 b->stack = kcopy; |
186 b->next = table_[buck]; | 210 b->next = table[buck]; |
187 table_[buck] = b; | 211 table[buck] = b; |
188 num_buckets_++; | 212 if (bucket_count != NULL) { |
| 213 ++(*bucket_count); |
| 214 } |
189 return b; | 215 return b; |
190 } | 216 } |
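
The mixing above is a variant of Jenkins' one-at-a-time hash applied to the raw frame addresses; the same computation as a self-contained helper, for illustration only:

// Standalone restatement of GetBucket's stack-trace hash.
static uintptr_t HashStack(const void* const key[], int depth) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);  // fold in one frame address
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;   // final mixing steps
  h ^= h >> 11;
  return h;
}
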
191 | 217 |
192 void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes, | 218 void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes, |
193 int skip_count) { | 219 int skip_count) { |
194 void* key[kMaxStackDepth]; | 220 void* key[kMaxStackDepth]; |
195 int depth = MallocHook::GetCallerStackTrace( | 221 int depth = MallocHook::GetCallerStackTrace( |
196 key, kMaxStackDepth, kStripFrames + skip_count + 1); | 222 key, kMaxStackDepth, kStripFrames + skip_count + 1); |
197 RecordAllocWithStack(ptr, bytes, depth, key); | 223 RecordAllocWithStack(ptr, bytes, depth, key); |
198 } | 224 } |
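
The skip count passed to GetCallerStackTrace prunes the profiler's own frames so the stored stack begins at the application call site. An illustration with hypothetical frame names:

// Capture-time call chain, innermost frame first (names invented):
//   MallocHook::GetCallerStackTrace   <- the "+ 1" skips this helper
//   HeapProfileTable::RecordAlloc     <- kStripFrames covers these
//   malloc wrapper / hook plumbing    <- skip_count drops caller-added frames
//   MyApplicationFunction             <- first frame stored in key[]
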
199 | 225 |
200 void HeapProfileTable::RecordAllocWithStack( | 226 void HeapProfileTable::RecordAllocWithStack( |
201 const void* ptr, size_t bytes, int stack_depth, | 227 const void* ptr, size_t bytes, int stack_depth, |
202 const void* const call_stack[]) { | 228 const void* const call_stack[]) { |
203 Bucket* b = GetBucket(stack_depth, call_stack); | 229 Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_, |
| 230 &num_alloc_buckets_); |
204 b->allocs++; | 231 b->allocs++; |
205 b->alloc_size += bytes; | 232 b->alloc_size += bytes; |
206 total_.allocs++; | 233 total_.allocs++; |
207 total_.alloc_size += bytes; | 234 total_.alloc_size += bytes; |
208 | 235 |
209 AllocValue v; | 236 AllocValue v; |
210 v.set_bucket(b); // also did set_live(false); set_ignore(false) | 237 v.set_bucket(b); // also did set_live(false); set_ignore(false) |
211 v.bytes = bytes; | 238 v.bytes = bytes; |
212 allocation_->Insert(ptr, v); | 239 alloc_address_map_->Insert(ptr, v); |
213 } | 240 } |
214 | 241 |
215 void HeapProfileTable::RecordFree(const void* ptr) { | 242 void HeapProfileTable::RecordFree(const void* ptr) { |
216 AllocValue v; | 243 AllocValue v; |
217 if (allocation_->FindAndRemove(ptr, &v)) { | 244 if (alloc_address_map_->FindAndRemove(ptr, &v)) { |
218 Bucket* b = v.bucket(); | 245 Bucket* b = v.bucket(); |
219 b->frees++; | 246 b->frees++; |
220 b->free_size += v.bytes; | 247 b->free_size += v.bytes; |
221 total_.frees++; | 248 total_.frees++; |
222 total_.free_size += v.bytes; | 249 total_.free_size += v.bytes; |
223 } | 250 } |
224 } | 251 } |
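
Both paths only ever increment the cumulative counters, so in-use state is always a difference of totals. For example, after RecordAlloc(p, 100, 0) followed by RecordFree(p):

//   total_.allocs     == 1      total_.frees     == 1
//   total_.alloc_size == 100    total_.free_size == 100
//   in use: allocs - frees == 0 objects,
//           alloc_size - free_size == 0 bytes
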
225 | 252 |
226 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 253 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { |
227 const AllocValue* alloc_value = allocation_->Find(ptr); | 254 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); |
228 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 255 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
229 return alloc_value != NULL; | 256 return alloc_value != NULL; |
230 } | 257 } |
231 | 258 |
232 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 259 bool HeapProfileTable::FindAllocDetails(const void* ptr, |
233 AllocInfo* info) const { | 260 AllocInfo* info) const { |
234 const AllocValue* alloc_value = allocation_->Find(ptr); | 261 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); |
235 if (alloc_value != NULL) { | 262 if (alloc_value != NULL) { |
236 info->object_size = alloc_value->bytes; | 263 info->object_size = alloc_value->bytes; |
237 info->call_stack = alloc_value->bucket()->stack; | 264 info->call_stack = alloc_value->bucket()->stack; |
238 info->stack_depth = alloc_value->bucket()->depth; | 265 info->stack_depth = alloc_value->bucket()->depth; |
239 } | 266 } |
240 return alloc_value != NULL; | 267 return alloc_value != NULL; |
241 } | 268 } |
242 | 269 |
243 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 270 bool HeapProfileTable::FindInsideAlloc(const void* ptr, |
244 size_t max_size, | 271 size_t max_size, |
245 const void** object_ptr, | 272 const void** object_ptr, |
246 size_t* object_size) const { | 273 size_t* object_size) const { |
247 const AllocValue* alloc_value = | 274 const AllocValue* alloc_value = |
248 allocation_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 275 alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); |
249 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 276 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
250 return alloc_value != NULL; | 277 return alloc_value != NULL; |
251 } | 278 } |
252 | 279 |
253 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 280 bool HeapProfileTable::MarkAsLive(const void* ptr) { |
254 AllocValue* alloc = allocation_->FindMutable(ptr); | 281 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); |
255 if (alloc && !alloc->live()) { | 282 if (alloc && !alloc->live()) { |
256 alloc->set_live(true); | 283 alloc->set_live(true); |
257 return true; | 284 return true; |
258 } | 285 } |
259 return false; | 286 return false; |
260 } | 287 } |
261 | 288 |
262 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 289 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
263 AllocValue* alloc = allocation_->FindMutable(ptr); | 290 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); |
264 if (alloc) { | 291 if (alloc) { |
265 alloc->set_ignore(true); | 292 alloc->set_ignore(true); |
266 } | 293 } |
267 } | 294 } |
268 | 295 |
269 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. | 296 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. |
270 int HeapProfileTable::UnparseBucket(const Bucket& b, | 297 int HeapProfileTable::UnparseBucket(const Bucket& b, |
271 char* buf, int buflen, int bufsize, | 298 char* buf, int buflen, int bufsize, |
272 const char* extra, | 299 const char* extra, |
273 Stats* profile_stats) { | 300 Stats* profile_stats) { |
274 if (profile_stats != NULL) { | 301 if (profile_stats != NULL) { |
275 profile_stats->allocs += b.allocs; | 302 profile_stats->allocs += b.allocs; |
276 profile_stats->alloc_size += b.alloc_size; | 303 profile_stats->alloc_size += b.alloc_size; |
277 profile_stats->frees += b.frees; | 304 profile_stats->frees += b.frees; |
278 profile_stats->free_size += b.free_size; | 305 profile_stats->free_size += b.free_size; |
279 } | 306 } |
280 int printed = | 307 int printed = snprintf(buf + buflen, bufsize - buflen, |
281 snprintf(buf + buflen, bufsize - buflen, "%6d: %8"PRId64" [%6d: %8"PRId64"] @%s", | 308 "%6d: %8"PRId64" [%6d: %8"PRId64"] @%s", |
282 b.allocs - b.frees, | 309 b.allocs - b.frees, b.alloc_size - b.free_size, |
283 b.alloc_size - b.free_size, | 310 b.allocs, b.alloc_size, extra); |
284 b.allocs, | |
285 b.alloc_size, | |
286 extra); | |
287 // If it looks like the snprintf failed, ignore the fact we printed anything | 311 // If it looks like the snprintf failed, ignore the fact we printed anything |
288 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 312 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
289 buflen += printed; | 313 buflen += printed; |
290 for (int d = 0; d < b.depth; d++) { | 314 for (int d = 0; d < b.depth; d++) { |
291 printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR, | 315 printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR, |
292 reinterpret_cast<uintptr_t>(b.stack[d])); | 316 reinterpret_cast<uintptr_t>(b.stack[d])); |
293 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 317 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
294 buflen += printed; | 318 buflen += printed; |
295 } | 319 } |
296 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 320 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); |
297 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 321 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
298 buflen += printed; | 322 buflen += printed; |
299 return buflen; | 323 return buflen; |
300 } | 324 } |
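
A bucket line produced by the formats above reads "live objects: live bytes [cumulative allocs: cumulative bytes] @ stack"; with invented numbers and addresses:

//     42:     8192 [   100:    20480] @ 0x0040a3f1 0x0040b2c7 0x004010e5
// The totals line passes extra = " heapprofile", placing that tag
// right after the '@'.
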
301 | 325 |
302 HeapProfileTable::Bucket** | 326 HeapProfileTable::Bucket** |
303 HeapProfileTable::MakeSortedBucketList() const { | 327 HeapProfileTable::MakeSortedBucketList() const { |
304 Bucket** list = | 328 Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * |
305 reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); | 329 (num_alloc_buckets_ + num_available_mmap_buckets_))); |
| 330 |
| 331 RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, ""); |
306 | 332 |
307 int n = 0; | 333 int n = 0; |
| 334 |
308 for (int b = 0; b < kHashTableSize; b++) { | 335 for (int b = 0; b < kHashTableSize; b++) { |
309 for (Bucket* x = table_[b]; x != 0; x = x->next) { | 336 for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) { |
310 list[n++] = x; | 337 list[n++] = x; |
311 } | 338 } |
312 } | 339 } |
313 RAW_DCHECK(n == num_buckets_, ""); | 340 RAW_DCHECK(n == num_alloc_buckets_, ""); |
314 | 341 |
315 sort(list, list + num_buckets_, ByAllocatedSpace); | 342 if (mmap_table_ != NULL) { |
| 343 for (int b = 0; b < kHashTableSize; b++) { |
| 344 for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) { |
| 345 list[n++] = x; |
| 346 } |
| 347 } |
| 348 } |
| 349 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); |
| 350 |
| 351 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, |
| 352 ByAllocatedSpace); |
316 | 353 |
317 return list; | 354 return list; |
318 } | 355 } |
319 | 356 |
| 357 void HeapProfileTable::RefreshMMapData() { |
| 358 // Make the table |
| 359 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); |
| 360 if (mmap_table_ == NULL) { |
| 361 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); |
| 362 memset(mmap_table_, 0, mmap_table_bytes); |
| 363 } |
| 364 num_available_mmap_buckets_ = 0; |
| 365 |
| 366 ClearMMapData(); |
| 367 mmap_address_map_ = |
| 368 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |
| 369 |
| 370 MemoryRegionMap::LockHolder l; |
| 371 for (MemoryRegionMap::RegionIterator r = |
| 372 MemoryRegionMap::BeginRegionLocked(); |
| 373 r != MemoryRegionMap::EndRegionLocked(); ++r) { |
| 374 Bucket* b = |
| 375 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); |
| 376 if (b->alloc_size == 0) { |
| 377 num_available_mmap_buckets_ += 1; |
| 378 } |
| 379 b->allocs += 1; |
| 380 b->alloc_size += r->end_addr - r->start_addr; |
| 381 |
| 382 AllocValue v; |
| 383 v.set_bucket(b); |
| 384 v.bytes = r->end_addr - r->start_addr; |
| 385 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); |
| 386 } |
| 387 } |
| 388 |
| 389 void HeapProfileTable::ClearMMapData() { |
| 390 if (mmap_address_map_ != NULL) { |
| 391 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); |
| 392 mmap_address_map_->~AllocationMap(); |
| 393 dealloc_(mmap_address_map_); |
| 394 mmap_address_map_ = NULL; |
| 395 } |
| 396 } |
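
A plausible dump cycle around this pair, sketched under the assumption that the surrounding profiler owns MemoryRegionMap setup and the output buffer; the names here are invented, not taken from this file:

heap_profile->RefreshMMapData();  // rebuild mmap buckets from live regions
const int len = heap_profile->FillOrderedProfile(buf, buf_size);
RawWrite(fd, buf, len);           // RawFD I/O from base/logging.h
heap_profile->ClearMMapData();    // zero bucket counts, free the region map
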
| 397 |
320 void HeapProfileTable::IterateOrderedAllocContexts( | 398 void HeapProfileTable::IterateOrderedAllocContexts( |
321 AllocContextIterator callback) const { | 399 AllocContextIterator callback) const { |
322 Bucket** list = MakeSortedBucketList(); | 400 Bucket** list = MakeSortedBucketList(); |
323 AllocContextInfo info; | 401 AllocContextInfo info; |
324 for (int i = 0; i < num_buckets_; ++i) { | 402 for (int i = 0; i < num_alloc_buckets_; ++i) { |
325 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 403 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
326 info.stack_depth = list[i]->depth; | 404 info.stack_depth = list[i]->depth; |
327 info.call_stack = list[i]->stack; | 405 info.call_stack = list[i]->stack; |
328 callback(info); | 406 callback(info); |
329 } | 407 } |
330 dealloc_(list); | 408 dealloc_(list); |
331 } | 409 } |
332 | 410 |
333 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 411 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { |
334 Bucket** list = MakeSortedBucketList(); | 412 Bucket** list = MakeSortedBucketList(); |
(...skipping 11 matching lines...) |
346 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 424 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); |
347 RAW_DCHECK(map_length <= size, ""); | 425 RAW_DCHECK(map_length <= size, ""); |
348 char* const map_start = buf + size - map_length; // move to end | 426 char* const map_start = buf + size - map_length; // move to end |
349 memmove(map_start, buf, map_length); | 427 memmove(map_start, buf, map_length); |
350 size -= map_length; | 428 size -= map_length; |
351 | 429 |
352 Stats stats; | 430 Stats stats; |
353 memset(&stats, 0, sizeof(stats)); | 431 memset(&stats, 0, sizeof(stats)); |
354 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 432 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); |
355 if (bucket_length < 0 || bucket_length >= size) return 0; | 433 if (bucket_length < 0 || bucket_length >= size) return 0; |
356 bucket_length = UnparseBucket(total_, buf, bucket_length, size, | 434 Bucket total_with_mmap(total_); |
| 435 if (mmap_table_ != NULL) { |
| 436 total_with_mmap.alloc_size += MemoryRegionMap::MapSize(); |
| 437 total_with_mmap.free_size += MemoryRegionMap::UnmapSize(); |
| 438 } |
| 439 bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size, |
357 " heapprofile", &stats); | 440 " heapprofile", &stats); |
358 for (int i = 0; i < num_buckets_; i++) { | 441 for (int i = 0; i < num_alloc_buckets_ + num_available_mmap_buckets_; i++) { |
359 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 442 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", |
360 &stats); | 443 &stats); |
361 } | 444 } |
362 RAW_DCHECK(bucket_length < size, ""); | 445 RAW_DCHECK(bucket_length < size, ""); |
363 | 446 |
364 dealloc_(list); | 447 dealloc_(list); |
365 | 448 |
366 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 449 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
367 memmove(buf + bucket_length, map_start, map_length); // close the gap | 450 memmove(buf + bucket_length, map_start, map_length); // close the gap |
368 | 451 |
(...skipping 14 matching lines...) |
383 memset(&b, 0, sizeof(b)); | 466 memset(&b, 0, sizeof(b)); |
384 b.allocs = 1; | 467 b.allocs = 1; |
385 b.alloc_size = v->bytes; | 468 b.alloc_size = v->bytes; |
386 b.depth = v->bucket()->depth; | 469 b.depth = v->bucket()->depth; |
387 b.stack = v->bucket()->stack; | 470 b.stack = v->bucket()->stack; |
388 char buf[1024]; | 471 char buf[1024]; |
389 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); | 472 int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats); |
390 RawWrite(args.fd, buf, len); | 473 RawWrite(args.fd, buf, len); |
391 } | 474 } |
392 | 475 |
| 476 inline void HeapProfileTable::ZeroBucketCountsIterator( |
| 477 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { |
| 478 Bucket* b = v->bucket(); |
| 479 if (b != NULL) { |
| 480 b->allocs = 0; |
| 481 b->alloc_size = 0; |
| 482 b->free_size = 0; |
| 483 b->frees = 0; |
| 484 } |
| 485 } |
| 486 |
393 // Callback from NonLiveSnapshot; adds entry to arg->dest | 487 // Callback from NonLiveSnapshot; adds entry to arg->dest |
394 // if the entry is not live and is not present in arg->base. | 488 // if the entry is not live and is not present in arg->base. |
395 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 489 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, |
396 AddNonLiveArgs* arg) { | 490 AddNonLiveArgs* arg) { |
397 if (v->live()) { | 491 if (v->live()) { |
398 v->set_live(false); | 492 v->set_live(false); |
399 } else { | 493 } else { |
400 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 494 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { |
401 // Present in arg->base, so do not save | 495 // Present in arg->base, so do not save |
402 } else { | 496 } else { |
(...skipping 47 matching lines...) |
450 } | 544 } |
451 } | 545 } |
452 globfree(&g); | 546 globfree(&g); |
453 #else /* HAVE_GLOB_H */ | 547 #else /* HAVE_GLOB_H */ |
454 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 548 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); |
455 #endif | 549 #endif |
456 } | 550 } |
457 | 551 |
458 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 552 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { |
459 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 553 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
460 allocation_->Iterate(AddToSnapshot, s); | 554 alloc_address_map_->Iterate(AddToSnapshot, s); |
461 return s; | 555 return s; |
462 } | 556 } |
463 | 557 |
464 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 558 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { |
465 s->~Snapshot(); | 559 s->~Snapshot(); |
466 dealloc_(s); | 560 dealloc_(s); |
467 } | 561 } |
468 | 562 |
469 // Callback from TakeSnapshot; adds a single entry to snapshot | 563 // Callback from TakeSnapshot; adds a single entry to snapshot |
470 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 564 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, |
471 Snapshot* snapshot) { | 565 Snapshot* snapshot) { |
472 snapshot->Add(ptr, *v); | 566 snapshot->Add(ptr, *v); |
473 } | 567 } |
474 | 568 |
475 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 569 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( |
476 Snapshot* base) { | 570 Snapshot* base) { |
477 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 571 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", |
478 int(total_.allocs - total_.frees), | 572 int(total_.allocs - total_.frees), |
479 int(total_.alloc_size - total_.free_size)); | 573 int(total_.alloc_size - total_.free_size)); |
480 | 574 |
481 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 575 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
482 AddNonLiveArgs args; | 576 AddNonLiveArgs args; |
483 args.dest = s; | 577 args.dest = s; |
484 args.base = base; | 578 args.base = base; |
485 allocation_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 579 alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); |
486 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 580 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", |
487 int(s->total_.allocs - s->total_.frees), | 581 int(s->total_.allocs - s->total_.frees), |
488 int(s->total_.alloc_size - s->total_.free_size)); | 582 int(s->total_.alloc_size - s->total_.free_size)); |
489 return s; | 583 return s; |
490 } | 584 } |
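
A minimal leak-check sketch built from these calls; the table pointer, the reachability walk, and the ReportLeaks argument list are assumptions made for illustration:

HeapProfileTable::Snapshot* base = table->TakeSnapshot();
// ... run the program; then flag every still-reachable object:
for (int i = 0; i < num_reachable; ++i)
  table->MarkAsLive(reachable[i]);
// Objects neither live nor in 'base' are candidate leaks. Note that
// NonLiveSnapshot() resets the live flags via AddIfNonLive().
HeapProfileTable::Snapshot* leaks = table->NonLiveSnapshot(base);
leaks->ReportLeaks("my_checker", "/tmp/leaks.heap", true);  // assumed args
table->ReleaseSnapshot(leaks);
table->ReleaseSnapshot(base);
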
491 | 585 |
492 // Information kept per unique bucket seen | 586 // Information kept per unique bucket seen |
493 struct HeapProfileTable::Snapshot::Entry { | 587 struct HeapProfileTable::Snapshot::Entry { |
494 int count; | 588 int count; |
495 int bytes; | 589 int bytes; |
(...skipping 99 matching lines...) |
595 char* unused) { | 689 char* unused) { |
596 // Perhaps also log the allocation stack trace (unsymbolized) | 690 // Perhaps also log the allocation stack trace (unsymbolized) |
597 // on this line in case somebody finds it useful. | 691 // on this line in case somebody finds it useful. |
598 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 692 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
599 } | 693 } |
600 | 694 |
601 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 695 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
602 char unused; | 696 char unused; |
603 map_.Iterate(ReportObject, &unused); | 697 map_.Iterate(ReportObject, &unused); |
604 } | 698 } |