OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 84 matching lines...)
95 #if defined(TYPE_PROFILING) | 95 #if defined(TYPE_PROFILING) |
96 static const char kTypeProfileStatsHeader[] = "type statistics:\n"; | 96 static const char kTypeProfileStatsHeader[] = "type statistics:\n"; |
97 #endif // defined(TYPE_PROFILING) | 97 #endif // defined(TYPE_PROFILING) |
98 | 98 |
99 //---------------------------------------------------------------------- | 99 //---------------------------------------------------------------------- |
100 | 100 |
101 const char HeapProfileTable::kFileExt[] = ".heap"; | 101 const char HeapProfileTable::kFileExt[] = ".heap"; |
102 | 102 |
103 //---------------------------------------------------------------------- | 103 //---------------------------------------------------------------------- |
104 | 104 |
105 // Size for alloc_table_ and mmap_table_. | 105 static const int kHashTableSize = 179999; // Size for bucket_table_. |
106 static const int kHashTableSize = 179999; | |
107 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 106 /*static*/ const int HeapProfileTable::kMaxStackDepth; |
108 | 107 |
109 //---------------------------------------------------------------------- | 108 //---------------------------------------------------------------------- |
110 | 109 |
111 // We strip out a different number of stack frames in debug mode | 110 // We strip out a different number of stack frames in debug mode |
112 // because less inlining happens in that case. | 111 // because less inlining happens in that case. |
113 #ifdef NDEBUG | 112 #ifdef NDEBUG |
114 static const int kStripFrames = 2; | 113 static const int kStripFrames = 2; |
115 #else | 114 #else |
116 static const int kStripFrames = 3; | 115 static const int kStripFrames = 3; |
117 #endif | 116 #endif |
118 | 117 |
119 // For sorting Stats or Buckets by in-use space | 118 // For sorting Stats or Buckets by in-use space |
120 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 119 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, |
121 HeapProfileTable::Stats* b) { | 120 HeapProfileTable::Stats* b) { |
122 // Return true iff "a" has more allocated space than "b" | 121 // Return true iff "a" has more allocated space than "b" |
123 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 122 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); |
124 } | 123 } |
125 | 124 |
126 //---------------------------------------------------------------------- | 125 //---------------------------------------------------------------------- |
127 | 126 |
128 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 127 HeapProfileTable::HeapProfileTable(Allocator alloc, |
| 128 DeAllocator dealloc, |
| 129 bool profile_mmap) |
129 : alloc_(alloc), | 130 : alloc_(alloc), |
130 dealloc_(dealloc), | 131 dealloc_(dealloc), |
131 num_alloc_buckets_(0), | 132 bucket_table_(NULL), |
132 mmap_table_(NULL), | 133 profile_mmap_(profile_mmap), |
133 num_available_mmap_buckets_(0), | 134 num_buckets_(0), |
134 mmap_address_map_(NULL) { | 135 address_map_(NULL) { |
135 // Initialize the overall profile stats. | 136 // Make a hash table for buckets. |
| 137 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); |
| 138 bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes)); |
| 139 memset(bucket_table_, 0, table_bytes); |
| 140 |
| 141 // Make an allocation map. |
| 142 address_map_ = |
| 143 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); |
| 144 |
| 145 // Initialize. |
136 memset(&total_, 0, sizeof(total_)); | 146 memset(&total_, 0, sizeof(total_)); |
137 | 147 num_buckets_ = 0; |
138 // Make the malloc table. | |
139 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); | |
140 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); | |
141 memset(alloc_table_, 0, alloc_table_bytes); | |
142 | |
143 // Make malloc and mmap allocation maps. | |
144 alloc_address_map_ = | |
145 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
146 } | 148 } |
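
A note on the construction pattern above: the table builds its own members with the caller-supplied Allocator/DeAllocator and placement new, so nothing goes through global operator new/malloc. A minimal sketch of that pattern, with a made-up malloc-backed allocator pair and a made-up Widget type purely for illustration:

#include <cstdlib>
#include <new>

typedef void* (*Allocator)(size_t size);
typedef void  (*DeAllocator)(void* ptr);

static void* MyAlloc(size_t n) { return malloc(n); }   // stand-in for alloc_
static void  MyDealloc(void* p) { free(p); }           // stand-in for dealloc_

struct Widget {
  explicit Widget(int v) : value(v) {}
  int value;
};

int main() {
  Allocator alloc_fn = MyAlloc;
  DeAllocator dealloc_fn = MyDealloc;
  // Construct in raw memory obtained from the profiler's own allocator...
  Widget* w = new (alloc_fn(sizeof(Widget))) Widget(42);
  // ...and tear down explicitly, mirroring ~HeapProfileTable() below.
  w->~Widget();
  dealloc_fn(w);
  return 0;
}
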
147 | 149 |
148 HeapProfileTable::~HeapProfileTable() { | 150 HeapProfileTable::~HeapProfileTable() { |
149 DeallocateBucketTable(alloc_table_); | 151 // Free the allocation map. |
150 alloc_table_ = NULL; | 152 address_map_->~AllocationMap(); |
151 DeallocateBucketTable(mmap_table_); | 153 dealloc_(address_map_); |
152 mmap_table_ = NULL; | 154 address_map_ = NULL; |
153 DeallocateAllocationMap(alloc_address_map_); | 155 |
154 alloc_address_map_ = NULL; | 156 // Free the hash table. |
155 DeallocateAllocationMap(mmap_address_map_); | 157 for (int i = 0; i < kHashTableSize; i++) { |
156 mmap_address_map_ = NULL; | 158 for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) { |
| 159 Bucket* bucket = curr; |
| 160 curr = curr->next; |
| 161 dealloc_(bucket->stack); |
| 162 dealloc_(bucket); |
| 163 } |
| 164 } |
| 165 dealloc_(bucket_table_); |
| 166 bucket_table_ = NULL; |
157 } | 167 } |
158 | 168 |
159 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { | 169 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth, |
160 if (allocation != NULL) { | 170 const void* const key[]) { |
161 alloc_address_map_->~AllocationMap(); | |
162 dealloc_(allocation); | |
163 } | |
164 } | |
165 | |
166 void HeapProfileTable::DeallocateBucketTable(Bucket** table) { | |
167 if (table != NULL) { | |
168 for (int b = 0; b < kHashTableSize; b++) { | |
169 for (Bucket* x = table[b]; x != 0; /**/) { | |
170 Bucket* b = x; | |
171 x = x->next; | |
172 dealloc_(b->stack); | |
173 dealloc_(b); | |
174 } | |
175 } | |
176 dealloc_(table); | |
177 } | |
178 } | |
179 | |
180 HeapProfileTable::Bucket* HeapProfileTable::GetBucket( | |
181 int depth, const void* const key[], Bucket** table, | |
182 int* bucket_count) { | |
183 // Make hash-value | 171 // Make hash-value |
184 uintptr_t h = 0; | 172 uintptr_t h = 0; |
185 for (int i = 0; i < depth; i++) { | 173 for (int i = 0; i < depth; i++) { |
186 h += reinterpret_cast<uintptr_t>(key[i]); | 174 h += reinterpret_cast<uintptr_t>(key[i]); |
187 h += h << 10; | 175 h += h << 10; |
188 h ^= h >> 6; | 176 h ^= h >> 6; |
189 } | 177 } |
190 h += h << 3; | 178 h += h << 3; |
191 h ^= h >> 11; | 179 h ^= h >> 11; |
192 | 180 |
193 // Lookup stack trace in table | 181 // Lookup stack trace in table |
194 unsigned int buck = ((unsigned int) h) % kHashTableSize; | 182 unsigned int buck = ((unsigned int) h) % kHashTableSize; |
195 for (Bucket* b = table[buck]; b != 0; b = b->next) { | 183 for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) { |
196 if ((b->hash == h) && | 184 if ((b->hash == h) && |
197 (b->depth == depth) && | 185 (b->depth == depth) && |
198 equal(key, key + depth, b->stack)) { | 186 equal(key, key + depth, b->stack)) { |
199 return b; | 187 return b; |
200 } | 188 } |
201 } | 189 } |
202 | 190 |
203 // Create new bucket | 191 // Create new bucket |
204 const size_t key_size = sizeof(key[0]) * depth; | 192 const size_t key_size = sizeof(key[0]) * depth; |
205 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 193 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); |
206 copy(key, key + depth, kcopy); | 194 copy(key, key + depth, kcopy); |
207 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 195 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); |
208 memset(b, 0, sizeof(*b)); | 196 memset(b, 0, sizeof(*b)); |
209 b->hash = h; | 197 b->hash = h; |
210 b->depth = depth; | 198 b->depth = depth; |
211 b->stack = kcopy; | 199 b->stack = kcopy; |
212 b->next = table[buck]; | 200 b->next = bucket_table_[buck]; |
213 table[buck] = b; | 201 bucket_table_[buck] = b; |
214 if (bucket_count != NULL) { | 202 num_buckets_++; |
215 ++(*bucket_count); | |
216 } | |
217 return b; | 203 return b; |
218 } | 204 } |
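
The mixing steps in GetBucket follow the familiar one-at-a-time hashing pattern over the raw frame addresses. Pulled out as a standalone free function (an illustrative sketch; HashStack is a made-up name, not part of the change):

#include <stdint.h>

static uintptr_t HashStack(const void* const key[], int depth) {
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);  // fold in one frame address
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;    // final avalanche
  h ^= h >> 11;
  return h;       // caller takes h % kHashTableSize to pick a chain
}
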
219 | 205 |
220 int HeapProfileTable::GetCallerStackTrace( | 206 int HeapProfileTable::GetCallerStackTrace( |
221 int skip_count, void* stack[kMaxStackDepth]) { | 207 int skip_count, void* stack[kMaxStackDepth]) { |
222 return MallocHook::GetCallerStackTrace( | 208 return MallocHook::GetCallerStackTrace( |
223 stack, kMaxStackDepth, kStripFrames + skip_count + 1); | 209 stack, kMaxStackDepth, kStripFrames + skip_count + 1); |
224 } | 210 } |
225 | 211 |
226 void HeapProfileTable::RecordAlloc( | 212 void HeapProfileTable::RecordAlloc( |
227 const void* ptr, size_t bytes, int stack_depth, | 213 const void* ptr, size_t bytes, int stack_depth, |
228 const void* const call_stack[]) { | 214 const void* const call_stack[]) { |
229 Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_, | 215 Bucket* b = GetBucket(stack_depth, call_stack); |
230 &num_alloc_buckets_); | |
231 b->allocs++; | 216 b->allocs++; |
232 b->alloc_size += bytes; | 217 b->alloc_size += bytes; |
233 total_.allocs++; | 218 total_.allocs++; |
234 total_.alloc_size += bytes; | 219 total_.alloc_size += bytes; |
235 | 220 |
236 AllocValue v; | 221 AllocValue v; |
237 v.set_bucket(b); // also did set_live(false); set_ignore(false) | 222 v.set_bucket(b); // also did set_live(false); set_ignore(false) |
238 v.bytes = bytes; | 223 v.bytes = bytes; |
239 alloc_address_map_->Insert(ptr, v); | 224 address_map_->Insert(ptr, v); |
240 } | 225 } |
241 | 226 |
242 void HeapProfileTable::RecordFree(const void* ptr) { | 227 void HeapProfileTable::RecordFree(const void* ptr) { |
243 AllocValue v; | 228 AllocValue v; |
244 if (alloc_address_map_->FindAndRemove(ptr, &v)) { | 229 if (address_map_->FindAndRemove(ptr, &v)) { |
245 Bucket* b = v.bucket(); | 230 Bucket* b = v.bucket(); |
246 b->frees++; | 231 b->frees++; |
247 b->free_size += v.bytes; | 232 b->free_size += v.bytes; |
248 total_.frees++; | 233 total_.frees++; |
249 total_.free_size += v.bytes; | 234 total_.free_size += v.bytes; |
250 } | 235 } |
251 } | 236 } |
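
For context, RecordAlloc and RecordFree are driven from the allocator hooks; a simplified sketch of that wiring is shown below (the hook and variable names are assumptions, and the profiler lock and error handling are omitted; the real glue lives in the profiler's hook code, not in this file):

static HeapProfileTable* heap_profile;  // assumed to be created elsewhere

static void NewHook(const void* ptr, size_t size) {
  if (ptr == NULL) return;
  void* stack[HeapProfileTable::kMaxStackDepth];
  int depth = heap_profile->GetCallerStackTrace(0, stack);
  heap_profile->RecordAlloc(ptr, size, depth, stack);
}

static void DeleteHook(const void* ptr) {
  if (ptr != NULL) heap_profile->RecordFree(ptr);
}
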
252 | 237 |
253 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 238 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { |
254 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 239 const AllocValue* alloc_value = address_map_->Find(ptr); |
255 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 240 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
256 return alloc_value != NULL; | 241 return alloc_value != NULL; |
257 } | 242 } |
258 | 243 |
259 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 244 bool HeapProfileTable::FindAllocDetails(const void* ptr, |
260 AllocInfo* info) const { | 245 AllocInfo* info) const { |
261 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 246 const AllocValue* alloc_value = address_map_->Find(ptr); |
262 if (alloc_value != NULL) { | 247 if (alloc_value != NULL) { |
263 info->object_size = alloc_value->bytes; | 248 info->object_size = alloc_value->bytes; |
264 info->call_stack = alloc_value->bucket()->stack; | 249 info->call_stack = alloc_value->bucket()->stack; |
265 info->stack_depth = alloc_value->bucket()->depth; | 250 info->stack_depth = alloc_value->bucket()->depth; |
266 } | 251 } |
267 return alloc_value != NULL; | 252 return alloc_value != NULL; |
268 } | 253 } |
269 | 254 |
270 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 255 bool HeapProfileTable::FindInsideAlloc(const void* ptr, |
271 size_t max_size, | 256 size_t max_size, |
272 const void** object_ptr, | 257 const void** object_ptr, |
273 size_t* object_size) const { | 258 size_t* object_size) const { |
274 const AllocValue* alloc_value = | 259 const AllocValue* alloc_value = |
275 alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 260 address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); |
276 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 261 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
277 return alloc_value != NULL; | 262 return alloc_value != NULL; |
278 } | 263 } |
279 | 264 |
280 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 265 bool HeapProfileTable::MarkAsLive(const void* ptr) { |
281 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 266 AllocValue* alloc = address_map_->FindMutable(ptr); |
282 if (alloc && !alloc->live()) { | 267 if (alloc && !alloc->live()) { |
283 alloc->set_live(true); | 268 alloc->set_live(true); |
284 return true; | 269 return true; |
285 } | 270 } |
286 return false; | 271 return false; |
287 } | 272 } |
288 | 273 |
289 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 274 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
290 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 275 AllocValue* alloc = address_map_->FindMutable(ptr); |
291 if (alloc) { | 276 if (alloc) { |
292 alloc->set_ignore(true); | 277 alloc->set_ignore(true); |
293 } | 278 } |
294 } | 279 } |
295 | 280 |
296 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f, | 281 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f, |
297 void* data) { | 282 void* data) { |
298 const AllocationAddressIteratorArgs args(f, data); | 283 const AllocationAddressIteratorArgs args(f, data); |
299 alloc_address_map_->Iterate<const AllocationAddressIteratorArgs&>( | 284 address_map_->Iterate<const AllocationAddressIteratorArgs&>( |
300 AllocationAddressesIterator, args); | 285 AllocationAddressesIterator, args); |
301 } | 286 } |
302 | 287 |
303 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) { | 288 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) { |
304 const MarkArgs args(mark, true); | 289 const MarkArgs args(mark, true); |
305 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); | 290 address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
306 } | 291 } |
307 | 292 |
308 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) { | 293 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) { |
309 const MarkArgs args(mark, false); | 294 const MarkArgs args(mark, false); |
310 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); | 295 address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
311 } | 296 } |
312 | 297 |
313 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. | 298 // We'd be happier using snprintfer, but we avoid it to reduce dependencies. |
314 int HeapProfileTable::UnparseBucket(const Bucket& b, | 299 int HeapProfileTable::UnparseBucket(const Bucket& b, |
315 char* buf, int buflen, int bufsize, | 300 char* buf, int buflen, int bufsize, |
316 const char* extra, | 301 const char* extra, |
317 Stats* profile_stats) { | 302 Stats* profile_stats) { |
318 if (profile_stats != NULL) { | 303 if (profile_stats != NULL) { |
319 profile_stats->allocs += b.allocs; | 304 profile_stats->allocs += b.allocs; |
320 profile_stats->alloc_size += b.alloc_size; | 305 profile_stats->alloc_size += b.alloc_size; |
(...skipping 17 matching lines...)
338 buflen += printed; | 323 buflen += printed; |
339 } | 324 } |
340 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 325 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); |
341 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 326 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
342 buflen += printed; | 327 buflen += printed; |
343 return buflen; | 328 return buflen; |
344 } | 329 } |
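
The elided middle of UnparseBucket formats the bucket's counters and stack into the usual textual heap-profile layout; roughly like the following (an assumed illustration of the output, not a copy of the elided code):

//   heap profile:    105:  1445400 [   105:  1445400] @ heapprofile     <- totals, extra = " heapprofile"
//        1:      800 [     1:      800] @ 0x7fabc123 0x7fabc456         <- one line per bucket
// i.e. "<in-use objs>: <in-use bytes> [<total objs>: <total bytes>] @ <extra or stack>".
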
345 | 330 |
346 HeapProfileTable::Bucket** | 331 HeapProfileTable::Bucket** |
347 HeapProfileTable::MakeSortedBucketList() const { | 332 HeapProfileTable::MakeSortedBucketList() const { |
348 Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * | 333 Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); |
349 (num_alloc_buckets_ + num_available_mmap_buckets_))); | |
350 | 334 |
351 RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, ""); | 335 int bucket_count = 0; |
352 | 336 for (int i = 0; i < kHashTableSize; i++) { |
353 int n = 0; | 337 for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) { |
354 | 338 list[bucket_count++] = curr; |
355 for (int b = 0; b < kHashTableSize; b++) { | |
356 for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) { | |
357 list[n++] = x; | |
358 } | 339 } |
359 } | 340 } |
360 RAW_DCHECK(n == num_alloc_buckets_, ""); | 341 RAW_DCHECK(bucket_count == num_buckets_, ""); |
361 | 342 |
362 if (mmap_table_ != NULL) { | 343 sort(list, list + num_buckets_, ByAllocatedSpace); |
363 for (int b = 0; b < kHashTableSize; b++) { | |
364 for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) { | |
365 list[n++] = x; | |
366 } | |
367 } | |
368 } | |
369 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); | |
370 | |
371 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, | |
372 ByAllocatedSpace); | |
373 | 344 |
374 return list; | 345 return list; |
375 } | 346 } |
376 | 347 |
377 void HeapProfileTable::RefreshMMapData(Allocator mmap_alloc, | |
378 DeAllocator mmap_dealloc) { | |
379 // Make the table | |
380 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); | |
381 if (mmap_table_ == NULL) { | |
382 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); | |
383 memset(mmap_table_, 0, mmap_table_bytes); | |
384 } | |
385 num_available_mmap_buckets_ = 0; | |
386 | |
387 ClearMMapData(); | |
388 mmap_address_map_ = new(alloc_(sizeof(AllocationMap))) | |
389 AllocationMap(mmap_alloc, mmap_dealloc); | |
390 | |
391 MemoryRegionMap::LockHolder l; | |
392 for (MemoryRegionMap::RegionIterator r = | |
393 MemoryRegionMap::BeginRegionLocked(); | |
394 r != MemoryRegionMap::EndRegionLocked(); ++r) { | |
395 Bucket* b = | |
396 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); | |
397 if (b->alloc_size == 0) { | |
398 num_available_mmap_buckets_ += 1; | |
399 } | |
400 b->allocs += 1; | |
401 b->alloc_size += r->end_addr - r->start_addr; | |
402 | |
403 AllocValue v; | |
404 v.set_bucket(b); | |
405 v.bytes = r->end_addr - r->start_addr; | |
406 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); | |
407 } | |
408 } | |
409 | |
410 void HeapProfileTable::ClearMMapData() { | |
411 if (mmap_address_map_ == NULL) return; | |
412 | |
413 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | |
414 mmap_address_map_->~AllocationMap(); | |
415 dealloc_(mmap_address_map_); | |
416 mmap_address_map_ = NULL; | |
417 } | |
418 | |
419 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark, | 348 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark, |
420 const char* file_name) { | 349 const char* file_name) { |
421 RawFD fd = RawOpenForWriting(file_name); | 350 RawFD fd = RawOpenForWriting(file_name); |
422 if (fd == kIllegalRawFD) { | 351 if (fd == kIllegalRawFD) { |
423 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); | 352 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); |
424 return; | 353 return; |
425 } | 354 } |
426 const DumpMarkedArgs args(fd, mark); | 355 const DumpMarkedArgs args(fd, mark); |
427 alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); | 356 address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); |
428 RawClose(fd); | 357 RawClose(fd); |
429 } | 358 } |
430 | 359 |
431 #if defined(TYPE_PROFILING) | 360 #if defined(TYPE_PROFILING) |
432 void HeapProfileTable::DumpTypeStatistics(const char* file_name) const { | 361 void HeapProfileTable::DumpTypeStatistics(const char* file_name) const { |
433 RawFD fd = RawOpenForWriting(file_name); | 362 RawFD fd = RawOpenForWriting(file_name); |
434 if (fd == kIllegalRawFD) { | 363 if (fd == kIllegalRawFD) { |
435 RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name); | 364 RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name); |
436 return; | 365 return; |
437 } | 366 } |
438 | 367 |
439 AddressMap<TypeCount>* type_size_map; | 368 AddressMap<TypeCount>* type_size_map; |
440 type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>))) | 369 type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>))) |
441 AddressMap<TypeCount>(alloc_, dealloc_); | 370 AddressMap<TypeCount>(alloc_, dealloc_); |
442 alloc_address_map_->Iterate(TallyTypesItererator, type_size_map); | 371 address_map_->Iterate(TallyTypesItererator, type_size_map); |
443 | 372 |
444 RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader)); | 373 RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader)); |
445 const DumpArgs args(fd, NULL); | 374 const DumpArgs args(fd, NULL); |
446 type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args); | 375 type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args); |
447 RawClose(fd); | 376 RawClose(fd); |
448 | 377 |
449 type_size_map->~AddressMap<TypeCount>(); | 378 type_size_map->~AddressMap<TypeCount>(); |
450 dealloc_(type_size_map); | 379 dealloc_(type_size_map); |
451 } | 380 } |
452 #endif // defined(TYPE_PROFILING) | 381 #endif // defined(TYPE_PROFILING) |
453 | 382 |
454 void HeapProfileTable::IterateOrderedAllocContexts( | 383 void HeapProfileTable::IterateOrderedAllocContexts( |
455 AllocContextIterator callback) const { | 384 AllocContextIterator callback) const { |
456 Bucket** list = MakeSortedBucketList(); | 385 Bucket** list = MakeSortedBucketList(); |
457 AllocContextInfo info; | 386 AllocContextInfo info; |
458 for (int i = 0; i < num_alloc_buckets_; ++i) { | 387 for (int i = 0; i < num_buckets_; ++i) { |
459 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 388 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
460 info.stack_depth = list[i]->depth; | 389 info.stack_depth = list[i]->depth; |
461 info.call_stack = list[i]->stack; | 390 info.call_stack = list[i]->stack; |
462 callback(info); | 391 callback(info); |
463 } | 392 } |
464 dealloc_(list); | 393 dealloc_(list); |
465 } | 394 } |
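
A small example of a callback that could be handed to IterateOrderedAllocContexts (a sketch; PrintContext is a made-up name, and it assumes AllocContextInfo exposes the Stats counters plus stack_depth, and that the iterator typedef takes a const AllocContextInfo&):

#include <stdio.h>

static void PrintContext(const HeapProfileTable::AllocContextInfo& info) {
  fprintf(stderr, "%lld bytes live in %d objects (stack depth %d)\n",
          static_cast<long long>(info.alloc_size - info.free_size),
          static_cast<int>(info.allocs - info.frees),
          info.stack_depth);
}

// Usage: table->IterateOrderedAllocContexts(PrintContext);
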
466 | 395 |
467 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 396 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { |
468 Bucket** list = MakeSortedBucketList(); | 397 Bucket** list = MakeSortedBucketList(); |
(...skipping 11 matching lines...)
480 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 409 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); |
481 RAW_DCHECK(map_length <= size, ""); | 410 RAW_DCHECK(map_length <= size, ""); |
482 char* const map_start = buf + size - map_length; // move to end | 411 char* const map_start = buf + size - map_length; // move to end |
483 memmove(map_start, buf, map_length); | 412 memmove(map_start, buf, map_length); |
484 size -= map_length; | 413 size -= map_length; |
485 | 414 |
486 Stats stats; | 415 Stats stats; |
487 memset(&stats, 0, sizeof(stats)); | 416 memset(&stats, 0, sizeof(stats)); |
488 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 417 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); |
489 if (bucket_length < 0 || bucket_length >= size) return 0; | 418 if (bucket_length < 0 || bucket_length >= size) return 0; |
490 Bucket total_with_mmap(total_); | 419 bucket_length = UnparseBucket(total_, buf, bucket_length, size, |
491 if (mmap_table_ != NULL) { | 420 " heapprofile", &stats); |
492 total_with_mmap.alloc_size += MemoryRegionMap::MapSize(); | 421 |
493 total_with_mmap.free_size += MemoryRegionMap::UnmapSize(); | 422 // Dump the mmap list first. |
| 423 if (profile_mmap_) { |
| 424 BufferArgs buffer(buf, bucket_length, size); |
| 425 MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer); |
| 426 bucket_length = buffer.buflen; |
494 } | 427 } |
495 bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size, | 428 |
496 " heapprofile", &stats); | 429 for (int i = 0; i < num_buckets_; i++) { |
497 for (int i = 0; i < num_alloc_buckets_; i++) { | |
498 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 430 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", |
499 &stats); | 431 &stats); |
500 } | 432 } |
501 RAW_DCHECK(bucket_length < size, ""); | 433 RAW_DCHECK(bucket_length < size, ""); |
502 | 434 |
503 dealloc_(list); | 435 dealloc_(list); |
504 | 436 |
505 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 437 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
506 memmove(buf + bucket_length, map_start, map_length); // close the gap | 438 memmove(buf + bucket_length, map_start, map_length); // close the gap |
507 | 439 |
508 return bucket_length + map_length; | 440 return bucket_length + map_length; |
509 } | 441 } |
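
FillOrderedProfile stages the /proc/self/maps text at the far end of the buffer, writes the bucket dump at the front, then closes the gap with one memmove. The idea in isolation (a sketch with made-up section contents and the FillTwoSections name invented here; size checks are omitted and the buffer is assumed comfortably large):

#include <cstdio>
#include <cstring>

static int FillTwoSections(char* buf, int size) {
  // 1. Render the trailing section first and park it at the end of the buffer.
  int tail_len = snprintf(buf, size, "MAPPED_LIBRARIES:\n<maps text>\n");
  char* const tail_start = buf + size - tail_len;
  memmove(tail_start, buf, tail_len);
  size -= tail_len;

  // 2. Render the leading section into the space that remains at the front.
  int head_len = snprintf(buf, size, "heap profile: <totals and buckets>\n");

  // 3. Close the gap so the output is head immediately followed by tail.
  memmove(buf + head_len, tail_start, tail_len);
  return head_len + tail_len;
}
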
510 | 442 |
| 443 // static |
| 444 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket, |
| 445 BufferArgs* args) { |
| 446 args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize, |
| 447 "", NULL); |
| 448 } |
| 449 |
511 #if defined(TYPE_PROFILING) | 450 #if defined(TYPE_PROFILING) |
512 // static | 451 // static |
513 void HeapProfileTable::TallyTypesItererator( | 452 void HeapProfileTable::TallyTypesItererator( |
514 const void* ptr, | 453 const void* ptr, |
515 AllocValue* value, | 454 AllocValue* value, |
516 AddressMap<TypeCount>* type_size_map) { | 455 AddressMap<TypeCount>* type_size_map) { |
517 const std::type_info* type = LookupType(ptr); | 456 const std::type_info* type = LookupType(ptr); |
518 | 457 |
519 const void* key = NULL; | 458 const void* key = NULL; |
520 if (type) | 459 if (type) |
(...skipping 70 matching lines...)
591 } | 530 } |
592 | 531 |
593 inline | 532 inline |
594 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v, | 533 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v, |
595 const MarkArgs& args) { | 534 const MarkArgs& args) { |
596 if (!args.mark_all && v->mark() != UNMARKED) | 535 if (!args.mark_all && v->mark() != UNMARKED) |
597 return; | 536 return; |
598 v->set_mark(args.mark); | 537 v->set_mark(args.mark); |
599 } | 538 } |
600 | 539 |
601 inline void HeapProfileTable::ZeroBucketCountsIterator( | |
602 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | |
603 Bucket* b = v->bucket(); | |
604 if (b != NULL) { | |
605 b->allocs = 0; | |
606 b->alloc_size = 0; | |
607 b->free_size = 0; | |
608 b->frees = 0; | |
609 } | |
610 } | |
611 | |
612 // Callback from NonLiveSnapshot; adds entry to arg->dest | 540 // Callback from NonLiveSnapshot; adds entry to arg->dest |
614 // if the entry is not live and is not present in arg->base. | 541 // if the entry is not live and is not present in arg->base. |
614 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 542 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, |
615 AddNonLiveArgs* arg) { | 543 AddNonLiveArgs* arg) { |
616 if (v->live()) { | 544 if (v->live()) { |
617 v->set_live(false); | 545 v->set_live(false); |
618 } else { | 546 } else { |
619 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 547 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { |
620 // Present in arg->base, so do not save | 548 // Present in arg->base, so do not save |
621 } else { | 549 } else { |
(...skipping 46 matching lines...)
668 } | 596 } |
669 } | 597 } |
670 globfree(&g); | 598 globfree(&g); |
671 #else /* HAVE_GLOB_H */ | 599 #else /* HAVE_GLOB_H */ |
672 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 600 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); |
673 #endif | 601 #endif |
674 } | 602 } |
675 | 603 |
676 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 604 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { |
677 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 605 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
678 alloc_address_map_->Iterate(AddToSnapshot, s); | 606 address_map_->Iterate(AddToSnapshot, s); |
679 return s; | 607 return s; |
680 } | 608 } |
681 | 609 |
682 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 610 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { |
683 s->~Snapshot(); | 611 s->~Snapshot(); |
684 dealloc_(s); | 612 dealloc_(s); |
685 } | 613 } |
686 | 614 |
687 // Callback from TakeSnapshot; adds a single entry to snapshot | 615 // Callback from TakeSnapshot; adds a single entry to snapshot |
688 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 616 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, |
689 Snapshot* snapshot) { | 617 Snapshot* snapshot) { |
690 snapshot->Add(ptr, *v); | 618 snapshot->Add(ptr, *v); |
691 } | 619 } |
692 | 620 |
693 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 621 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( |
694 Snapshot* base) { | 622 Snapshot* base) { |
695 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 623 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", |
696 int(total_.allocs - total_.frees), | 624 int(total_.allocs - total_.frees), |
697 int(total_.alloc_size - total_.free_size)); | 625 int(total_.alloc_size - total_.free_size)); |
698 | 626 |
699 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 627 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
700 AddNonLiveArgs args; | 628 AddNonLiveArgs args; |
701 args.dest = s; | 629 args.dest = s; |
702 args.base = base; | 630 args.base = base; |
703 alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 631 address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); |
704 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 632 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", |
705 int(s->total_.allocs - s->total_.frees), | 633 int(s->total_.allocs - s->total_.frees), |
706 int(s->total_.alloc_size - s->total_.free_size)); | 634 int(s->total_.alloc_size - s->total_.free_size)); |
707 return s; | 635 return s; |
708 } | 636 } |
709 | 637 |
710 // Information kept per unique bucket seen | 638 // Information kept per unique bucket seen |
711 struct HeapProfileTable::Snapshot::Entry { | 639 struct HeapProfileTable::Snapshot::Entry { |
712 int count; | 640 int count; |
713 int bytes; | 641 int bytes; |
(...skipping 99 matching lines...)
813 char* unused) { | 741 char* unused) { |
814 // Perhaps also log the allocation stack trace (unsymbolized) | 742 // Perhaps also log the allocation stack trace (unsymbolized) |
815 // on this line in case somebody finds it useful. | 743 // on this line in case somebody finds it useful. |
816 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 744 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
817 } | 745 } |
818 | 746 |
819 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 747 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
820 char unused; | 748 char unused; |
821 map_.Iterate(ReportObject, &unused); | 749 map_.Iterate(ReportObject, &unused); |
822 } | 750 } |