OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
85 | 85 |
86 DEFINE_int32(heap_check_max_leaks, | 86 DEFINE_int32(heap_check_max_leaks, |
87 EnvToInt("HEAP_CHECK_MAX_LEAKS", 20), | 87 EnvToInt("HEAP_CHECK_MAX_LEAKS", 20), |
88 "The maximum number of leak reports to print."); | 88 "The maximum number of leak reports to print."); |
89 | 89 |
90 //---------------------------------------------------------------------- | 90 //---------------------------------------------------------------------- |
91 | 91 |
92 // header of the dumped heap profile | 92 // header of the dumped heap profile |
93 static const char kProfileHeader[] = "heap profile: "; | 93 static const char kProfileHeader[] = "heap profile: "; |
94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; | 94 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n"; |
95 #if defined(PROFILING_ALLOCATED_TYPE) | |
96 static const char kAllocatedTypeStatsHeader[] = "allocated type statistics:\n"; | |
jar (doing other things)
2012/08/20 21:59:09
nit: globals are prefixed by g_
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
It follows the other "static const" variables in this file.
| |
97 #endif // PROFILING_ALLOCATED_TYPE | |
95 | 98 |
96 //---------------------------------------------------------------------- | 99 //---------------------------------------------------------------------- |
97 | 100 |
98 const char HeapProfileTable::kFileExt[] = ".heap"; | 101 const char HeapProfileTable::kFileExt[] = ".heap"; |
99 | 102 |
100 //---------------------------------------------------------------------- | 103 //---------------------------------------------------------------------- |
101 | 104 |
102 // Size for alloc_table_ and mmap_table_. | 105 // Size for alloc_table_ and mmap_table_. |
103 static const int kHashTableSize = 179999; | 106 static const int kHashTableSize = 179999; |
104 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 107 /*static*/ const int HeapProfileTable::kMaxStackDepth; |
(...skipping 306 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
411 RawFD fd = RawOpenForWriting(file_name); | 414 RawFD fd = RawOpenForWriting(file_name); |
412 if (fd == kIllegalRawFD) { | 415 if (fd == kIllegalRawFD) { |
413 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); | 416 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); |
414 return; | 417 return; |
415 } | 418 } |
416 const DumpMarkedArgs args(fd, mark); | 419 const DumpMarkedArgs args(fd, mark); |
417 alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); | 420 alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); |
418 RawClose(fd); | 421 RawClose(fd); |
419 } | 422 } |
420 | 423 |
424 #if defined(PROFILING_ALLOCATED_TYPE) | |
425 void HeapProfileTable::DumpAllocatedTypeStatistics( | |
426 const char* file_name) const { | |
427 AddressMap<AllocatedTypeCount>* type_size_map; | |
428 type_size_map = new(alloc_(sizeof(AddressMap<AllocatedTypeCount>))) | |
429 AddressMap<AllocatedTypeCount>(alloc_, dealloc_); | |
430 | |
431 alloc_address_map_->Iterate(CountUpAllocatedTypeIterator, type_size_map); | |
432 | |
433 RawFD fd = RawOpenForWriting(file_name); | |
434 if (fd == kIllegalRawFD) { | |
435 RAW_LOG(ERROR, "Failed dumping allocated type statistics to %s", file_name); | |
436 return; | |
437 } | |
438 RawWrite(fd, kAllocatedTypeStatsHeader, strlen(kAllocatedTypeStatsHeader)); | |
439 const DumpArgs args(fd, NULL); | |
440 type_size_map->Iterate<const DumpArgs&>(DumpAllocatedTypeIterator, args); | |
441 RawClose(fd); | |
442 | |
443 type_size_map->~AddressMap<AllocatedTypeCount>(); | |
444 dealloc_(type_size_map); | |
445 } | |
446 #endif // PROFILING_ALLOCATED_TYPE | |
447 | |
421 void HeapProfileTable::IterateOrderedAllocContexts( | 448 void HeapProfileTable::IterateOrderedAllocContexts( |
422 AllocContextIterator callback) const { | 449 AllocContextIterator callback) const { |
423 Bucket** list = MakeSortedBucketList(); | 450 Bucket** list = MakeSortedBucketList(); |
424 AllocContextInfo info; | 451 AllocContextInfo info; |
425 for (int i = 0; i < num_alloc_buckets_; ++i) { | 452 for (int i = 0; i < num_alloc_buckets_; ++i) { |
426 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 453 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
427 info.stack_depth = list[i]->depth; | 454 info.stack_depth = list[i]->depth; |
428 info.call_stack = list[i]->stack; | 455 info.call_stack = list[i]->stack; |
429 callback(info); | 456 callback(info); |
430 } | 457 } |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
468 RAW_DCHECK(bucket_length < size, ""); | 495 RAW_DCHECK(bucket_length < size, ""); |
469 | 496 |
470 dealloc_(list); | 497 dealloc_(list); |
471 | 498 |
472 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 499 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
473 memmove(buf + bucket_length, map_start, map_length); // close the gap | 500 memmove(buf + bucket_length, map_start, map_length); // close the gap |
474 | 501 |
475 return bucket_length + map_length; | 502 return bucket_length + map_length; |
476 } | 503 } |
477 | 504 |
505 #if defined(PROFILING_ALLOCATED_TYPE) | |
506 // static | |
507 void HeapProfileTable::CountUpAllocatedTypeIterator( | |
508 const void* ptr, AllocValue* v, | |
jar (doing other things)
2012/08/20 21:59:09
nit: one param per line.
Don't use single-letter variable names.
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
Renamed it to value, but I still prefer v for cons
| |
509 AddressMap<AllocatedTypeCount>* type_size_map) { | |
510 const std::type_info* type = ::LookupAllocatedType(ptr); | |
511 | |
512 const void* key; | |
513 if (type) { | |
514 // In new abi, type_info's NTBS is unique. | |
jar (doing other things)
2012/08/20 21:59:09
Use words rather than abbreviations in comments.
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
Done.
| |
515 key = type->name(); | |
516 } else { | |
517 key = NULL; | |
518 } | |
519 | |
520 AllocatedTypeCount* count = type_size_map->FindMutable(key); | |
521 if (count) { | |
522 count->bytes += v->bytes; | |
523 ++count->objects; | |
524 } else { | |
525 type_size_map->Insert(key, AllocatedTypeCount(v->bytes, 1)); | |
526 } | |
527 } | |
528 | |
529 // static | |
530 void HeapProfileTable::DumpAllocatedTypeIterator( | |
531 const void* ptr, AllocatedTypeCount* count, const DumpArgs& args) { | |
jar (doing other things)
2012/08/20 21:59:09
nit: one arg per line, wrap at paren
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
Done.
| |
532 char buf[1024]; | |
533 int len; | |
534 const char* mangled_type_name = reinterpret_cast<const char*>(ptr); | |
jar (doing other things)
2012/08/20 21:59:09
nit: prefer static_cast
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
Thanks for the good catch. My mistake.
| |
535 if (mangled_type_name == NULL) { | |
536 len = snprintf(buf, sizeof(buf), "%6d: %8"PRId64" @ (no_typeinfo)\n", | |
537 count->objects, count->bytes); | |
538 } else { | |
539 len = snprintf(buf, sizeof(buf), "%6d: %8"PRId64" @ %s\n", | |
540 count->objects, count->bytes, mangled_type_name); | |
jar (doing other things)
2012/08/20 21:59:09
Use a ternary expression, and you won't have to repeat the snprintf call.
Dai Mikurube (NOT FULLTIME)
2012/08/21 04:45:44
Done.
| |
541 } | |
542 RawWrite(args.fd, buf, len); | |
543 } | |
544 #endif // PROFILING_ALLOCATED_TYPE | |
545 | |
478 inline | 546 inline |
479 void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v, | 547 void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v, |
480 const DumpArgs& args) { | 548 const DumpArgs& args) { |
481 if (v->live()) { | 549 if (v->live()) { |
482 v->set_live(false); | 550 v->set_live(false); |
483 return; | 551 return; |
484 } | 552 } |
485 if (v->ignore()) { | 553 if (v->ignore()) { |
486 return; | 554 return; |
487 } | 555 } |
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
737 char* unused) { | 805 char* unused) { |
738 // Perhaps also log the allocation stack trace (unsymbolized) | 806 // Perhaps also log the allocation stack trace (unsymbolized) |
739 // on this line in case somebody finds it useful. | 807 // on this line in case somebody finds it useful. |
740 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 808 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
741 } | 809 } |
742 | 810 |
743 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 811 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
744 char unused; | 812 char unused; |
745 map_.Iterate(ReportObject, &unused); | 813 map_.Iterate(ReportObject, &unused); |
746 } | 814 } |
OLD | NEW |