OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 198 matching lines...) | |
209 // (NULL if no need for dumping yet) | 209 // (NULL if no need for dumping yet) |
210 static int dump_count = 0; // How many dumps so far | 210 static int dump_count = 0; // How many dumps so far |
211 static int64 last_dump_alloc = 0; // alloc_size at the last dump | 211 static int64 last_dump_alloc = 0; // alloc_size at the last dump |
212 static int64 last_dump_free = 0; // free_size at the last dump | 212 static int64 last_dump_free = 0; // free_size at the last dump |
213 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 213 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
214 static int64 last_dump_time = 0; // The time of the last dump | 214 static int64 last_dump_time = 0; // The time of the last dump |
215 | 215 |
216 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 216 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
217 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler | 217 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler |
218 | 218 |
219 // A callback an application can use to generate its own "stacks". | |
220 static PseudoStackGenerator pseudo_stack_generator = NULL; | |
221 | |
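A note on the callback contract: the exact PseudoStackGenerator typedef is not shown in this hunk, but the call site in PseudoStackRecordAlloc below implies it receives a void** buffer of up to HeapProfileTable::kMaxStackDepth slots and returns the number of entries filled. A minimal client-side sketch under that assumption (the frame names are purely illustrative):

  // Hypothetical generator: fills |stack| with opaque pseudo-frame
  // markers (e.g. pointers to interned trace-event name strings),
  // innermost frame first, and returns the depth.
  static int MyPseudoStackGenerator(void** stack) {
    int depth = 0;
    stack[depth++] = const_cast<char*>("MessageLoop::Run");  // leaf
    stack[depth++] = const_cast<char*>("BrowserMain");       // root
    return depth;  // must not exceed HeapProfileTable::kMaxStackDepth
  }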
219 //---------------------------------------------------------------------- | 222 //---------------------------------------------------------------------- |
220 // Profile generation | 223 // Profile generation |
221 //---------------------------------------------------------------------- | 224 //---------------------------------------------------------------------- |
222 | 225 |
223 // Input must be a buffer of size at least 1MB. | 226 // Input must be a buffer of size at least 1MB. |
224 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 227 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
225 // We used to be smarter about estimating the required memory and | 228 // We used to be smarter about estimating the required memory and |
226 // then capping it to 1MB and generating the profile into that. | 229 // then capping it to 1MB and generating the profile into that. |
227 if (buf == NULL || buflen < 1) | 230 if (buf == NULL || buflen < 1) |
228 return NULL; | 231 return NULL; |
229 | 232 |
230 RAW_DCHECK(heap_lock.IsHeld(), ""); | 233 RAW_DCHECK(heap_lock.IsHeld(), ""); |
231 int bytes_written = 0; | 234 int bytes_written = 0; |
232 if (is_on) { | 235 if (is_on) { |
233 HeapProfileTable::Stats const stats = heap_profile->total(); | 236 HeapProfileTable::Stats const stats = heap_profile->total(); |
234 (void)stats; // avoid an unused-variable warning in non-debug mode. | 237 (void)stats; // avoid an unused-variable warning in non-debug mode. |
235 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); | 238 if (pseudo_stack_generator) { |
239 bytes_written = heap_profile->FillPseudoStackProfile(buf, buflen - 1); | |
240 } else { | |
241 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); | |
242 } | |
236 // FillOrderedProfile should not reduce the set of active mmap-ed regions, | 243 // FillOrderedProfile should not reduce the set of active mmap-ed regions, |
237 // hence MemoryRegionMap will let us remove everything we've added above: | 244 // hence MemoryRegionMap will let us remove everything we've added above: |
238 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); | 245 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); |
239 // if this fails, FillOrderedProfile somehow removed | 246 // if this fails, FillOrderedProfile somehow removed |
240 // more than we have added. | 247 // more than we have added. |
241 } | 248 } |
242 buf[bytes_written] = '\0'; | 249 buf[bytes_written] = '\0'; |
243 RAW_DCHECK(bytes_written == strlen(buf), ""); | 250 RAW_DCHECK(bytes_written == strlen(buf), ""); |
244 | 251 |
245 return buf; | 252 return buf; |
(...skipping 123 matching lines...) | |
369 // Take the stack trace outside the critical section. | 376 // Take the stack trace outside the critical section. |
370 void* stack[HeapProfileTable::kMaxStackDepth]; | 377 void* stack[HeapProfileTable::kMaxStackDepth]; |
371 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); | 378 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); |
372 SpinLockHolder l(&heap_lock); | 379 SpinLockHolder l(&heap_lock); |
373 if (is_on) { | 380 if (is_on) { |
374 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | 381 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
375 MaybeDumpProfileLocked(); | 382 MaybeDumpProfileLocked(); |
376 } | 383 } |
377 } | 384 } |
378 | 385 |
386 // Record an allocation in the profile. | |
387 static void PseudoStackRecordAlloc(const void* ptr, | |
388 size_t bytes, | |
389 int skip_count) { | |
Dai Mikurube (NOT FULLTIME)
2013/06/19 04:34:03
It's almost the same with RecordAlloc(). Why do w…
James Cook
2013/06/29 00:02:42
I elected to copy the function because this is per…
| |
390 // Take the stack trace outside the critical section. | |
391 void* stack[HeapProfileTable::kMaxStackDepth]; | |
392 // Generate our pseudo-stack via a callback into the client code. | |
393 int depth = (*pseudo_stack_generator)(stack); | |
394 SpinLockHolder l(&heap_lock); | |
395 if (is_on) { | |
396 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | |
397 MaybeDumpProfileLocked(); | |
398 } | |
399 } | |
400 | |
379 // Record a deallocation in the profile. | 401 // Record a deallocation in the profile. |
380 static void RecordFree(const void* ptr) { | 402 static void RecordFree(const void* ptr) { |
381 SpinLockHolder l(&heap_lock); | 403 SpinLockHolder l(&heap_lock); |
382 if (is_on) { | 404 if (is_on) { |
383 heap_profile->RecordFree(ptr); | 405 heap_profile->RecordFree(ptr); |
384 MaybeDumpProfileLocked(); | 406 MaybeDumpProfileLocked(); |
385 } | 407 } |
386 } | 408 } |
387 | 409 |
388 //---------------------------------------------------------------------- | 410 //---------------------------------------------------------------------- |
389 // Allocation/deallocation hooks for MallocHook | 411 // Allocation/deallocation hooks for MallocHook |
390 //---------------------------------------------------------------------- | 412 //---------------------------------------------------------------------- |
391 | 413 |
392 // static | 414 // static |
393 void NewHook(const void* ptr, size_t size) { | 415 void NewHook(const void* ptr, size_t size) { |
394 if (ptr != NULL) RecordAlloc(ptr, size, 0); | 416 if (ptr != NULL) RecordAlloc(ptr, size, 0); |
395 } | 417 } |
396 | 418 |
397 // static | 419 // static |
398 void DeleteHook(const void* ptr) { | 420 void DeleteHook(const void* ptr) { |
399 if (ptr != NULL) RecordFree(ptr); | 421 if (ptr != NULL) RecordFree(ptr); |
400 } | 422 } |
401 | 423 |
424 // static | |
425 void PseudoStackNewHook(const void* ptr, size_t size) { | |
Dai Mikurube (NOT FULLTIME)
2013/06/19 04:34:03
ditto.
| |
426 if (ptr != NULL) PseudoStackRecordAlloc(ptr, size, 0); | |
427 } | |
428 | |
402 // TODO(jandrews): Re-enable stack tracing | 429 // TODO(jandrews): Re-enable stack tracing |
403 #ifdef TODO_REENABLE_STACK_TRACING | 430 #ifdef TODO_REENABLE_STACK_TRACING |
404 static void RawInfoStackDumper(const char* message, void*) { | 431 static void RawInfoStackDumper(const char* message, void*) { |
405 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); | 432 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); |
406 // -1 is to chop the \n which will be added by RAW_LOG | 433 // -1 is to chop the \n which will be added by RAW_LOG |
407 } | 434 } |
408 #endif | 435 #endif |
409 | 436 |
410 static void MmapHook(const void* result, const void* start, size_t size, | 437 static void MmapHook(const void* result, const void* start, size_t size, |
411 int prot, int flags, int fd, off_t offset) { | 438 int prot, int flags, int fd, off_t offset) { |
(...skipping 112 matching lines...) | |
524 deep_profile = new(ProfilerMalloc(sizeof(DeepHeapProfile))) | 551 deep_profile = new(ProfilerMalloc(sizeof(DeepHeapProfile))) |
525 DeepHeapProfile(heap_profile, prefix); | 552 DeepHeapProfile(heap_profile, prefix); |
526 } | 553 } |
527 | 554 |
528 // We do not reset dump_count so if the user does a sequence of | 555 // We do not reset dump_count so if the user does a sequence of |
529 // HeapProfilerStart/HeapProfilerStop, we will get a continuous | 556 // HeapProfilerStart/HeapProfilerStop, we will get a continuous |
530 // sequence of profiles. | 557 // sequence of profiles. |
531 | 558 |
532 if (FLAGS_only_mmap_profile == false) { | 559 if (FLAGS_only_mmap_profile == false) { |
533 // Now set the hooks that capture new/delete and malloc/free. | 560 // Now set the hooks that capture new/delete and malloc/free. |
534 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 561 if (pseudo_stack_generator) { |
562 RAW_CHECK(MallocHook::AddNewHook(&PseudoStackNewHook), ""); | |
563 } else { | |
564 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | |
565 } | |
535 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 566 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
536 } | 567 } |
537 | 568 |
538 // Copy filename prefix | 569 // Copy filename prefix if provided. |
539 RAW_DCHECK(filename_prefix == NULL, ""); | 570 if (prefix) { |
540 const int prefix_length = strlen(prefix); | 571 RAW_DCHECK(filename_prefix == NULL, ""); |
541 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); | 572 const int prefix_length = strlen(prefix); |
542 memcpy(filename_prefix, prefix, prefix_length); | 573 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); |
Dai Mikurube (NOT FULLTIME)
2013/06/19 04:34:03
Over 80?
| |
543 filename_prefix[prefix_length] = '\0'; | 574 memcpy(filename_prefix, prefix, prefix_length); |
575 filename_prefix[prefix_length] = '\0'; | |
576 } | |
544 } | 577 } |
545 | 578 |
546 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { | 579 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { |
547 SpinLockHolder l(&heap_lock); | 580 SpinLockHolder l(&heap_lock); |
548 | 581 |
549 if (!is_on) return; | 582 if (!is_on) return; |
550 | 583 |
551 heap_profile->IterateAllocationAddresses(visitor, data); | 584 heap_profile->IterateAllocationAddresses(visitor, data); |
552 } | 585 } |
553 | 586 |
554 extern "C" int IsHeapProfilerRunning() { | 587 extern "C" int IsHeapProfilerRunning() { |
555 SpinLockHolder l(&heap_lock); | 588 SpinLockHolder l(&heap_lock); |
556 return is_on ? 1 : 0; // return an int, because C code doesn't have bool | 589 return is_on ? 1 : 0; // return an int, because C code doesn't have bool |
557 } | 590 } |
558 | 591 |
559 extern "C" void HeapProfilerStop() { | 592 extern "C" void HeapProfilerStop() { |
560 SpinLockHolder l(&heap_lock); | 593 SpinLockHolder l(&heap_lock); |
561 | 594 |
562 if (!is_on) return; | 595 if (!is_on) return; |
563 | 596 |
564 if (FLAGS_only_mmap_profile == false) { | 597 if (FLAGS_only_mmap_profile == false) { |
565 // Unset our new/delete hooks, checking they were set: | 598 // Unset our new/delete hooks, checking they were set: |
566 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); | 599 if (pseudo_stack_generator) { |
600 RAW_CHECK(MallocHook::RemoveNewHook(&PseudoStackNewHook), ""); | |
601 } else { | |
602 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); | |
603 } | |
567 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); | 604 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); |
568 } | 605 } |
569 if (FLAGS_mmap_log) { | 606 if (FLAGS_mmap_log) { |
570 // Restore mmap/sbrk hooks, checking that our hooks were set: | 607 // Restore mmap/sbrk hooks, checking that our hooks were set: |
571 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 608 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
572 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 609 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
573 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 610 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
574 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 611 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
575 } | 612 } |
576 | 613 |
(...skipping 51 matching lines...) | |
628 } | 665 } |
629 | 666 |
630 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { | 667 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { |
631 SpinLockHolder l(&heap_lock); | 668 SpinLockHolder l(&heap_lock); |
632 | 669 |
633 if (!is_on) return; | 670 if (!is_on) return; |
634 | 671 |
635 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); | 672 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); |
636 } | 673 } |
637 | 674 |
675 extern "C" void SetPseudoStackGenerator(PseudoStackGenerator callback) { | |
Dai Mikurube (NOT FULLTIME)
2013/06/19 04:34:03
The function name should contain HeapProfiler or s…
James Cook
2013/06/29 00:02:42
Good idea. Done.
| |
676 SpinLockHolder l(&heap_lock); | |
677 pseudo_stack_generator = callback; | |
678 } | |
679 | |
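Ordering note: HeapProfilerStart installs PseudoStackNewHook only if pseudo_stack_generator is already non-NULL (see the hook-selection hunk above), so a client has to register its callback before starting the profiler. A hedged usage sketch, keeping this patchset's SetPseudoStackGenerator name (the review above suggests renaming it) and the hypothetical MyPseudoStackGenerator from the earlier sketch:

  SetPseudoStackGenerator(&MyPseudoStackGenerator);  // must precede Start
  HeapProfilerStart("/tmp/myapp");  // now installs PseudoStackNewHook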
638 //---------------------------------------------------------------------- | 680 //---------------------------------------------------------------------- |
639 // Initialization/finalization code | 681 // Initialization/finalization code |
640 //---------------------------------------------------------------------- | 682 //---------------------------------------------------------------------- |
641 | 683 |
642 // Initialization code | 684 // Initialization code |
643 static void HeapProfilerInit() { | 685 static void HeapProfilerInit() { |
644 // Everything after this point is for setting up the profiler based on envvar | 686 // Everything after this point is for setting up the profiler based on envvar |
645 char fname[PATH_MAX]; | 687 char fname[PATH_MAX]; |
646 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { | 688 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { |
647 return; | 689 return; |
(...skipping 14 matching lines...) | |
662 | 704 |
663 // class used for finalization -- dumps the heap-profile at program exit | 705 // class used for finalization -- dumps the heap-profile at program exit |
664 struct HeapProfileEndWriter { | 706 struct HeapProfileEndWriter { |
665 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 707 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
666 }; | 708 }; |
667 | 709 |
668 // We want to make sure tcmalloc is up and running before starting the profiler | 710 // We want to make sure tcmalloc is up and running before starting the profiler |
669 static const TCMallocGuard tcmalloc_initializer; | 711 static const TCMallocGuard tcmalloc_initializer; |
670 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 712 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
671 static HeapProfileEndWriter heap_profile_end_writer; | 713 static HeapProfileEndWriter heap_profile_end_writer; |