OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
215 // (NULL if no need for dumping yet) | 215 // (NULL if no need for dumping yet) |
216 static int dump_count = 0; // How many dumps so far | 216 static int dump_count = 0; // How many dumps so far |
217 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 217 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
218 static int64 last_dump_free = 0; // free_size when did we last dump | 218 static int64 last_dump_free = 0; // free_size when did we last dump |
219 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 219 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
220 static int64 last_dump_time = 0; // The time of the last dump | 220 static int64 last_dump_time = 0; // The time of the last dump |
221 | 221 |
222 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 222 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
223 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler | 223 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler |
224 | 224 |
225 // Callback an application can use to generate its own "stacks". | |
226 static PseudoStackGenerator pseudo_stack_generator = NULL; | |
227 | |
225 //---------------------------------------------------------------------- | 228 //---------------------------------------------------------------------- |
226 // Profile generation | 229 // Profile generation |
227 //---------------------------------------------------------------------- | 230 //---------------------------------------------------------------------- |
228 | 231 |
229 // Input must be a buffer of size at least 1MB. | 232 // Input must be a buffer of size at least 1MB. |
230 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 233 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
231 // We used to be smarter about estimating the required memory and | 234 // We used to be smarter about estimating the required memory and |
232 // then capping it to 1MB and generating the profile into that. | 235 // then capping it to 1MB and generating the profile into that. |
233 if (buf == NULL || buflen < 1) | 236 if (buf == NULL || buflen < 1) |
234 return NULL; | 237 return NULL; |
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
375 // Take the stack trace outside the critical section. | 378 // Take the stack trace outside the critical section. |
376 void* stack[HeapProfileTable::kMaxStackDepth]; | 379 void* stack[HeapProfileTable::kMaxStackDepth]; |
377 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); | 380 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); |
378 SpinLockHolder l(&heap_lock); | 381 SpinLockHolder l(&heap_lock); |
379 if (is_on) { | 382 if (is_on) { |
380 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | 383 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
381 MaybeDumpProfileLocked(); | 384 MaybeDumpProfileLocked(); |
382 } | 385 } |
383 } | 386 } |
384 | 387 |
388 // Record an allocation in the profile. This function is performance critical | |
389 // so it does not attempt to share code with RecordAlloc() above. | |
Dai Mikurube (NOT FULLTIME)
2013/07/01 05:47:42
Is it really performance critical? I don't think i
James Cook
2013/07/01 23:51:20
I benchmarked it both with and without a branch (i
Dai Mikurube (NOT FULLTIME)
2013/07/02 01:39:06
Thank you for working on it!
JFYI, I'm not confid
| |
390 static void PseudoStackRecordAlloc(const void* ptr, | |
391 size_t bytes, | |
392 int skip_count) { | |
393 // Take the stack trace outside the critical section. | |
394 void* stack[HeapProfileTable::kMaxStackDepth]; | |
395 // Generate our pseudo-stack via a callback into the client code. | |
396 int depth = (*pseudo_stack_generator)(stack); | |
397 SpinLockHolder l(&heap_lock); | |
398 if (is_on) { | |
399 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | |
400 MaybeDumpProfileLocked(); | |
401 } | |
402 } | |
403 | |
385 // Record a deallocation in the profile. | 404 // Record a deallocation in the profile. |
386 static void RecordFree(const void* ptr) { | 405 static void RecordFree(const void* ptr) { |
387 SpinLockHolder l(&heap_lock); | 406 SpinLockHolder l(&heap_lock); |
388 if (is_on) { | 407 if (is_on) { |
389 heap_profile->RecordFree(ptr); | 408 heap_profile->RecordFree(ptr); |
390 MaybeDumpProfileLocked(); | 409 MaybeDumpProfileLocked(); |
391 } | 410 } |
392 } | 411 } |
393 | 412 |
394 //---------------------------------------------------------------------- | 413 //---------------------------------------------------------------------- |
395 // Allocation/deallocation hooks for MallocHook | 414 // Allocation/deallocation hooks for MallocHook |
396 //---------------------------------------------------------------------- | 415 //---------------------------------------------------------------------- |
397 | 416 |
398 // static | 417 // static |
399 void NewHook(const void* ptr, size_t size) { | 418 void NewHook(const void* ptr, size_t size) { |
400 if (ptr != NULL) RecordAlloc(ptr, size, 0); | 419 if (ptr != NULL) RecordAlloc(ptr, size, 0); |
401 } | 420 } |
402 | 421 |
403 // static | 422 // static |
404 void DeleteHook(const void* ptr) { | 423 void DeleteHook(const void* ptr) { |
405 if (ptr != NULL) RecordFree(ptr); | 424 if (ptr != NULL) RecordFree(ptr); |
406 } | 425 } |
407 | 426 |
427 // static | |
428 void PseudoStackNewHook(const void* ptr, size_t size) { | |
429 if (ptr != NULL) PseudoStackRecordAlloc(ptr, size, 0); | |
430 } | |
431 | |
408 // TODO(jandrews): Re-enable stack tracing | 432 // TODO(jandrews): Re-enable stack tracing |
409 #ifdef TODO_REENABLE_STACK_TRACING | 433 #ifdef TODO_REENABLE_STACK_TRACING |
410 static void RawInfoStackDumper(const char* message, void*) { | 434 static void RawInfoStackDumper(const char* message, void*) { |
411 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); | 435 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); |
412 // -1 is to chop the \n which will be added by RAW_LOG | 436 // -1 is to chop the \n which will be added by RAW_LOG |
413 } | 437 } |
414 #endif | 438 #endif |
415 | 439 |
416 static void MmapHook(const void* result, const void* start, size_t size, | 440 static void MmapHook(const void* result, const void* start, size_t size, |
417 int prot, int flags, int fd, off_t offset) { | 441 int prot, int flags, int fd, off_t offset) { |
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
531 DeepHeapProfile(heap_profile, prefix, DeepHeapProfile::PageFrameType( | 555 DeepHeapProfile(heap_profile, prefix, DeepHeapProfile::PageFrameType( |
532 FLAGS_deep_heap_profile_pageframe)); | 556 FLAGS_deep_heap_profile_pageframe)); |
533 } | 557 } |
534 | 558 |
535 // We do not reset dump_count so if the user does a sequence of | 559 // We do not reset dump_count so if the user does a sequence of |
536 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 560 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
537 // sequence of profiles. | 561 // sequence of profiles. |
538 | 562 |
539 if (FLAGS_only_mmap_profile == false) { | 563 if (FLAGS_only_mmap_profile == false) { |
540 // Now set the hooks that capture new/delete and malloc/free. | 564 // Now set the hooks that capture new/delete and malloc/free. |
541 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 565 if (pseudo_stack_generator) { |
566 RAW_CHECK(MallocHook::AddNewHook(&PseudoStackNewHook), ""); | |
567 } else { | |
568 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | |
569 } | |
542 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 570 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
543 } | 571 } |
544 | 572 |
545 // Copy filename prefix | 573 // Copy filename prefix if provided. |
546 RAW_DCHECK(filename_prefix == NULL, ""); | 574 if (prefix) { |
547 const int prefix_length = strlen(prefix); | 575 RAW_DCHECK(filename_prefix == NULL, ""); |
548 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); | 576 const int prefix_length = strlen(prefix); |
549 memcpy(filename_prefix, prefix, prefix_length); | 577 filename_prefix = |
550 filename_prefix[prefix_length] = '\0'; | 578 reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); |
579 memcpy(filename_prefix, prefix, prefix_length); | |
580 filename_prefix[prefix_length] = '\0'; | |
581 } | |
582 } | |
583 | |
584 extern "C" void HeapProfilerWithPseudoStackStart( | |
585 PseudoStackGenerator callback) { | |
586 { | |
587 // Ensure the callback is set before allocations can be recorded. | |
588 SpinLockHolder l(&heap_lock); | |
589 pseudo_stack_generator = callback; | |
590 } | |
591 HeapProfilerStart(NULL); | |
592 { | |
593 // The data from /proc/self/maps is not required for pseudo-stack profiles | |
594 // and increases the size of the profile dumps significantly. | |
595 SpinLockHolder l(&heap_lock); | |
596 heap_profile->DisableProfileSelfMaps(); | |
597 } | |
551 } | 598 } |
552 | 599 |
553 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { | 600 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { |
554 SpinLockHolder l(&heap_lock); | 601 SpinLockHolder l(&heap_lock); |
555 | 602 |
556 if (!is_on) return; | 603 if (!is_on) return; |
557 | 604 |
558 heap_profile->IterateAllocationAddresses(visitor, data); | 605 heap_profile->IterateAllocationAddresses(visitor, data); |
559 } | 606 } |
560 | 607 |
561 extern "C" int IsHeapProfilerRunning() { | 608 extern "C" int IsHeapProfilerRunning() { |
562 SpinLockHolder l(&heap_lock); | 609 SpinLockHolder l(&heap_lock); |
563 return is_on ? 1 : 0; // return an int, because C code doesn't have bool | 610 return is_on ? 1 : 0; // return an int, because C code doesn't have bool |
564 } | 611 } |
565 | 612 |
566 extern "C" void HeapProfilerStop() { | 613 extern "C" void HeapProfilerStop() { |
567 SpinLockHolder l(&heap_lock); | 614 SpinLockHolder l(&heap_lock); |
568 | 615 |
569 if (!is_on) return; | 616 if (!is_on) return; |
570 | 617 |
571 if (FLAGS_only_mmap_profile == false) { | 618 if (FLAGS_only_mmap_profile == false) { |
572 // Unset our new/delete hooks, checking they were set: | 619 // Unset our new/delete hooks, checking they were set: |
573 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); | 620 if (pseudo_stack_generator) { |
621 RAW_CHECK(MallocHook::RemoveNewHook(&PseudoStackNewHook), ""); | |
622 } else { | |
623 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); | |
624 } | |
574 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); | 625 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); |
575 } | 626 } |
576 if (FLAGS_mmap_log) { | 627 if (FLAGS_mmap_log) { |
577 // Restore mmap/sbrk hooks, checking that our hooks were set: | 628 // Restore mmap/sbrk hooks, checking that our hooks were set: |
578 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 629 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
579 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 630 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
580 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 631 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
581 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 632 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
582 } | 633 } |
583 | 634 |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
669 | 720 |
670 // class used for finalization -- dumps the heap-profile at program exit | 721 // class used for finalization -- dumps the heap-profile at program exit |
671 struct HeapProfileEndWriter { | 722 struct HeapProfileEndWriter { |
672 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 723 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
673 }; | 724 }; |
674 | 725 |
675 // We want to make sure tcmalloc is up and running before starting the profiler | 726 // We want to make sure tcmalloc is up and running before starting the profiler |
676 static const TCMallocGuard tcmalloc_initializer; | 727 static const TCMallocGuard tcmalloc_initializer; |
677 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 728 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
678 static HeapProfileEndWriter heap_profile_end_writer; | 729 static HeapProfileEndWriter heap_profile_end_writer; |
OLD | NEW |