OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
213 // (NULL if no need for dumping yet) | 213 // (NULL if no need for dumping yet) |
214 static int dump_count = 0; // How many dumps so far | 214 static int dump_count = 0; // How many dumps so far |
215 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 215 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
216 static int64 last_dump_free = 0; // free_size when did we last dump | 216 static int64 last_dump_free = 0; // free_size when did we last dump |
217 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 217 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
218 static int64 last_dump_time = 0; // The time of the last dump | 218 static int64 last_dump_time = 0; // The time of the last dump |
219 | 219 |
220 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 220 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
221 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler | 221 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler |
222 | 222 |
| 223 // Callback an application can use to generate its own "stacks". |
| 224 static PseudoStackGenerator pseudo_stack_generator = NULL; |
| 225 |
223 //---------------------------------------------------------------------- | 226 //---------------------------------------------------------------------- |
224 // Profile generation | 227 // Profile generation |
225 //---------------------------------------------------------------------- | 228 //---------------------------------------------------------------------- |
226 | 229 |
227 // Input must be a buffer of size at least 1MB. | 230 // Input must be a buffer of size at least 1MB. |
228 static char* DoGetHeapProfileLocked(const char* reason, char* buf, int buflen) { | 231 static char* DoGetHeapProfileLocked(const char* reason, char* buf, int buflen) { |
229 // We used to be smarter about estimating the required memory and | 232 // We used to be smarter about estimating the required memory and |
230 // then capping it to 1MB and generating the profile into that. | 233 // then capping it to 1MB and generating the profile into that. |
231 if (buf == NULL || buflen < 1) | 234 if (buf == NULL || buflen < 1) |
232 return NULL; | 235 return NULL; |
233 | 236 |
234 RAW_DCHECK(heap_lock.IsHeld(), ""); | 237 RAW_DCHECK(heap_lock.IsHeld(), ""); |
235 int bytes_written = 0; | 238 int bytes_written = 0; |
236 if (is_on) { | 239 if (is_on) { |
237 HeapProfileTable::Stats const stats = heap_profile->total(); | 240 HeapProfileTable::Stats const stats = heap_profile->total(); |
238 (void)stats; // avoid an unused-variable warning in non-debug mode. | 241 (void)stats; // avoid an unused-variable warning in non-debug mode. |
239 if (deep_profile) { | 242 if (deep_profile) { |
240 bytes_written = deep_profile->FillOrderedProfile(reason, buf, buflen - 1); | 243 bytes_written = deep_profile->FillOrderedProfile(reason, buf, buflen - 1); |
| 244 } else if (pseudo_stack_generator) { |
| 245 bytes_written = heap_profile->FillPseudoStackProfile(buf, buflen - 1); |
241 } else { | 246 } else { |
242 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); | 247 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); |
243 } | 248 } |
244 // FillOrderedProfile should not reduce the set of active mmap-ed regions, | 249 // FillOrderedProfile should not reduce the set of active mmap-ed regions, |
245 // hence MemoryRegionMap will let us remove everything we've added above: | 250 // hence MemoryRegionMap will let us remove everything we've added above: |
246 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); | 251 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); |
247 // if this fails, we somehow removed by FillOrderedProfile | 252 // if this fails, we somehow removed by FillOrderedProfile |
248 // more than we have added. | 253 // more than we have added. |
249 } | 254 } |
250 buf[bytes_written] = '\0'; | 255 buf[bytes_written] = '\0'; |
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
372 // Take the stack trace outside the critical section. | 377 // Take the stack trace outside the critical section. |
373 void* stack[HeapProfileTable::kMaxStackDepth]; | 378 void* stack[HeapProfileTable::kMaxStackDepth]; |
374 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); | 379 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); |
375 SpinLockHolder l(&heap_lock); | 380 SpinLockHolder l(&heap_lock); |
376 if (is_on) { | 381 if (is_on) { |
377 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | 382 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
378 MaybeDumpProfileLocked(); | 383 MaybeDumpProfileLocked(); |
379 } | 384 } |
380 } | 385 } |
381 | 386 |
| 387 // Record an allocation in the profile. |
| 388 static void PseudoStackRecordAlloc(const void* ptr, |
| 389 size_t bytes, |
| 390 int skip_count) { |
| 391 // Take the stack trace outside the critical section. |
| 392 void* stack[HeapProfileTable::kMaxStackDepth]; |
| 393 // Generate our pseudo-stack via a callback into the client code. |
| 394 int depth = (*pseudo_stack_generator)(stack); |
| 395 SpinLockHolder l(&heap_lock); |
| 396 if (is_on) { |
| 397 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
| 398 MaybeDumpProfileLocked(); |
| 399 } |
| 400 } |
| 401 |
382 // Record a deallocation in the profile. | 402 // Record a deallocation in the profile. |
383 static void RecordFree(const void* ptr) { | 403 static void RecordFree(const void* ptr) { |
384 SpinLockHolder l(&heap_lock); | 404 SpinLockHolder l(&heap_lock); |
385 if (is_on) { | 405 if (is_on) { |
386 heap_profile->RecordFree(ptr); | 406 heap_profile->RecordFree(ptr); |
387 MaybeDumpProfileLocked(); | 407 MaybeDumpProfileLocked(); |
388 } | 408 } |
389 } | 409 } |
390 | 410 |
391 //---------------------------------------------------------------------- | 411 //---------------------------------------------------------------------- |
392 // Allocation/deallocation hooks for MallocHook | 412 // Allocation/deallocation hooks for MallocHook |
393 //---------------------------------------------------------------------- | 413 //---------------------------------------------------------------------- |
394 | 414 |
395 // static | 415 // static |
396 void NewHook(const void* ptr, size_t size) { | 416 void NewHook(const void* ptr, size_t size) { |
397 if (ptr != NULL) RecordAlloc(ptr, size, 0); | 417 if (ptr != NULL) RecordAlloc(ptr, size, 0); |
398 } | 418 } |
399 | 419 |
400 // static | 420 // static |
401 void DeleteHook(const void* ptr) { | 421 void DeleteHook(const void* ptr) { |
402 if (ptr != NULL) RecordFree(ptr); | 422 if (ptr != NULL) RecordFree(ptr); |
403 } | 423 } |
404 | 424 |
| 425 // static |
| 426 void PseudoStackNewHook(const void* ptr, size_t size) { |
| 427 if (ptr != NULL) PseudoStackRecordAlloc(ptr, size, 0); |
| 428 } |
| 429 |
405 // TODO(jandrews): Re-enable stack tracing | 430 // TODO(jandrews): Re-enable stack tracing |
406 #ifdef TODO_REENABLE_STACK_TRACING | 431 #ifdef TODO_REENABLE_STACK_TRACING |
407 static void RawInfoStackDumper(const char* message, void*) { | 432 static void RawInfoStackDumper(const char* message, void*) { |
408 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); | 433 RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message); |
409 // -1 is to chop the \n which will be added by RAW_LOG | 434 // -1 is to chop the \n which will be added by RAW_LOG |
410 } | 435 } |
411 #endif | 436 #endif |
412 | 437 |
413 static void MmapHook(const void* result, const void* start, size_t size, | 438 static void MmapHook(const void* result, const void* start, size_t size, |
414 int prot, int flags, int fd, off_t offset) { | 439 int prot, int flags, int fd, off_t offset) { |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
527 deep_profile = new(ProfilerMalloc(sizeof(DeepHeapProfile))) | 552 deep_profile = new(ProfilerMalloc(sizeof(DeepHeapProfile))) |
528 DeepHeapProfile(heap_profile, prefix); | 553 DeepHeapProfile(heap_profile, prefix); |
529 } | 554 } |
530 | 555 |
531 // We do not reset dump_count so if the user does a sequence of | 556 // We do not reset dump_count so if the user does a sequence of |
532 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 557 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
533 // sequence of profiles. | 558 // sequence of profiles. |
534 | 559 |
535 if (FLAGS_only_mmap_profile == false) { | 560 if (FLAGS_only_mmap_profile == false) { |
536 // Now set the hooks that capture new/delete and malloc/free. | 561 // Now set the hooks that capture new/delete and malloc/free. |
537 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 562 if (pseudo_stack_generator) { |
| 563 RAW_CHECK(MallocHook::AddNewHook(&PseudoStackNewHook), ""); |
| 564 } else { |
| 565 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
| 566 } |
538 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 567 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
539 } | 568 } |
540 | 569 |
541 // Copy filename prefix | 570 // Copy filename prefix |
542 RAW_DCHECK(filename_prefix == NULL, ""); | 571 RAW_DCHECK(filename_prefix == NULL, ""); |
543 const int prefix_length = strlen(prefix); | 572 const int prefix_length = strlen(prefix); |
544 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); | 573 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); |
545 memcpy(filename_prefix, prefix, prefix_length); | 574 memcpy(filename_prefix, prefix, prefix_length); |
546 filename_prefix[prefix_length] = '\0'; | 575 filename_prefix[prefix_length] = '\0'; |
547 } | 576 } |
(...skipping 11 matching lines...) Expand all Loading... |
559 return is_on ? 1 : 0; // return an int, because C code doesn't have bool | 588 return is_on ? 1 : 0; // return an int, because C code doesn't have bool |
560 } | 589 } |
561 | 590 |
562 extern "C" void HeapProfilerStop() { | 591 extern "C" void HeapProfilerStop() { |
563 SpinLockHolder l(&heap_lock); | 592 SpinLockHolder l(&heap_lock); |
564 | 593 |
565 if (!is_on) return; | 594 if (!is_on) return; |
566 | 595 |
567 if (FLAGS_only_mmap_profile == false) { | 596 if (FLAGS_only_mmap_profile == false) { |
568 // Unset our new/delete hooks, checking they were set: | 597 // Unset our new/delete hooks, checking they were set: |
569 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); | 598 if (pseudo_stack_generator) { |
| 599 RAW_CHECK(MallocHook::RemoveNewHook(&PseudoStackNewHook), ""); |
| 600 } else { |
| 601 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); |
| 602 } |
570 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); | 603 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); |
571 } | 604 } |
572 if (FLAGS_mmap_log) { | 605 if (FLAGS_mmap_log) { |
573 // Restore mmap/sbrk hooks, checking that our hooks were set: | 606 // Restore mmap/sbrk hooks, checking that our hooks were set: |
574 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 607 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
575 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 608 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
576 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 609 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
577 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 610 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
578 } | 611 } |
579 | 612 |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
631 } | 664 } |
632 | 665 |
633 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { | 666 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { |
634 SpinLockHolder l(&heap_lock); | 667 SpinLockHolder l(&heap_lock); |
635 | 668 |
636 if (!is_on) return; | 669 if (!is_on) return; |
637 | 670 |
638 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); | 671 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); |
639 } | 672 } |
640 | 673 |
| 674 extern "C" void SetPseudoStackGenerator(PseudoStackGenerator callback) { |
| 675 SpinLockHolder l(&heap_lock); |
| 676 pseudo_stack_generator = callback; |
| 677 } |
| 678 |
641 //---------------------------------------------------------------------- | 679 //---------------------------------------------------------------------- |
642 // Initialization/finalization code | 680 // Initialization/finalization code |
643 //---------------------------------------------------------------------- | 681 //---------------------------------------------------------------------- |
644 | 682 |
645 // Initialization code | 683 // Initialization code |
646 static void HeapProfilerInit() { | 684 static void HeapProfilerInit() { |
647 // Everything after this point is for setting up the profiler based on envvar | 685 // Everything after this point is for setting up the profiler based on envvar |
648 char fname[PATH_MAX]; | 686 char fname[PATH_MAX]; |
649 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { | 687 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { |
650 return; | 688 return; |
(...skipping 14 matching lines...) Expand all Loading... |
665 | 703 |
666 // class used for finalization -- dumps the heap-profile at program exit | 704 // class used for finalization -- dumps the heap-profile at program exit |
667 struct HeapProfileEndWriter { | 705 struct HeapProfileEndWriter { |
668 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 706 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
669 }; | 707 }; |
670 | 708 |
671 // We want to make sure tcmalloc is up and running before starting the profiler | 709 // We want to make sure tcmalloc is up and running before starting the profiler |
672 static const TCMallocGuard tcmalloc_initializer; | 710 static const TCMallocGuard tcmalloc_initializer; |
673 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 711 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
674 static HeapProfileEndWriter heap_profile_end_writer; | 712 static HeapProfileEndWriter heap_profile_end_writer; |
OLD | NEW |