OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
213 // (NULL if no need for dumping yet) | 213 // (NULL if no need for dumping yet) |
214 static int dump_count = 0; // How many dumps so far | 214 static int dump_count = 0; // How many dumps so far |
215 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 215 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
216 static int64 last_dump_free = 0; // free_size when did we last dump | 216 static int64 last_dump_free = 0; // free_size when did we last dump |
217 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 217 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
218 static int64 last_dump_time = 0; // The time of the last dump | 218 static int64 last_dump_time = 0; // The time of the last dump |
219 | 219 |
220 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 220 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
221 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler | 221 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler |
222 | 222 |
| 223 // Callback an application can use to generate its own "stacks". |
| 224 static PseudoStackGenerator pseudo_stack_generator = NULL; |
| 225 |
223 //---------------------------------------------------------------------- | 226 //---------------------------------------------------------------------- |
224 // Profile generation | 227 // Profile generation |
225 //---------------------------------------------------------------------- | 228 //---------------------------------------------------------------------- |
226 | 229 |
227 // Input must be a buffer of size at least 1MB. | 230 // Input must be a buffer of size at least 1MB. |
228 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 231 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
229 // We used to be smarter about estimating the required memory and | 232 // We used to be smarter about estimating the required memory and |
230 // then capping it to 1MB and generating the profile into that. | 233 // then capping it to 1MB and generating the profile into that. |
231 if (buf == NULL || buflen < 1) | 234 if (buf == NULL || buflen < 1) |
232 return NULL; | 235 return NULL; |
233 | 236 |
234 RAW_DCHECK(heap_lock.IsHeld(), ""); | 237 RAW_DCHECK(heap_lock.IsHeld(), ""); |
235 int bytes_written = 0; | 238 int bytes_written = 0; |
236 if (is_on) { | 239 if (is_on) { |
237 HeapProfileTable::Stats const stats = heap_profile->total(); | 240 HeapProfileTable::Stats const stats = heap_profile->total(); |
238 (void)stats; // avoid an unused-variable warning in non-debug mode. | 241 (void)stats; // avoid an unused-variable warning in non-debug mode. |
239 if (deep_profile) { | 242 if (deep_profile) { |
240 bytes_written = deep_profile->FillOrderedProfile(buf, buflen - 1); | 243 bytes_written = deep_profile->FillOrderedProfile(buf, buflen - 1); |
241 } else { | 244 } else { |
242 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); | 245 // bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); |
| 246 bytes_written = heap_profile->FillOrderedProfile2(buf, buflen - 1); |
243 } | 247 } |
244 // FillOrderedProfile should not reduce the set of active mmap-ed regions, | 248 // FillOrderedProfile should not reduce the set of active mmap-ed regions, |
245 // hence MemoryRegionMap will let us remove everything we've added above: | 249 // hence MemoryRegionMap will let us remove everything we've added above: |
246 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); | 250 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); |
247 // if this fails, we somehow removed by FillOrderedProfile | 251 // if this fails, we somehow removed by FillOrderedProfile |
248 // more than we have added. | 252 // more than we have added. |
249 } | 253 } |
250 buf[bytes_written] = '\0'; | 254 buf[bytes_written] = '\0'; |
251 RAW_DCHECK(bytes_written == strlen(buf), ""); | 255 RAW_DCHECK(bytes_written == strlen(buf), ""); |
252 | 256 |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
364 if (inuse_bytes > high_water_mark) | 368 if (inuse_bytes > high_water_mark) |
365 high_water_mark = inuse_bytes; | 369 high_water_mark = inuse_bytes; |
366 } | 370 } |
367 } | 371 } |
368 } | 372 } |
369 | 373 |
370 // Record an allocation in the profile. | 374 // Record an allocation in the profile. |
371 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) { | 375 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) { |
372 // Take the stack trace outside the critical section. | 376 // Take the stack trace outside the critical section. |
373 void* stack[HeapProfileTable::kMaxStackDepth]; | 377 void* stack[HeapProfileTable::kMaxStackDepth]; |
374 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); | 378 //JAMESDEBUG this is the wrong place to do this, look deeper |
| 379 int depth; |
| 380 if (pseudo_stack_generator) { |
| 381 depth = (*pseudo_stack_generator)(stack); |
| 382 } else { |
| 383 stack[0] = NULL; |
| 384 depth = 1; |
| 385 } |
| 386 // int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); |
375 SpinLockHolder l(&heap_lock); | 387 SpinLockHolder l(&heap_lock); |
376 if (is_on) { | 388 if (is_on) { |
377 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | 389 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
378 MaybeDumpProfileLocked(); | 390 MaybeDumpProfileLocked(); |
379 } | 391 } |
380 } | 392 } |
381 | 393 |
382 // Record a deallocation in the profile. | 394 // Record a deallocation in the profile. |
383 static void RecordFree(const void* ptr) { | 395 static void RecordFree(const void* ptr) { |
384 SpinLockHolder l(&heap_lock); | 396 SpinLockHolder l(&heap_lock); |
(...skipping 246 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
631 } | 643 } |
632 | 644 |
633 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { | 645 extern "C" void HeapProfilerDumpAliveObjects(const char* filename) { |
634 SpinLockHolder l(&heap_lock); | 646 SpinLockHolder l(&heap_lock); |
635 | 647 |
636 if (!is_on) return; | 648 if (!is_on) return; |
637 | 649 |
638 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); | 650 heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename); |
639 } | 651 } |
640 | 652 |
| 653 extern "C" void SetPseudoStackGenerator(PseudoStackGenerator callback) { |
| 654 SpinLockHolder l(&heap_lock); |
| 655 pseudo_stack_generator = callback; |
| 656 } |
| 657 |
641 //---------------------------------------------------------------------- | 658 //---------------------------------------------------------------------- |
642 // Initialization/finalization code | 659 // Initialization/finalization code |
643 //---------------------------------------------------------------------- | 660 //---------------------------------------------------------------------- |
644 | 661 |
645 // Initialization code | 662 // Initialization code |
646 static void HeapProfilerInit() { | 663 static void HeapProfilerInit() { |
647 // Everything after this point is for setting up the profiler based on envvar | 664 // Everything after this point is for setting up the profiler based on envvar |
648 char fname[PATH_MAX]; | 665 char fname[PATH_MAX]; |
649 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { | 666 if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) { |
650 return; | 667 return; |
(...skipping 14 matching lines...) Expand all Loading... |
665 | 682 |
666 // class used for finalization -- dumps the heap-profile at program exit | 683 // class used for finalization -- dumps the heap-profile at program exit |
667 struct HeapProfileEndWriter { | 684 struct HeapProfileEndWriter { |
668 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 685 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
669 }; | 686 }; |
670 | 687 |
671 // We want to make sure tcmalloc is up and running before starting the profiler | 688 // We want to make sure tcmalloc is up and running before starting the profiler |
672 static const TCMallocGuard tcmalloc_initializer; | 689 static const TCMallocGuard tcmalloc_initializer; |
673 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 690 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
674 static HeapProfileEndWriter heap_profile_end_writer; | 691 static HeapProfileEndWriter heap_profile_end_writer; |
OLD | NEW |