OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 204 matching lines...)
215 // (NULL if no need for dumping yet) | 215 // (NULL if no need for dumping yet) |
216 static int dump_count = 0; // How many dumps so far | 216 static int dump_count = 0; // How many dumps so far |
217 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 217 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
218 static int64 last_dump_free = 0; // free_size when did we last dump | 218 static int64 last_dump_free = 0; // free_size when did we last dump |
219 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 219 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
220 static int64 last_dump_time = 0; // The time of the last dump | 220 static int64 last_dump_time = 0; // The time of the last dump |
221 | 221 |
222 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 222 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
223 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler | 223 static DeepHeapProfile* deep_profile = NULL; // deep memory profiler |
224 | 224 |
225 // Callback to generate a stack trace for an allocation. May be overridden | |
226 // by an application to provide its own pseudo-stacks. | |
227 static StackGeneratorFunction stack_generator_function = | |
228 &HeapProfileTable::GetCallerStackTrace; | |
jar (doing other things) 2013/07/12 01:24:26: nit: no need for the ampersand.
James Cook 2013/07/12 17:40:27: Done.
229 | |
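As a rough sketch of what an application-supplied generator could look like (assuming StackGeneratorFunction, declared in the header rather than in this hunk, matches the way the pointer is invoked in RecordAlloc below: it takes a skip count plus a buffer of HeapProfileTable::kMaxStackDepth slots and returns the number of entries written); the per-thread pseudo-frame bookkeeping and all names here are purely illustrative:

// Hypothetical per-thread pseudo-stack maintained by the application,
// e.g. pushed/popped around task dispatch. Not part of the profiler.
static __thread void* g_pseudo_frames[16];
static __thread int g_pseudo_depth = 0;

// Fills |stack| (which has HeapProfileTable::kMaxStackDepth slots) with the
// current pseudo-frames and returns how many entries were written.
static int PseudoStackGenerator(int skip_count, void** stack) {
  // Pseudo-frames are already "logical" frames, so there is nothing useful
  // to skip; a real stack walker would honor skip_count instead.
  (void)skip_count;
  int depth = 0;
  while (depth < g_pseudo_depth && depth < HeapProfileTable::kMaxStackDepth) {
    stack[depth] = g_pseudo_frames[depth];
    ++depth;
  }
  return depth;
}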
225 //---------------------------------------------------------------------- | 230 //---------------------------------------------------------------------- |
226 // Profile generation | 231 // Profile generation |
227 //---------------------------------------------------------------------- | 232 //---------------------------------------------------------------------- |
228 | 233 |
229 // Input must be a buffer of size at least 1MB. | 234 // Input must be a buffer of size at least 1MB. |
230 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 235 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
231 // We used to be smarter about estimating the required memory and | 236 // We used to be smarter about estimating the required memory and |
232 // then capping it to 1MB and generating the profile into that. | 237 // then capping it to 1MB and generating the profile into that. |
233 if (buf == NULL || buflen < 1) | 238 if (buf == NULL || buflen < 1) |
234 return NULL; | 239 return NULL; |
(...skipping 132 matching lines...)
367 if (inuse_bytes > high_water_mark) | 372 if (inuse_bytes > high_water_mark) |
368 high_water_mark = inuse_bytes; | 373 high_water_mark = inuse_bytes; |
369 } | 374 } |
370 } | 375 } |
371 } | 376 } |
372 | 377 |
373 // Record an allocation in the profile. | 378 // Record an allocation in the profile. |
374 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) { | 379 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) { |
375 // Take the stack trace outside the critical section. | 380 // Take the stack trace outside the critical section. |
376 void* stack[HeapProfileTable::kMaxStackDepth]; | 381 void* stack[HeapProfileTable::kMaxStackDepth]; |
377 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack); | 382 int depth = (*stack_generator_function)(skip_count + 1, stack); |
jar (doing other things) 2013/07/12 01:24:26: nit: no need for the indirection or parens.
  stack
James Cook 2013/07/12 17:40:27: Done. Apparently I have forgotten how to deal wit
378 SpinLockHolder l(&heap_lock); | 383 SpinLockHolder l(&heap_lock); |
379 if (is_on) { | 384 if (is_on) { |
380 heap_profile->RecordAlloc(ptr, bytes, depth, stack); | 385 heap_profile->RecordAlloc(ptr, bytes, depth, stack); |
381 MaybeDumpProfileLocked(); | 386 MaybeDumpProfileLocked(); |
382 } | 387 } |
383 } | 388 } |
384 | 389 |
385 // Record a deallocation in the profile. | 390 // Record a deallocation in the profile. |
386 static void RecordFree(const void* ptr) { | 391 static void RecordFree(const void* ptr) { |
387 SpinLockHolder l(&heap_lock); | 392 SpinLockHolder l(&heap_lock); |
(...skipping 147 matching lines...)
535 // We do not reset dump_count so if the user does a sequence of | 540 // We do not reset dump_count so if the user does a sequence of |
536 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 541 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
537 // sequence of profiles. | 542 // sequence of profiles. |
538 | 543 |
539 if (FLAGS_only_mmap_profile == false) { | 544 if (FLAGS_only_mmap_profile == false) { |
540 // Now set the hooks that capture new/delete and malloc/free. | 545 // Now set the hooks that capture new/delete and malloc/free. |
541 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 546 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
542 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 547 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
543 } | 548 } |
544 | 549 |
545 // Copy filename prefix | 550 // Copy filename prefix only if provided. |
551 if (!prefix) | |
552 return; | |
546 RAW_DCHECK(filename_prefix == NULL, ""); | 553 RAW_DCHECK(filename_prefix == NULL, ""); |
547 const int prefix_length = strlen(prefix); | 554 const int prefix_length = strlen(prefix); |
548 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); | 555 filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1)); |
549 memcpy(filename_prefix, prefix, prefix_length); | 556 memcpy(filename_prefix, prefix, prefix_length); |
550 filename_prefix[prefix_length] = '\0'; | 557 filename_prefix[prefix_length] = '\0'; |
551 } | 558 } |
552 | 559 |
560 extern "C" void HeapProfilerWithPseudoStackStart( | |
561 StackGeneratorFunction callback) { | |
562 { | |
563 // Ensure the callback is set before allocations can be recorded. | |
564 SpinLockHolder l(&heap_lock); | |
565 stack_generator_function = callback; | |
566 } | |
567 HeapProfilerStart(NULL); | |
568 } | |
569 | |
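A hedged usage sketch for the new entry point, assuming a generator like the PseudoStackGenerator sketched earlier; installing the callback through HeapProfilerWithPseudoStackStart (instead of calling HeapProfilerStart directly) matches the ordering enforced here, i.e. the generator is in place before any allocation can be recorded. GetHeapProfile() is assumed to be the stock gperftools accessor that returns a malloc()ed profile string which the caller frees; all other names are illustrative.

// In the embedding application:
void StartHeapProfilingWithPseudoStacks() {
  // NULL prefix: with the early return added above, no filename prefix is
  // copied, so presumably the embedder retrieves profiles explicitly.
  HeapProfilerWithPseudoStackStart(PseudoStackGenerator);
}

void GrabOneProfile() {
  char* profile = GetHeapProfile();  // assumption: exposed as in stock gperftools
  if (profile != NULL) {
    // ... hand the text to whatever reporting path the embedder uses ...
    free(profile);  // needs <stdlib.h>
  }
}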
553 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { | 570 extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) { |
554 SpinLockHolder l(&heap_lock); | 571 SpinLockHolder l(&heap_lock); |
555 | 572 |
556 if (!is_on) return; | 573 if (!is_on) return; |
557 | 574 |
558 heap_profile->IterateAllocationAddresses(visitor, data); | 575 heap_profile->IterateAllocationAddresses(visitor, data); |
559 } | 576 } |
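For completeness, a sketch of driving this iterator from the embedder side. The AddressVisitor typedef lives in the header, not in this hunk, so the void(void* data, const void* ptr) shape used below is an assumption; the counting helpers are illustrative only.

// Assumed visitor shape: void(void* data, const void* ptr).
static void CountOneLiveObject(void* data, const void* ptr) {
  (void)ptr;  // address of one live allocation tracked by the profiler
  ++*static_cast<size_t*>(data);
}

// Returns how many live allocations the profiler currently knows about.
static size_t CountLiveObjects() {
  size_t count = 0;
  IterateAllocatedObjects(CountOneLiveObject, &count);
  return count;
}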
560 | 577 |
561 extern "C" int IsHeapProfilerRunning() { | 578 extern "C" int IsHeapProfilerRunning() { |
562 SpinLockHolder l(&heap_lock); | 579 SpinLockHolder l(&heap_lock); |
(...skipping 106 matching lines...)
669 | 686 |
670 // class used for finalization -- dumps the heap-profile at program exit | 687 // class used for finalization -- dumps the heap-profile at program exit |
671 struct HeapProfileEndWriter { | 688 struct HeapProfileEndWriter { |
672 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 689 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
673 }; | 690 }; |
674 | 691 |
675 // We want to make sure tcmalloc is up and running before starting the profiler | 692 // We want to make sure tcmalloc is up and running before starting the profiler |
676 static const TCMallocGuard tcmalloc_initializer; | 693 static const TCMallocGuard tcmalloc_initializer; |
677 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 694 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
678 static HeapProfileEndWriter heap_profile_end_writer; | 695 static HeapProfileEndWriter heap_profile_end_writer; |