OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
64 #include "malloc_hook-inl.h" | 64 #include "malloc_hook-inl.h" |
65 #include "tcmalloc_guard.h" | 65 #include "tcmalloc_guard.h" |
66 #include <google/malloc_hook.h> | 66 #include <google/malloc_hook.h> |
67 #include <google/malloc_extension.h> | 67 #include <google/malloc_extension.h> |
68 #include "base/spinlock.h" | 68 #include "base/spinlock.h" |
69 #include "base/low_level_alloc.h" | 69 #include "base/low_level_alloc.h" |
70 #include "base/sysinfo.h" // for GetUniquePathFromEnv() | 70 #include "base/sysinfo.h" // for GetUniquePathFromEnv() |
71 #include "heap-profile-table.h" | 71 #include "heap-profile-table.h" |
72 #include "memory_region_map.h" | 72 #include "memory_region_map.h" |
73 | 73 |
74 | |
75 #ifndef PATH_MAX | 74 #ifndef PATH_MAX |
76 #ifdef MAXPATHLEN | 75 #ifdef MAXPATHLEN |
77 #define PATH_MAX MAXPATHLEN | 76 #define PATH_MAX MAXPATHLEN |
78 #else | 77 #else |
79 #define PATH_MAX 4096 // seems conservative for max filename len! | 78 #define PATH_MAX 4096 // seems conservative for max filename len! |
80 #endif | 79 #endif |
81 #endif | 80 #endif |
82 | 81 |
83 using STL_NAMESPACE::string; | 82 using STL_NAMESPACE::string; |
84 using STL_NAMESPACE::sort; | 83 using STL_NAMESPACE::sort; |
(...skipping 25 matching lines...) Expand all Loading... |
110 DEFINE_bool(mmap_log, | 109 DEFINE_bool(mmap_log, |
111 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), | 110 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), |
112 "Should mmap/munmap calls be logged?"); | 111 "Should mmap/munmap calls be logged?"); |
113 DEFINE_bool(mmap_profile, | 112 DEFINE_bool(mmap_profile, |
114 EnvToBool("HEAP_PROFILE_MMAP", false), | 113 EnvToBool("HEAP_PROFILE_MMAP", false), |
115 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); | 114 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); |
116 DEFINE_bool(only_mmap_profile, | 115 DEFINE_bool(only_mmap_profile, |
117 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), | 116 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), |
118 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " | 117 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " |
119 "do not profile malloc/new/etc"); | 118 "do not profile malloc/new/etc"); |
 120 | 119 DEFINE_int64(heap_profile_time_interval, |
 | 120 EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0), |
 | 121 "If non-zero, dump heap profiling information once every " |
 | 122 "specified number of seconds since the last dump."); |
121 | 125 |
122 //---------------------------------------------------------------------- | 126 //---------------------------------------------------------------------- |
123 // Locking | 127 // Locking |
124 //---------------------------------------------------------------------- | 128 //---------------------------------------------------------------------- |
125 | 129 |
126 // A pthread_mutex has way too much lock contention to be used here. | 130 // A pthread_mutex has way too much lock contention to be used here. |
127 // | 131 // |
128 // I would like to use Mutex, but it can call malloc(), | 132 // I would like to use Mutex, but it can call malloc(), |
129 // which can cause us to fall into an infinite recursion. | 133 // which can cause us to fall into an infinite recursion. |
130 // | 134 // |
131 // So we use a simple spinlock. | 135 // So we use a simple spinlock. |
132 static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED); | 136 static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED); |
133 | 137 |
134 //---------------------------------------------------------------------- | 138 //---------------------------------------------------------------------- |
135 // Simple allocator for heap profiler's internal memory | 139 // Simple allocator for heap profiler's internal memory |
136 //---------------------------------------------------------------------- | 140 //---------------------------------------------------------------------- |
137 | 141 |
138 static LowLevelAlloc::Arena *heap_profiler_memory; | 142 static LowLevelAlloc::Arena *heap_profiler_memory; |
139 | 143 |
140 static void* ProfilerMalloc(size_t bytes) { | 144 static void* ProfilerMalloc(size_t bytes) { |
141 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); | 145 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); |
142 } | 146 } |
143 static void ProfilerFree(void* p) { | 147 static void ProfilerFree(void* p) { |
144 LowLevelAlloc::Free(p); | 148 LowLevelAlloc::Free(p); |
145 } | 149 } |
146 | 150 |
147 // We use buffers of this size in DoGetHeapProfile. | 151 // We use buffers of this size in DoGetHeapProfile. |
148 static const int kProfileBufferSize = 1 << 20; | 152 static const int kProfileBufferSize = 5 << 20; |
149 | |
150 // This is a last-ditch buffer we use in DumpProfileLocked in case we | 153 // This is a last-ditch buffer we use in DumpProfileLocked in case we |
151 // can't allocate more memory from ProfilerMalloc. We expect this | 154 // can't allocate more memory from ProfilerMalloc. We expect this |
152 // will be used by HeapProfileEndWriter when the application has to | 155 // will be used by HeapProfileEndWriter when the application has to |
153 // exit due to out-of-memory. This buffer is allocated in | 156 // exit due to out-of-memory. This buffer is allocated in |
154 // HeapProfilerStart. Access to this must be protected by heap_lock. | 157 // HeapProfilerStart. Access to this must be protected by heap_lock. |
155 static char* global_profiler_buffer = NULL; | 158 static char* global_profiler_buffer = NULL; |
156 | 159 |
157 | 160 |
158 //---------------------------------------------------------------------- | 161 //---------------------------------------------------------------------- |
159 // Profiling control/state data | 162 // Profiling control/state data |
160 //---------------------------------------------------------------------- | 163 //---------------------------------------------------------------------- |
161 | 164 |
162 // Access to all of these is protected by heap_lock. | 165 // Access to all of these is protected by heap_lock. |
163 static bool is_on = false; // If are on as a subsytem. | 166 static bool is_on = false; // If are on as a subsytem. |
164 static bool dumping = false; // Dumping status to prevent recursion | 167 static bool dumping = false; // Dumping status to prevent recursion |
165 static char* filename_prefix = NULL; // Prefix used for profile file names | 168 static char* filename_prefix = NULL; // Prefix used for profile file names |
166 // (NULL if no need for dumping yet) | 169 // (NULL if no need for dumping yet) |
167 static int dump_count = 0; // How many dumps so far | 170 static int dump_count = 0; // How many dumps so far |
168 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 171 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
169 static int64 last_dump_free = 0; // free_size when did we last dump | 172 static int64 last_dump_free = 0; // free_size when did we last dump |
170 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 173 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
| 174 static int64 last_dump_time = 0; // The time of the last dump |
171 | 175 |
172 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 176 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
173 | 177 |
| 178 #ifdef DEEP_PROFILER_ON |
| 179 #include "deep-memory-profiler.h" |
| 180 static DeepMemoryProfiler* deep_profiler = NULL; // deep memory profiler |
| 181 #endif |
| 182 |
174 //---------------------------------------------------------------------- | 183 //---------------------------------------------------------------------- |
175 // Profile generation | 184 // Profile generation |
176 //---------------------------------------------------------------------- | 185 //---------------------------------------------------------------------- |
177 | 186 |
178 enum AddOrRemove { ADD, REMOVE }; | 187 enum AddOrRemove { ADD, REMOVE }; |
179 | 188 |
180 // Add or remove all MMap-allocated regions to/from *heap_profile. | 189 // Add or remove all MMap-allocated regions to/from *heap_profile. |
181 // Assumes heap_lock is held. | 190 // Assumes heap_lock is held. |
182 static void AddRemoveMMapDataLocked(AddOrRemove mode) { | 191 static void AddRemoveMMapDataLocked(AddOrRemove mode) { |
183 RAW_DCHECK(heap_lock.IsHeld(), ""); | 192 RAW_DCHECK(heap_lock.IsHeld(), ""); |
184 if (!FLAGS_mmap_profile || !is_on) return; | 193 if (!FLAGS_mmap_profile || !is_on) return; |
185 // MemoryRegionMap maintained all the data we need for all | 194 // MemoryRegionMap maintained all the data we need for all |
186 // mmap-like allocations, so we just use it here: | 195 // mmap-like allocations, so we just use it here: |
187 MemoryRegionMap::LockHolder l; | 196 MemoryRegionMap::LockHolder l; |
| 197 heap_profile->MMapRecordBegin(); |
188 for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked(); | 198 for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked(); |
189 r != MemoryRegionMap::EndRegionLocked(); ++r) { | 199 r != MemoryRegionMap::EndRegionLocked(); ++r) { |
190 if (mode == ADD) { | 200 if (mode == ADD) { |
191 heap_profile->RecordAllocWithStack( | 201 heap_profile->RecordAllocWithStack( |
192 reinterpret_cast<const void*>(r->start_addr), | 202 reinterpret_cast<const void*>(r->start_addr), |
193 r->end_addr - r->start_addr, | 203 r->end_addr - r->start_addr, |
194 r->call_stack_depth, r->call_stack); | 204 r->call_stack_depth, r->call_stack); |
195 } else { | 205 } else { |
196 heap_profile->RecordFree(reinterpret_cast<void*>(r->start_addr)); | 206 heap_profile->RecordFree(reinterpret_cast<void*>(r->start_addr)); |
197 } | 207 } |
198 } | 208 } |
| 209 heap_profile->MMapRecordEnd(); |
199 } | 210 } |
200 | 211 |
 201 // Input must be a buffer of size at least 1MB. | 212 // Input must be a buffer of at least kProfileBufferSize bytes. |
202 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 213 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
203 // We used to be smarter about estimating the required memory and | 214 // We used to be smarter about estimating the required memory and |
204 // then capping it to 1MB and generating the profile into that. | 215 // then capping it to 1MB and generating the profile into that. |
205 if (buf == NULL || buflen < 1) | 216 if (buf == NULL || buflen < 1) |
206 return NULL; | 217 return NULL; |
207 | 218 |
208 RAW_DCHECK(heap_lock.IsHeld(), ""); | 219 RAW_DCHECK(heap_lock.IsHeld(), ""); |
209 int bytes_written = 0; | 220 int bytes_written = 0; |
210 if (is_on) { | 221 if (is_on) { |
211 HeapProfileTable::Stats const stats = heap_profile->total(); | 222 HeapProfileTable::Stats const stats = heap_profile->total(); |
212 (void)stats; // avoid an unused-variable warning in non-debug mode. | 223 (void)stats; // avoid an unused-variable warning in non-debug mode. |
213 AddRemoveMMapDataLocked(ADD); | 224 AddRemoveMMapDataLocked(ADD); |
| 225 #ifdef DEEP_PROFILER_ON |
| 226 bytes_written = deep_profiler->FillOrderedProfile(buf, buflen - 1); |
| 227 #else |
214 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); | 228 bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1); |
| 229 #endif |
215 // FillOrderedProfile should not reduce the set of active mmap-ed regions, | 230 // FillOrderedProfile should not reduce the set of active mmap-ed regions, |
216 // hence MemoryRegionMap will let us remove everything we've added above: | 231 // hence MemoryRegionMap will let us remove everything we've added above: |
217 AddRemoveMMapDataLocked(REMOVE); | 232 AddRemoveMMapDataLocked(REMOVE); |
218 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); | 233 RAW_DCHECK(stats.Equivalent(heap_profile->total()), ""); |
219 // if this fails, we somehow removed by AddRemoveMMapDataLocked | 234 // if this fails, we somehow removed by AddRemoveMMapDataLocked |
220 // more than we have added. | 235 // more than we have added. |
221 } | 236 } |
222 buf[bytes_written] = '\0'; | 237 buf[bytes_written] = '\0'; |
223 RAW_DCHECK(bytes_written == strlen(buf), ""); | 238 RAW_DCHECK(bytes_written == strlen(buf), ""); |
224 | 239 |
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
303 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " | 318 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " |
304 "%"PRId64" MB currently in use"), | 319 "%"PRId64" MB currently in use"), |
305 total.free_size >> 20, inuse_bytes >> 20); | 320 total.free_size >> 20, inuse_bytes >> 20); |
306 need_to_dump = true; | 321 need_to_dump = true; |
307 } else if (FLAGS_heap_profile_inuse_interval > 0 && | 322 } else if (FLAGS_heap_profile_inuse_interval > 0 && |
308 inuse_bytes > | 323 inuse_bytes > |
309 high_water_mark + FLAGS_heap_profile_inuse_interval) { | 324 high_water_mark + FLAGS_heap_profile_inuse_interval) { |
310 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", | 325 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", |
311 inuse_bytes >> 20); | 326 inuse_bytes >> 20); |
312 need_to_dump = true; | 327 need_to_dump = true; |
 | 328 } else if (FLAGS_heap_profile_time_interval > 0 && |
 | 329 time(NULL) - last_dump_time >= |
 | 330 FLAGS_heap_profile_time_interval) { |
 | 331 snprintf(buf, sizeof(buf), "%"PRId64" sec since the last dump", |
 | 332 static_cast<int64>(time(NULL) - last_dump_time)); |
 | 333 need_to_dump = true; |
 | 334 last_dump_time = time(NULL); |
313 } | 334 } |
314 if (need_to_dump) { | 335 if (need_to_dump) { |
315 DumpProfileLocked(buf); | 336 DumpProfileLocked(buf); |
316 | 337 |
317 last_dump_alloc = total.alloc_size; | 338 last_dump_alloc = total.alloc_size; |
318 last_dump_free = total.free_size; | 339 last_dump_free = total.free_size; |
319 if (inuse_bytes > high_water_mark) | 340 if (inuse_bytes > high_water_mark) |
320 high_water_mark = inuse_bytes; | 341 high_water_mark = inuse_bytes; |
321 } | 342 } |
322 } | 343 } |
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
463 // heap profile even if the application runs out of memory. | 484 // heap profile even if the application runs out of memory. |
464 global_profiler_buffer = | 485 global_profiler_buffer = |
465 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); | 486 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); |
466 | 487 |
467 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) | 488 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) |
468 HeapProfileTable(ProfilerMalloc, ProfilerFree); | 489 HeapProfileTable(ProfilerMalloc, ProfilerFree); |
469 | 490 |
470 last_dump_alloc = 0; | 491 last_dump_alloc = 0; |
471 last_dump_free = 0; | 492 last_dump_free = 0; |
472 high_water_mark = 0; | 493 high_water_mark = 0; |
| 494 last_dump_time = 0; |
| 495 |
| 496 #ifdef DEEP_PROFILER_ON |
| 497 // Initialize deep memory profiler |
| 498 RAW_VLOG(0, "[%d] Starting a deep memory profiler", getpid()); |
| 499 deep_profiler = new(ProfilerMalloc(sizeof(DeepMemoryProfiler))) |
| 500 DeepMemoryProfiler(heap_profile, prefix); |
| 501 #endif |
473 | 502 |
474 // We do not reset dump_count so if the user does a sequence of | 503 // We do not reset dump_count so if the user does a sequence of |
475 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 504 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
476 // sequence of profiles. | 505 // sequence of profiles. |
477 | 506 |
478 if (FLAGS_only_mmap_profile == false) { | 507 if (FLAGS_only_mmap_profile == false) { |
479 // Now set the hooks that capture new/delete and malloc/free. | 508 // Now set the hooks that capture new/delete and malloc/free. |
480 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 509 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
481 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 510 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
482 } | 511 } |
(...skipping 22 matching lines...) Expand all Loading... |
505 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); | 534 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); |
506 } | 535 } |
507 if (FLAGS_mmap_log) { | 536 if (FLAGS_mmap_log) { |
508 // Restore mmap/sbrk hooks, checking that our hooks were set: | 537 // Restore mmap/sbrk hooks, checking that our hooks were set: |
509 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); | 538 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), ""); |
510 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); | 539 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), ""); |
511 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); | 540 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), ""); |
512 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); | 541 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), ""); |
513 } | 542 } |
514 | 543 |
| 544 #ifdef DEEP_PROFILER_ON |
| 545 // free deep memory profiler |
| 546 deep_profiler->~DeepMemoryProfiler(); |
| 547 ProfilerFree(deep_profiler); |
| 548 deep_profiler = NULL; |
| 549 #endif |
| 550 |
515 // free profile | 551 // free profile |
516 heap_profile->~HeapProfileTable(); | 552 heap_profile->~HeapProfileTable(); |
517 ProfilerFree(heap_profile); | 553 ProfilerFree(heap_profile); |
518 heap_profile = NULL; | 554 heap_profile = NULL; |
519 | 555 |
520 // free output-buffer memory | 556 // free output-buffer memory |
521 ProfilerFree(global_profiler_buffer); | 557 ProfilerFree(global_profiler_buffer); |
522 | 558 |
523 // free prefix | 559 // free prefix |
524 ProfilerFree(filename_prefix); | 560 ProfilerFree(filename_prefix); |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
569 | 605 |
570 // class used for finalization -- dumps the heap-profile at program exit | 606 // class used for finalization -- dumps the heap-profile at program exit |
571 struct HeapProfileEndWriter { | 607 struct HeapProfileEndWriter { |
572 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 608 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
573 }; | 609 }; |
574 | 610 |
575 // We want to make sure tcmalloc is up and running before starting the profiler | 611 // We want to make sure tcmalloc is up and running before starting the profiler |
576 static const TCMallocGuard tcmalloc_initializer; | 612 static const TCMallocGuard tcmalloc_initializer; |
577 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 613 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
578 static HeapProfileEndWriter heap_profile_end_writer; | 614 static HeapProfileEndWriter heap_profile_end_writer; |
OLD | NEW |