OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 89 matching lines...)
100 "If non-zero, dump heap profiling information once every " | 100 "If non-zero, dump heap profiling information once every " |
101 "specified number of bytes deallocated by the program " | 101 "specified number of bytes deallocated by the program " |
102 "since the last dump."); | 102 "since the last dump."); |
103 // We could also add flags that report whenever inuse_bytes changes by | 103 // We could also add flags that report whenever inuse_bytes changes by |
104 // X or -X, but there hasn't been a need for that yet, so we haven't. | 104 // X or -X, but there hasn't been a need for that yet, so we haven't. |
105 DEFINE_int64(heap_profile_inuse_interval, | 105 DEFINE_int64(heap_profile_inuse_interval, |
106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/), | 106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/), |
107 "If non-zero, dump heap profiling information whenever " | 107 "If non-zero, dump heap profiling information whenever " |
108 "the high-water memory usage mark increases by the specified " | 108 "the high-water memory usage mark increases by the specified " |
109 "number of bytes."); | 109 "number of bytes."); |
| 110 DEFINE_int64(heap_profile_time_interval, |
| 111 EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0), |
| 112 "If non-zero, dump heap profiling information once every " |
| 113 "specified number of seconds since the last dump."); |
110 DEFINE_bool(mmap_log, | 114 DEFINE_bool(mmap_log, |
111 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), | 115 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), |
112 "Should mmap/munmap calls be logged?"); | 116 "Should mmap/munmap calls be logged?"); |
113 DEFINE_bool(mmap_profile, | 117 DEFINE_bool(mmap_profile, |
114 EnvToBool("HEAP_PROFILE_MMAP", false), | 118 EnvToBool("HEAP_PROFILE_MMAP", false), |
115 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); | 119 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); |
116 DEFINE_bool(only_mmap_profile, | 120 DEFINE_bool(only_mmap_profile, |
117 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), | 121 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), |
118 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " | 122 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " |
119 "do not profile malloc/new/etc"); | 123 "do not profile malloc/new/etc"); |
(...skipping 18 matching lines...)
138 static LowLevelAlloc::Arena *heap_profiler_memory; | 142 static LowLevelAlloc::Arena *heap_profiler_memory; |
139 | 143 |
140 static void* ProfilerMalloc(size_t bytes) { | 144 static void* ProfilerMalloc(size_t bytes) { |
141 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); | 145 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); |
142 } | 146 } |
143 static void ProfilerFree(void* p) { | 147 static void ProfilerFree(void* p) { |
144 LowLevelAlloc::Free(p); | 148 LowLevelAlloc::Free(p); |
145 } | 149 } |
146 | 150 |
147 // We use buffers of this size in DoGetHeapProfile. | 151 // We use buffers of this size in DoGetHeapProfile. |
148 static const int kProfileBufferSize = 1 << 20; | 152 // The size is 1 << 20 in the original google-perftools. Changed it to |
| 153 // 5 << 20 since a larger buffer is required for deeper profiling in Chromium. |
| 154 // The buffer is allocated only when the environment variable HEAPPROFILE is |
| 155 // specified to dump heap information. |
| 156 static const int kProfileBufferSize = 5 << 20; |
149 | 157 |
150 // This is a last-ditch buffer we use in DumpProfileLocked in case we | 158 // This is a last-ditch buffer we use in DumpProfileLocked in case we |
151 // can't allocate more memory from ProfilerMalloc. We expect this | 159 // can't allocate more memory from ProfilerMalloc. We expect this |
152 // will be used by HeapProfileEndWriter when the application has to | 160 // will be used by HeapProfileEndWriter when the application has to |
153 // exit due to out-of-memory. This buffer is allocated in | 161 // exit due to out-of-memory. This buffer is allocated in |
154 // HeapProfilerStart. Access to this must be protected by heap_lock. | 162 // HeapProfilerStart. Access to this must be protected by heap_lock. |
155 static char* global_profiler_buffer = NULL; | 163 static char* global_profiler_buffer = NULL; |
156 | 164 |
157 | 165 |
158 //---------------------------------------------------------------------- | 166 //---------------------------------------------------------------------- |
159 // Profiling control/state data | 167 // Profiling control/state data |
160 //---------------------------------------------------------------------- | 168 //---------------------------------------------------------------------- |
161 | 169 |
162 // Access to all of these is protected by heap_lock. | 170 // Access to all of these is protected by heap_lock. |
163 static bool is_on = false; // If we are on as a subsystem. | 171 static bool is_on = false; // If we are on as a subsystem. |
164 static bool dumping = false; // Dumping status to prevent recursion | 172 static bool dumping = false; // Dumping status to prevent recursion |
165 static char* filename_prefix = NULL; // Prefix used for profile file names | 173 static char* filename_prefix = NULL; // Prefix used for profile file names |
166 // (NULL if no need for dumping yet) | 174 // (NULL if no need for dumping yet) |
167 static int dump_count = 0; // How many dumps so far | 175 static int dump_count = 0; // How many dumps so far |
168 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 176 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
169 static int64 last_dump_free = 0; // free_size when did we last dump | 177 static int64 last_dump_free = 0; // free_size when did we last dump |
170 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 178 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
| 179 static int64 last_dump_time = 0; // The time of the last dump |
171 | 180 |
172 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 181 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
173 | 182 |
174 //---------------------------------------------------------------------- | 183 //---------------------------------------------------------------------- |
175 // Profile generation | 184 // Profile generation |
176 //---------------------------------------------------------------------- | 185 //---------------------------------------------------------------------- |
177 | 186 |
178 // Input must be a buffer of size at least 1MB. | 187 // Input must be a buffer of size at least 1MB. |
179 static char* DoGetHeapProfileLocked(char* buf, int buflen) { | 188 static char* DoGetHeapProfileLocked(char* buf, int buflen) { |
180 // We used to be smarter about estimating the required memory and | 189 // We used to be smarter about estimating the required memory and |
(...skipping 35 matching lines...)
216 RAW_DCHECK(is_on, ""); | 225 RAW_DCHECK(is_on, ""); |
217 RAW_DCHECK(!dumping, ""); | 226 RAW_DCHECK(!dumping, ""); |
218 | 227 |
219 if (filename_prefix == NULL) return; // we do not yet need dumping | 228 if (filename_prefix == NULL) return; // we do not yet need dumping |
220 | 229 |
221 dumping = true; | 230 dumping = true; |
222 | 231 |
223 // Make file name | 232 // Make file name |
224 char file_name[1000]; | 233 char file_name[1000]; |
225 dump_count++; | 234 dump_count++; |
226 snprintf(file_name, sizeof(file_name), "%s.%04d%s", | 235 snprintf(file_name, sizeof(file_name), "%s.%05d.%04d%s", |
227 filename_prefix, dump_count, HeapProfileTable::kFileExt); | 236 filename_prefix, getpid(), dump_count, HeapProfileTable::kFileExt); |
228 | 237 |
229 // Dump the profile | 238 // Dump the profile |
230 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason); | 239 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason); |
231 // We must use file routines that don't access memory, since we hold | 240 // We must use file routines that don't access memory, since we hold |
232 // a memory lock now. | 241 // a memory lock now. |
233 RawFD fd = RawOpenForWriting(file_name); | 242 RawFD fd = RawOpenForWriting(file_name); |
234 if (fd == kIllegalRawFD) { | 243 if (fd == kIllegalRawFD) { |
235 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name); | 244 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name); |
236 dumping = false; | 245 dumping = false; |
237 return; | 246 return; |
(...skipping 19 matching lines...)
257 //---------------------------------------------------------------------- | 266 //---------------------------------------------------------------------- |
258 | 267 |
259 // Dump a profile after either an allocation or deallocation, if | 268 // Dump a profile after either an allocation or deallocation, if |
260 // the memory use has changed enough since the last dump. | 269 // the memory use has changed enough since the last dump. |
261 static void MaybeDumpProfileLocked() { | 270 static void MaybeDumpProfileLocked() { |
262 if (!dumping) { | 271 if (!dumping) { |
263 const HeapProfileTable::Stats& total = heap_profile->total(); | 272 const HeapProfileTable::Stats& total = heap_profile->total(); |
264 const int64 inuse_bytes = total.alloc_size - total.free_size; | 273 const int64 inuse_bytes = total.alloc_size - total.free_size; |
265 bool need_to_dump = false; | 274 bool need_to_dump = false; |
266 char buf[128]; | 275 char buf[128]; |
| 276 int64 current_time = time(NULL); |
267 if (FLAGS_heap_profile_allocation_interval > 0 && | 277 if (FLAGS_heap_profile_allocation_interval > 0 && |
268 total.alloc_size >= | 278 total.alloc_size >= |
269 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { | 279 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { |
270 snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, " | 280 snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, " |
271 "%"PRId64" MB currently in use"), | 281 "%"PRId64" MB currently in use"), |
272 total.alloc_size >> 20, inuse_bytes >> 20); | 282 total.alloc_size >> 20, inuse_bytes >> 20); |
273 need_to_dump = true; | 283 need_to_dump = true; |
274 } else if (FLAGS_heap_profile_deallocation_interval > 0 && | 284 } else if (FLAGS_heap_profile_deallocation_interval > 0 && |
275 total.free_size >= | 285 total.free_size >= |
276 last_dump_free + FLAGS_heap_profile_deallocation_interval) { | 286 last_dump_free + FLAGS_heap_profile_deallocation_interval) { |
277 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " | 287 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " |
278 "%"PRId64" MB currently in use"), | 288 "%"PRId64" MB currently in use"), |
279 total.free_size >> 20, inuse_bytes >> 20); | 289 total.free_size >> 20, inuse_bytes >> 20); |
280 need_to_dump = true; | 290 need_to_dump = true; |
281 } else if (FLAGS_heap_profile_inuse_interval > 0 && | 291 } else if (FLAGS_heap_profile_inuse_interval > 0 && |
282 inuse_bytes > | 292 inuse_bytes > |
283 high_water_mark + FLAGS_heap_profile_inuse_interval) { | 293 high_water_mark + FLAGS_heap_profile_inuse_interval) { |
284 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", | 294 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", |
285 inuse_bytes >> 20); | 295 inuse_bytes >> 20); |
286 need_to_dump = true; | 296 need_to_dump = true; |
| 297 } else if (FLAGS_heap_profile_time_interval > 0 && |
| 298 current_time - last_dump_time >= |
| 299 FLAGS_heap_profile_time_interval) { |
| 300 snprintf(buf, sizeof(buf), "%"PRId64" sec since the last dump", |
| 301 current_time - last_dump_time); |
| 302 need_to_dump = true; |
| 303 last_dump_time = current_time; |
287 } | 304 } |
288 if (need_to_dump) { | 305 if (need_to_dump) { |
289 DumpProfileLocked(buf); | 306 DumpProfileLocked(buf); |
290 | 307 |
291 last_dump_alloc = total.alloc_size; | 308 last_dump_alloc = total.alloc_size; |
292 last_dump_free = total.free_size; | 309 last_dump_free = total.free_size; |
293 if (inuse_bytes > high_water_mark) | 310 if (inuse_bytes > high_water_mark) |
294 high_water_mark = inuse_bytes; | 311 high_water_mark = inuse_bytes; |
295 } | 312 } |
296 } | 313 } |
(...skipping 84 matching lines...)
381 // in pretty-printing of NULL as "nil". | 398 // in pretty-printing of NULL as "nil". |
382 // TODO(maxim): instead should use a safe snprintf reimplementation | 399 // TODO(maxim): instead should use a safe snprintf reimplementation |
383 RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")", | 400 RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")", |
384 (uintptr_t) ptr, size); | 401 (uintptr_t) ptr, size); |
385 #ifdef TODO_REENABLE_STACK_TRACING | 402 #ifdef TODO_REENABLE_STACK_TRACING |
386 DumpStackTrace(1, RawInfoStackDumper, NULL); | 403 DumpStackTrace(1, RawInfoStackDumper, NULL); |
387 #endif | 404 #endif |
388 } | 405 } |
389 } | 406 } |
390 | 407 |
391 static void SbrkHook(const void* result, ptrdiff_t increment) { | 408 static void SbrkHook(const void* result, std::ptrdiff_t increment) { |
392 if (FLAGS_mmap_log) { // log it | 409 if (FLAGS_mmap_log) { // log it |
393 RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"", | 410 RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"", |
394 increment, (uintptr_t) result); | 411 increment, (uintptr_t) result); |
395 #ifdef TODO_REENABLE_STACK_TRACING | 412 #ifdef TODO_REENABLE_STACK_TRACING |
396 DumpStackTrace(1, RawInfoStackDumper, NULL); | 413 DumpStackTrace(1, RawInfoStackDumper, NULL); |
397 #endif | 414 #endif |
398 } | 415 } |
399 } | 416 } |
400 | 417 |
401 //---------------------------------------------------------------------- | 418 //---------------------------------------------------------------------- |
(...skipping 38 matching lines...)
440 // heap profile even if the application runs out of memory. | 457 // heap profile even if the application runs out of memory. |
441 global_profiler_buffer = | 458 global_profiler_buffer = |
442 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); | 459 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); |
443 | 460 |
444 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) | 461 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) |
445 HeapProfileTable(ProfilerMalloc, ProfilerFree); | 462 HeapProfileTable(ProfilerMalloc, ProfilerFree); |
446 | 463 |
447 last_dump_alloc = 0; | 464 last_dump_alloc = 0; |
448 last_dump_free = 0; | 465 last_dump_free = 0; |
449 high_water_mark = 0; | 466 high_water_mark = 0; |
| 467 last_dump_time = 0; |
450 | 468 |
451 // We do not reset dump_count so if the user does a sequence of | 469 // We do not reset dump_count so if the user does a sequence of |
452 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 470 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
453 // sequence of profiles. | 471 // sequence of profiles. |
454 | 472 |
455 if (FLAGS_only_mmap_profile == false) { | 473 if (FLAGS_only_mmap_profile == false) { |
456 // Now set the hooks that capture new/delete and malloc/free. | 474 // Now set the hooks that capture new/delete and malloc/free. |
457 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 475 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
458 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 476 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
459 } | 477 } |
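For context, a minimal standalone sketch of the MallocHook registration pattern used above, assuming <gperftools/malloc_hook.h>; the counting hook bodies are illustrative only and are not the profiler's real NewHook/DeleteHook.

    #include <stddef.h>
    #include <gperftools/malloc_hook.h>

    static size_t g_allocated_bytes = 0;  // toy counter; not thread-safe

    // Must match MallocHook::NewHook: called after each allocation.
    static void CountingNewHook(const void* ptr, size_t size) {
      if (ptr != NULL) g_allocated_bytes += size;
    }

    // Must match MallocHook::DeleteHook: called before each deallocation.
    static void CountingDeleteHook(const void* ptr) {
      // A real tracker would look up the block's size here; omitted.
    }

    void InstallCountingHooks() {
      MallocHook::AddNewHook(&CountingNewHook);
      MallocHook::AddDeleteHook(&CountingDeleteHook);
    }

    void RemoveCountingHooks() {
      MallocHook::RemoveNewHook(&CountingNewHook);
      MallocHook::RemoveDeleteHook(&CountingDeleteHook);
    }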
(...skipping 86 matching lines...)
546 | 564 |
547 // class used for finalization -- dumps the heap-profile at program exit | 565 // class used for finalization -- dumps the heap-profile at program exit |
548 struct HeapProfileEndWriter { | 566 struct HeapProfileEndWriter { |
549 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 567 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
550 }; | 568 }; |
551 | 569 |
552 // We want to make sure tcmalloc is up and running before starting the profiler | 570 // We want to make sure tcmalloc is up and running before starting the profiler |
553 static const TCMallocGuard tcmalloc_initializer; | 571 static const TCMallocGuard tcmalloc_initializer; |
554 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 572 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
555 static HeapProfileEndWriter heap_profile_end_writer; | 573 static HeapProfileEndWriter heap_profile_end_writer; |
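For completeness, a hedged sketch of driving the public API implemented in this file; the header path and the "/tmp/phase" prefix are assumptions (older trees use <google/heap-profiler.h>), and GetHeapProfile()'s result is malloc()-ed and must be free()-d by the caller.

    #include <cstdlib>                     // free
    #include <gperftools/heap-profiler.h>  // HeapProfilerStart/Dump/Stop, GetHeapProfile

    void ProfileOnePhase() {
      HeapProfilerStart("/tmp/phase");     // with this change, dumps go to /tmp/phase.<pid>.NNNN.heap
      // ... run the phase of interest ...
      HeapProfilerDump("end of phase");    // explicit dump, independent of the interval flags
      char* profile = GetHeapProfile();    // '\0'-terminated in-memory copy of the profile
      // ... log or inspect the returned profile ...
      free(profile);
      HeapProfilerStop();                  // after this, the at-exit HeapProfileEndWriter dump is a no-op
    }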