OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 89 matching lines...)
100 "If non-zero, dump heap profiling information once every " | 100 "If non-zero, dump heap profiling information once every " |
101 "specified number of bytes deallocated by the program " | 101 "specified number of bytes deallocated by the program " |
102 "since the last dump."); | 102 "since the last dump."); |
103 // We could also add flags that report whenever inuse_bytes changes by | 103 // We could also add flags that report whenever inuse_bytes changes by |
104 // X or -X, but there hasn't been a need for that yet, so we haven't. | 104 // X or -X, but there hasn't been a need for that yet, so we haven't. |
105 DEFINE_int64(heap_profile_inuse_interval, | 105 DEFINE_int64(heap_profile_inuse_interval, |
106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/), | 106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/), |
107 "If non-zero, dump heap profiling information whenever " | 107 "If non-zero, dump heap profiling information whenever " |
108 "the high-water memory usage mark increases by the specified " | 108 "the high-water memory usage mark increases by the specified " |
109 "number of bytes."); | 109 "number of bytes."); |
| 110 DEFINE_int64(heap_profile_time_interval, |
| 111 EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0), |
| 112 "If non-zero, dump heap profiling information once every " |
| 113 "specified number of seconds since the last dump."); |
110 DEFINE_bool(mmap_log, | 114 DEFINE_bool(mmap_log, |
111 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), | 115 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), |
112 "Should mmap/munmap calls be logged?"); | 116 "Should mmap/munmap calls be logged?"); |
113 DEFINE_bool(mmap_profile, | 117 DEFINE_bool(mmap_profile, |
114 EnvToBool("HEAP_PROFILE_MMAP", false), | 118 EnvToBool("HEAP_PROFILE_MMAP", false), |
115 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); | 119 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); |
116 DEFINE_bool(only_mmap_profile, | 120 DEFINE_bool(only_mmap_profile, |
117 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), | 121 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), |
118 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " | 122 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " |
119 "do not profile malloc/new/etc"); | 123 "do not profile malloc/new/etc"); |
(...skipping 18 matching lines...)
138 static LowLevelAlloc::Arena *heap_profiler_memory; | 142 static LowLevelAlloc::Arena *heap_profiler_memory; |
139 | 143 |
140 static void* ProfilerMalloc(size_t bytes) { | 144 static void* ProfilerMalloc(size_t bytes) { |
141 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); | 145 return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory); |
142 } | 146 } |
143 static void ProfilerFree(void* p) { | 147 static void ProfilerFree(void* p) { |
144 LowLevelAlloc::Free(p); | 148 LowLevelAlloc::Free(p); |
145 } | 149 } |
146 | 150 |
147 // We use buffers of this size in DoGetHeapProfile. | 151 // We use buffers of this size in DoGetHeapProfile. |
148 static const int kProfileBufferSize = 1 << 20; | 152 // The size is 1 << 20 in the original google-perftools. Changed it to |
| 153 // 5 << 20 since a larger buffer is required for deeper profiling in Chromium. |
| 154 // The buffer is allocated only when the environment variable HEAPPROFILE is |
| 155 // specified to dump heap information. |
| 156 static const int kProfileBufferSize = 5 << 20; |
149 | 157 |
150 // This is a last-ditch buffer we use in DumpProfileLocked in case we | 158 // This is a last-ditch buffer we use in DumpProfileLocked in case we |
151 // can't allocate more memory from ProfilerMalloc. We expect this | 159 // can't allocate more memory from ProfilerMalloc. We expect this |
152 // will be used by HeapProfileEndWriter when the application has to | 160 // will be used by HeapProfileEndWriter when the application has to |
153 // exit due to out-of-memory. This buffer is allocated in | 161 // exit due to out-of-memory. This buffer is allocated in |
154 // HeapProfilerStart. Access to this must be protected by heap_lock. | 162 // HeapProfilerStart. Access to this must be protected by heap_lock. |
155 static char* global_profiler_buffer = NULL; | 163 static char* global_profiler_buffer = NULL; |
156 | 164 |
157 | 165 |
158 //---------------------------------------------------------------------- | 166 //---------------------------------------------------------------------- |
159 // Profiling control/state data | 167 // Profiling control/state data |
160 //---------------------------------------------------------------------- | 168 //---------------------------------------------------------------------- |
161 | 169 |
162 // Access to all of these is protected by heap_lock. | 170 // Access to all of these is protected by heap_lock. |
163 static bool is_on = false; // If we are on as a subsystem. | 171 static bool is_on = false; // If we are on as a subsystem. |
164 static bool dumping = false; // Dumping status to prevent recursion | 172 static bool dumping = false; // Dumping status to prevent recursion |
165 static char* filename_prefix = NULL; // Prefix used for profile file names | 173 static char* filename_prefix = NULL; // Prefix used for profile file names |
166 // (NULL if no need for dumping yet) | 174 // (NULL if no need for dumping yet) |
167 static int dump_count = 0; // How many dumps so far | 175 static int dump_count = 0; // How many dumps so far |
168 static int64 last_dump_alloc = 0; // alloc_size when did we last dump | 176 static int64 last_dump_alloc = 0; // alloc_size when did we last dump |
169 static int64 last_dump_free = 0; // free_size when did we last dump | 177 static int64 last_dump_free = 0; // free_size when did we last dump |
170 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump | 178 static int64 high_water_mark = 0; // In-use-bytes at last high-water dump |
| 179 static int64 last_dump_time = 0; // The time of the last dump |
171 | 180 |
172 static HeapProfileTable* heap_profile = NULL; // the heap profile table | 181 static HeapProfileTable* heap_profile = NULL; // the heap profile table |
173 | 182 |
174 //---------------------------------------------------------------------- | 183 //---------------------------------------------------------------------- |
175 // Profile generation | 184 // Profile generation |
176 //---------------------------------------------------------------------- | 185 //---------------------------------------------------------------------- |
177 | 186 |
178 enum AddOrRemove { ADD, REMOVE }; | 187 enum AddOrRemove { ADD, REMOVE }; |
179 | 188 |
180 // Add or remove all MMap-allocated regions to/from *heap_profile. | 189 // Add or remove all MMap-allocated regions to/from *heap_profile. |
(...skipping 61 matching lines...)
242 RAW_DCHECK(is_on, ""); | 251 RAW_DCHECK(is_on, ""); |
243 RAW_DCHECK(!dumping, ""); | 252 RAW_DCHECK(!dumping, ""); |
244 | 253 |
245 if (filename_prefix == NULL) return; // we do not yet need dumping | 254 if (filename_prefix == NULL) return; // we do not yet need dumping |
246 | 255 |
247 dumping = true; | 256 dumping = true; |
248 | 257 |
249 // Make file name | 258 // Make file name |
250 char file_name[1000]; | 259 char file_name[1000]; |
251 dump_count++; | 260 dump_count++; |
252 snprintf(file_name, sizeof(file_name), "%s.%04d%s", | 261 snprintf(file_name, sizeof(file_name), "%s.%05d.%04d%s", |
253 filename_prefix, dump_count, HeapProfileTable::kFileExt); | 262 filename_prefix, getpid(), dump_count, HeapProfileTable::kFileExt); |
254 | 263 |
255 // Dump the profile | 264 // Dump the profile |
256 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason); | 265 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason); |
257 // We must use file routines that don't access memory, since we hold | 266 // We must use file routines that don't access memory, since we hold |
258 // a memory lock now. | 267 // a memory lock now. |
259 RawFD fd = RawOpenForWriting(file_name); | 268 RawFD fd = RawOpenForWriting(file_name); |
260 if (fd == kIllegalRawFD) { | 269 if (fd == kIllegalRawFD) { |
261 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name); | 270 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name); |
262 dumping = false; | 271 dumping = false; |
263 return; | 272 return; |
(...skipping 19 matching lines...)
283 //---------------------------------------------------------------------- | 292 //---------------------------------------------------------------------- |
284 | 293 |
285 // Dump a profile after either an allocation or deallocation, if | 294 // Dump a profile after either an allocation or deallocation, if |
286 // the memory use has changed enough since the last dump. | 295 // the memory use has changed enough since the last dump. |
287 static void MaybeDumpProfileLocked() { | 296 static void MaybeDumpProfileLocked() { |
288 if (!dumping) { | 297 if (!dumping) { |
289 const HeapProfileTable::Stats& total = heap_profile->total(); | 298 const HeapProfileTable::Stats& total = heap_profile->total(); |
290 const int64 inuse_bytes = total.alloc_size - total.free_size; | 299 const int64 inuse_bytes = total.alloc_size - total.free_size; |
291 bool need_to_dump = false; | 300 bool need_to_dump = false; |
292 char buf[128]; | 301 char buf[128]; |
| 302 int64 current_time = time(NULL); |
293 if (FLAGS_heap_profile_allocation_interval > 0 && | 303 if (FLAGS_heap_profile_allocation_interval > 0 && |
294 total.alloc_size >= | 304 total.alloc_size >= |
295 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { | 305 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { |
296 snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, " | 306 snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, " |
297 "%"PRId64" MB currently in use"), | 307 "%"PRId64" MB currently in use"), |
298 total.alloc_size >> 20, inuse_bytes >> 20); | 308 total.alloc_size >> 20, inuse_bytes >> 20); |
299 need_to_dump = true; | 309 need_to_dump = true; |
300 } else if (FLAGS_heap_profile_deallocation_interval > 0 && | 310 } else if (FLAGS_heap_profile_deallocation_interval > 0 && |
301 total.free_size >= | 311 total.free_size >= |
302 last_dump_free + FLAGS_heap_profile_deallocation_interval) { | 312 last_dump_free + FLAGS_heap_profile_deallocation_interval) { |
303 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " | 313 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " |
304 "%"PRId64" MB currently in use"), | 314 "%"PRId64" MB currently in use"), |
305 total.free_size >> 20, inuse_bytes >> 20); | 315 total.free_size >> 20, inuse_bytes >> 20); |
306 need_to_dump = true; | 316 need_to_dump = true; |
307 } else if (FLAGS_heap_profile_inuse_interval > 0 && | 317 } else if (FLAGS_heap_profile_inuse_interval > 0 && |
308 inuse_bytes > | 318 inuse_bytes > |
309 high_water_mark + FLAGS_heap_profile_inuse_interval) { | 319 high_water_mark + FLAGS_heap_profile_inuse_interval) { |
310 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", | 320 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", |
311 inuse_bytes >> 20); | 321 inuse_bytes >> 20); |
312 need_to_dump = true; | 322 need_to_dump = true; |
| 323 } else if (FLAGS_heap_profile_time_interval > 0 && |
| 324 current_time - last_dump_time >= |
| 325 FLAGS_heap_profile_time_interval) { |
| 326 snprintf(buf, sizeof(buf), "%"PRId64" sec since the last dump", |
| 327 current_time - last_dump_time); |
| 328 need_to_dump = true; |
| 329 last_dump_time = current_time; |
313 } | 330 } |
314 if (need_to_dump) { | 331 if (need_to_dump) { |
315 DumpProfileLocked(buf); | 332 DumpProfileLocked(buf); |
316 | 333 |
317 last_dump_alloc = total.alloc_size; | 334 last_dump_alloc = total.alloc_size; |
318 last_dump_free = total.free_size; | 335 last_dump_free = total.free_size; |
319 if (inuse_bytes > high_water_mark) | 336 if (inuse_bytes > high_water_mark) |
320 high_water_mark = inuse_bytes; | 337 high_water_mark = inuse_bytes; |
321 } | 338 } |
322 } | 339 } |
(...skipping 81 matching lines...)
404 // in pretty-printing of NULL as "nil". | 421 // in pretty-printing of NULL as "nil". |
405 // TODO(maxim): instead should use a safe snprintf reimplementation | 422 // TODO(maxim): instead should use a safe snprintf reimplementation |
406 RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")", | 423 RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")", |
407 (uintptr_t) ptr, size); | 424 (uintptr_t) ptr, size); |
408 #ifdef TODO_REENABLE_STACK_TRACING | 425 #ifdef TODO_REENABLE_STACK_TRACING |
409 DumpStackTrace(1, RawInfoStackDumper, NULL); | 426 DumpStackTrace(1, RawInfoStackDumper, NULL); |
410 #endif | 427 #endif |
411 } | 428 } |
412 } | 429 } |
413 | 430 |
414 static void SbrkHook(const void* result, ptrdiff_t increment) { | 431 static void SbrkHook(const void* result, std::ptrdiff_t increment) { |
415 if (FLAGS_mmap_log) { // log it | 432 if (FLAGS_mmap_log) { // log it |
416 RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"", | 433 RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"", |
417 increment, (uintptr_t) result); | 434 increment, (uintptr_t) result); |
418 #ifdef TODO_REENABLE_STACK_TRACING | 435 #ifdef TODO_REENABLE_STACK_TRACING |
419 DumpStackTrace(1, RawInfoStackDumper, NULL); | 436 DumpStackTrace(1, RawInfoStackDumper, NULL); |
420 #endif | 437 #endif |
421 } | 438 } |
422 } | 439 } |
423 | 440 |
424 //---------------------------------------------------------------------- | 441 //---------------------------------------------------------------------- |
(...skipping 38 matching lines...)
463 // heap profile even if the application runs out of memory. | 480 // heap profile even if the application runs out of memory. |
464 global_profiler_buffer = | 481 global_profiler_buffer = |
465 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); | 482 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize)); |
466 | 483 |
467 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) | 484 heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable))) |
468 HeapProfileTable(ProfilerMalloc, ProfilerFree); | 485 HeapProfileTable(ProfilerMalloc, ProfilerFree); |
469 | 486 |
470 last_dump_alloc = 0; | 487 last_dump_alloc = 0; |
471 last_dump_free = 0; | 488 last_dump_free = 0; |
472 high_water_mark = 0; | 489 high_water_mark = 0; |
| 490 last_dump_time = 0; |
473 | 491 |
474 // We do not reset dump_count so if the user does a sequence of | 492 // We do not reset dump_count so if the user does a sequence of |
475 // HeapProfilerStart/HeapProfileStop, we will get a continuous | 493 // HeapProfilerStart/HeapProfileStop, we will get a continuous |
476 // sequence of profiles. | 494 // sequence of profiles. |
477 | 495 |
478 if (FLAGS_only_mmap_profile == false) { | 496 if (FLAGS_only_mmap_profile == false) { |
479 // Now set the hooks that capture new/delete and malloc/free. | 497 // Now set the hooks that capture new/delete and malloc/free. |
480 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); | 498 RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
481 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); | 499 RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
482 } | 500 } |
(...skipping 86 matching lines...)
569 | 587 |
570 // class used for finalization -- dumps the heap-profile at program exit | 588 // class used for finalization -- dumps the heap-profile at program exit |
571 struct HeapProfileEndWriter { | 589 struct HeapProfileEndWriter { |
572 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 590 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
573 }; | 591 }; |
574 | 592 |
575 // We want to make sure tcmalloc is up and running before starting the profiler | 593 // We want to make sure tcmalloc is up and running before starting the profiler |
576 static const TCMallocGuard tcmalloc_initializer; | 594 static const TCMallocGuard tcmalloc_initializer; |
577 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 595 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
578 static HeapProfileEndWriter heap_profile_end_writer; | 596 static HeapProfileEndWriter heap_profile_end_writer; |
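
A minimal sketch of how the new time-based trigger might be exercised (not part of this change; the include path, binary name, and /tmp prefix are assumptions). The flag defaults are computed from the environment at static-initialization time, so HEAP_PROFILE_TIME_INTERVAL has to be set before the program starts, e.g. HEAP_PROFILE_TIME_INTERVAL=10 ./profiled_app. Also note that MaybeDumpProfileLocked only runs from the allocation/deallocation hooks, so the program has to keep allocating for a time-based dump to be written.

// Sketch only: drives the new time-based dump trigger added above.
// Build and link against tcmalloc; the header path varies between checkouts.
#include <unistd.h>                 // sleep
#include "google/heap-profiler.h"   // HeapProfilerStart / HeapProfilerStop

int main() {
  // With this change every dump file name also embeds the pid,
  // e.g. /tmp/profiled_app.12345.0001.heap.
  HeapProfilerStart("/tmp/profiled_app");

  for (int i = 0; i < 60; ++i) {
    // Keep allocating: the time check runs inside the malloc/free hooks,
    // so an idle program never reaches the time-based trigger.
    char* block = new char[1 << 16];
    block[0] = 1;
    delete[] block;
    sleep(1);
  }

  HeapProfilerStop();
  return 0;
}

With HEAP_PROFILE_TIME_INTERVAL=10 this would write roughly one .heap file every ten seconds until the loop ends, in addition to any dumps produced by the allocation, deallocation, and in-use triggers.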