OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 321 matching lines...)
332 static void MaybeDumpProfileLocked() { | 332 static void MaybeDumpProfileLocked() { |
333 if (!dumping) { | 333 if (!dumping) { |
334 const HeapProfileTable::Stats& total = heap_profile->total(); | 334 const HeapProfileTable::Stats& total = heap_profile->total(); |
335 const int64 inuse_bytes = total.alloc_size - total.free_size; | 335 const int64 inuse_bytes = total.alloc_size - total.free_size; |
336 bool need_to_dump = false; | 336 bool need_to_dump = false; |
337 char buf[128]; | 337 char buf[128]; |
338 int64 current_time = time(NULL); | 338 int64 current_time = time(NULL); |
339 if (FLAGS_heap_profile_allocation_interval > 0 && | 339 if (FLAGS_heap_profile_allocation_interval > 0 && |
340 total.alloc_size >= | 340 total.alloc_size >= |
341 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { | 341 last_dump_alloc + FLAGS_heap_profile_allocation_interval) { |
342 snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, " | 342 snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, " |
343 "%"PRId64" MB currently in use"), | 343 "%" PRId64 " MB currently in use"), |
344 total.alloc_size >> 20, inuse_bytes >> 20); | 344 total.alloc_size >> 20, inuse_bytes >> 20); |
345 need_to_dump = true; | 345 need_to_dump = true; |
346 } else if (FLAGS_heap_profile_deallocation_interval > 0 && | 346 } else if (FLAGS_heap_profile_deallocation_interval > 0 && |
347 total.free_size >= | 347 total.free_size >= |
348 last_dump_free + FLAGS_heap_profile_deallocation_interval) { | 348 last_dump_free + FLAGS_heap_profile_deallocation_interval) { |
349 snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, " | 349 snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, " |
350 "%"PRId64" MB currently in use"), | 350 "%" PRId64 " MB currently in use"), |
351 total.free_size >> 20, inuse_bytes >> 20); | 351 total.free_size >> 20, inuse_bytes >> 20); |
352 need_to_dump = true; | 352 need_to_dump = true; |
353 } else if (FLAGS_heap_profile_inuse_interval > 0 && | 353 } else if (FLAGS_heap_profile_inuse_interval > 0 && |
354 inuse_bytes > | 354 inuse_bytes > |
355 high_water_mark + FLAGS_heap_profile_inuse_interval) { | 355 high_water_mark + FLAGS_heap_profile_inuse_interval) { |
356 snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use", | 356 snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use", |
357 inuse_bytes >> 20); | 357 inuse_bytes >> 20); |
358 need_to_dump = true; | 358 need_to_dump = true; |
359 } else if (FLAGS_heap_profile_time_interval > 0 && | 359 } else if (FLAGS_heap_profile_time_interval > 0 && |
360 current_time - last_dump_time >= | 360 current_time - last_dump_time >= |
361 FLAGS_heap_profile_time_interval) { | 361 FLAGS_heap_profile_time_interval) { |
362 snprintf(buf, sizeof(buf), "%d sec since the last dump", | 362 snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump", |
363 current_time - last_dump_time); | 363 current_time - last_dump_time); |
364 need_to_dump = true; | 364 need_to_dump = true; |
365 last_dump_time = current_time; | 365 last_dump_time = current_time; |
366 } | 366 } |
(...skipping 51 matching lines...)
418 } | 418 } |
419 #endif | 419 #endif |
420 | 420 |
421 static void MmapHook(const void* result, const void* start, size_t size, | 421 static void MmapHook(const void* result, const void* start, size_t size, |
422 int prot, int flags, int fd, off_t offset) { | 422 int prot, int flags, int fd, off_t offset) { |
423 if (FLAGS_mmap_log) { // log it | 423 if (FLAGS_mmap_log) { // log it |
424 // We use PRIxS not just '%p' to avoid deadlocks | 424 // We use PRIxS not just '%p' to avoid deadlocks |
425 // in pretty-printing of NULL as "nil". | 425 // in pretty-printing of NULL as "nil". |
426 // TODO(maxim): instead should use a safe snprintf reimplementation | 426 // TODO(maxim): instead should use a safe snprintf reimplementation |
427 RAW_LOG(INFO, | 427 RAW_LOG(INFO, |
428 "mmap(start=0x%"PRIxPTR", len=%"PRIuS", prot=0x%x, flags=0x%x, " | 428 "mmap(start=0x%" PRIxPTR ", len=%" PRIuS ", prot=0x%x, flags=0x%x, " |
429 "fd=%d, offset=0x%x) = 0x%"PRIxPTR"", | 429 "fd=%d, offset=0x%x) = 0x%" PRIxPTR, |
430 (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset, | 430 (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset, |
431 (uintptr_t) result); | 431 (uintptr_t) result); |
432 #ifdef TODO_REENABLE_STACK_TRACING | 432 #ifdef TODO_REENABLE_STACK_TRACING |
433 DumpStackTrace(1, RawInfoStackDumper, NULL); | 433 DumpStackTrace(1, RawInfoStackDumper, NULL); |
434 #endif | 434 #endif |
435 } | 435 } |
436 } | 436 } |
437 | 437 |
438 static void MremapHook(const void* result, const void* old_addr, | 438 static void MremapHook(const void* result, const void* old_addr, |
439 size_t old_size, size_t new_size, | 439 size_t old_size, size_t new_size, |
440 int flags, const void* new_addr) { | 440 int flags, const void* new_addr) { |
441 if (FLAGS_mmap_log) { // log it | 441 if (FLAGS_mmap_log) { // log it |
442 // We use PRIxS not just '%p' to avoid deadlocks | 442 // We use PRIxS not just '%p' to avoid deadlocks |
443 // in pretty-printing of NULL as "nil". | 443 // in pretty-printing of NULL as "nil". |
444 // TODO(maxim): instead should use a safe snprintf reimplementation | 444 // TODO(maxim): instead should use a safe snprintf reimplementation |
445 RAW_LOG(INFO, | 445 RAW_LOG(INFO, |
446 "mremap(old_addr=0x%"PRIxPTR", old_size=%"PRIuS", " | 446 "mremap(old_addr=0x%" PRIxPTR ", old_size=%" PRIuS ", " |
447 "new_size=%"PRIuS", flags=0x%x, new_addr=0x%"PRIxPTR") = " | 447 "new_size=%" PRIuS ", flags=0x%x, new_addr=0x%" PRIxPTR ") = " |
448 "0x%"PRIxPTR"", | 448 "0x%" PRIxPTR, |
449 (uintptr_t) old_addr, old_size, new_size, flags, | 449 (uintptr_t) old_addr, old_size, new_size, flags, |
450 (uintptr_t) new_addr, (uintptr_t) result); | 450 (uintptr_t) new_addr, (uintptr_t) result); |
451 #ifdef TODO_REENABLE_STACK_TRACING | 451 #ifdef TODO_REENABLE_STACK_TRACING |
452 DumpStackTrace(1, RawInfoStackDumper, NULL); | 452 DumpStackTrace(1, RawInfoStackDumper, NULL); |
453 #endif | 453 #endif |
454 } | 454 } |
455 } | 455 } |
456 | 456 |
457 static void MunmapHook(const void* ptr, size_t size) { | 457 static void MunmapHook(const void* ptr, size_t size) { |
458 if (FLAGS_mmap_log) { // log it | 458 if (FLAGS_mmap_log) { // log it |
459 // We use PRIxS not just '%p' to avoid deadlocks | 459 // We use PRIxS not just '%p' to avoid deadlocks |
460 // in pretty-printing of NULL as "nil". | 460 // in pretty-printing of NULL as "nil". |
461 // TODO(maxim): instead should use a safe snprintf reimplementation | 461 // TODO(maxim): instead should use a safe snprintf reimplementation |
462 RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")", | 462 RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%" PRIuS ")", |
463 (uintptr_t) ptr, size); | 463 (uintptr_t) ptr, size); |
464 #ifdef TODO_REENABLE_STACK_TRACING | 464 #ifdef TODO_REENABLE_STACK_TRACING |
465 DumpStackTrace(1, RawInfoStackDumper, NULL); | 465 DumpStackTrace(1, RawInfoStackDumper, NULL); |
466 #endif | 466 #endif |
467 } | 467 } |
468 } | 468 } |
469 | 469 |
470 static void SbrkHook(const void* result, ptrdiff_t increment) { | 470 static void SbrkHook(const void* result, ptrdiff_t increment) { |
471 if (FLAGS_mmap_log) { // log it | 471 if (FLAGS_mmap_log) { // log it |
472 RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"", | 472 RAW_LOG(INFO, "sbrk(inc=%" PRIdS ") = 0x%" PRIxPTR, |
473 increment, (uintptr_t) result); | 473 increment, (uintptr_t) result); |
474 #ifdef TODO_REENABLE_STACK_TRACING | 474 #ifdef TODO_REENABLE_STACK_TRACING |
475 DumpStackTrace(1, RawInfoStackDumper, NULL); | 475 DumpStackTrace(1, RawInfoStackDumper, NULL); |
476 #endif | 476 #endif |
477 } | 477 } |
478 } | 478 } |
479 | 479 |
480 //---------------------------------------------------------------------- | 480 //---------------------------------------------------------------------- |
481 // Starting/stopping/dumping | 481 // Starting/stopping/dumping |
482 //---------------------------------------------------------------------- | 482 //---------------------------------------------------------------------- |
(...skipping 203 matching lines...)
686 | 686 |
687 // class used for finalization -- dumps the heap-profile at program exit | 687 // class used for finalization -- dumps the heap-profile at program exit |
688 struct HeapProfileEndWriter { | 688 struct HeapProfileEndWriter { |
689 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } | 689 ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); } |
690 }; | 690 }; |
691 | 691 |
692 // We want to make sure tcmalloc is up and running before starting the profiler | 692 // We want to make sure tcmalloc is up and running before starting the profiler |
693 static const TCMallocGuard tcmalloc_initializer; | 693 static const TCMallocGuard tcmalloc_initializer; |
694 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); | 694 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit()); |
695 static HeapProfileEndWriter heap_profile_end_writer; | 695 static HeapProfileEndWriter heap_profile_end_writer; |
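
Note on the change above: the NEW column's substantive edit is inserting whitespace between each string literal and the adjacent PRId64 / PRIuS / PRIdS / PRIxPTR format macro. Without the space, C++11 lexes the juxtaposed identifier as a user-defined-literal suffix, so the macro is not expanded and compilers reject the code or accept it only with a compatibility warning. A minimal standalone sketch of the difference (illustrative only, not part of the patch; the file name and the count variable are made up):

// why_space_matters.cc -- illustrative only, not part of heap-profiler.cc
#include <cinttypes>   // defines PRId64 (and pulls in int64_t)
#include <cstdio>

int main() {
  int64_t count = 42;  // hypothetical value, for demonstration only

  // Old style: literal and macro juxtaposed.  C++11 parses "%"PRId64 as a
  // string literal with a user-defined-literal suffix, so PRId64 is not
  // macro-expanded; compilers reject this or emit a compatibility warning:
  //   std::printf("count=%"PRId64"\n", count);

  // Patched style: whitespace separates the literal from the macro, so the
  // macro expands and adjacent string literals concatenate as intended.
  std::printf("count=%" PRId64 "\n", count);
  return 0;
}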