OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <inttypes.h> | 7 #include <inttypes.h> |
8 #include <stdio.h> | 8 #include <stdio.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
(...skipping 16 matching lines...) | |
27 #include "base/trace_event/heap_profiler.h" | 27 #include "base/trace_event/heap_profiler.h" |
28 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 28 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
29 #include "base/trace_event/heap_profiler_event_filter.h" | 29 #include "base/trace_event/heap_profiler_event_filter.h" |
30 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" | 30 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
31 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" | 31 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
32 #include "base/trace_event/malloc_dump_provider.h" | 32 #include "base/trace_event/malloc_dump_provider.h" |
33 #include "base/trace_event/memory_dump_provider.h" | 33 #include "base/trace_event/memory_dump_provider.h" |
34 #include "base/trace_event/memory_dump_scheduler.h" | 34 #include "base/trace_event/memory_dump_scheduler.h" |
35 #include "base/trace_event/memory_dump_session_state.h" | 35 #include "base/trace_event/memory_dump_session_state.h" |
36 #include "base/trace_event/memory_infra_background_whitelist.h" | 36 #include "base/trace_event/memory_infra_background_whitelist.h" |
37 #include "base/trace_event/memory_peak_detector.h" | |
37 #include "base/trace_event/process_memory_dump.h" | 38 #include "base/trace_event/process_memory_dump.h" |
38 #include "base/trace_event/trace_event.h" | 39 #include "base/trace_event/trace_event.h" |
39 #include "base/trace_event/trace_event_argument.h" | 40 #include "base/trace_event/trace_event_argument.h" |
40 #include "build/build_config.h" | 41 #include "build/build_config.h" |
41 | 42 |
42 #if defined(OS_ANDROID) | 43 #if defined(OS_ANDROID) |
43 #include "base/trace_event/java_heap_dump_provider_android.h" | 44 #include "base/trace_event/java_heap_dump_provider_android.h" |
44 #endif | 45 #endif |
45 | 46 |
46 namespace base { | 47 namespace base { |
(...skipping 80 matching lines...) | |
127 void EstimateTraceMemoryOverhead( | 128 void EstimateTraceMemoryOverhead( |
128 TraceEventMemoryOverhead* overhead) override { | 129 TraceEventMemoryOverhead* overhead) override { |
129 return (session_state.get()->*getter_function)() | 130 return (session_state.get()->*getter_function)() |
130 ->EstimateTraceMemoryOverhead(overhead); | 131 ->EstimateTraceMemoryOverhead(overhead); |
131 } | 132 } |
132 | 133 |
133 scoped_refptr<MemoryDumpSessionState> session_state; | 134 scoped_refptr<MemoryDumpSessionState> session_state; |
134 GetterFunctPtr const getter_function; | 135 GetterFunctPtr const getter_function; |
135 }; | 136 }; |
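
The `SessionStateConvertableProxy` above exists so that trace metadata events can lazily pull a deduplicator out of the session state at serialization time. Below is a minimal, self-contained sketch of the same pointer-to-member idiom; every name in it (`SessionState`, `Deduplicator`, `SessionStateProxy`) is a hypothetical stand-in rather than the Chromium class, and `std::shared_ptr` stands in for `scoped_refptr`:

```cpp
#include <memory>

// Toy stand-ins, for illustration only: a proxy stores a ref-counted
// object plus a pointer-to-member getter, and forwards calls to whatever
// component that getter returns.
struct Deduplicator {
  void EstimateOverhead(int* overhead) { *overhead += 42; }
};

struct SessionState {
  Deduplicator* stack_frame_deduplicator() { return &dedup_; }
  Deduplicator dedup_;
};

template <typename T>
struct SessionStateProxy {
  using GetterFn = T* (SessionState::*)();

  SessionStateProxy(std::shared_ptr<SessionState> state, GetterFn getter)
      : state(std::move(state)), getter(getter) {}

  void EstimateOverhead(int* overhead) {
    // Same shape as (session_state.get()->*getter_function)()->... above.
    (state.get()->*getter)()->EstimateOverhead(overhead);
  }

  std::shared_ptr<SessionState> state;
  GetterFn const getter;
};

int main() {
  auto state = std::make_shared<SessionState>();
  SessionStateProxy<Deduplicator> proxy(
      state, &SessionState::stack_frame_deduplicator);
  int overhead = 0;
  proxy.EstimateOverhead(&overhead);  // overhead is now 42
  return 0;
}
```

Storing the getter as a pointer-to-member keeps the proxy generic over which deduplicator it forwards to, which is how the two `TRACE_EVENT_API_ADD_METADATA_EVENT` call sites further down reuse a single template.
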
136 | 137 |
138 void OnPeakDetected(MemoryDumpLevelOfDetail level_of_detail) { | |
139 MemoryDumpManager::GetInstance()->RequestGlobalDump( | |
140 MemoryDumpType::PEAK_MEMORY_USAGE, level_of_detail); | |
141 } | |
142 | |
143 void OnPeriodicSchedulerTick(MemoryDumpLevelOfDetail level_of_detail) { | |
144 MemoryDumpManager::GetInstance()->RequestGlobalDump( | |
145 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | |
146 } | |
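
The two new free functions above are trampolines: the peak detector and the periodic scheduler fire a plain callback, and the callback forwards into `RequestGlobalDump()` with the dump type baked in. A sketch of the same binding idea under stated assumptions, using `std::function` in place of `base::RepeatingClosure` and a hypothetical `PeakDetector` type:

```cpp
#include <functional>
#include <iostream>

// Hypothetical sketch of the trampoline pattern: the detector only knows
// how to run an argument-free callback, so the dump level is bound in at
// setup time (mirroring BindRepeating(&OnPeakDetected, level_of_detail)).
enum class LevelOfDetail { kBackground, kLight, kDetailed };

void OnPeak(LevelOfDetail level) {
  // Stand-in for RequestGlobalDump(PEAK_MEMORY_USAGE, level).
  std::cout << "dump requested, level=" << static_cast<int>(level) << "\n";
}

struct PeakDetector {
  std::function<void()> on_peak_callback;  // like base::RepeatingClosure
  void SimulatePeak() {
    if (on_peak_callback)
      on_peak_callback();
  }
};

int main() {
  PeakDetector detector;
  // Partial application: the level travels with the callback.
  detector.on_peak_callback = [] { OnPeak(LevelOfDetail::kDetailed); };
  detector.SimulatePeak();
  return 0;
}
```
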
147 | |
137 } // namespace | 148 } // namespace |
138 | 149 |
139 // static | 150 // static |
140 const char* const MemoryDumpManager::kTraceCategory = | 151 const char* const MemoryDumpManager::kTraceCategory = |
141 TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 152 TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
142 | 153 |
143 // static | 154 // static |
144 const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump"; | 155 const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump"; |
145 | 156 |
146 // static | 157 // static |
(...skipping 190 matching lines...) | |
337 } | 348 } |
338 | 349 |
339 { | 350 { |
340 AutoLock lock(lock_); | 351 AutoLock lock(lock_); |
341 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 352 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
342 // This actually happens in some tests which don't have a clean tear-down | 353 // This actually happens in some tests which don't have a clean tear-down |
343 // path for RenderThreadImpl::Init(). | 354 // path for RenderThreadImpl::Init(). |
344 if (already_registered) | 355 if (already_registered) |
345 return; | 356 return; |
346 | 357 |
347 // The list of polling MDPs is populated OnTraceLogEnabled(). This code | 358 if (options.is_fast_polling_supported) |
348 // deals with the case of a MDP capable of fast polling that is registered | 359 MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged(); |
349 // after the OnTraceLogEnabled() | |
350 if (options.is_fast_polling_supported && dump_thread_) { | |
351 dump_thread_->task_runner()->PostTask( | |
352 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | |
353 Unretained(this), mdpinfo)); | |
354 } | |
355 } | 360 } |
356 | 361 |
357 if (heap_profiling_enabled_) | 362 if (heap_profiling_enabled_) |
358 mdp->OnHeapProfilingEnabled(true); | 363 mdp->OnHeapProfilingEnabled(true); |
359 } | 364 } |
360 | 365 |
361 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 366 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
362 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 367 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
363 } | 368 } |
364 | 369 |
(...skipping 18 matching lines...) | |
383 } | 388 } |
384 | 389 |
385 if (mdp_iter == dump_providers_.end()) | 390 if (mdp_iter == dump_providers_.end()) |
386 return; // Not registered / already unregistered. | 391 return; // Not registered / already unregistered. |
387 | 392 |
388 if (take_mdp_ownership_and_delete_async) { | 393 if (take_mdp_ownership_and_delete_async) { |
389 // The MDP will be deleted when its MDPInfo struct is, that is either: | 394 // The MDP will be deleted when its MDPInfo struct is, that is either: |
390 // - At the end of this function, if no dump is in progress. | 395 // - At the end of this function, if no dump is in progress. |
391 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is | 396 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
392 // removed from |pending_dump_providers|. | 397 // removed from |pending_dump_providers|. |
393 // - When the provider is removed from |dump_providers_for_polling_|. | 398 // - When other clients (e.g. MemoryPeakDetector) drop their reference to it. |
394 DCHECK(!(*mdp_iter)->owned_dump_provider); | 399 DCHECK(!(*mdp_iter)->owned_dump_provider); |
395 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 400 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
396 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || | 401 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || |
397 subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 402 subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
398 // If dump provider's name is on |strict_thread_check_blacklist_|, then the | 403 // If dump provider's name is on |strict_thread_check_blacklist_|, then the |
399 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is | 404 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is |
400 // fired even when tracing is not enabled (stricter). | 405 // fired even when tracing is not enabled (stricter). |
401 // TODO(ssid): Remove this condition after removing all the dump providers | 406 // TODO(ssid): Remove this condition after removing all the dump providers |
402 // in the blacklist and the buildbots are no longer flakily hitting the | 407 // in the blacklist and the buildbots are no longer flakily hitting the |
403 // DCHECK, crbug.com/643438. | 408 // DCHECK, crbug.com/643438. |
404 | 409 |
405 // If you hit this DCHECK, your dump provider has a bug. | 410 // If you hit this DCHECK, your dump provider has a bug. |
406 // Unregistration of a MemoryDumpProvider is safe only if: | 411 // Unregistration of a MemoryDumpProvider is safe only if: |
407 // - The MDP has specified a sequenced task runner affinity AND the | 412 // - The MDP has specified a sequenced task runner affinity AND the |
408 // unregistration happens on the same task runner. So that the MDP cannot | 413 // unregistration happens on the same task runner. So that the MDP cannot |
409 // unregister and be in the middle of a OnMemoryDump() at the same time. | 414 // unregister and be in the middle of a OnMemoryDump() at the same time. |
410 // - The MDP has NOT specified a task runner affinity and its ownership is | 415 // - The MDP has NOT specified a task runner affinity and its ownership is |
411 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 416 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
412 // In all the other cases, it is not possible to guarantee that the | 417 // In all the other cases, it is not possible to guarantee that the |
413 // unregistration will not race with OnMemoryDump() calls. | 418 // unregistration will not race with OnMemoryDump() calls. |
414 DCHECK((*mdp_iter)->task_runner && | 419 DCHECK((*mdp_iter)->task_runner && |
415 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 420 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
416 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 421 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
417 << "unregister itself in a racy way. Please file a crbug."; | 422 << "unregister itself in a racy way. Please file a crbug."; |
418 } | 423 } |
419 | 424 |
420 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { | 425 if ((*mdp_iter)->options.is_fast_polling_supported) { |
421 DCHECK(take_mdp_ownership_and_delete_async); | 426 DCHECK(take_mdp_ownership_and_delete_async); |
422 dump_thread_->task_runner()->PostTask( | 427 MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged(); |
423 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, | |
424 Unretained(this), *mdp_iter)); | |
425 } | 428 } |
426 | 429 |
427 // The MDPInfo instance can still be referenced by the | 430 // The MDPInfo instance can still be referenced by the |
428 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 431 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
429 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 432 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
430 // to just skip it, without actually invoking the |mdp|, which might be | 433 // to just skip it, without actually invoking the |mdp|, which might be |
431 // destroyed by the caller soon after this method returns. | 434 // destroyed by the caller soon after this method returns. |
432 (*mdp_iter)->disabled = true; | 435 (*mdp_iter)->disabled = true; |
433 dump_providers_.erase(mdp_iter); | 436 dump_providers_.erase(mdp_iter); |
434 } | 437 } |
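
The tail of `UnregisterDumpProviderInternal()` depends on the MDPInfo being ref-counted: rather than destroying state that an in-flight dump may still reference, it only flips `disabled`. A minimal sketch of that disable-instead-of-destroy idiom, with hypothetical types and `std::shared_ptr` standing in for the ref-counting:

```cpp
#include <memory>
#include <vector>

// Sketch: an in-flight dump may still hold a reference to the provider's
// info struct, so unregistration only sets a flag; the dump loop skips
// disabled entries instead of touching freed state.
struct ProviderInfo {
  bool disabled = false;
  // ... dump provider pointer, name, task runner, etc.
};

int main() {
  auto info = std::make_shared<ProviderInfo>();

  // An in-flight dump keeps its own reference, as
  // ProcessMemoryDumpAsyncState::pending_dump_providers does.
  std::vector<std::shared_ptr<ProviderInfo>> pending = {info};

  // Unregistration: mark disabled, then drop the registry's reference.
  // The struct stays alive because |pending| still owns it.
  info->disabled = true;
  info.reset();

  for (const auto& p : pending) {
    if (p->disabled)
      continue;  // Skip: the underlying provider may already be gone.
    // ... would invoke OnMemoryDump() here ...
  }
  return 0;
}
```
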
435 | 438 |
436 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( | |
437 scoped_refptr<MemoryDumpProviderInfo> mdpinfo) { | |
438 AutoLock lock(lock_); | |
439 dump_providers_for_polling_.insert(mdpinfo); | |
440 | |
441 // Notify ready for polling when first polling supported provider is | |
442 // registered. This handles the case where OnTraceLogEnabled() did not notify | |
443 // ready since no polling supported mdp has yet been registered. | |
444 if (dump_providers_for_polling_.size() == 1) | |
445 MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded(); | |
446 } | |
447 | |
448 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( | |
449 scoped_refptr<MemoryDumpProviderInfo> mdpinfo) { | |
450 mdpinfo->dump_provider->SuspendFastMemoryPolling(); | |
451 | |
452 AutoLock lock(lock_); | |
453 dump_providers_for_polling_.erase(mdpinfo); | |
454 DCHECK(!dump_providers_for_polling_.empty()) | |
455 << "All polling MDPs cannot be unregistered."; | |
456 } | |
457 | |
458 void MemoryDumpManager::RequestGlobalDump( | 439 void MemoryDumpManager::RequestGlobalDump( |
459 MemoryDumpType dump_type, | 440 MemoryDumpType dump_type, |
460 MemoryDumpLevelOfDetail level_of_detail, | 441 MemoryDumpLevelOfDetail level_of_detail, |
461 const MemoryDumpCallback& callback) { | 442 const MemoryDumpCallback& callback) { |
462 // Bail out immediately if tracing is not enabled at all or if the dump mode | 443 // Bail out immediately if tracing is not enabled at all or if the dump mode |
463 // is not allowed. | 444 // is not allowed. |
464 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || | 445 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || |
465 !IsDumpModeAllowed(level_of_detail)) { | 446 !IsDumpModeAllowed(level_of_detail)) { |
466 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 447 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory |
467 << " tracing category is not enabled or the requested dump mode is " | 448 << " tracing category is not enabled or the requested dump mode is " |
(...skipping 14 matching lines...) | |
482 MemoryDumpTypeToString(dump_type), "level_of_detail", | 463 MemoryDumpTypeToString(dump_type), "level_of_detail", |
483 MemoryDumpLevelOfDetailToString(level_of_detail)); | 464 MemoryDumpLevelOfDetailToString(level_of_detail)); |
484 MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback); | 465 MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback); |
485 | 466 |
486 // The delegate will coordinate the IPC broadcast and at some point invoke | 467 // The delegate will coordinate the IPC broadcast and at some point invoke |
487 // CreateProcessDump() to get a dump for the current process. | 468 // CreateProcessDump() to get a dump for the current process. |
488 MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail}; | 469 MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail}; |
489 delegate_->RequestGlobalMemoryDump(args, wrapped_callback); | 470 delegate_->RequestGlobalMemoryDump(args, wrapped_callback); |
490 } | 471 } |
491 | 472 |
473 void MemoryDumpManager::GetDumpProvidersForPolling( | |
474 std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) { | |
475 DCHECK(providers->empty()); | |
476 AutoLock lock(lock_); | |
477 for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) { | |
478 if (mdp->options.is_fast_polling_supported) | |
479 providers->push_back(mdp); | |
480 } | |
481 } | |
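
`GetDumpProvidersForPolling()` replaces the old push model (a `dump_providers_for_polling_` set maintained on the dump thread) with a pull model: the manager keeps the single authoritative list, and the peak detector re-pulls a snapshot under the lock whenever `NotifyMemoryDumpProvidersChanged()` fires. A sketch of that shape with hypothetical `std::` stand-ins:

```cpp
#include <memory>
#include <mutex>
#include <vector>

// Sketch of the pull model: the manager owns the authoritative provider
// list; clients copy out a consistent snapshot under the lock instead of
// maintaining their own mirror of registrations.
struct ProviderInfo {
  bool is_fast_polling_supported = false;
};

class Manager {
 public:
  void Register(std::shared_ptr<ProviderInfo> info) {
    std::lock_guard<std::mutex> guard(lock_);
    providers_.push_back(std::move(info));
  }

  // Mirrors GetDumpProvidersForPolling(): copy out only pollable entries.
  void GetPollableProviders(
      std::vector<std::shared_ptr<ProviderInfo>>* out) {
    std::lock_guard<std::mutex> guard(lock_);
    for (const auto& provider : providers_) {
      if (provider->is_fast_polling_supported)
        out->push_back(provider);
    }
  }

 private:
  std::mutex lock_;
  std::vector<std::shared_ptr<ProviderInfo>> providers_;
};

int main() {
  Manager manager;
  auto info = std::make_shared<ProviderInfo>();
  info->is_fast_polling_supported = true;
  manager.Register(info);

  std::vector<std::shared_ptr<ProviderInfo>> snapshot;
  manager.GetPollableProviders(&snapshot);  // snapshot.size() == 1
  return 0;
}
```
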
482 | |
492 void MemoryDumpManager::RequestGlobalDump( | 483 void MemoryDumpManager::RequestGlobalDump( |
493 MemoryDumpType dump_type, | 484 MemoryDumpType dump_type, |
494 MemoryDumpLevelOfDetail level_of_detail) { | 485 MemoryDumpLevelOfDetail level_of_detail) { |
495 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); | 486 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); |
496 } | 487 } |
497 | 488 |
498 bool MemoryDumpManager::IsDumpProviderRegisteredForTesting( | 489 bool MemoryDumpManager::IsDumpProviderRegisteredForTesting( |
499 MemoryDumpProvider* provider) { | 490 MemoryDumpProvider* provider) { |
500 AutoLock lock(lock_); | 491 AutoLock lock(lock_); |
501 | 492 |
(...skipping 32 matching lines...) | |
534 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 525 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
535 args, dump_providers_, session_state_, callback, | 526 args, dump_providers_, session_state_, callback, |
536 dump_thread_ ? dump_thread_->task_runner() : nullptr)); | 527 dump_thread_ ? dump_thread_->task_runner() : nullptr)); |
537 | 528 |
538 // Safety check to prevent reaching here without calling RequestGlobalDump | 529 // Safety check to prevent reaching here without calling RequestGlobalDump |
539 // or with disallowed modes. If |session_state_| is null then tracing is | 530 // or with disallowed modes. If |session_state_| is null then tracing is |
540 // disabled. | 531 // disabled. |
541 CHECK(!session_state_ || | 532 CHECK(!session_state_ || |
542 session_state_->IsDumpModeAllowed(args.level_of_detail)); | 533 session_state_->IsDumpModeAllowed(args.level_of_detail)); |
543 | 534 |
544 MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered(); | 535 // If enabled, holds back the peak detector, resetting its estimation window. |
536 MemoryPeakDetector::GetInstance()->Clear(); | |
545 } | 537 } |
546 | 538 |
547 // Start the process dump. This involves task runner hops as specified by the | 539 // Start the process dump. This involves task runner hops as specified by the |
548 // MemoryDumpProvider(s) in RegisterDumpProvider(). | 540 // MemoryDumpProvider(s) in RegisterDumpProvider(). |
549 SetupNextMemoryDump(std::move(pmd_async_state)); | 541 SetupNextMemoryDump(std::move(pmd_async_state)); |
550 } | 542 } |
551 | 543 |
552 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A | 544 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A |
553 // PostTask is always required for a generic SequencedTaskRunner to ensure that | 545 // PostTask is always required for a generic SequencedTaskRunner to ensure that |
554 // no other task is running on it concurrently. SetupNextMemoryDump() and | 546 // no other task is running on it concurrently. SetupNextMemoryDump() and |
(...skipping 155 matching lines...) | |
710 args); | 702 args); |
711 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 703 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
712 mdpinfo->consecutive_failures = | 704 mdpinfo->consecutive_failures = |
713 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 705 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
714 } | 706 } |
715 | 707 |
716 pmd_async_state->pending_dump_providers.pop_back(); | 708 pmd_async_state->pending_dump_providers.pop_back(); |
717 SetupNextMemoryDump(std::move(pmd_async_state)); | 709 SetupNextMemoryDump(std::move(pmd_async_state)); |
718 } | 710 } |
719 | 711 |
720 bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { | |
721 #if DCHECK_IS_ON() | |
722 { | |
723 AutoLock lock(lock_); | |
724 if (dump_thread_) | |
725 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread()); | |
726 } | |
727 #endif | |
728 if (dump_providers_for_polling_.empty()) | |
729 return false; | |
730 | |
731 *memory_total = 0; | |
732 // Note that we call PollFastMemoryTotal() even if the dump provider is | |
733 // disabled (unregistered). This is to avoid taking lock while polling. | |
734 for (const auto& mdpinfo : dump_providers_for_polling_) { | |
735 uint64_t value = 0; | |
736 mdpinfo->dump_provider->PollFastMemoryTotal(&value); | |
737 *memory_total += value; | |
738 } | |
739 return true; | |
740 } | |
741 | |
742 // static | 712 // static |
743 uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern, | 713 uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern, |
744 const ProcessMemoryDump* pmd) { | 714 const ProcessMemoryDump* pmd) { |
745 uint64_t sum = 0; | 715 uint64_t sum = 0; |
746 for (const auto& kv : pmd->allocator_dumps()) { | 716 for (const auto& kv : pmd->allocator_dumps()) { |
747 auto name = StringPiece(kv.first); | 717 auto name = StringPiece(kv.first); |
748 if (MatchPattern(name, pattern)) | 718 if (MatchPattern(name, pattern)) |
749 sum += kv.second->GetSize(); | 719 sum += kv.second->GetSize(); |
750 } | 720 } |
751 return sum / 1024; | 721 return sum / 1024; |
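
`GetDumpsSumKb()` adds up the sizes of every allocator dump whose name matches a glob pattern, then converts the byte total to kilobytes. A compact sketch under simplifying assumptions: a flat `std::map` replaces `pmd->allocator_dumps()`, and a toy prefix matcher replaces `base::MatchPattern` (which also supports `?`):

```cpp
#include <cstdint>
#include <map>
#include <string>

// Toy stand-in for base::MatchPattern: supports only a trailing '*'.
static bool MatchesPattern(const std::string& name,
                           const std::string& pattern) {
  if (!pattern.empty() && pattern.back() == '*') {
    const std::string prefix = pattern.substr(0, pattern.size() - 1);
    return name.compare(0, prefix.size(), prefix) == 0;
  }
  return name == pattern;
}

uint32_t GetDumpsSumKb(const std::string& pattern,
                       const std::map<std::string, uint64_t>& dumps) {
  uint64_t sum_bytes = 0;
  for (const auto& kv : dumps) {
    if (MatchesPattern(kv.first, pattern))
      sum_bytes += kv.second;
  }
  // Accumulate in 64 bits first, then convert bytes -> KiB.
  return static_cast<uint32_t>(sum_bytes / 1024);
}

int main() {
  std::map<std::string, uint64_t> dumps = {
      {"malloc/allocated_objects", 4096}, {"v8/heap", 8192}};
  return GetDumpsSumKb("malloc/*", dumps) == 4 ? 0 : 1;
}
```
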
(...skipping 130 matching lines...) | |
882 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( | 852 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( |
883 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); | 853 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); |
884 | 854 |
885 TRACE_EVENT_API_ADD_METADATA_EVENT( | 855 TRACE_EVENT_API_ADD_METADATA_EVENT( |
886 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", | 856 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", |
887 "typeNames", | 857 "typeNames", |
888 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( | 858 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( |
889 session_state, &MemoryDumpSessionState::type_name_deduplicator)); | 859 session_state, &MemoryDumpSessionState::type_name_deduplicator)); |
890 } | 860 } |
891 | 861 |
892 { | 862 AutoLock lock(lock_); |
893 AutoLock lock(lock_); | |
894 | 863 |
895 DCHECK(delegate_); // At this point we must have a delegate. | 864 DCHECK(delegate_); // At this point we must have a delegate. |
896 session_state_ = session_state; | 865 session_state_ = session_state; |
897 | 866 |
898 DCHECK(!dump_thread_); | 867 DCHECK(!dump_thread_); |
899 dump_thread_ = std::move(dump_thread); | 868 dump_thread_ = std::move(dump_thread); |
900 | 869 |
901 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 870 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
902 | 871 |
903 dump_providers_for_polling_.clear(); | 872 MemoryDumpScheduler::Config periodic_config; |
904 for (const auto& mdpinfo : dump_providers_) { | 873 MemoryPeakDetector::Config peak_config; |
905 if (mdpinfo->options.is_fast_polling_supported) | 874 for (const auto& trigger : memory_dump_config.triggers) { |
906 dump_providers_for_polling_.insert(mdpinfo); | 875 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { |
876 NOTREACHED(); | |
877 continue; | |
907 } | 878 } |
908 | 879 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { |
909 MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance(); | 880 if (periodic_config.triggers.empty()) { |
910 dump_scheduler->Setup(this, dump_thread_->task_runner()); | 881 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); |
911 DCHECK_LE(memory_dump_config.triggers.size(), 3u); | |
912 for (const auto& trigger : memory_dump_config.triggers) { | |
913 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { | |
914 NOTREACHED(); | |
915 continue; | |
916 } | 882 } |
917 dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail, | 883 periodic_config.triggers.push_back( |
918 trigger.min_time_between_dumps_ms); | 884 {trigger.level_of_detail, trigger.min_time_between_dumps_ms}); |
885 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) { | |
886 MemoryPeakDetector::GetInstance()->Setup( | |
ssid 2017/04/10 21:35:51: Maybe a dcheck here to avoid calling setup multiple times?
Primiano Tucci (use gerrit) 2017/04/11 11:43:08: Done.
887 BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling, | |
888 Unretained(this)), | |
889 dump_thread_->task_runner(), | |
890 BindRepeating(&OnPeakDetected, trigger.level_of_detail)); | |
891 peak_config.polling_interval_ms = 10; | |
892 peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms; | |
893 peak_config.enable_verbose_poll_tracing = | |
894 trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED; | |
919 } | 895 } |
920 | |
921 // Notify polling supported only if some polling supported provider was | |
922 // registered, else RegisterPollingMDPOnDumpThread() will notify when first | |
923 // polling MDP registers. | |
924 if (!dump_providers_for_polling_.empty()) | |
925 dump_scheduler->EnablePollingIfNeeded(); | |
926 | |
927 // Only coordinator process triggers periodic global memory dumps. | |
928 if (delegate_->IsCoordinator()) | |
929 dump_scheduler->EnablePeriodicTriggerIfNeeded(); | |
930 } | 896 } |
931 | 897 |
898 if (peak_config.min_time_between_peaks_ms) | |
899 MemoryPeakDetector::GetInstance()->Start(peak_config); | |
ssid 2017/04/10 21:35:51: Can we find some way to trigger a memory dump at t
Primiano Tucci (use gerrit) 2017/04/11 11:43:08: Done.
900 | |
901 // Only coordinator process triggers periodic global memory dumps. | |
902 if (delegate_->IsCoordinator() && !periodic_config.triggers.empty()) { | |
903 MemoryDumpScheduler::GetInstance()->Start(periodic_config, | |
904 dump_thread_->task_runner()); | |
905 } | |
932 } | 906 } |
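
The rewritten `OnTraceLogEnabled()` makes one pass over the trace config's triggers and splits them: PERIODIC_INTERVAL triggers accumulate into a scheduler config, a PEAK_MEMORY_USAGE trigger configures the peak detector, and each component is started only if its config is non-empty. A sketch of that split with hypothetical types (the real `MemoryDumpScheduler::Config` and `MemoryPeakDetector::Config` carry more fields):

```cpp
#include <cstdint>
#include <vector>

enum class TriggerType { kPeriodicInterval, kPeakMemoryUsage };

struct Trigger {
  TriggerType type;
  uint32_t min_time_between_dumps_ms;
};

struct PeriodicConfig {
  std::vector<uint32_t> periods_ms;
};

struct PeakConfig {
  uint32_t polling_interval_ms = 0;
  uint32_t min_time_between_peaks_ms = 0;
};

int main() {
  const std::vector<Trigger> triggers = {
      {TriggerType::kPeriodicInterval, 250},
      {TriggerType::kPeakMemoryUsage, 2000}};

  PeriodicConfig periodic_config;
  PeakConfig peak_config;
  for (const auto& trigger : triggers) {
    if (trigger.type == TriggerType::kPeriodicInterval) {
      periodic_config.periods_ms.push_back(trigger.min_time_between_dumps_ms);
    } else if (trigger.type == TriggerType::kPeakMemoryUsage) {
      peak_config.polling_interval_ms = 10;  // fixed 10 ms, as in the CL
      peak_config.min_time_between_peaks_ms =
          trigger.min_time_between_dumps_ms;
    }
  }

  // Start each component only if it actually received a trigger.
  const bool start_peak_detector = peak_config.min_time_between_peaks_ms != 0;
  const bool start_scheduler = !periodic_config.periods_ms.empty();
  return (start_peak_detector && start_scheduler) ? 0 : 1;
}
```
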
933 | 907 |
934 void MemoryDumpManager::OnTraceLogDisabled() { | 908 void MemoryDumpManager::OnTraceLogDisabled() { |
935 // There might be a memory dump in progress while this happens. Therefore, | 909 // There might be a memory dump in progress while this happens. Therefore, |
936 // ensure that the MDM state which depends on the tracing enabled / disabled | 910 // ensure that the MDM state which depends on the tracing enabled / disabled |
937 // state is always accessed by the dumping methods holding the |lock_|. | 911 // state is always accessed by the dumping methods holding the |lock_|. |
938 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) | 912 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) |
939 return; | 913 return; |
940 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 914 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
941 std::unique_ptr<Thread> dump_thread; | 915 std::unique_ptr<Thread> dump_thread; |
942 { | 916 { |
943 AutoLock lock(lock_); | 917 AutoLock lock(lock_); |
918 MemoryDumpScheduler::GetInstance()->Stop(); | |
919 MemoryPeakDetector::GetInstance()->TearDown(); | |
944 dump_thread = std::move(dump_thread_); | 920 dump_thread = std::move(dump_thread_); |
945 session_state_ = nullptr; | 921 session_state_ = nullptr; |
946 MemoryDumpScheduler::GetInstance()->DisableAllTriggers(); | |
947 } | 922 } |
948 | 923 |
949 // Thread stops are blocking and must be performed outside of the |lock_| | 924 // Thread stops are blocking and must be performed outside of the |lock_| |
950 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 925 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
951 if (dump_thread) | 926 if (dump_thread) |
952 dump_thread->Stop(); | 927 dump_thread->Stop(); |
953 | |
954 // |dump_providers_for_polling_| must be cleared only after the dump thread is | |
955 // stopped (polling tasks are done). | |
956 { | |
957 AutoLock lock(lock_); | |
958 for (const auto& mdpinfo : dump_providers_for_polling_) | |
959 mdpinfo->dump_provider->SuspendFastMemoryPolling(); | |
960 dump_providers_for_polling_.clear(); | |
961 } | |
962 } | 928 } |
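
Note the lock discipline in `OnTraceLogDisabled()`: the scheduler and peak detector are stopped and `dump_thread_` is moved out while holding `lock_`, but the blocking `Stop()` happens only after the lock is released, because the dump thread may itself need the lock before it can exit. A minimal sketch of that move-out-then-join pattern using `std::` primitives:

```cpp
#include <memory>
#include <mutex>
#include <thread>

std::mutex g_lock;
std::unique_ptr<std::thread> g_dump_thread;

void OnDisabled() {
  std::unique_ptr<std::thread> thread_to_stop;
  {
    std::lock_guard<std::mutex> guard(g_lock);
    // Stop schedulers / detectors and reset state here, then steal the
    // thread object so it can be joined without the lock held.
    thread_to_stop = std::move(g_dump_thread);
  }
  // Blocking join outside the lock: the worker may need g_lock to finish,
  // so joining while holding it could deadlock.
  if (thread_to_stop && thread_to_stop->joinable())
    thread_to_stop->join();
}

int main() {
  {
    std::lock_guard<std::mutex> guard(g_lock);
    g_dump_thread = std::make_unique<std::thread>([] {
      std::lock_guard<std::mutex> worker_guard(g_lock);  // needs the lock
    });
  }
  OnDisabled();
  return 0;
}
```
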
963 | 929 |
964 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { | 930 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
965 AutoLock lock(lock_); | 931 AutoLock lock(lock_); |
966 if (!session_state_) | 932 if (!session_state_) |
967 return false; | 933 return false; |
968 return session_state_->IsDumpModeAllowed(dump_mode); | 934 return session_state_->IsDumpModeAllowed(dump_mode); |
969 } | 935 } |
970 | 936 |
971 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 937 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
(...skipping 22 matching lines...) | |
994 if (iter == process_dumps.end()) { | 960 if (iter == process_dumps.end()) { |
995 std::unique_ptr<ProcessMemoryDump> new_pmd( | 961 std::unique_ptr<ProcessMemoryDump> new_pmd( |
996 new ProcessMemoryDump(session_state, dump_args)); | 962 new ProcessMemoryDump(session_state, dump_args)); |
997 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 963 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
998 } | 964 } |
999 return iter->second.get(); | 965 return iter->second.get(); |
1000 } | 966 } |
1001 | 967 |
1002 } // namespace trace_event | 968 } // namespace trace_event |
1003 } // namespace base | 969 } // namespace base |