Index: base/trace_event/memory_dump_manager.cc |
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc |
index cbce21021c08a599fa6f6d23db007de8d8bad7b4..116d22e2ae1e4875403a239be03f3966dd1d15ae 100644 |
--- a/base/trace_event/memory_dump_manager.cc |
+++ b/base/trace_event/memory_dump_manager.cc |
@@ -85,6 +85,12 @@ void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback, |
} |
} |
+// Empty callback to PostTask the deletion of an owned MemoryDumpProvider. |
+void DeleteMemoryDumpProvider(scoped_ptr<MemoryDumpProvider> mdp) { |
+ // We just need an empty method that takes ownership of the scoped_ptr |
+ // and de-scopes it, causing its deletion. |
+} |
+ |
} // namespace |
// static |
@@ -218,7 +224,20 @@ void MemoryDumpManager::RegisterDumpProvider( |
} |
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
+ UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
+} |
+ |
+void MemoryDumpManager::UnregisterAndDeleteDumpProviderAsync( |
+ scoped_ptr<MemoryDumpProvider> mdp) { |
+ UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
+} |
+ |
+void MemoryDumpManager::UnregisterDumpProviderInternal(MemoryDumpProvider* mdp, |
+ bool delete_async) { |
AutoLock lock(lock_); |
+ scoped_ptr<MemoryDumpProvider> owned_mdp; |
+ if (delete_async) |
+ owned_mdp.reset(mdp); // Auto delete in case of early outs. |
auto mdp_iter = dump_providers_.begin(); |
for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
@@ -227,7 +246,7 @@ void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
} |
if (mdp_iter == dump_providers_.end()) |
- return; |
+ return; // Already unregistered. |
Ruud van Asseldonk
2015/12/14 16:25:48
So the async deletion might actually be synchronous in this case. Consider documenting that in the header.
|
// Unregistration of a MemoryDumpProvider while tracing is ongoing is safe |
// only if the MDP has specified a thread affinity (via task_runner()) AND |
@@ -235,13 +254,25 @@ void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
// and OnMemoryDump() at the same time). |
// Otherwise, it is not possible to guarantee that its unregistration is |
// race-free. If you hit this DCHECK, your MDP has a bug. |
- DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || |
- (mdp_iter->task_runner && |
- mdp_iter->task_runner->BelongsToCurrentThread())) |
- << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " |
- << "unregister itself in a racy way. Please file a crbug."; |
+ if (!delete_async && subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
+ DCHECK((mdp_iter->task_runner && |
+ mdp_iter->task_runner->BelongsToCurrentThread())) |
+ << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " |
+ << "unregister itself in a racy way. Please file a crbug."; |
+ } |
mdp_iter->unregistered = true; |
+ |
+ // Delete the descriptor immediately if there are no no dumps in progress. |
+ // Otherwise, the prologue of ContinueAsyncProcessDump will take care. |
+ if (outstanding_dumps_.empty()) { |
+ dump_providers_.erase(mdp_iter); |
+ // If |delete_async| == true, the |mdp| will be destroyed synchronously here |
+ // as there is no reason to defer the deletion (and no thread to defer to). |
+ } else if (delete_async) { |
+ outstanding_dumps_.front()->dump_thread_task_runner->PostTask( |
+ FROM_HERE, Bind(&DeleteMemoryDumpProvider, Passed(&owned_mdp))); |
Ruud van Asseldonk
2015/12/14 16:25:48
Having an in-flight message own the dump provider seems fragile: what happens if the dump thread's task runner is shut down before this task gets to run?
|
+ } |
} |
void MemoryDumpManager::RequestGlobalDump( |
@@ -293,22 +324,28 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
TRACE_ID_MANGLE(args.dump_guid)); |
+ bool no_dump_providers_registered; |
scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
{ |
AutoLock lock(lock_); |
+ no_dump_providers_registered = dump_providers_.empty(); |
pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
args, dump_providers_.begin(), session_state_, callback, |
dump_thread_->task_runner())); |
+ outstanding_dumps_.push_back(pmd_async_state.get()); |
Ruud van Asseldonk
2015/12/14 16:25:48
By doing this you lose all of the advantages of a scoped_ptr: |outstanding_dumps_| now holds a raw pointer that must be removed manually.
|
} |
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
TRACE_ID_MANGLE(args.dump_guid), |
TRACE_EVENT_FLAG_FLOW_OUT); |
+ if (no_dump_providers_registered) |
+ return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); |
+ |
// Start the thread hop. |dump_providers_| are kept sorted by thread, so |
// ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
// affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
- ContinueAsyncProcessDump(std::move(pmd_async_state)); |
+ ContinueAsyncProcessDump(pmd_async_state.release()); |
} |
// At most one ContinueAsyncProcessDump() can be active at any time for a given |
@@ -326,14 +363,22 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
// - Advance the |next_dump_provider| iterator to the next dump provider. |
// - If this was the last hop, create a trace event, add it to the trace |
// and finalize (invoke callback). |
- |
void MemoryDumpManager::ContinueAsyncProcessDump( |
- scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state) { |
// Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
// in the PostTask below don't end up registering their own dump providers |
// (for discounting trace memory overhead) while holding the |lock_|. |
TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
+ // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason |
+ // why it isn't is because of the corner case logic of |did_post_task| below, |
+ // which needs to take back the ownsership of the |pmd_async_state| when a |
Ruud van Asseldonk
2015/12/14 16:25:48
/s/ownsership/ownership/
|
+ // thread goes away and consequently PostTask() fails. PostTask(), in fact, |
+ // destroyes the scoped_ptr arguments upon failure. This would prevent us to |
+ // to just skip the hop and move on. Hence the naked -> scoped ptr conversion. |
+ auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); |
Ruud van Asseldonk
2015/12/14 16:25:48
I wonder if it would be simpler to just make |pmd_async_state| a raw pointer throughout, instead of converting back and forth between raw and scoped.
|
+ owned_pmd_async_state = nullptr; |
+ |
const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
const char* dump_provider_name = nullptr; |
@@ -350,6 +395,7 @@ void MemoryDumpManager::ContinueAsyncProcessDump( |
AutoLock lock(lock_); |
auto mdp_info = pmd_async_state->next_dump_provider; |
+ DCHECK(mdp_info != dump_providers_.end()); |
mdp = mdp_info->dump_provider; |
dump_provider_name = mdp_info->name; |
pid = mdp_info->options.target_pid; |
@@ -380,14 +426,18 @@ void MemoryDumpManager::ContinueAsyncProcessDump( |
const bool did_post_task = task_runner->PostTask( |
Ruud van Asseldonk
2015/12/14 16:25:48
From the |PostTask| documentation, a true return value is no guarantee that the task will ever actually run.
|
FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
- Unretained(this), Passed(&pmd_async_state))); |
- if (did_post_task) |
+ Unretained(this), Unretained(pmd_async_state.get()))); |
+ |
+ if (did_post_task) { |
+ // Ownership is tranferred to the next ContinueAsyncProcessDump(). |
+ (void)pmd_async_state.release(); |
Ruud van Asseldonk
2015/12/14 16:25:48
You can do |pmd_async_state.reset()| instead.
|
return; |
+ } |
- // The thread is gone. At this point the best thing we can do is to |
- // disable the dump provider and abort this dump. |
+ // The thread is gone. Disable the dump provider, skip its dump and move |
+ // on with the other dumper providers. |
mdp_info->disabled = true; |
Ruud van Asseldonk
2015/12/14 16:25:48
Shouldn't all dump providers that share that thread be disabled as well, not just this one?
|
- return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
+ skip_dump = true; |
} |
} // AutoLock(lock_) |
@@ -410,6 +460,7 @@ void MemoryDumpManager::ContinueAsyncProcessDump( |
{ |
AutoLock lock(lock_); |
auto mdp_info = pmd_async_state->next_dump_provider; |
+ DCHECK(mdp_info != dump_providers_.end()); |
if (dump_successful) { |
mdp_info->consecutive_failures = 0; |
} else if (!skip_dump) { |
@@ -419,11 +470,14 @@ void MemoryDumpManager::ContinueAsyncProcessDump( |
} |
} |
++pmd_async_state->next_dump_provider; |
- finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); |
- |
+ if (pmd_async_state->next_dump_provider == dump_providers_.end()) { |
+ finalize = true; |
+ std::remove(outstanding_dumps_.begin(), outstanding_dumps_.end(), |
+ pmd_async_state.get()); |
+ } |
if (mdp_info->unregistered) |
dump_providers_.erase(mdp_info); |
- } |
+ } // AutoLock(lock_) |
if (!skip_dump && !dump_successful) { |
LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, " |
@@ -434,7 +488,7 @@ void MemoryDumpManager::ContinueAsyncProcessDump( |
if (finalize) |
return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
- ContinueAsyncProcessDump(std::move(pmd_async_state)); |
+ ContinueAsyncProcessDump(std::move(pmd_async_state.release())); |
Ruud van Asseldonk
2015/12/14 16:25:48
You don't need the move here.
|
} |
// static |
@@ -483,20 +537,6 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
TRACE_ID_MANGLE(dump_guid)); |
} |
-// static |
-void MemoryDumpManager::AbortDumpLocked( |
- MemoryDumpCallback callback, |
- scoped_refptr<SingleThreadTaskRunner> task_runner, |
- uint64_t dump_guid) { |
- if (callback.is_null()) |
- return; // There is nothing to NACK. |
- |
- // Post the callback even if we are already on the right thread to avoid |
- // invoking the callback while holding the lock_. |
- task_runner->PostTask(FROM_HERE, |
- Bind(callback, dump_guid, false /* success */)); |
-} |
- |
void MemoryDumpManager::OnTraceLogEnabled() { |
bool enabled; |
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |