Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(281)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 1536533004: [tracing] Simplify logic of MemoryDumpManager (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: final without lock Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/atomic_sequence_num.h" 10 #include "base/atomic_sequence_num.h"
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
115 return Singleton<MemoryDumpManager, 115 return Singleton<MemoryDumpManager,
116 LeakySingletonTraits<MemoryDumpManager>>::get(); 116 LeakySingletonTraits<MemoryDumpManager>>::get();
117 } 117 }
118 118
119 // static 119 // static
120 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { 120 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
121 g_instance_for_testing = instance; 121 g_instance_for_testing = instance;
122 } 122 }
123 123
124 MemoryDumpManager::MemoryDumpManager() 124 MemoryDumpManager::MemoryDumpManager()
125 : delegate_(nullptr), 125 : dump_providers_sequence_number_(0),
126 delegate_(nullptr),
126 is_coordinator_(false), 127 is_coordinator_(false),
127 memory_tracing_enabled_(0), 128 memory_tracing_enabled_(0),
128 tracing_process_id_(kInvalidTracingProcessId), 129 tracing_process_id_(kInvalidTracingProcessId),
129 dumper_registrations_ignored_for_testing_(false) { 130 dumper_registrations_ignored_for_testing_(false) {
130 g_next_guid.GetNext(); // Make sure that first guid is not zero. 131 g_next_guid.GetNext(); // Make sure that first guid is not zero.
131 132
132 heap_profiling_enabled_ = CommandLine::InitializedForCurrentProcess() 133 heap_profiling_enabled_ = CommandLine::InitializedForCurrentProcess()
133 ? CommandLine::ForCurrentProcess()->HasSwitch( 134 ? CommandLine::ForCurrentProcess()->HasSwitch(
134 switches::kEnableHeapProfiling) 135 switches::kEnableHeapProfiling)
135 : false; 136 : false;
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
186 } 187 }
187 188
188 void MemoryDumpManager::RegisterDumpProvider( 189 void MemoryDumpManager::RegisterDumpProvider(
189 MemoryDumpProvider* mdp, 190 MemoryDumpProvider* mdp,
190 const char* name, 191 const char* name,
191 const scoped_refptr<SingleThreadTaskRunner>& task_runner, 192 const scoped_refptr<SingleThreadTaskRunner>& task_runner,
192 const MemoryDumpProvider::Options& options) { 193 const MemoryDumpProvider::Options& options) {
193 if (dumper_registrations_ignored_for_testing_) 194 if (dumper_registrations_ignored_for_testing_)
194 return; 195 return;
195 196
196 MemoryDumpProviderInfo mdp_info(mdp, name, task_runner, options); 197 {
197 AutoLock lock(lock_); 198 AutoLock lock(lock_);
198 auto iter_new = dump_providers_.insert(mdp_info); 199 dump_providers_sequence_number_++;
199 200 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
200 // If there was a previous entry, replace it with the new one. This is to deal 201 mdp, name, dump_providers_sequence_number_, task_runner, options);
201 // with the case where a dump provider unregisters itself and then re- 202 dump_providers_.insert(mdpinfo);
202 // registers before a memory dump happens, so its entry was still in the
203 // collection but flagged |unregistered|.
204 if (!iter_new.second) {
205 dump_providers_.erase(iter_new.first);
206 dump_providers_.insert(mdp_info);
207 } 203 }
208 204
209 if (heap_profiling_enabled_) 205 if (heap_profiling_enabled_)
210 mdp->OnHeapProfilingEnabled(true); 206 mdp->OnHeapProfilingEnabled(true);
211 } 207 }
212 208
213 void MemoryDumpManager::RegisterDumpProvider( 209 void MemoryDumpManager::RegisterDumpProvider(
214 MemoryDumpProvider* mdp, 210 MemoryDumpProvider* mdp,
215 const char* name, 211 const char* name,
216 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { 212 const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
217 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); 213 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
218 } 214 }
219 215
220 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { 216 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
221 AutoLock lock(lock_); 217 AutoLock lock(lock_);
222 218
223 auto mdp_iter = dump_providers_.begin(); 219 auto mdp_iter = dump_providers_.begin();
224 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { 220 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
225 if (mdp_iter->dump_provider == mdp) 221 if ((*mdp_iter)->dump_provider == mdp)
226 break; 222 break;
227 } 223 }
228 224
229 if (mdp_iter == dump_providers_.end()) 225 if (mdp_iter == dump_providers_.end())
230 return; 226 return;
231 227
232 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe 228 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
233 // only if the MDP has specified a thread affinity (via task_runner()) AND 229 // only if the MDP has specified a thread affinity (via task_runner()) AND
234 // the unregistration happens on the same thread (so the MDP cannot unregister 230 // the unregistration happens on the same thread (so the MDP cannot unregister
235 // and OnMemoryDump() at the same time). 231 // and OnMemoryDump() at the same time).
236 // Otherwise, it is not possible to guarantee that its unregistration is 232 // Otherwise, it is not possible to guarantee that its unregistration is
237 // race-free. If you hit this DCHECK, your MDP has a bug. 233 // race-free. If you hit this DCHECK, your MDP has a bug.
238 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || 234 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) ||
239 (mdp_iter->task_runner && 235 ((*mdp_iter)->task_runner &&
240 mdp_iter->task_runner->BelongsToCurrentThread())) 236 (*mdp_iter)->task_runner->BelongsToCurrentThread()))
241 << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " 237 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
242 << "unregister itself in a racy way. Please file a crbug."; 238 << "unregister itself in a racy way. Please file a crbug.";
243 239
244 mdp_iter->unregistered = true; 240 // The MDPInfo instance can still be referenced by the
241 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
242 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
243 // to just skip it, without actually invoking the |mdp|, which might be
244 // destroyed by the caller soon after this method returns.
245 (*mdp_iter)->disabled = true;
246 dump_providers_.erase(mdp_iter);
245 } 247 }
246 248
247 void MemoryDumpManager::RequestGlobalDump( 249 void MemoryDumpManager::RequestGlobalDump(
248 MemoryDumpType dump_type, 250 MemoryDumpType dump_type,
249 MemoryDumpLevelOfDetail level_of_detail, 251 MemoryDumpLevelOfDetail level_of_detail,
250 const MemoryDumpCallback& callback) { 252 const MemoryDumpCallback& callback) {
251 // Bail out immediately if tracing is not enabled at all. 253 // Bail out immediately if tracing is not enabled at all.
252 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { 254 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
253 if (!callback.is_null()) 255 if (!callback.is_null())
254 callback.Run(0u /* guid */, false /* success */); 256 callback.Run(0u /* guid */, false /* success */);
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
289 } 291 }
290 292
291 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, 293 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
292 const MemoryDumpCallback& callback) { 294 const MemoryDumpCallback& callback) {
293 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", 295 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
294 TRACE_ID_MANGLE(args.dump_guid)); 296 TRACE_ID_MANGLE(args.dump_guid));
295 297
296 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; 298 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
297 { 299 {
298 AutoLock lock(lock_); 300 AutoLock lock(lock_);
299 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 301 pmd_async_state.reset(
300 args, dump_providers_.begin(), session_state_, callback, 302 new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_,
301 dump_thread_->task_runner())); 303 callback, dump_thread_->task_runner()));
302 } 304 }
303 305
304 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", 306 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
305 TRACE_ID_MANGLE(args.dump_guid), 307 TRACE_ID_MANGLE(args.dump_guid),
306 TRACE_EVENT_FLAG_FLOW_OUT); 308 TRACE_EVENT_FLAG_FLOW_OUT);
307 309
308 // Start the thread hop. |dump_providers_| are kept sorted by thread, so 310 // Start the thread hop. |dump_providers_| are kept sorted by thread, so
309 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread 311 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
310 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). 312 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
311 ContinueAsyncProcessDump(std::move(pmd_async_state)); 313 ContinueAsyncProcessDump(pmd_async_state.release());
312 } 314 }
313 315
314 // At most one ContinueAsyncProcessDump() can be active at any time for a given 316 // At most one ContinueAsyncProcessDump() can be active at any time for a given
315 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to 317 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to
316 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. 318 // ensure consistency w.r.t. (un)registrations of |dump_providers_|.
317 // The linearization of dump providers' OnMemoryDump invocations is achieved by 319 // The linearization of dump providers' OnMemoryDump invocations is achieved by
318 // means of subsequent PostTask(s). 320 // means of subsequent PostTask(s).
319 // 321 //
320 // 1) Prologue: 322 // 1) Prologue:
323 // - If this was the last hop, create a trace event, add it to the trace
324 // and finalize (invoke callback).
325 // - Check if we are on the right thread. If not hop and continue there.
321 // - Check if the dump provider is disabled, if so skip the dump. 326 // - Check if the dump provider is disabled, if so skip the dump.
322 // - Check if we are on the right thread. If not hop and continue there.
323 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). 327 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
324 // 3) Epilogue: 328 // 3) Epilogue:
325 // - Unregister the dump provider if it failed too many times consecutively. 329 // - Unregister the dump provider if it failed too many times consecutively.
326 // - Advance the |next_dump_provider| iterator to the next dump provider. 330 // - Pop() the MDP from the |pending_dump_providers| list, possibly
327 // - If this was the last hop, create a trace event, add it to the trace 331 // destroying the MDPInfo if it was unregistered in the meantime.
328 // and finalize (invoke callback).
329
330 void MemoryDumpManager::ContinueAsyncProcessDump( 332 void MemoryDumpManager::ContinueAsyncProcessDump(
331 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 333 ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
332 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs 334 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
333 // in the PostTask below don't end up registering their own dump providers 335 // in the PostTask below don't end up registering their own dump providers
334 // (for discounting trace memory overhead) while holding the |lock_|. 336 // (for discounting trace memory overhead) while holding the |lock_|.
335 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 337 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
336 338
337 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 339 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
338 const char* dump_provider_name = nullptr; 340 // why it isn't is because of the corner case logic of |did_post_task| below,
341 // which needs to take back the ownership of the |pmd_async_state| when a
342 // thread goes away and consequently the PostTask() fails.
343 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
344 // to prevent accidental leaks. Using a scoped_ptr would prevent us from
345 // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling.
346 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
347 owned_pmd_async_state = nullptr;
339 348
340 // Pid of the target process being dumped. Often kNullProcessId (= current 349 if (pmd_async_state->pending_dump_providers.empty())
341 // process), non-zero when the coordinator process creates dumps on behalf 350 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
342 // of child processes (see crbug.com/461788).
343 ProcessId pid;
344 351
345 // DO NOT put any LOG() statement in the locked sections, as in some contexts 352 // Read MemoryDumpProviderInfo thread safety considerations in
346 // (GPU process) LOG() ends up performing PostTask/IPCs. 353 // memory_dump_manager.h when accessing |mdpinfo| fields.
347 MemoryDumpProvider* mdp; 354 MemoryDumpProviderInfo* mdpinfo =
348 bool skip_dump = false; 355 pmd_async_state->pending_dump_providers.back().get();
349 {
350 AutoLock lock(lock_);
351 356
352 auto mdp_info = pmd_async_state->next_dump_provider; 357 // If the dump provider did not specify a thread affinity, dump on
353 mdp = mdp_info->dump_provider; 358 // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
354 dump_provider_name = mdp_info->name; 359 // point (if tracing was disabled in the meanwhile). In such case the
355 pid = mdp_info->options.target_pid; 360 // PostTask() below will fail, but |task_runner| should always be non-null.
361 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
362 if (!task_runner)
363 task_runner = pmd_async_state->dump_thread_task_runner.get();
356 364
357 // If the dump provider did not specify a thread affinity, dump on 365 if (!task_runner->BelongsToCurrentThread()) {
358 // |dump_thread_|. 366 // It's time to hop onto another thread.
359 SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get(); 367 const bool did_post_task = task_runner->PostTask(
360 if (!task_runner) 368 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
361 task_runner = pmd_async_state->dump_thread_task_runner.get(); 369 Unretained(this), Unretained(pmd_async_state.get())));
370 if (did_post_task) {
371 // Ownership is transferred to the next ContinueAsyncProcessDump().
372 ignore_result(pmd_async_state.release());
373 return;
374 }
375 // The thread is gone. Skip the dump provider and keep going.
376 mdpinfo->disabled = true;
Ruud van Asseldonk 2015/12/18 12:40:43 Now that this is no longer inside the lock, is it
Primiano Tucci (use gerrit) 2015/12/18 14:02:11 This is precisely why I wrote the large block belo
377 }
362 378
363 // |dump_thread_| might have been Stop()-ed at this point (if tracing was 379 // We are now on the right thread to read non-const |mdpinfo| fields.
364 // disabled in the meanwhile). In such case the PostTask() below will fail. 380 // Technically there is a data race here while accessing |mdpinfo.disabled|
Ruud van Asseldonk 2015/12/18 12:40:42 /s/Tehcnically/Technically/
Primiano Tucci (use gerrit) 2015/12/18 14:02:11 Done.
365 // |task_runner|, however, should always be non-null. 381 // that is: if tracing is disabled and immediately re-enabled while a dump is
366 DCHECK(task_runner); 382 // in progress, the OnTraceLogEnabled() would clear-up the |disabled| and
383 // |consecutive_failures| fields while we concurrently access them here.
384 // This is an extremely unlikely case, not worth a lock (which wouldn't
385 // really solve the problem). In the worst case, we can end up with a spurious
386 // invocation of the dump provider.
367 387
368 if (mdp_info->disabled || mdp_info->unregistered) { 388 if (!mdpinfo->disabled) {
Ruud van Asseldonk 2015/12/18 12:40:42 No, you need to keep the lock. (Or add a lock per
Primiano Tucci (use gerrit) 2015/12/18 14:02:11 This cannot happen. A MDProvider can be unsubcribe
369 skip_dump = true; 389 // Invoke the dump provider.
370 } else if (!task_runner->BelongsToCurrentThread()) {
371 // It's time to hop onto another thread.
372
373 // Copy the callback + arguments just for the unlikely case in which
374 // PostTask fails. In such case the Bind helper will destroy the
375 // pmd_async_state and we must keep a copy of the fields to notify the
376 // abort.
377 MemoryDumpCallback callback = pmd_async_state->callback;
378 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
379 pmd_async_state->callback_task_runner;
380
381 const bool did_post_task = task_runner->PostTask(
382 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
383 Unretained(this), Passed(&pmd_async_state)));
384 if (did_post_task)
385 return;
386
387 // The thread is gone. At this point the best thing we can do is to
388 // disable the dump provider and abort this dump.
389 mdp_info->disabled = true;
390 return AbortDumpLocked(callback, callback_task_runner, dump_guid);
391 }
392 } // AutoLock(lock_)
393
394 // Invoke the dump provider without holding the |lock_|.
395 bool finalize = false;
396 bool dump_successful = false;
397
398 if (!skip_dump) {
399 TRACE_EVENT_WITH_FLOW1(kTraceCategory, 390 TRACE_EVENT_WITH_FLOW1(kTraceCategory,
400 "MemoryDumpManager::ContinueAsyncProcessDump", 391 "MemoryDumpManager::ContinueAsyncProcessDump",
401 TRACE_ID_MANGLE(dump_guid), 392 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
402 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, 393 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
403 "dump_provider.name", dump_provider_name); 394 "dump_provider.name", mdpinfo->name);
395
396 // Pid of the target process being dumped. Often kNullProcessId (= current
397 // process), non-zero when the coordinator process creates dumps on behalf
398 // of child processes (see crbug.com/461788).
399 ProcessId target_pid = mdpinfo->options.target_pid;
400 ProcessMemoryDump* pmd =
401 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
404 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; 402 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
405 ProcessMemoryDump* process_memory_dump = 403 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
406 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(pid);
407 dump_successful = mdp->OnMemoryDump(args, process_memory_dump);
408 }
409 404
410 {
411 AutoLock lock(lock_);
412 auto mdp_info = pmd_async_state->next_dump_provider;
413 if (dump_successful) { 405 if (dump_successful) {
414 mdp_info->consecutive_failures = 0; 406 mdpinfo->consecutive_failures = 0;
415 } else if (!skip_dump) { 407 } else {
416 ++mdp_info->consecutive_failures; 408 ++mdpinfo->consecutive_failures;
417 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { 409 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
418 mdp_info->disabled = true; 410 mdpinfo->disabled = true;
411 LOG(ERROR) << "MemoryDumpProvider \"" << mdpinfo->name << "\" failed, "
412 << "possibly due to sandboxing (crbug.com/461788)."
413 << "Disabling dumper for current process. Try --no-sandbox.";
419 } 414 }
420 } 415 }
421 ++pmd_async_state->next_dump_provider; 416 } // if (!mdpinfo->disabled)
422 finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
423 417
424 if (mdp_info->unregistered) 418 pmd_async_state->pending_dump_providers.pop_back();
425 dump_providers_.erase(mdp_info); 419 ContinueAsyncProcessDump(pmd_async_state.release());
426 }
427
428 if (!skip_dump && !dump_successful) {
429 LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, "
430 << "possibly due to sandboxing (crbug.com/461788)."
431 << "Disabling dumper for current process. Try --no-sandbox.";
432 }
433
434 if (finalize)
435 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
436
437 ContinueAsyncProcessDump(std::move(pmd_async_state));
438 } 420 }
439 421
440 // static 422 // static
441 void MemoryDumpManager::FinalizeDumpAndAddToTrace( 423 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
442 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 424 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
425 DCHECK(pmd_async_state->pending_dump_providers.empty());
443 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 426 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
444 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { 427 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
445 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = 428 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
446 pmd_async_state->callback_task_runner; 429 pmd_async_state->callback_task_runner;
447 callback_task_runner->PostTask( 430 callback_task_runner->PostTask(
448 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, 431 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
449 Passed(&pmd_async_state))); 432 Passed(&pmd_async_state)));
450 return; 433 return;
451 } 434 }
452 435
(...skipping 23 matching lines...) Expand all
476 459
477 if (!pmd_async_state->callback.is_null()) { 460 if (!pmd_async_state->callback.is_null()) {
478 pmd_async_state->callback.Run(dump_guid, true /* success */); 461 pmd_async_state->callback.Run(dump_guid, true /* success */);
479 pmd_async_state->callback.Reset(); 462 pmd_async_state->callback.Reset();
480 } 463 }
481 464
482 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", 465 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
483 TRACE_ID_MANGLE(dump_guid)); 466 TRACE_ID_MANGLE(dump_guid));
484 } 467 }
485 468
486 // static
487 void MemoryDumpManager::AbortDumpLocked(
488 MemoryDumpCallback callback,
489 scoped_refptr<SingleThreadTaskRunner> task_runner,
490 uint64_t dump_guid) {
491 if (callback.is_null())
492 return; // There is nothing to NACK.
493
494 // Post the callback even if we are already on the right thread to avoid
495 // invoking the callback while holding the lock_.
496 task_runner->PostTask(FROM_HERE,
497 Bind(callback, dump_guid, false /* success */));
498 }
499
500 void MemoryDumpManager::OnTraceLogEnabled() { 469 void MemoryDumpManager::OnTraceLogEnabled() {
501 bool enabled; 470 bool enabled;
502 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); 471 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
503 if (!enabled) 472 if (!enabled)
504 return; 473 return;
505 474
506 // Initialize the TraceLog for the current thread. This is to avoid that the 475 // Initialize the TraceLog for the current thread. This is to avoid that the
507 // TraceLog memory dump provider is registered lazily in the PostTask() below 476 // TraceLog memory dump provider is registered lazily in the PostTask() below
508 // while the |lock_| is taken; 477 // while the |lock_| is taken;
509 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 478 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
(...skipping 24 matching lines...) Expand all
534 TRACE_EVENT_API_ADD_METADATA_EVENT( 503 TRACE_EVENT_API_ADD_METADATA_EVENT(
535 "typeNames", "typeNames", 504 "typeNames", "typeNames",
536 scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator)); 505 scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator));
537 } 506 }
538 507
539 DCHECK(!dump_thread_); 508 DCHECK(!dump_thread_);
540 dump_thread_ = std::move(dump_thread); 509 dump_thread_ = std::move(dump_thread);
541 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator, 510 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator,
542 type_name_deduplicator); 511 type_name_deduplicator);
543 512
544 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { 513 for (const scoped_refptr<MemoryDumpProviderInfo>& mdpinfo : dump_providers_) {
545 it->disabled = false; 514 mdpinfo->disabled = false;
546 it->consecutive_failures = 0; 515 mdpinfo->consecutive_failures = 0;
547 } 516 }
548 517
549 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 518 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
550 519
551 // TODO(primiano): This is a temporary hack to disable periodic memory dumps 520 // TODO(primiano): This is a temporary hack to disable periodic memory dumps
552 // when running memory benchmarks until telemetry uses TraceConfig to 521 // when running memory benchmarks until telemetry uses TraceConfig to
553 // enable/disable periodic dumps. See crbug.com/529184 . 522 // enable/disable periodic dumps. See crbug.com/529184 .
554 if (!is_coordinator_ || 523 if (!is_coordinator_ ||
555 CommandLine::ForCurrentProcess()->HasSwitch( 524 CommandLine::ForCurrentProcess()->HasSwitch(
556 "enable-memory-benchmarking")) { 525 "enable-memory-benchmarking")) {
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
603 dump_thread->Stop(); 572 dump_thread->Stop();
604 } 573 }
605 574
606 uint64_t MemoryDumpManager::GetTracingProcessId() const { 575 uint64_t MemoryDumpManager::GetTracingProcessId() const {
607 return delegate_->GetTracingProcessId(); 576 return delegate_->GetTracingProcessId();
608 } 577 }
609 578
610 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( 579 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
611 MemoryDumpProvider* dump_provider, 580 MemoryDumpProvider* dump_provider,
612 const char* name, 581 const char* name,
582 uint32_t registration_order,
613 const scoped_refptr<SingleThreadTaskRunner>& task_runner, 583 const scoped_refptr<SingleThreadTaskRunner>& task_runner,
614 const MemoryDumpProvider::Options& options) 584 const MemoryDumpProvider::Options& options)
615 : dump_provider(dump_provider), 585 : dump_provider(dump_provider),
616 name(name), 586 name(name),
587 registration_order(registration_order),
617 task_runner(task_runner), 588 task_runner(task_runner),
618 options(options), 589 options(options),
619 consecutive_failures(0), 590 consecutive_failures(0),
620 disabled(false), 591 disabled(false) {}
621 unregistered(false) {}
622 592
623 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} 593 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
624 594
625 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( 595 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
626 const MemoryDumpProviderInfo& other) const { 596 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
627 if (task_runner == other.task_runner) 597 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
628 return dump_provider < other.dump_provider; 598 if (!a || !b)
599 return a.get() < b.get();
629 // Ensure that unbound providers (task_runner == nullptr) always run last. 600 // Ensure that unbound providers (task_runner == nullptr) always run last.
630 return !(task_runner < other.task_runner); 601 // Rationale: some unbound dump providers are known to be slow, keep them last
602 // to avoid skewing timings of the other dump providers.
603 return std::tie(a->task_runner, a->registration_order) >
604 std::tie(b->task_runner, b->registration_order);
Ruud van Asseldonk 2015/12/18 12:40:42 Looks much nicer than before :D
Primiano Tucci (use gerrit) 2015/12/18 14:02:11 Acknowledged.
631 } 605 }
632 606
633 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( 607 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
634 MemoryDumpRequestArgs req_args, 608 MemoryDumpRequestArgs req_args,
635 MemoryDumpProviderInfoSet::iterator next_dump_provider, 609 const MemoryDumpProviderInfo::OrderedSet& dump_providers,
636 const scoped_refptr<MemoryDumpSessionState>& session_state, 610 const scoped_refptr<MemoryDumpSessionState>& session_state,
637 MemoryDumpCallback callback, 611 MemoryDumpCallback callback,
638 const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner) 612 const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
639 : req_args(req_args), 613 : req_args(req_args),
640 next_dump_provider(next_dump_provider),
641 session_state(session_state), 614 session_state(session_state),
642 callback(callback), 615 callback(callback),
643 callback_task_runner(MessageLoop::current()->task_runner()), 616 callback_task_runner(MessageLoop::current()->task_runner()),
644 dump_thread_task_runner(dump_thread_task_runner) {} 617 dump_thread_task_runner(dump_thread_task_runner) {
618 pending_dump_providers.reserve(dump_providers.size());
619 pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
620 }
645 621
646 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { 622 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
647 } 623 }
648 624
649 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: 625 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
650 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { 626 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
651 auto iter = process_dumps.find(pid); 627 auto iter = process_dumps.find(pid);
652 if (iter == process_dumps.end()) { 628 if (iter == process_dumps.end()) {
653 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); 629 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state));
654 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 630 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
655 } 631 }
656 return iter->second.get(); 632 return iter->second.get();
657 } 633 }
658 634
659 } // namespace trace_event 635 } // namespace trace_event
660 } // namespace base 636 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/memory_dump_manager.h ('k') | base/trace_event/memory_dump_manager_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698