Chromium Code Reviews

Unified Diff: base/trace_event/memory_dump_manager.cc

Issue 1540283003: Revert of [tracing] Simplify logic of MemoryDumpManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/memory_dump_manager.h"

 #include <algorithm>
 #include <utility>

 #include "base/atomic_sequence_num.h"
(...skipping 175 matching lines...)
 }

 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
     const scoped_refptr<SingleThreadTaskRunner>& task_runner,
     const MemoryDumpProvider::Options& options) {
   if (dumper_registrations_ignored_for_testing_)
     return;

-  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
-      new MemoryDumpProviderInfo(mdp, name, task_runner, options);
-
-  {
-    AutoLock lock(lock_);
-    bool already_registered = !dump_providers_.insert(mdpinfo).second;
-    // This actually happens in some tests which don't have a clean tear-down
-    // path for RenderThreadImpl::Init().
-    if (already_registered)
-      return;
-  }
+  MemoryDumpProviderInfo mdp_info(mdp, name, task_runner, options);
+  AutoLock lock(lock_);
+  auto iter_new = dump_providers_.insert(mdp_info);
+
+  // If there was a previous entry, replace it with the new one. This is to
+  // deal with the case where a dump provider unregisters itself and then
+  // re-registers before a memory dump happens, so its entry was still in the
+  // collection but flagged |unregistered|.
+  if (!iter_new.second) {
+    dump_providers_.erase(iter_new.first);
+    dump_providers_.insert(mdp_info);
+  }

   if (heap_profiling_enabled_)
     mdp->OnHeapProfilingEnabled(true);
 }

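Note on the restored (+) RegisterDumpProvider: the re-registration branch leans entirely on std::set semantics. insert() returns an (iterator, bool) pair and never overwrites an equivalent element, so a stale entry has to be erased and re-inserted by hand. A self-contained sketch of that dance, using plain std::set and an invented ProviderInfo type rather than the Chromium classes:

#include <iostream>
#include <set>

struct ProviderInfo {
  int id;             // stand-in for the (task_runner, dump_provider) key
  bool unregistered;  // stale flag; deliberately not part of the ordering
  bool operator<(const ProviderInfo& other) const { return id < other.id; }
};

int main() {
  std::set<ProviderInfo> providers;
  providers.insert({42, true});  // stale entry left behind by an unregister

  // Re-registration: insert() reports the collision via the bool half...
  auto result = providers.insert({42, false});
  if (!result.second) {
    // ...and leaves the old element in place, so swap it out manually.
    providers.erase(result.first);
    providers.insert({42, false});
  }
  std::cout << providers.begin()->unregistered << "\n";  // prints 0
}
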
 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
     const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
   RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
 }

 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
   AutoLock lock(lock_);

   auto mdp_iter = dump_providers_.begin();
   for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
-    if ((*mdp_iter)->dump_provider == mdp)
+    if (mdp_iter->dump_provider == mdp)
       break;
   }

   if (mdp_iter == dump_providers_.end())
     return;

   // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
   // only if the MDP has specified a thread affinity (via task_runner()) AND
   // the unregistration happens on the same thread (so the MDP cannot unregister
   // and OnMemoryDump() at the same time).
   // Otherwise, it is not possible to guarantee that its unregistration is
   // race-free. If you hit this DCHECK, your MDP has a bug.
   DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) ||
-         ((*mdp_iter)->task_runner &&
-          (*mdp_iter)->task_runner->BelongsToCurrentThread()))
-      << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
+         (mdp_iter->task_runner &&
+          mdp_iter->task_runner->BelongsToCurrentThread()))
+      << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to "
       << "unregister itself in a racy way. Please file a crbug.";

-  // The MDPInfo instance can still be referenced by the
-  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
-  // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
-  // to just skip it, without actually invoking the |mdp|, which might be
-  // destroyed by the caller soon after this method returns.
-  (*mdp_iter)->disabled = true;
-  dump_providers_.erase(mdp_iter);
+  mdp_iter->unregistered = true;
 }

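The DCHECK above encodes the one condition under which unregistration during tracing is race-free: the provider is bound to a thread and is being unregistered from that same thread. A toy illustration of the invariant, using std::thread in place of Chromium's SingleThreadTaskRunner (all names invented):

#include <cassert>
#include <thread>

struct ProviderRecord {
  // The thread the provider was registered on; OnMemoryDump() only ever runs
  // here, so removal from this thread cannot race with an in-flight dump.
  std::thread::id affinity = std::this_thread::get_id();
};

bool tracing_enabled = true;

void Unregister(const ProviderRecord& record) {
  // Mirror of the DCHECK: while tracing, only the affine thread may remove
  // the provider; any other thread could pull it out mid-OnMemoryDump().
  assert(!tracing_enabled || std::this_thread::get_id() == record.affinity);
}

int main() {
  ProviderRecord record;  // registered on the main thread
  Unregister(record);     // OK: same thread

  std::thread other([&] {
    // Unregister(record);  // would fire the assert: racy unregistration
  });
  other.join();
}
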
 void MemoryDumpManager::RequestGlobalDump(
     MemoryDumpType dump_type,
     MemoryDumpLevelOfDetail level_of_detail,
     const MemoryDumpCallback& callback) {
   // Bail out immediately if tracing is not enabled at all.
   if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
     if (!callback.is_null())
       callback.Run(0u /* guid */, false /* success */);
(...skipping 34 matching lines...)
 }

 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                           const MemoryDumpCallback& callback) {
   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
                                     TRACE_ID_MANGLE(args.dump_guid));

   scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
   {
     AutoLock lock(lock_);
-    pmd_async_state.reset(
-        new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_,
-                                        callback, dump_thread_->task_runner()));
+    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
+        args, dump_providers_.begin(), session_state_, callback,
+        dump_thread_->task_runner()));
   }

   TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                          TRACE_ID_MANGLE(args.dump_guid),
                          TRACE_EVENT_FLAG_FLOW_OUT);

   // Start the thread hop. |dump_providers_| are kept sorted by thread, so
   // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
   // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  ContinueAsyncProcessDump(std::move(pmd_async_state));
 }

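The "hop at most once per thread" claim in the comment above falls out of the ordering alone: if providers are stored sorted by their task runner, walking the sequence changes threads only at group boundaries. A toy demonstration with integers standing in for task-runner pointers (all names invented):

#include <algorithm>
#include <iostream>
#include <vector>

struct Provider {
  int task_runner_id;  // stand-in for the SingleThreadTaskRunner pointer
  const char* name;
};

int main() {
  std::vector<Provider> providers = {
      {2, "gpu"}, {1, "malloc"}, {2, "skia"}, {1, "partition_alloc"}};
  std::sort(providers.begin(), providers.end(),
            [](const Provider& a, const Provider& b) {
              return a.task_runner_id < b.task_runner_id;
            });

  int hops = 0, current_thread = -1;
  for (const Provider& p : providers) {
    if (p.task_runner_id != current_thread) {
      ++hops;  // one PostTask() per thread group, not one per provider
      current_thread = p.task_runner_id;
    }
    std::cout << p.name << " dumps on thread " << current_thread << "\n";
  }
  std::cout << "thread hops: " << hops << "\n";  // 2, not 4
}
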
 // At most one ContinueAsyncProcessDump() can be active at any time for a given
 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to
 // ensure consistency w.r.t. (un)registrations of |dump_providers_|.
 // The linearization of dump providers' OnMemoryDump invocations is achieved by
 // means of subsequent PostTask(s).
 //
 // 1) Prologue:
-//   - If this was the last hop, create a trace event, add it to the trace
-//     and finalize (invoke callback).
+//   - Check if the dump provider is disabled; if so, skip the dump.
 //   - Check if we are on the right thread. If not, hop and continue there.
-//   - Check if the dump provider is disabled; if so, skip the dump.
 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
 // 3) Epilogue:
 //   - Unregister the dump provider if it failed too many times consecutively.
-//   - Pop() the MDP from the |pending_dump_providers| list, eventually
-//     destroying the MDPInfo if that was unregistered in the meantime.
+//   - Advance the |next_dump_provider| iterator to the next dump provider.
+//   - If this was the last hop, create a trace event, add it to the trace
+//     and finalize (invoke callback).
+
 void MemoryDumpManager::ContinueAsyncProcessDump(
-    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
   // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
   // in the PostTask below don't end up registering their own dump providers
   // (for discounting trace memory overhead) while holding the |lock_|.
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

-  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
-  // why it isn't is because of the corner-case logic of |did_post_task| below,
-  // which needs to take back the ownership of the |pmd_async_state| when a
-  // thread goes away and consequently the PostTask() fails.
-  // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
-  // to prevent accidental leaks. Using a scoped_ptr would prevent us from
-  // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling.
-  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
-  owned_pmd_async_state = nullptr;
-
-  if (pmd_async_state->pending_dump_providers.empty())
-    return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
-
-  // Read MemoryDumpProviderInfo thread safety considerations in
-  // memory_dump_manager.h when accessing |mdpinfo| fields.
-  MemoryDumpProviderInfo* mdpinfo =
-      pmd_async_state->pending_dump_providers.back().get();
-
-  // If the dump provider did not specify a thread affinity, dump on
-  // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
-  // point (if tracing was disabled in the meanwhile). In such a case the
-  // PostTask() below will fail, but |task_runner| should always be non-null.
-  SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
-  if (!task_runner)
-    task_runner = pmd_async_state->dump_thread_task_runner.get();
-
-  if (!task_runner->BelongsToCurrentThread()) {
-    // It's time to hop onto another thread.
-    const bool did_post_task = task_runner->PostTask(
-        FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
-                        Unretained(this), Unretained(pmd_async_state.get())));
-    if (did_post_task) {
-      // Ownership is transferred to the next ContinueAsyncProcessDump().
-      ignore_result(pmd_async_state.release());
-      return;
-    }
-    // The thread is gone. Skip the dump provider and keep going.
-    mdpinfo->disabled = true;
-  }
-
-  // At this point either we are on the right thread (|mdpinfo.task_runner|)
-  // to access mdp fields, or the right thread is gone (and |disabled| == true).
-
-  if (!mdpinfo->disabled) {
-    // Invoke the dump provider.
-    TRACE_EVENT_WITH_FLOW1(kTraceCategory,
-                           "MemoryDumpManager::ContinueAsyncProcessDump",
-                           TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
-                           TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
-                           "dump_provider.name", mdpinfo->name);
-
-    // Pid of the target process being dumped. Often kNullProcessId (= current
-    // process), non-zero when the coordinator process creates dumps on behalf
-    // of child processes (see crbug.com/461788).
-    ProcessId target_pid = mdpinfo->options.target_pid;
-    ProcessMemoryDump* pmd =
-        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
-    MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
-    bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
-
-    if (dump_successful) {
-      mdpinfo->consecutive_failures = 0;
-    } else {
-      ++mdpinfo->consecutive_failures;
-      if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
-        mdpinfo->disabled = true;
-        LOG(ERROR) << "MemoryDumpProvider \"" << mdpinfo->name << "\" failed, "
-                   << "possibly due to sandboxing (crbug.com/461788). "
-                   << "Disabling dumper for current process. Try --no-sandbox.";
-      }
-    }
-  }  // if (!mdpinfo->disabled)
-
-  pmd_async_state->pending_dump_providers.pop_back();
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
+  const char* dump_provider_name = nullptr;
+
+  // Pid of the target process being dumped. Often kNullProcessId (= current
+  // process), non-zero when the coordinator process creates dumps on behalf
+  // of child processes (see crbug.com/461788).
+  ProcessId pid;
+
+  // DO NOT put any LOG() statement in the locked sections, as in some contexts
+  // (GPU process) LOG() ends up performing PostTask/IPCs.
+  MemoryDumpProvider* mdp;
+  bool skip_dump = false;
+  {
+    AutoLock lock(lock_);
+
+    auto mdp_info = pmd_async_state->next_dump_provider;
+    mdp = mdp_info->dump_provider;
+    dump_provider_name = mdp_info->name;
+    pid = mdp_info->options.target_pid;
+
+    // If the dump provider did not specify a thread affinity, dump on
+    // |dump_thread_|.
+    SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get();
+    if (!task_runner)
+      task_runner = pmd_async_state->dump_thread_task_runner.get();
+
+    // |dump_thread_| might have been Stop()-ed at this point (if tracing was
+    // disabled in the meanwhile). In such a case the PostTask() below will
+    // fail. |task_runner|, however, should always be non-null.
+    DCHECK(task_runner);
+
+    if (mdp_info->disabled || mdp_info->unregistered) {
+      skip_dump = true;
+    } else if (!task_runner->BelongsToCurrentThread()) {
+      // It's time to hop onto another thread.
+
+      // Copy the callback + arguments just for the unlikely case in which
+      // PostTask fails. In such a case the Bind helper will destroy the
+      // pmd_async_state and we must keep a copy of the fields to notify the
+      // abort.
+      MemoryDumpCallback callback = pmd_async_state->callback;
+      scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
+          pmd_async_state->callback_task_runner;
+
+      const bool did_post_task = task_runner->PostTask(
+          FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
+                          Unretained(this), Passed(&pmd_async_state)));
+      if (did_post_task)
+        return;
+
+      // The thread is gone. At this point the best thing we can do is to
+      // disable the dump provider and abort this dump.
+      mdp_info->disabled = true;
+      return AbortDumpLocked(callback, callback_task_runner, dump_guid);
+    }
+  }  // AutoLock(lock_)
+
+  // Invoke the dump provider without holding the |lock_|.
+  bool finalize = false;
+  bool dump_successful = false;
+
+  if (!skip_dump) {
+    TRACE_EVENT_WITH_FLOW1(kTraceCategory,
+                           "MemoryDumpManager::ContinueAsyncProcessDump",
+                           TRACE_ID_MANGLE(dump_guid),
+                           TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+                           "dump_provider.name", dump_provider_name);
+    MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+    ProcessMemoryDump* process_memory_dump =
+        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(pid);
+    dump_successful = mdp->OnMemoryDump(args, process_memory_dump);
+  }
+
+  {
+    AutoLock lock(lock_);
+    auto mdp_info = pmd_async_state->next_dump_provider;
+    if (dump_successful) {
+      mdp_info->consecutive_failures = 0;
+    } else if (!skip_dump) {
+      ++mdp_info->consecutive_failures;
+      if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+        mdp_info->disabled = true;
+      }
+    }
+    ++pmd_async_state->next_dump_provider;
+    finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
+
+    if (mdp_info->unregistered)
+      dump_providers_.erase(mdp_info);
+  }
+
+  if (!skip_dump && !dump_successful) {
+    LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, "
+               << "possibly due to sandboxing (crbug.com/461788). "
+               << "Disabling dumper for current process. Try --no-sandbox.";
+  }
+
+  if (finalize)
+    return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
+
+  ContinueAsyncProcessDump(std::move(pmd_async_state));
 }

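Both sides of ContinueAsyncProcessDump wrestle with the same ownership wrinkle: once the async state is bound into a posted task, a failed PostTask() destroys the task and the state it owns. The old (-) code worked around this with a raw pointer it could reclaim; the reverted (+) code hands ownership over with Passed() and copies out the fields needed to report the abort beforehand. A simplified sketch of the latter approach, with std::unique_ptr and an invented PostTask stand-in:

#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct DumpState {
  std::string callback_info;  // whatever we would need to NACK the request
};

// Stand-in for TaskRunner::PostTask(): on failure (thread gone) the bound
// task is destroyed, taking its owned state with it, much like Bind+Passed.
template <typename Task>
bool PostTask(Task task, bool thread_alive) {
  if (!thread_alive)
    return false;  // |task| goes out of scope here, destroying the state
  task();
  return true;
}

int main() {
  auto state = std::make_unique<DumpState>(DumpState{"guid=42"});

  // Copy what the abort path needs BEFORE handing |state| to the task: after
  // a failed post there is nothing left to read.
  std::string callback_info = state->callback_info;

  bool posted = PostTask(
      [s = std::move(state)] {
        std::cout << "continuing " << s->callback_info << "\n";
      },
      /*thread_alive=*/false);
  if (!posted)
    std::cout << "thread gone; aborting dump for " << callback_info << "\n";
}
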
 // static
 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
     scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
-  DCHECK(pmd_async_state->pending_dump_providers.empty());
   const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
   if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
     scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
         pmd_async_state->callback_task_runner;
     callback_task_runner->PostTask(
         FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                         Passed(&pmd_async_state)));
     return;
   }

(...skipping 23 matching lines...)

   if (!pmd_async_state->callback.is_null()) {
     pmd_async_state->callback.Run(dump_guid, true /* success */);
     pmd_async_state->callback.Reset();
   }

   TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                   TRACE_ID_MANGLE(dump_guid));
 }

+// static
+void MemoryDumpManager::AbortDumpLocked(
+    MemoryDumpCallback callback,
+    scoped_refptr<SingleThreadTaskRunner> task_runner,
+    uint64_t dump_guid) {
+  if (callback.is_null())
+    return;  // There is nothing to NACK.
+
+  // Post the callback even if we are already on the right thread to avoid
+  // invoking the callback while holding the |lock_|.
+  task_runner->PostTask(FROM_HERE,
+                        Bind(callback, dump_guid, false /* success */));
+}
+
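AbortDumpLocked's comment deserves a closer look: the function is called with |lock_| held, so running the callback synchronously could re-enter the manager and self-deadlock; posting defers it until after the lock is released. A minimal single-threaded sketch of the pattern, with a queue standing in for PostTask() and all names invented:

#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>

std::mutex lock_;
std::queue<std::function<void()>> posted_tasks;  // stand-in for PostTask()

void AbortDumpLocked(std::function<void(uint64_t, bool)> callback,
                     uint64_t dump_guid) {
  // Precondition: |lock_| is held by the caller. Defer the NACK rather than
  // invoking it here, so a callback that takes |lock_| cannot deadlock.
  posted_tasks.push([callback, dump_guid] { callback(dump_guid, false); });
}

int main() {
  {
    std::lock_guard<std::mutex> guard(lock_);
    AbortDumpLocked(
        [](uint64_t guid, bool success) {
          std::lock_guard<std::mutex> reentrant(lock_);  // safe: runs later
          std::cout << "dump " << guid << " success=" << success << "\n";
        },
        42);
  }  // |lock_| released here
  while (!posted_tasks.empty()) {
    posted_tasks.front()();
    posted_tasks.pop();
  }
}
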
 void MemoryDumpManager::OnTraceLogEnabled() {
   bool enabled;
   TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
   if (!enabled)
     return;

   // Initialize the TraceLog for the current thread. This is to avoid the
   // TraceLog memory dump provider being registered lazily in the PostTask()
   // below while the |lock_| is taken.
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

(...skipping 24 matching lines...)

     TRACE_EVENT_API_ADD_METADATA_EVENT(
         "typeNames", "typeNames",
         scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator));
   }

   DCHECK(!dump_thread_);
   dump_thread_ = std::move(dump_thread);
   session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator,
                                               type_name_deduplicator);

+  for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
+    it->disabled = false;
+    it->consecutive_failures = 0;
+  }
+
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

   // TODO(primiano): This is a temporary hack to disable periodic memory dumps
   // when running memory benchmarks until telemetry uses TraceConfig to
   // enable/disable periodic dumps. See crbug.com/529184.
   if (!is_coordinator_ ||
       CommandLine::ForCurrentProcess()->HasSwitch(
           "enable-memory-benchmarking")) {
     return;
   }

(...skipping 51 matching lines...)
 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
     MemoryDumpProvider* dump_provider,
     const char* name,
     const scoped_refptr<SingleThreadTaskRunner>& task_runner,
     const MemoryDumpProvider::Options& options)
     : dump_provider(dump_provider),
       name(name),
       task_runner(task_runner),
       options(options),
       consecutive_failures(0),
-      disabled(false) {}
+      disabled(false),
+      unregistered(false) {}

 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}

-bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
-    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
-    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
-  if (!a || !b)
-    return a.get() < b.get();
-  // Ensure that unbound providers (task_runner == nullptr) always run last.
-  // Rationale: some unbound dump providers are known to be slow; keep them last
-  // to avoid skewing timings of the other dump providers.
-  return std::tie(a->task_runner, a->dump_provider) >
-         std::tie(b->task_runner, b->dump_provider);
+bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
+    const MemoryDumpProviderInfo& other) const {
+  if (task_runner == other.task_runner)
+    return dump_provider < other.dump_provider;
+  // Ensure that unbound providers (task_runner == nullptr) always run last.
+  return !(task_runner < other.task_runner);
 }

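The two orderings above are worth comparing. The old (-) Comparator sorts descending via std::tie with >, exploiting the fact that nullptr compares below every real pointer, so unbound providers land last; the reverted (+) operator< expresses the same "unbound last" intent through !(a < b). A toy check of the descending-tuple trick, using raw addresses as keys (ordering between unrelated pointers is implementation-defined, which this sketch, like the original, quietly relies on):

#include <iostream>
#include <set>
#include <tuple>

struct Info {
  const void* task_runner;  // nullptr == unbound provider
  int provider_id;
  const char* name;
};

struct Comparator {
  bool operator()(const Info& a, const Info& b) const {
    // Descending tuple order: nullptr is the smallest pointer value, so
    // unbound (slow) providers sort to the very end of the set.
    return std::tie(a.task_runner, a.provider_id) >
           std::tie(b.task_runner, b.provider_id);
  }
};

int main() {
  int io_thread = 0, gpu_thread = 0;  // addresses used only as ordering keys
  std::set<Info, Comparator> providers = {
      {nullptr, 1, "unbound/slow"},
      {&io_thread, 2, "io"},
      {&gpu_thread, 3, "gpu"},
  };
  for (const Info& info : providers)
    std::cout << info.name << "\n";  // "unbound/slow" prints last
}
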
 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
     MemoryDumpRequestArgs req_args,
-    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
+    MemoryDumpProviderInfoSet::iterator next_dump_provider,
     const scoped_refptr<MemoryDumpSessionState>& session_state,
     MemoryDumpCallback callback,
     const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
     : req_args(req_args),
+      next_dump_provider(next_dump_provider),
       session_state(session_state),
       callback(callback),
       callback_task_runner(MessageLoop::current()->task_runner()),
-      dump_thread_task_runner(dump_thread_task_runner) {
-  pending_dump_providers.reserve(dump_providers.size());
-  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
-}
+      dump_thread_task_runner(dump_thread_task_runner) {}

 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
 }

 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
     GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
   auto iter = process_dumps.find(pid);
   if (iter == process_dumps.end()) {
     scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state));
     iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
   }
   return iter->second.get();
 }
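GetOrCreateMemoryDumpContainerForProcess is the classic find-or-lazily-insert map pattern: one container per target pid, created the first time any provider dumps on that process's behalf. The same shape with std::map and std::unique_ptr in place of the Chromium types (Dump is an invented placeholder):

#include <iostream>
#include <map>
#include <memory>

struct Dump {
  int entries = 0;
};

std::map<int, std::unique_ptr<Dump>> process_dumps;  // keyed by pid

Dump* GetOrCreateDumpForProcess(int pid) {
  auto iter = process_dumps.find(pid);
  if (iter == process_dumps.end()) {
    // First provider targeting |pid|: lazily create its container.
    iter = process_dumps.emplace(pid, std::make_unique<Dump>()).first;
  }
  return iter->second.get();
}

int main() {
  GetOrCreateDumpForProcess(1234)->entries++;
  GetOrCreateDumpForProcess(1234)->entries++;  // same container, no new alloc
  std::cout << process_dumps[1234]->entries << "\n";  // prints 2
}
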

 }  // namespace trace_event
 }  // namespace base