Chromium Code Reviews

Side by Side Diff: base/tracked_objects.cc

Issue 1021053003: Delivering the FIRST_NONEMPTY_PAINT phase changing event to base/ (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@phase_splitting
Patch Set: More asvitkine@ comments. Created 5 years, 8 months ago
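For context only (this sketch is not part of the patch): based on the signatures introduced below, the new phase API could be driven roughly as follows; the phase numbers are purely illustrative.

    // Mark the profiling phase that just ended (e.g. the one closed by the
    // first non-empty paint) as complete; its data is snapshotted and stored.
    tracked_objects::ThreadData::OnProfilingPhaseCompletion(0);

    // Later, collect everything: the stored snapshot of completed phase 0
    // plus a live snapshot of the currently running phase 1.
    tracked_objects::ProcessDataSnapshot snapshot;
    tracked_objects::ThreadData::Snapshot(1, &snapshot);
    // snapshot.phased_process_data_snapshots now has entries for phases 0 and 1.
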
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <limits.h> 7 #include <limits.h>
8 #include <stdlib.h> 8 #include <stdlib.h>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/leak_annotations.h" 14 #include "base/debug/leak_annotations.h"
15 #include "base/logging.h" 15 #include "base/logging.h"
16 #include "base/process/process_handle.h" 16 #include "base/process/process_handle.h"
17 #include "base/profiler/alternate_timer.h" 17 #include "base/profiler/alternate_timer.h"
18 #include "base/stl_util.h"
18 #include "base/strings/stringprintf.h" 19 #include "base/strings/stringprintf.h"
19 #include "base/third_party/valgrind/memcheck.h" 20 #include "base/third_party/valgrind/memcheck.h"
20 #include "base/tracking_info.h" 21 #include "base/tracking_info.h"
21 22
22 using base::TimeDelta; 23 using base::TimeDelta;
23 24
24 namespace base { 25 namespace base {
25 class TimeDelta; 26 class TimeDelta;
26 } 27 }
27 28
(...skipping 192 matching lines...)
220 221
221 //------------------------------------------------------------------------------ 222 //------------------------------------------------------------------------------
222 Births::Births(const Location& location, const ThreadData& current) 223 Births::Births(const Location& location, const ThreadData& current)
223 : BirthOnThread(location, current), 224 : BirthOnThread(location, current),
224 birth_count_(1) { } 225 birth_count_(1) { }
225 226
226 int Births::birth_count() const { return birth_count_; } 227 int Births::birth_count() const { return birth_count_; }
227 228
228 void Births::RecordBirth() { ++birth_count_; } 229 void Births::RecordBirth() { ++birth_count_; }
229 230
231 void Births::SubtractBirths(int count) {
232 birth_count_ -= count;
jar (doing other things) 2015/04/01 17:10:25 This is unsafe, unless *all* folks that access this ...
vadimt 2015/04/06 23:25:12 Done.
233 }
234
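// Not part of the patch: a hypothetical sketch of how the reviewer's
// thread-safety concern could be addressed, using base/atomicops.h (already
// included by this file) to make the counter atomic. The CL may well resolve
// the comment differently (e.g. by guaranteeing single-threaded access).
//
//   base::subtle::Atomic32 birth_count_;
//
//   void Births::RecordBirth() {
//     base::subtle::NoBarrier_AtomicIncrement(&birth_count_, 1);
//   }
//
//   void Births::SubtractBirths(int count) {
//     base::subtle::NoBarrier_AtomicIncrement(&birth_count_, -count);
//   }
//
// Alternatively, every reader and writer of birth_count_ could agree to hold
// the same lock, such as the owning ThreadData's map_lock_.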
230 //------------------------------------------------------------------------------ 235 //------------------------------------------------------------------------------
231 // ThreadData maintains the central data for all births and deaths on a single 236 // ThreadData maintains the central data for all births and deaths on a single
232 // thread. 237 // thread.
233 238
234 // TODO(jar): We should pull all these static vars together, into a struct, and 239 // TODO(jar): We should pull all these static vars together, into a struct, and
235 // optimize layout so that we benefit from locality of reference during accesses 240 // optimize layout so that we benefit from locality of reference during accesses
236 // to them. 241 // to them.
237 242
238 // static 243 // static
239 NowFunction* ThreadData::now_function_ = NULL; 244 NowFunction* ThreadData::now_function_ = NULL;
(...skipping 22 matching lines...)
262 // static 267 // static
263 ThreadData* ThreadData::first_retired_worker_ = NULL; 268 ThreadData* ThreadData::first_retired_worker_ = NULL;
264 269
265 // static 270 // static
266 base::LazyInstance<base::Lock>::Leaky 271 base::LazyInstance<base::Lock>::Leaky
267 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; 272 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;
268 273
269 // static 274 // static
270 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; 275 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
271 276
277 // static
278 base::LazyInstance<PhasedProcessDataSnapshotMap>
279 ThreadData::completed_phases_snapshots_ = LAZY_INSTANCE_INITIALIZER;
280
272 ThreadData::ThreadData(const std::string& suggested_name) 281 ThreadData::ThreadData(const std::string& suggested_name)
273 : next_(NULL), 282 : next_(NULL),
274 next_retired_worker_(NULL), 283 next_retired_worker_(NULL),
275 worker_thread_number_(0), 284 worker_thread_number_(0),
276 incarnation_count_for_pool_(-1), 285 incarnation_count_for_pool_(-1),
277 current_stopwatch_(NULL) { 286 current_stopwatch_(NULL) {
278 DCHECK_GE(suggested_name.size(), 0u); 287 DCHECK_GE(suggested_name.size(), 0u);
279 thread_name_ = suggested_name; 288 thread_name_ = suggested_name;
280 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. 289 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
281 } 290 }
(...skipping 101 matching lines...)
383 return; 392 return;
384 } 393 }
385 // We must NOT do any allocations during this callback. 394 // We must NOT do any allocations during this callback.
386 // Using the simple linked lists avoids all allocations. 395 // Using the simple linked lists avoids all allocations.
387 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); 396 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
388 this->next_retired_worker_ = first_retired_worker_; 397 this->next_retired_worker_ = first_retired_worker_;
389 first_retired_worker_ = this; 398 first_retired_worker_ = this;
390 } 399 }
391 400
392 // static 401 // static
393 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { 402 void ThreadData::Snapshot(int current_profiling_phase,
403 ProcessDataSnapshot* process_data_snapshot) {
404 process_data_snapshot->phased_process_data_snapshots =
405 completed_phases_snapshots_.Get();
406
407 DCHECK(!ContainsKey(process_data_snapshot->phased_process_data_snapshots,
408 current_profiling_phase));
394 ThreadData::SnapshotCurrentPhase( 409 ThreadData::SnapshotCurrentPhase(
395 &process_data_snapshot->phased_process_data_snapshots[0]); 410 false, &process_data_snapshot
411 ->phased_process_data_snapshots[current_profiling_phase]);
412 }
413
414 // static
415 void ThreadData::OnProfilingPhaseCompletion(int profiling_phase) {
416 if (!kTrackAllTaskObjects)
417 return; // Not compiled in.
418
419 PhasedProcessDataSnapshotMap& snapshots = completed_phases_snapshots_.Get();
420 DCHECK(!ContainsKey(snapshots, profiling_phase));
421 ThreadData::SnapshotCurrentPhase(true, &snapshots[profiling_phase]);
396 } 422 }
397 423
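// Summary (not part of the patch): OnProfilingPhaseCompletion(N) takes a
// resetting snapshot of the data accumulated so far and files it under phase
// N in completed_phases_snapshots_. A later call to Snapshot(M, ...) first
// copies every stored completed phase into the result, then adds a
// non-resetting snapshot of the still-running phase M.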
398 Births* ThreadData::TallyABirth(const Location& location) { 424 Births* ThreadData::TallyABirth(const Location& location) {
399 BirthMap::iterator it = birth_map_.find(location); 425 BirthMap::iterator it = birth_map_.find(location);
400 Births* child; 426 Births* child;
401 if (it != birth_map_.end()) { 427 if (it != birth_map_.end()) {
402 child = it->second; 428 child = it->second;
403 child->RecordBirth(); 429 child->RecordBirth();
404 } else { 430 } else {
405 child = new Births(location, *this); // Leak this. 431 child = new Births(location, *this); // Leak this.
(...skipping 11 matching lines...)
417 // Lock since the map may get relocated now, and other threads sometimes 443 // Lock since the map may get relocated now, and other threads sometimes
418 // snapshot it (but they lock before copying it). 444 // snapshot it (but they lock before copying it).
419 base::AutoLock lock(map_lock_); 445 base::AutoLock lock(map_lock_);
420 parent_child_set_.insert(pair); 446 parent_child_set_.insert(pair);
421 } 447 }
422 } 448 }
423 449
424 return child; 450 return child;
425 } 451 }
426 452
427 void ThreadData::TallyADeath(const Births& birth, 453 void ThreadData::TallyADeath(const Births& births,
428 int32 queue_duration, 454 int32 queue_duration,
429 const TaskStopwatch& stopwatch) { 455 const TaskStopwatch& stopwatch) {
430 int32 run_duration = stopwatch.RunDurationMs(); 456 int32 run_duration = stopwatch.RunDurationMs();
431 457
432 // Stir in some randomness, plus add constant in case durations are zero. 458 // Stir in some randomness, plus add constant in case durations are zero.
433 const uint32 kSomePrimeNumber = 2147483647; 459 const uint32 kSomePrimeNumber = 2147483647;
434 random_number_ += queue_duration + run_duration + kSomePrimeNumber; 460 random_number_ += queue_duration + run_duration + kSomePrimeNumber;
435 // An address is going to have some randomness to it as well ;-). 461 // An address is going to have some randomness to it as well ;-).
436 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); 462 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0));
437 463
438 // We don't have queue durations without OS timer. OS timer is automatically 464 // We don't have queue durations without OS timer. OS timer is automatically
439 // used for task-post-timing, so the use of an alternate timer implies all 465 // used for task-post-timing, so the use of an alternate timer implies all
440 // queue times are invalid, unless it was explicitly said that we can trust 466 // queue times are invalid, unless it was explicitly said that we can trust
441 // the alternate timer. 467 // the alternate timer.
442 if (kAllowAlternateTimeSourceHandling && 468 if (kAllowAlternateTimeSourceHandling &&
443 now_function_ && 469 now_function_ &&
444 !now_function_is_time_) { 470 !now_function_is_time_) {
445 queue_duration = 0; 471 queue_duration = 0;
446 } 472 }
447 473
448 DeathMap::iterator it = death_map_.find(&birth); 474 DeathMap::iterator it = death_map_.find(&births);
449 DeathData* death_data; 475 DeathData* death_data;
450 if (it != death_map_.end()) { 476 if (it != death_map_.end()) {
451 death_data = &it->second; 477 death_data = &it->second;
452 } else { 478 } else {
453 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. 479 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
454 death_data = &death_map_[&birth]; 480 death_data = &death_map_[&births];
455 } // Release lock ASAP. 481 } // Release lock ASAP.
456 death_data->RecordDeath(queue_duration, run_duration, random_number_); 482 death_data->RecordDeath(queue_duration, run_duration, random_number_);
457 483
458 if (!kTrackParentChildLinks) 484 if (!kTrackParentChildLinks)
459 return; 485 return;
460 if (!parent_stack_.empty()) { // We might get turned off. 486 if (!parent_stack_.empty()) { // We might get turned off.
461 DCHECK_EQ(parent_stack_.top(), &birth); 487 DCHECK_EQ(parent_stack_.top(), &births);
462 parent_stack_.pop(); 488 parent_stack_.pop();
463 } 489 }
464 } 490 }
465 491
466 // static 492 // static
467 Births* ThreadData::TallyABirthIfActive(const Location& location) { 493 Births* ThreadData::TallyABirthIfActive(const Location& location) {
468 if (!kTrackAllTaskObjects) 494 if (!kTrackAllTaskObjects)
469 return NULL; // Not compiled in. 495 return NULL; // Not compiled in.
470 496
471 if (!TrackingStatus()) 497 if (!TrackingStatus())
472 return NULL; 498 return NULL;
473 ThreadData* current_thread_data = Get(); 499 ThreadData* current_thread_data = Get();
474 if (!current_thread_data) 500 if (!current_thread_data)
475 return NULL; 501 return NULL;
476 return current_thread_data->TallyABirth(location); 502 return current_thread_data->TallyABirth(location);
477 } 503 }
478 504
479 // static 505 // static
480 void ThreadData::TallyRunOnNamedThreadIfTracking( 506 void ThreadData::TallyRunOnNamedThreadIfTracking(
481 const base::TrackingInfo& completed_task, 507 const base::TrackingInfo& completed_task,
482 const TaskStopwatch& stopwatch) { 508 const TaskStopwatch& stopwatch) {
483 if (!kTrackAllTaskObjects) 509 if (!kTrackAllTaskObjects)
484 return; // Not compiled in. 510 return; // Not compiled in.
485 511
486 // Even if we have been DEACTIVATED, we will process any pending births so 512 // Even if we have been DEACTIVATED, we will process any pending births so
487 // that our data structures (which counted the outstanding births) remain 513 // that our data structures (which counted the outstanding births) remain
488 // consistent. 514 // consistent.
489 const Births* birth = completed_task.birth_tally; 515 const Births* births = completed_task.birth_tally;
490 if (!birth) 516 if (!births)
491 return; 517 return;
492 ThreadData* current_thread_data = stopwatch.GetThreadData(); 518 ThreadData* current_thread_data = stopwatch.GetThreadData();
493 if (!current_thread_data) 519 if (!current_thread_data)
494 return; 520 return;
495 521
496 // Watch out for a race where status_ is changing, and hence one or both 522 // Watch out for a race where status_ is changing, and hence one or both
497 // of start_of_run or end_of_run is zero. In that case, we didn't bother to 523 // of start_of_run or end_of_run is zero. In that case, we didn't bother to
498 // get a time value since we "weren't tracking" and we were trying to be 524 // get a time value since we "weren't tracking" and we were trying to be
499 // efficient by not calling for a genuine time value. For simplicity, we'll 525 // efficient by not calling for a genuine time value. For simplicity, we'll
500 // use a default zero duration when we can't calculate a true value. 526 // use a default zero duration when we can't calculate a true value.
501 TrackedTime start_of_run = stopwatch.StartTime(); 527 TrackedTime start_of_run = stopwatch.StartTime();
502 int32 queue_duration = 0; 528 int32 queue_duration = 0;
503 if (!start_of_run.is_null()) { 529 if (!start_of_run.is_null()) {
504 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) 530 queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
505 .InMilliseconds(); 531 .InMilliseconds();
506 } 532 }
507 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 533 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
508 } 534 }
509 535
510 // static 536 // static
511 void ThreadData::TallyRunOnWorkerThreadIfTracking( 537 void ThreadData::TallyRunOnWorkerThreadIfTracking(
512 const Births* birth, 538 const Births* births,
513 const TrackedTime& time_posted, 539 const TrackedTime& time_posted,
514 const TaskStopwatch& stopwatch) { 540 const TaskStopwatch& stopwatch) {
515 if (!kTrackAllTaskObjects) 541 if (!kTrackAllTaskObjects)
516 return; // Not compiled in. 542 return; // Not compiled in.
517 543
518 // Even if we have been DEACTIVATED, we will process any pending births so 544 // Even if we have been DEACTIVATED, we will process any pending births so
519 // that our data structures (which counted the outstanding births) remain 545 // that our data structures (which counted the outstanding births) remain
520 // consistent. 546 // consistent.
521 if (!birth) 547 if (!births)
522 return; 548 return;
523 549
524 // TODO(jar): Support the option to coalesce all worker-thread activity under 550 // TODO(jar): Support the option to coalesce all worker-thread activity under
525 // one ThreadData instance that uses locks to protect *all* access. This will 551 // one ThreadData instance that uses locks to protect *all* access. This will
526 // reduce memory (making it provably bounded), but run incrementally slower 552 // reduce memory (making it provably bounded), but run incrementally slower
527 // (since we'll use locks on TallyABirth and TallyADeath). The good news is 553 // (since we'll use locks on TallyABirth and TallyADeath). The good news is
528 // that the locks on TallyADeath will be *after* the worker thread has run, 554 // that the locks on TallyADeath will be *after* the worker thread has run,
529 // and hence nothing will be waiting for the completion (... besides some 555 // and hence nothing will be waiting for the completion (... besides some
530 // other thread that might like to run). Also, the worker threads tasks are 556 // other thread that might like to run). Also, the worker threads tasks are
531 // generally longer, and hence the cost of the lock may perchance be amortized 557 // generally longer, and hence the cost of the lock may perchance be amortized
532 // over the long task's lifetime. 558 // over the long task's lifetime.
533 ThreadData* current_thread_data = stopwatch.GetThreadData(); 559 ThreadData* current_thread_data = stopwatch.GetThreadData();
534 if (!current_thread_data) 560 if (!current_thread_data)
535 return; 561 return;
536 562
537 TrackedTime start_of_run = stopwatch.StartTime(); 563 TrackedTime start_of_run = stopwatch.StartTime();
538 int32 queue_duration = 0; 564 int32 queue_duration = 0;
539 if (!start_of_run.is_null()) { 565 if (!start_of_run.is_null()) {
540 queue_duration = (start_of_run - time_posted).InMilliseconds(); 566 queue_duration = (start_of_run - time_posted).InMilliseconds();
541 } 567 }
542 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 568 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
543 } 569 }
544 570
545 // static 571 // static
546 void ThreadData::TallyRunInAScopedRegionIfTracking( 572 void ThreadData::TallyRunInAScopedRegionIfTracking(
547 const Births* birth, 573 const Births* births,
548 const TaskStopwatch& stopwatch) { 574 const TaskStopwatch& stopwatch) {
549 if (!kTrackAllTaskObjects) 575 if (!kTrackAllTaskObjects)
550 return; // Not compiled in. 576 return; // Not compiled in.
551 577
552 // Even if we have been DEACTIVATED, we will process any pending births so 578 // Even if we have been DEACTIVATED, we will process any pending births so
553 // that our data structures (which counted the outstanding births) remain 579 // that our data structures (which counted the outstanding births) remain
554 // consistent. 580 // consistent.
555 if (!birth) 581 if (!births)
556 return; 582 return;
557 583
558 ThreadData* current_thread_data = stopwatch.GetThreadData(); 584 ThreadData* current_thread_data = stopwatch.GetThreadData();
559 if (!current_thread_data) 585 if (!current_thread_data)
560 return; 586 return;
561 587
562 int32 queue_duration = 0; 588 int32 queue_duration = 0;
563 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 589 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
564 } 590 }
565 591
566 // static 592 // static
567 void ThreadData::SnapshotAllExecutedTasks( 593 void ThreadData::SnapshotAllExecutedTasks(
594 bool reset,
568 ProcessDataPhaseSnapshot* process_data_phase, 595 ProcessDataPhaseSnapshot* process_data_phase,
569 BirthCountMap* birth_counts) { 596 BirthCountMap* birth_counts) {
570 if (!kTrackAllTaskObjects) 597 if (!kTrackAllTaskObjects)
571 return; // Not compiled in. 598 return; // Not compiled in.
572 599
573 // Get an unchanging copy of a ThreadData list. 600 // Get an unchanging copy of a ThreadData list.
574 ThreadData* my_list = ThreadData::first(); 601 ThreadData* my_list = ThreadData::first();
575 602
603 DeathResetResults death_reset_results;
604
576 // Gather data serially. 605 // Gather data serially.
577 // This hackish approach *can* get some slightly corrupt tallies, as we are 606 // This hackish approach *can* get some slightly corrupt tallies, as we are
578 // grabbing values without the protection of a lock, but it has the advantage 607 // grabbing values without the protection of a lock, but it has the advantage
579 // of working even with threads that don't have message loops. If a user 608 // of working even with threads that don't have message loops. If a user
580 // sees any strangeness, they can always just run their stats gathering a 609 // sees any strangeness, they can always just run their stats gathering a
581 // second time. 610 // second time.
582 for (ThreadData* thread_data = my_list; 611 for (ThreadData* thread_data = my_list;
583 thread_data; 612 thread_data;
584 thread_data = thread_data->next()) { 613 thread_data = thread_data->next()) {
585 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts); 614 thread_data->SnapshotExecutedTasks(process_data_phase,
615 reset ? &death_reset_results : nullptr,
616 birth_counts);
617 }
618
619 if (reset) {
620 for (ThreadData* thread_data = my_list; thread_data;
621 thread_data = thread_data->next()) {
622 thread_data->SubtractDeathResultsFromBirths(death_reset_results);
623 }
586 } 624 }
587 } 625 }
588 626
589 // static 627 // static
590 void ThreadData::SnapshotCurrentPhase( 628 void ThreadData::SnapshotCurrentPhase(
629 bool reset,
591 ProcessDataPhaseSnapshot* process_data_phase) { 630 ProcessDataPhaseSnapshot* process_data_phase) {
592 // Add births that have run to completion to |collected_data|. 631 // Add births that have run to completion to |collected_data|.
593 // |birth_counts| tracks the total number of births recorded at each location 632 // |birth_counts| tracks the total number of births recorded at each location
594 // for which we have not seen a death count. 633 // for which we have not seen a death count.
595 BirthCountMap birth_counts; 634 BirthCountMap birth_counts;
596 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts); 635 ThreadData::SnapshotAllExecutedTasks(reset, process_data_phase,
636 &birth_counts);
597 637
598 // Add births that are still active -- i.e. objects that have tallied a birth, 638 // Add births that are still active -- i.e. objects that have tallied a birth,
599 // but have not yet tallied a matching death, and hence must be either 639 // but have not yet tallied a matching death, and hence must be either
600 // running, queued up, or being held in limbo for future posting. 640 // running, queued up, or being held in limbo for future posting.
601 for (const auto& birth_count : birth_counts) { 641 for (const auto& birth_count : birth_counts) {
602 if (birth_count.second > 0) { 642 if (birth_count.second > 0) {
603 process_data_phase->tasks.push_back(TaskSnapshot( 643 process_data_phase->tasks.push_back(TaskSnapshot(
604 *birth_count.first, DeathData(birth_count.second), "Still_Alive")); 644 *birth_count.first, DeathData(birth_count.second), "Still_Alive"));
605 } 645 }
606 } 646 }
607 } 647 }
608 648
609 void ThreadData::SnapshotExecutedTasks( 649 void ThreadData::SnapshotExecutedTasks(
610 ProcessDataPhaseSnapshot* process_data_phase, 650 ProcessDataPhaseSnapshot* process_data_phase,
651 DeathResetResults* death_reset_results,
611 BirthCountMap* birth_counts) { 652 BirthCountMap* birth_counts) {
612 // Get copy of data, so that the data will not change during the iterations 653 // Get copy of data, so that the data will not change during the iterations
613 // and processing. 654 // and processing.
614 ThreadData::BirthMap birth_map; 655 ThreadData::BirthMap birth_map;
615 ThreadData::DeathMap death_map; 656 ThreadData::DeathMap death_map;
616 ThreadData::ParentChildSet parent_child_set; 657 ThreadData::ParentChildSet parent_child_set;
617 SnapshotMaps(&birth_map, &death_map, &parent_child_set); 658 SnapshotMaps(&birth_map, &death_map, death_reset_results, &parent_child_set);
618 659
619 for (const auto& death : death_map) { 660 for (const auto& death : death_map) {
620 process_data_phase->tasks.push_back( 661 process_data_phase->tasks.push_back(
621 TaskSnapshot(*death.first, death.second, thread_name())); 662 TaskSnapshot(*death.first, death.second, thread_name()));
622 (*birth_counts)[death.first] -= death.first->birth_count(); 663 // We don't populate birth_counts if a reset was requested.
664 if (death_reset_results == nullptr)
665 (*birth_counts)[death.first] -= death.first->birth_count();
623 } 666 }
624 667
625 for (const auto& birth : birth_map) { 668 for (const auto& birth : birth_map) {
626 (*birth_counts)[birth.second] += birth.second->birth_count(); 669 (*birth_counts)[birth.second] += birth.second->birth_count();
627 } 670 }
628 671
629 if (!kTrackParentChildLinks) 672 if (!kTrackParentChildLinks)
630 return; 673 return;
631 674
632 for (const auto& parent_child : parent_child_set) { 675 for (const auto& parent_child : parent_child_set) {
633 process_data_phase->descendants.push_back( 676 process_data_phase->descendants.push_back(
634 ParentChildPairSnapshot(parent_child)); 677 ParentChildPairSnapshot(parent_child));
635 } 678 }
636 } 679 }
637 680
638 // This may be called from another thread. 681 // This may be called from another thread.
639 void ThreadData::SnapshotMaps(BirthMap* birth_map, 682 void ThreadData::SnapshotMaps(BirthMap* birth_map,
640 DeathMap* death_map, 683 DeathMap* death_map,
684 DeathResetResults* death_reset_results,
641 ParentChildSet* parent_child_set) { 685 ParentChildSet* parent_child_set) {
642 base::AutoLock lock(map_lock_); 686 base::AutoLock lock(map_lock_);
643 for (const auto& birth : birth_map_) 687
644 (*birth_map)[birth.first] = birth.second; 688 if (death_reset_results == nullptr) {
645 for (const auto& death : death_map_) 689 // When reset is not requested, snapshot births.
690 for (const auto& birth : birth_map_)
691 (*birth_map)[birth.first] = birth.second;
692 }
693 for (auto& death : death_map_) {
694 // Don't snapshot deaths with 0 count. Deaths with 0 count can result from
695 // prior calls to SnapshotMaps with death_reset_results!=null param.
696 if (death.second.count() <= 0)
697 continue;
698
646 (*death_map)[death.first] = death.second; 699 (*death_map)[death.first] = death.second;
700 if (death_reset_results != nullptr) {
701 // If resetting deaths is requested, store the current value of the death
702 // count in death_reset_results, and then clear the death.
703 const auto& death_reset_result = death_reset_results->find(death.first);
704
705 if (death_reset_result != death_reset_results->end())
706 death_reset_result->second += death.second.count();
707 else
708 (*death_reset_results)[death.first] = death.second.count();
709
710 death.second.Clear();
711 }
712 }
647 713
648 if (!kTrackParentChildLinks) 714 if (!kTrackParentChildLinks)
649 return; 715 return;
650 716
651 for (const auto& parent_child : parent_child_set_) 717 for (const auto& parent_child : parent_child_set_)
652 parent_child_set->insert(parent_child); 718 parent_child_set->insert(parent_child);
653 } 719 }
654 720
721 // This may be called from another thread.
722 void ThreadData::SubtractDeathResultsFromBirths(
723 const DeathResetResults& death_reset_results) {
724 base::AutoLock lock(map_lock_);
725
726 for (const auto& births : birth_map_) {
727 const auto& death_reset_result = death_reset_results.find(births.second);
728 if (death_reset_result != death_reset_results.end())
729 births.second->SubtractBirths(death_reset_result->second);
730 }
731 }
732
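// Worked example (not part of the patch) of the reset path above: suppose a
// location has tallied 10 births and 7 completed deaths when a phase ends.
// SnapshotMaps(..., death_reset_results, ...) copies the DeathData (count 7)
// into the phase snapshot, records 7 in death_reset_results, and clears the
// live death entry. SubtractDeathResultsFromBirths then drops birth_count_
// from 10 to 3, so only the still-outstanding work is carried into the next
// profiling phase.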
655 static void OptionallyInitializeAlternateTimer() { 733 static void OptionallyInitializeAlternateTimer() {
656 NowFunction* alternate_time_source = GetAlternateTimeSource(); 734 NowFunction* alternate_time_source = GetAlternateTimeSource();
657 if (alternate_time_source) 735 if (alternate_time_source)
658 ThreadData::SetAlternateTimeSource(alternate_time_source); 736 ThreadData::SetAlternateTimeSource(alternate_time_source);
659 } 737 }
660 738
661 bool ThreadData::Initialize() { 739 bool ThreadData::Initialize() {
662 if (!kTrackAllTaskObjects) 740 if (!kTrackAllTaskObjects)
663 return false; // Not compiled in. 741 return false; // Not compiled in.
664 if (status_ >= DEACTIVATED) 742 if (status_ >= DEACTIVATED)
(...skipping 161 matching lines...)
826 // Do actual recursive delete in all ThreadData instances. 904 // Do actual recursive delete in all ThreadData instances.
827 while (thread_data_list) { 905 while (thread_data_list) {
828 ThreadData* next_thread_data = thread_data_list; 906 ThreadData* next_thread_data = thread_data_list;
829 thread_data_list = thread_data_list->next(); 907 thread_data_list = thread_data_list->next();
830 908
831 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); 909 for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
832 next_thread_data->birth_map_.end() != it; ++it) 910 next_thread_data->birth_map_.end() != it; ++it)
833 delete it->second; // Delete the Birth Records. 911 delete it->second; // Delete the Birth Records.
834 delete next_thread_data; // Includes all Death Records. 912 delete next_thread_data; // Includes all Death Records.
835 } 913 }
914
915 completed_phases_snapshots_.Get().clear();
836 } 916 }
837 917
838 //------------------------------------------------------------------------------ 918 //------------------------------------------------------------------------------
839 TaskStopwatch::TaskStopwatch() 919 TaskStopwatch::TaskStopwatch()
840 : wallclock_duration_ms_(0), 920 : wallclock_duration_ms_(0),
841 current_thread_data_(NULL), 921 current_thread_data_(NULL),
842 excluded_duration_ms_(0), 922 excluded_duration_ms_(0),
843 parent_(NULL) { 923 parent_(NULL) {
844 #if DCHECK_IS_ON() 924 #if DCHECK_IS_ON()
845 state_ = CREATED; 925 state_ = CREATED;
(...skipping 131 matching lines...)
977 : process_id(base::GetCurrentProcId()) { 1057 : process_id(base::GetCurrentProcId()) {
978 #else 1058 #else
979 : process_id(base::kNullProcessId) { 1059 : process_id(base::kNullProcessId) {
980 #endif 1060 #endif
981 } 1061 }
982 1062
983 ProcessDataSnapshot::~ProcessDataSnapshot() { 1063 ProcessDataSnapshot::~ProcessDataSnapshot() {
984 } 1064 }
985 1065
986 } // namespace tracked_objects 1066 } // namespace tracked_objects