Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(22)

Side by Side Diff: base/tracked_objects.cc

Issue 1021053003: Delivering the FIRST_NONEMPTY_PAINT phase changing event to base/ (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@phase_splitting
Patch Set: jar@ comments. Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <limits.h> 7 #include <limits.h>
8 #include <stdlib.h> 8 #include <stdlib.h>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/leak_annotations.h" 14 #include "base/debug/leak_annotations.h"
15 #include "base/logging.h" 15 #include "base/logging.h"
16 #include "base/process/process_handle.h" 16 #include "base/process/process_handle.h"
17 #include "base/profiler/alternate_timer.h" 17 #include "base/profiler/alternate_timer.h"
18 #include "base/stl_util.h"
18 #include "base/strings/stringprintf.h" 19 #include "base/strings/stringprintf.h"
19 #include "base/third_party/valgrind/memcheck.h" 20 #include "base/third_party/valgrind/memcheck.h"
20 #include "base/tracking_info.h" 21 #include "base/tracking_info.h"
21 22
22 using base::TimeDelta; 23 using base::TimeDelta;
23 24
24 namespace base { 25 namespace base {
25 class TimeDelta; 26 class TimeDelta;
26 } 27 }
27 28
28 namespace tracked_objects { 29 namespace tracked_objects {
29 30
30 namespace { 31 namespace {
31 // Flag to compile out almost all of the task tracking code. 32 // Flag to compile out almost all of the task tracking code.
32 const bool kTrackAllTaskObjects = true; 33 const bool kTrackAllTaskObjects = true;
Ilya Sherman 2015/04/07 01:15:30 Is this flag still needed? It might be helpful to
vadimt 2015/04/07 21:44:14 Done; plase see (and LGTM) https://codereview.chro
33 34
34 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is 35 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is
35 // negligible, enable by default. 36 // negligible, enable by default.
36 // Flag to compile out parent-child link recording. 37 // Flag to compile out parent-child link recording.
37 const bool kTrackParentChildLinks = false; 38 const bool kTrackParentChildLinks = false;
38 39
39 // When ThreadData is first initialized, should we start in an ACTIVE state to 40 // When ThreadData is first initialized, should we start in an ACTIVE state to
40 // record all of the startup-time tasks, or should we start up DEACTIVATED, so 41 // record all of the startup-time tasks, or should we start up DEACTIVATED, so
41 // that we only record after parsing the command line flag --enable-tracking. 42 // that we only record after parsing the command line flag --enable-tracking.
42 // Note that the flag may force either state, so this really controls only the 43 // Note that the flag may force either state, so this really controls only the
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
89 } 90 }
90 return current_timing_enabled == ENABLED_TIMING; 91 return current_timing_enabled == ENABLED_TIMING;
91 } 92 }
92 93
93 } // namespace 94 } // namespace
94 95
95 //------------------------------------------------------------------------------ 96 //------------------------------------------------------------------------------
96 // DeathData tallies durations when a death takes place. 97 // DeathData tallies durations when a death takes place.
97 98
98 DeathData::DeathData() { 99 DeathData::DeathData() {
99 Clear(); 100 count_ = 0;
101 sample_probability_count_ = 0;
102 run_duration_sum_ = 0;
103 run_duration_max_ = 0;
104 run_duration_sample_ = 0;
105 queue_duration_sum_ = 0;
106 queue_duration_max_ = 0;
107 queue_duration_sample_ = 0;
108 last_phase_snapshot_ = nullptr;
100 } 109 }
101 110
102 DeathData::DeathData(int count) { 111 DeathData::DeathData(int count) : DeathData() {
103 Clear();
104 count_ = count; 112 count_ = count;
105 } 113 }
106 114
115 DeathData::~DeathData() {
116 while (last_phase_snapshot_) {
117 DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
118 last_phase_snapshot_ = snapshot->prev;
119 delete snapshot;
120 }
121 }
122
107 // TODO(jar): I need to see if this macro to optimize branching is worth using. 123 // TODO(jar): I need to see if this macro to optimize branching is worth using.
108 // 124 //
109 // This macro has no branching, so it is surely fast, and is equivalent to: 125 // This macro has no branching, so it is surely fast, and is equivalent to:
110 // if (assign_it) 126 // if (assign_it)
111 // target = source; 127 // target = source;
112 // We use a macro rather than a template to force this to inline. 128 // We use a macro rather than a template to force this to inline.
113 // Related code for calculating max is discussed on the web. 129 // Related code for calculating max is discussed on the web.
114 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ 130 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
115 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) 131 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it))
116 132
117 void DeathData::RecordDeath(const int32 queue_duration, 133 void DeathData::RecordDeath(const int32 queue_duration,
118 const int32 run_duration, 134 const int32 run_duration,
119 const uint32 random_number) { 135 const uint32 random_number) {
120 // We'll just clamp at INT_MAX, but we should note this in the UI as such. 136 // We'll just clamp at INT_MAX, but we should note this in the UI as such.
121 if (count_ < INT_MAX) 137 if (count_ < INT_MAX)
122 ++count_; 138 ++count_;
139 if (sample_probability_count_ < INT_MAX)
140 ++sample_probability_count_;
123 queue_duration_sum_ += queue_duration; 141 queue_duration_sum_ += queue_duration;
124 run_duration_sum_ += run_duration; 142 run_duration_sum_ += run_duration;
125 143
126 if (queue_duration_max_ < queue_duration) 144 if (queue_duration_max_ < queue_duration)
127 queue_duration_max_ = queue_duration; 145 queue_duration_max_ = queue_duration;
128 if (run_duration_max_ < run_duration) 146 if (run_duration_max_ < run_duration)
129 run_duration_max_ = run_duration; 147 run_duration_max_ = run_duration;
130 148
131 // Take a uniformly distributed sample over all durations ever supplied. 149 // Take a uniformly distributed sample over all durations ever supplied during
132 // The probability that we (instead) use this new sample is 1/count_. This 150 // current profiling phase.
133 // results in a completely uniform selection of the sample (at least when we 151 // The probability that we (instead) use this new sample is
134 // don't clamp count_... but that should be inconsequentially likely). 152 // 1/sample_probability_count_. This results in a completely uniform selection
135 // We ignore the fact that we correlated our selection of a sample to the run 153 // of the sample (at least when we don't clamp sample_probability_count_...
136 // and queue times (i.e., we used them to generate random_number). 154 // but that should be inconsequentially likely). We ignore the fact that we
137 CHECK_GT(count_, 0); 155 // correlated our selection of a sample to the run and queue times (i.e., we
138 if (0 == (random_number % count_)) { 156 // used them to generate random_number).
157 CHECK_GT(sample_probability_count_, 0);
158 if (0 == (random_number % sample_probability_count_)) {
139 queue_duration_sample_ = queue_duration; 159 queue_duration_sample_ = queue_duration;
140 run_duration_sample_ = run_duration; 160 run_duration_sample_ = run_duration;
141 } 161 }
142 } 162 }
143 163
144 int DeathData::count() const { return count_; } 164 int DeathData::count() const { return count_; }
145 165
146 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } 166 int32 DeathData::run_duration_sum() const { return run_duration_sum_; }
147 167
148 int32 DeathData::run_duration_max() const { return run_duration_max_; } 168 int32 DeathData::run_duration_max() const { return run_duration_max_; }
149 169
150 int32 DeathData::run_duration_sample() const { 170 int32 DeathData::run_duration_sample() const {
151 return run_duration_sample_; 171 return run_duration_sample_;
152 } 172 }
153 173
154 int32 DeathData::queue_duration_sum() const { 174 int32 DeathData::queue_duration_sum() const {
155 return queue_duration_sum_; 175 return queue_duration_sum_;
156 } 176 }
157 177
158 int32 DeathData::queue_duration_max() const { 178 int32 DeathData::queue_duration_max() const {
159 return queue_duration_max_; 179 return queue_duration_max_;
160 } 180 }
161 181
162 int32 DeathData::queue_duration_sample() const { 182 int32 DeathData::queue_duration_sample() const {
163 return queue_duration_sample_; 183 return queue_duration_sample_;
164 } 184 }
165 185
166 void DeathData::Clear() { 186 DeathDataPhaseSnapshot* DeathData::last_phase_snapshot() const {
167 count_ = 0; 187 return last_phase_snapshot_;
168 run_duration_sum_ = 0; 188 }
189
190 void DeathData::OnProfilingPhaseCompletion(int profiling_phase) {
191 // Snapshotting and storing current state.
192 last_phase_snapshot_ = new DeathDataPhaseSnapshot(profiling_phase, *this);
193
194 // Not touching fields for which a delta can be computed by comparing with a
195 // snapshot from previos phase. Resetting other fields. Sample values will be
196 // reset upon next death recording because sample_probability_count_ is set to
197 // 0.
198 sample_probability_count_ = 0;
169 run_duration_max_ = 0; 199 run_duration_max_ = 0;
170 run_duration_sample_ = 0;
171 queue_duration_sum_ = 0;
172 queue_duration_max_ = 0; 200 queue_duration_max_ = 0;
173 queue_duration_sample_ = 0;
174 } 201 }
175 202
176 //------------------------------------------------------------------------------ 203 //------------------------------------------------------------------------------
177 DeathDataSnapshot::DeathDataSnapshot() 204 DeathDataSnapshot::DeathDataSnapshot()
178 : count(-1), 205 : count(-1),
179 run_duration_sum(-1), 206 run_duration_sum(-1),
180 run_duration_max(-1), 207 run_duration_max(-1),
181 run_duration_sample(-1), 208 run_duration_sample(-1),
182 queue_duration_sum(-1), 209 queue_duration_sum(-1),
183 queue_duration_max(-1), 210 queue_duration_max(-1),
184 queue_duration_sample(-1) { 211 queue_duration_sample(-1) {
185 } 212 }
186 213
187 DeathDataSnapshot::DeathDataSnapshot( 214 DeathDataSnapshot::DeathDataSnapshot(
188 const tracked_objects::DeathData& death_data) 215 const tracked_objects::DeathData& death_data)
189 : count(death_data.count()), 216 : count(death_data.count()),
190 run_duration_sum(death_data.run_duration_sum()), 217 run_duration_sum(death_data.run_duration_sum()),
191 run_duration_max(death_data.run_duration_max()), 218 run_duration_max(death_data.run_duration_max()),
192 run_duration_sample(death_data.run_duration_sample()), 219 run_duration_sample(death_data.run_duration_sample()),
193 queue_duration_sum(death_data.queue_duration_sum()), 220 queue_duration_sum(death_data.queue_duration_sum()),
194 queue_duration_max(death_data.queue_duration_max()), 221 queue_duration_max(death_data.queue_duration_max()),
195 queue_duration_sample(death_data.queue_duration_sample()) { 222 queue_duration_sample(death_data.queue_duration_sample()) {
196 } 223 }
197 224
198 DeathDataSnapshot::~DeathDataSnapshot() { 225 DeathDataSnapshot::~DeathDataSnapshot() {
199 } 226 }
200 227
228 void DeathDataSnapshot::CalculateDelta(const DeathDataSnapshot& older) {
229 count -= older.count;
230 run_duration_sum -= older.run_duration_sum;
231 queue_duration_sum -= older.queue_duration_sum;
232 }
233
201 //------------------------------------------------------------------------------ 234 //------------------------------------------------------------------------------
202 BirthOnThread::BirthOnThread(const Location& location, 235 BirthOnThread::BirthOnThread(const Location& location,
203 const ThreadData& current) 236 const ThreadData& current)
204 : location_(location), 237 : location_(location),
205 birth_thread_(&current) { 238 birth_thread_(&current) {
206 } 239 }
207 240
208 //------------------------------------------------------------------------------ 241 //------------------------------------------------------------------------------
209 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { 242 BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
210 } 243 }
(...skipping 172 matching lines...) Expand 10 before | Expand all | Expand 10 after
383 return; 416 return;
384 } 417 }
385 // We must NOT do any allocations during this callback. 418 // We must NOT do any allocations during this callback.
386 // Using the simple linked lists avoids all allocations. 419 // Using the simple linked lists avoids all allocations.
387 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); 420 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
388 this->next_retired_worker_ = first_retired_worker_; 421 this->next_retired_worker_ = first_retired_worker_;
389 first_retired_worker_ = this; 422 first_retired_worker_ = this;
390 } 423 }
391 424
392 // static 425 // static
393 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { 426 void ThreadData::Snapshot(int current_profiling_phase,
394 ThreadData::SnapshotCurrentPhase( 427 ProcessDataSnapshot* process_data_snapshot) {
395 &process_data_snapshot->phased_process_data_snapshots[0]); 428 if (!kTrackAllTaskObjects)
429 return; // Not compiled in.
430
431 BirthCountMap birth_counts;
432
433 // Get an unchanging copy of a ThreadData list.
434 ThreadData* my_list = ThreadData::first();
435
436 // Gather data serially.
 437 // This hackish approach *can* get some slightly corrupt tallies, as we are
438 // grabbing values without the protection of a lock, but it has the advantage
439 // of working even with threads that don't have message loops. If a user
440 // sees any strangeness, they can always just run their stats gathering a
441 // second time.
442 for (ThreadData* thread_data = my_list; thread_data;
443 thread_data = thread_data->next()) {
444 thread_data->SnapshotExecutedTasks(
445 current_profiling_phase,
446 &process_data_snapshot->phased_process_data_snapshots, &birth_counts);
447 }
448
449 // Add births that are still active -- i.e. objects that have tallied a birth,
450 // but have not yet tallied a matching death, and hence must be either
451 // running, queued up, or being held in limbo for future posting.
452 for (const auto& birth_count : birth_counts) {
453 if (birth_count.second > 0) {
454 process_data_snapshot
455 ->phased_process_data_snapshots[current_profiling_phase]
456 .tasks.push_back(TaskSnapshot(
457 BirthOnThreadSnapshot(*birth_count.first),
458 DeathDataSnapshot(DeathData(birth_count.second)), "Still_Alive"));
459 }
460 }
461 }
462
463 // static
464 void ThreadData::OnProfilingPhaseCompletion(int profiling_phase) {
465 if (!kTrackAllTaskObjects)
466 return; // Not compiled in.
467
468 // Get an unchanging copy of a ThreadData list.
469 ThreadData* my_list = ThreadData::first();
470
471 // Add snapshots for all death datas in all threads serially.
 472 // This hackish approach *can* get some slightly corrupt tallies, as we are
473 // grabbing values without the protection of a lock, but it has the advantage
474 // of working even with threads that don't have message loops. Any corruption
475 // shouldn't cause "cascading damage" to anything else (in later phases).
476 for (ThreadData* thread_data = my_list; thread_data;
477 thread_data = thread_data->next()) {
478 thread_data->OnProfilingPhaseCompletionOnThread(profiling_phase);
479 }
396 } 480 }
397 481
398 Births* ThreadData::TallyABirth(const Location& location) { 482 Births* ThreadData::TallyABirth(const Location& location) {
399 BirthMap::iterator it = birth_map_.find(location); 483 BirthMap::iterator it = birth_map_.find(location);
400 Births* child; 484 Births* child;
401 if (it != birth_map_.end()) { 485 if (it != birth_map_.end()) {
402 child = it->second; 486 child = it->second;
403 child->RecordBirth(); 487 child->RecordBirth();
404 } else { 488 } else {
405 child = new Births(location, *this); // Leak this. 489 child = new Births(location, *this); // Leak this.
(...skipping 11 matching lines...) Expand all
417 // Lock since the map may get relocated now, and other threads sometimes 501 // Lock since the map may get relocated now, and other threads sometimes
418 // snapshot it (but they lock before copying it). 502 // snapshot it (but they lock before copying it).
419 base::AutoLock lock(map_lock_); 503 base::AutoLock lock(map_lock_);
420 parent_child_set_.insert(pair); 504 parent_child_set_.insert(pair);
421 } 505 }
422 } 506 }
423 507
424 return child; 508 return child;
425 } 509 }
426 510
427 void ThreadData::TallyADeath(const Births& birth, 511 void ThreadData::TallyADeath(const Births& births,
428 int32 queue_duration, 512 int32 queue_duration,
429 const TaskStopwatch& stopwatch) { 513 const TaskStopwatch& stopwatch) {
430 int32 run_duration = stopwatch.RunDurationMs(); 514 int32 run_duration = stopwatch.RunDurationMs();
431 515
432 // Stir in some randomness, plus add constant in case durations are zero. 516 // Stir in some randomness, plus add constant in case durations are zero.
433 const uint32 kSomePrimeNumber = 2147483647; 517 const uint32 kSomePrimeNumber = 2147483647;
434 random_number_ += queue_duration + run_duration + kSomePrimeNumber; 518 random_number_ += queue_duration + run_duration + kSomePrimeNumber;
435 // An address is going to have some randomness to it as well ;-). 519 // An address is going to have some randomness to it as well ;-).
436 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); 520 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0));
437 521
438 // We don't have queue durations without OS timer. OS timer is automatically 522 // We don't have queue durations without OS timer. OS timer is automatically
439 // used for task-post-timing, so the use of an alternate timer implies all 523 // used for task-post-timing, so the use of an alternate timer implies all
440 // queue times are invalid, unless it was explicitly said that we can trust 524 // queue times are invalid, unless it was explicitly said that we can trust
441 // the alternate timer. 525 // the alternate timer.
442 if (kAllowAlternateTimeSourceHandling && 526 if (kAllowAlternateTimeSourceHandling &&
443 now_function_ && 527 now_function_ &&
444 !now_function_is_time_) { 528 !now_function_is_time_) {
445 queue_duration = 0; 529 queue_duration = 0;
446 } 530 }
447 531
448 DeathMap::iterator it = death_map_.find(&birth); 532 DeathMap::iterator it = death_map_.find(&births);
449 DeathData* death_data; 533 DeathData* death_data;
450 if (it != death_map_.end()) { 534 if (it != death_map_.end()) {
451 death_data = &it->second; 535 death_data = &it->second;
452 } else { 536 } else {
453 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. 537 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
454 death_data = &death_map_[&birth]; 538 death_data = &death_map_[&births];
455 } // Release lock ASAP. 539 } // Release lock ASAP.
456 death_data->RecordDeath(queue_duration, run_duration, random_number_); 540 death_data->RecordDeath(queue_duration, run_duration, random_number_);
457 541
458 if (!kTrackParentChildLinks) 542 if (!kTrackParentChildLinks)
459 return; 543 return;
460 if (!parent_stack_.empty()) { // We might get turned off. 544 if (!parent_stack_.empty()) { // We might get turned off.
461 DCHECK_EQ(parent_stack_.top(), &birth); 545 DCHECK_EQ(parent_stack_.top(), &births);
462 parent_stack_.pop(); 546 parent_stack_.pop();
463 } 547 }
464 } 548 }
465 549
466 // static 550 // static
467 Births* ThreadData::TallyABirthIfActive(const Location& location) { 551 Births* ThreadData::TallyABirthIfActive(const Location& location) {
468 if (!kTrackAllTaskObjects) 552 if (!kTrackAllTaskObjects)
469 return NULL; // Not compiled in. 553 return NULL; // Not compiled in.
470 554
471 if (!TrackingStatus()) 555 if (!TrackingStatus())
472 return NULL; 556 return NULL;
473 ThreadData* current_thread_data = Get(); 557 ThreadData* current_thread_data = Get();
474 if (!current_thread_data) 558 if (!current_thread_data)
475 return NULL; 559 return NULL;
476 return current_thread_data->TallyABirth(location); 560 return current_thread_data->TallyABirth(location);
477 } 561 }
478 562
479 // static 563 // static
480 void ThreadData::TallyRunOnNamedThreadIfTracking( 564 void ThreadData::TallyRunOnNamedThreadIfTracking(
481 const base::TrackingInfo& completed_task, 565 const base::TrackingInfo& completed_task,
482 const TaskStopwatch& stopwatch) { 566 const TaskStopwatch& stopwatch) {
483 if (!kTrackAllTaskObjects) 567 if (!kTrackAllTaskObjects)
484 return; // Not compiled in. 568 return; // Not compiled in.
485 569
486 // Even if we have been DEACTIVATED, we will process any pending births so 570 // Even if we have been DEACTIVATED, we will process any pending births so
487 // that our data structures (which counted the outstanding births) remain 571 // that our data structures (which counted the outstanding births) remain
488 // consistent. 572 // consistent.
489 const Births* birth = completed_task.birth_tally; 573 const Births* births = completed_task.birth_tally;
490 if (!birth) 574 if (!births)
491 return; 575 return;
492 ThreadData* current_thread_data = stopwatch.GetThreadData(); 576 ThreadData* current_thread_data = stopwatch.GetThreadData();
493 if (!current_thread_data) 577 if (!current_thread_data)
494 return; 578 return;
495 579
496 // Watch out for a race where status_ is changing, and hence one or both 580 // Watch out for a race where status_ is changing, and hence one or both
497 // of start_of_run or end_of_run is zero. In that case, we didn't bother to 581 // of start_of_run or end_of_run is zero. In that case, we didn't bother to
498 // get a time value since we "weren't tracking" and we were trying to be 582 // get a time value since we "weren't tracking" and we were trying to be
499 // efficient by not calling for a genuine time value. For simplicity, we'll 583 // efficient by not calling for a genuine time value. For simplicity, we'll
500 // use a default zero duration when we can't calculate a true value. 584 // use a default zero duration when we can't calculate a true value.
501 TrackedTime start_of_run = stopwatch.StartTime(); 585 TrackedTime start_of_run = stopwatch.StartTime();
502 int32 queue_duration = 0; 586 int32 queue_duration = 0;
503 if (!start_of_run.is_null()) { 587 if (!start_of_run.is_null()) {
504 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) 588 queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
505 .InMilliseconds(); 589 .InMilliseconds();
506 } 590 }
507 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 591 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
508 } 592 }
509 593
510 // static 594 // static
511 void ThreadData::TallyRunOnWorkerThreadIfTracking( 595 void ThreadData::TallyRunOnWorkerThreadIfTracking(
512 const Births* birth, 596 const Births* births,
513 const TrackedTime& time_posted, 597 const TrackedTime& time_posted,
514 const TaskStopwatch& stopwatch) { 598 const TaskStopwatch& stopwatch) {
515 if (!kTrackAllTaskObjects) 599 if (!kTrackAllTaskObjects)
516 return; // Not compiled in. 600 return; // Not compiled in.
517 601
518 // Even if we have been DEACTIVATED, we will process any pending births so 602 // Even if we have been DEACTIVATED, we will process any pending births so
519 // that our data structures (which counted the outstanding births) remain 603 // that our data structures (which counted the outstanding births) remain
520 // consistent. 604 // consistent.
521 if (!birth) 605 if (!births)
522 return; 606 return;
523 607
524 // TODO(jar): Support the option to coalesce all worker-thread activity under 608 // TODO(jar): Support the option to coalesce all worker-thread activity under
525 // one ThreadData instance that uses locks to protect *all* access. This will 609 // one ThreadData instance that uses locks to protect *all* access. This will
526 // reduce memory (making it provably bounded), but run incrementally slower 610 // reduce memory (making it provably bounded), but run incrementally slower
527 // (since we'll use locks on TallyABirth and TallyADeath). The good news is 611 // (since we'll use locks on TallyABirth and TallyADeath). The good news is
528 // that the locks on TallyADeath will be *after* the worker thread has run, 612 // that the locks on TallyADeath will be *after* the worker thread has run,
529 // and hence nothing will be waiting for the completion (... besides some 613 // and hence nothing will be waiting for the completion (... besides some
530 // other thread that might like to run). Also, the worker threads tasks are 614 // other thread that might like to run). Also, the worker threads tasks are
531 // generally longer, and hence the cost of the lock may perchance be amortized 615 // generally longer, and hence the cost of the lock may perchance be amortized
532 // over the long task's lifetime. 616 // over the long task's lifetime.
533 ThreadData* current_thread_data = stopwatch.GetThreadData(); 617 ThreadData* current_thread_data = stopwatch.GetThreadData();
534 if (!current_thread_data) 618 if (!current_thread_data)
535 return; 619 return;
536 620
537 TrackedTime start_of_run = stopwatch.StartTime(); 621 TrackedTime start_of_run = stopwatch.StartTime();
538 int32 queue_duration = 0; 622 int32 queue_duration = 0;
539 if (!start_of_run.is_null()) { 623 if (!start_of_run.is_null()) {
540 queue_duration = (start_of_run - time_posted).InMilliseconds(); 624 queue_duration = (start_of_run - time_posted).InMilliseconds();
541 } 625 }
542 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 626 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
543 } 627 }
544 628
545 // static 629 // static
546 void ThreadData::TallyRunInAScopedRegionIfTracking( 630 void ThreadData::TallyRunInAScopedRegionIfTracking(
547 const Births* birth, 631 const Births* births,
548 const TaskStopwatch& stopwatch) { 632 const TaskStopwatch& stopwatch) {
549 if (!kTrackAllTaskObjects) 633 if (!kTrackAllTaskObjects)
550 return; // Not compiled in. 634 return; // Not compiled in.
551 635
552 // Even if we have been DEACTIVATED, we will process any pending births so 636 // Even if we have been DEACTIVATED, we will process any pending births so
553 // that our data structures (which counted the outstanding births) remain 637 // that our data structures (which counted the outstanding births) remain
554 // consistent. 638 // consistent.
555 if (!birth) 639 if (!births)
556 return; 640 return;
557 641
558 ThreadData* current_thread_data = stopwatch.GetThreadData(); 642 ThreadData* current_thread_data = stopwatch.GetThreadData();
559 if (!current_thread_data) 643 if (!current_thread_data)
560 return; 644 return;
561 645
562 int32 queue_duration = 0; 646 int32 queue_duration = 0;
563 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); 647 current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
564 }
565
566 // static
567 void ThreadData::SnapshotAllExecutedTasks(
568 ProcessDataPhaseSnapshot* process_data_phase,
569 BirthCountMap* birth_counts) {
570 if (!kTrackAllTaskObjects)
571 return; // Not compiled in.
572
573 // Get an unchanging copy of a ThreadData list.
574 ThreadData* my_list = ThreadData::first();
575
576 // Gather data serially.
 577 // This hackish approach *can* get some slightly corrupt tallies, as we are
578 // grabbing values without the protection of a lock, but it has the advantage
579 // of working even with threads that don't have message loops. If a user
580 // sees any strangeness, they can always just run their stats gathering a
581 // second time.
582 for (ThreadData* thread_data = my_list;
583 thread_data;
584 thread_data = thread_data->next()) {
585 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts);
586 }
587 }
588
589 // static
590 void ThreadData::SnapshotCurrentPhase(
591 ProcessDataPhaseSnapshot* process_data_phase) {
592 // Add births that have run to completion to |collected_data|.
593 // |birth_counts| tracks the total number of births recorded at each location
594 // for which we have not seen a death count.
595 BirthCountMap birth_counts;
596 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts);
597
598 // Add births that are still active -- i.e. objects that have tallied a birth,
599 // but have not yet tallied a matching death, and hence must be either
600 // running, queued up, or being held in limbo for future posting.
601 for (const auto& birth_count : birth_counts) {
602 if (birth_count.second > 0) {
603 process_data_phase->tasks.push_back(TaskSnapshot(
604 *birth_count.first, DeathData(birth_count.second), "Still_Alive"));
605 }
606 }
607 } 648 }
608 649
609 void ThreadData::SnapshotExecutedTasks( 650 void ThreadData::SnapshotExecutedTasks(
610 ProcessDataPhaseSnapshot* process_data_phase, 651 int current_profiling_phase,
652 PhasedProcessDataSnapshotMap* phased_process_data_snapshots,
611 BirthCountMap* birth_counts) { 653 BirthCountMap* birth_counts) {
612 // Get copy of data, so that the data will not change during the iterations 654 // Get copy of data, so that the data will not change during the iterations
613 // and processing. 655 // and processing.
614 ThreadData::BirthMap birth_map; 656 BirthMap birth_map;
615 ThreadData::DeathMap death_map; 657 DeathsSnapshot deaths;
616 ThreadData::ParentChildSet parent_child_set; 658 ParentChildSet parent_child_set;
617 SnapshotMaps(&birth_map, &death_map, &parent_child_set); 659 SnapshotMaps(current_profiling_phase, &birth_map, &deaths, &parent_child_set);
618
619 for (const auto& death : death_map) {
620 process_data_phase->tasks.push_back(
621 TaskSnapshot(*death.first, death.second, thread_name()));
622 (*birth_counts)[death.first] -= death.first->birth_count();
623 }
624 660
625 for (const auto& birth : birth_map) { 661 for (const auto& birth : birth_map) {
626 (*birth_counts)[birth.second] += birth.second->birth_count(); 662 (*birth_counts)[birth.second] += birth.second->birth_count();
627 } 663 }
628 664
629 if (!kTrackParentChildLinks) 665 for (const auto& death : deaths) {
630 return; 666 (*birth_counts)[death.first] -= death.first->birth_count();
631 667
632 for (const auto& parent_child : parent_child_set) { 668 for (const DeathDataPhaseSnapshot* phase = &death.second; phase;
633 process_data_phase->descendants.push_back( 669 phase = phase->prev) {
634 ParentChildPairSnapshot(parent_child)); 670 DeathDataSnapshot death_data = phase->death_data;
671
672 if (phase->prev)
673 death_data.CalculateDelta(phase->prev->death_data);
674
675 if (death_data.count > 0)
676 (*phased_process_data_snapshots)[phase->profiling_phase]
677 .tasks.push_back(TaskSnapshot(BirthOnThreadSnapshot(*death.first),
678 death_data, thread_name()));
679 }
635 } 680 }
636 } 681 }
637 682
638 // This may be called from another thread. 683 // This may be called from another thread.
639 void ThreadData::SnapshotMaps(BirthMap* birth_map, 684 void ThreadData::SnapshotMaps(int profiling_phase,
640 DeathMap* death_map, 685 BirthMap* birth_map,
686 DeathsSnapshot* deaths,
641 ParentChildSet* parent_child_set) { 687 ParentChildSet* parent_child_set) {
642 base::AutoLock lock(map_lock_); 688 base::AutoLock lock(map_lock_);
689
643 for (const auto& birth : birth_map_) 690 for (const auto& birth : birth_map_)
644 (*birth_map)[birth.first] = birth.second; 691 (*birth_map)[birth.first] = birth.second;
645 for (const auto& death : death_map_) 692
646 (*death_map)[death.first] = death.second; 693 for (const auto& death : death_map_) {
694 deaths->push_back(DeathsSnapshot::value_type(
695 death.first, DeathDataPhaseSnapshot(profiling_phase, death.second)));
696 }
647 697
648 if (!kTrackParentChildLinks) 698 if (!kTrackParentChildLinks)
649 return; 699 return;
650 700
651 for (const auto& parent_child : parent_child_set_) 701 for (const auto& parent_child : parent_child_set_)
652 parent_child_set->insert(parent_child); 702 parent_child_set->insert(parent_child);
653 } 703 }
654 704
705 void ThreadData::OnProfilingPhaseCompletionOnThread(int profiling_phase) {
706 base::AutoLock lock(map_lock_);
707
708 for (auto& death : death_map_) {
709 death.second.OnProfilingPhaseCompletion(profiling_phase);
710 }
711 }
712
655 static void OptionallyInitializeAlternateTimer() { 713 static void OptionallyInitializeAlternateTimer() {
656 NowFunction* alternate_time_source = GetAlternateTimeSource(); 714 NowFunction* alternate_time_source = GetAlternateTimeSource();
657 if (alternate_time_source) 715 if (alternate_time_source)
658 ThreadData::SetAlternateTimeSource(alternate_time_source); 716 ThreadData::SetAlternateTimeSource(alternate_time_source);
659 } 717 }
660 718
661 bool ThreadData::Initialize() { 719 bool ThreadData::Initialize() {
662 if (!kTrackAllTaskObjects) 720 if (!kTrackAllTaskObjects)
663 return false; // Not compiled in. 721 return false; // Not compiled in.
664 if (status_ >= DEACTIVATED) 722 if (status_ >= DEACTIVATED)
(...skipping 259 matching lines...) Expand 10 before | Expand all | Expand 10 after
924 982
// Returns the ThreadData of the thread this stopwatch was started on (may be
// null).  Must not be called before Start() has run.
ThreadData* TaskStopwatch::GetThreadData() const {
#if DCHECK_IS_ON()
  // The stopwatch must have progressed past construction before its thread
  // data is queried.
  DCHECK(state_ != CREATED);
#endif

  return current_thread_data_;
}
932 990
933 //------------------------------------------------------------------------------ 991 //------------------------------------------------------------------------------
992 // DeathDataPhaseSnapshot
993
// Snapshots |death_data| as of the end of |profiling_phase|.  |prev| links to
// the snapshot taken at the previous phase boundary (null if none), forming a
// chain that can be walked to compute per-phase deltas.
DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(int profiling_phase,
                                               const DeathData& death_data)
    : profiling_phase(profiling_phase),
      death_data(death_data),
      prev(death_data.last_phase_snapshot()) {
}
1000
1001 //------------------------------------------------------------------------------
1002 // TaskSnapshot
1003
// Default constructor: all members are default-initialized.
TaskSnapshot::TaskSnapshot() {
}

// Bundles a task's birth location, its (snapshotted) death statistics, and
// the name of the thread on which the deaths were recorded.
TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
                           const DeathDataSnapshot& death_data,
                           const std::string& death_thread_name)
    : birth(birth),
      death_data(death_data),
      death_thread_name(death_thread_name) {
}

TaskSnapshot::~TaskSnapshot() {
}
947 1017
948 //------------------------------------------------------------------------------ 1018 //------------------------------------------------------------------------------
(...skipping 28 matching lines...) Expand all
977 : process_id(base::GetCurrentProcId()) { 1047 : process_id(base::GetCurrentProcId()) {
978 #else 1048 #else
979 : process_id(base::kNullProcessId) { 1049 : process_id(base::kNullProcessId) {
980 #endif 1050 #endif
981 } 1051 }
982 1052
// Defined out of line; no explicit cleanup is needed.
ProcessDataSnapshot::~ProcessDataSnapshot() {
}
985 1055
986 } // namespace tracked_objects 1056 } // namespace tracked_objects
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698