Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <limits.h> | 7 #include <limits.h> |
| 8 #include <stdlib.h> | 8 #include <stdlib.h> |
| 9 | 9 |
| 10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
| 11 #include "base/base_switches.h" | 11 #include "base/base_switches.h" |
| 12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
| 13 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
| 14 #include "base/debug/leak_annotations.h" | 14 #include "base/debug/leak_annotations.h" |
| 15 #include "base/logging.h" | 15 #include "base/logging.h" |
| 16 #include "base/process/process_handle.h" | 16 #include "base/process/process_handle.h" |
| 17 #include "base/profiler/alternate_timer.h" | 17 #include "base/profiler/alternate_timer.h" |
| 18 #include "base/stl_util.h" | |
| 18 #include "base/strings/stringprintf.h" | 19 #include "base/strings/stringprintf.h" |
| 19 #include "base/third_party/valgrind/memcheck.h" | 20 #include "base/third_party/valgrind/memcheck.h" |
| 20 #include "base/tracking_info.h" | 21 #include "base/tracking_info.h" |
| 21 | 22 |
| 22 using base::TimeDelta; | 23 using base::TimeDelta; |
| 23 | 24 |
| 24 namespace base { | 25 namespace base { |
| 25 class TimeDelta; | 26 class TimeDelta; |
| 26 } | 27 } |
| 27 | 28 |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 86 } | 87 } |
| 87 return current_timing_enabled == ENABLED_TIMING; | 88 return current_timing_enabled == ENABLED_TIMING; |
| 88 } | 89 } |
| 89 | 90 |
| 90 } // namespace | 91 } // namespace |
| 91 | 92 |
| 92 //------------------------------------------------------------------------------ | 93 //------------------------------------------------------------------------------ |
| 93 // DeathData tallies durations when a death takes place. | 94 // DeathData tallies durations when a death takes place. |
| 94 | 95 |
| 95 DeathData::DeathData() { | 96 DeathData::DeathData() { |
| 96 Clear(); | 97 count_ = 0; |
| 98 sample_probability_count_ = 0; | |
| 99 run_duration_sum_ = 0; | |
| 100 run_duration_max_ = 0; | |
| 101 run_duration_sample_ = 0; | |
| 102 queue_duration_sum_ = 0; | |
| 103 queue_duration_max_ = 0; | |
| 104 queue_duration_sample_ = 0; | |
|
Alexei Svitkine (slow)
2015/04/08 15:55:39
Instead of having all of these as members - which
vadimt
2015/04/08 20:32:34
As the comment says, "Members are ordered from mos
Alexei Svitkine (slow)
2015/04/08 20:54:25
I am actually very skeptical of that original comm
vadimt
2015/04/08 21:27:17
jar@ told me that cache lines on Android may be ve
Alexei Svitkine (slow)
2015/04/08 21:57:49
Fine, but I'm still not OK with the circular depen
vadimt
2015/04/08 23:31:24
Done, but not sure I like the result...
Since Deat
| |
| 105 last_phase_snapshot_ = nullptr; | |
| 97 } | 106 } |
| 98 | 107 |
| 99 DeathData::DeathData(int count) { | 108 DeathData::DeathData(int count) : DeathData() { |
| 100 Clear(); | |
| 101 count_ = count; | 109 count_ = count; |
| 102 } | 110 } |
| 103 | 111 |
| 112 DeathData::~DeathData() { | |
| 113 while (last_phase_snapshot_) { | |
| 114 DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; | |
| 115 last_phase_snapshot_ = snapshot->prev; | |
| 116 delete snapshot; | |
| 117 } | |
| 118 } | |
| 119 | |
| 104 // TODO(jar): I need to see if this macro to optimize branching is worth using. | 120 // TODO(jar): I need to see if this macro to optimize branching is worth using. |
| 105 // | 121 // |
| 106 // This macro has no branching, so it is surely fast, and is equivalent to: | 122 // This macro has no branching, so it is surely fast, and is equivalent to: |
| 107 // if (assign_it) | 123 // if (assign_it) |
| 108 // target = source; | 124 // target = source; |
| 109 // We use a macro rather than a template to force this to inline. | 125 // We use a macro rather than a template to force this to inline. |
| 110 // Related code for calculating max is discussed on the web. | 126 // Related code for calculating max is discussed on the web. |
| 111 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ | 127 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
| 112 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) | 128 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) |
| 113 | 129 |
| 114 void DeathData::RecordDeath(const int32 queue_duration, | 130 void DeathData::RecordDeath(const int32 queue_duration, |
| 115 const int32 run_duration, | 131 const int32 run_duration, |
| 116 const uint32 random_number) { | 132 const uint32 random_number) { |
| 117 // We'll just clamp at INT_MAX, but we should note this in the UI as such. | 133 // We'll just clamp at INT_MAX, but we should note this in the UI as such. |
| 118 if (count_ < INT_MAX) | 134 if (count_ < INT_MAX) |
| 119 ++count_; | 135 ++count_; |
| 136 if (sample_probability_count_ < INT_MAX) | |
| 137 ++sample_probability_count_; | |
| 120 queue_duration_sum_ += queue_duration; | 138 queue_duration_sum_ += queue_duration; |
| 121 run_duration_sum_ += run_duration; | 139 run_duration_sum_ += run_duration; |
| 122 | 140 |
| 123 if (queue_duration_max_ < queue_duration) | 141 if (queue_duration_max_ < queue_duration) |
| 124 queue_duration_max_ = queue_duration; | 142 queue_duration_max_ = queue_duration; |
| 125 if (run_duration_max_ < run_duration) | 143 if (run_duration_max_ < run_duration) |
| 126 run_duration_max_ = run_duration; | 144 run_duration_max_ = run_duration; |
| 127 | 145 |
| 128 // Take a uniformly distributed sample over all durations ever supplied. | 146 // Take a uniformly distributed sample over all durations ever supplied during |
| 129 // The probability that we (instead) use this new sample is 1/count_. This | 147 // current profiling phase. |
| 130 // results in a completely uniform selection of the sample (at least when we | 148 // The probability that we (instead) use this new sample is |
| 131 // don't clamp count_... but that should be inconsequentially likely). | 149 // 1/sample_probability_count_. This results in a completely uniform selection |
| 132 // We ignore the fact that we correlated our selection of a sample to the run | 150 // of the sample (at least when we don't clamp sample_probability_count_... |
| 133 // and queue times (i.e., we used them to generate random_number). | 151 // but that should be inconsequentially likely). We ignore the fact that we |
| 134 CHECK_GT(count_, 0); | 152 // correlated our selection of a sample to the run and queue times (i.e., we |
| 135 if (0 == (random_number % count_)) { | 153 // used them to generate random_number). |
| 154 CHECK_GT(sample_probability_count_, 0); | |
| 155 if (0 == (random_number % sample_probability_count_)) { | |
| 136 queue_duration_sample_ = queue_duration; | 156 queue_duration_sample_ = queue_duration; |
| 137 run_duration_sample_ = run_duration; | 157 run_duration_sample_ = run_duration; |
| 138 } | 158 } |
| 139 } | 159 } |
| 140 | 160 |
| 141 int DeathData::count() const { return count_; } | 161 int DeathData::count() const { return count_; } |
| 142 | 162 |
| 143 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } | 163 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } |
| 144 | 164 |
| 145 int32 DeathData::run_duration_max() const { return run_duration_max_; } | 165 int32 DeathData::run_duration_max() const { return run_duration_max_; } |
| 146 | 166 |
| 147 int32 DeathData::run_duration_sample() const { | 167 int32 DeathData::run_duration_sample() const { |
| 148 return run_duration_sample_; | 168 return run_duration_sample_; |
| 149 } | 169 } |
| 150 | 170 |
| 151 int32 DeathData::queue_duration_sum() const { | 171 int32 DeathData::queue_duration_sum() const { |
| 152 return queue_duration_sum_; | 172 return queue_duration_sum_; |
| 153 } | 173 } |
| 154 | 174 |
| 155 int32 DeathData::queue_duration_max() const { | 175 int32 DeathData::queue_duration_max() const { |
| 156 return queue_duration_max_; | 176 return queue_duration_max_; |
| 157 } | 177 } |
| 158 | 178 |
| 159 int32 DeathData::queue_duration_sample() const { | 179 int32 DeathData::queue_duration_sample() const { |
| 160 return queue_duration_sample_; | 180 return queue_duration_sample_; |
| 161 } | 181 } |
| 162 | 182 |
| 163 void DeathData::Clear() { | 183 DeathDataPhaseSnapshot* DeathData::last_phase_snapshot() const { |
| 164 count_ = 0; | 184 return last_phase_snapshot_; |
| 165 run_duration_sum_ = 0; | 185 } |
| 186 | |
| 187 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { | |
| 188 // Snapshotting and storing current state. | |
| 189 last_phase_snapshot_ = new DeathDataPhaseSnapshot(profiling_phase, *this); | |
|
Alexei Svitkine (slow)
2015/04/08 15:55:38
Eek, I really don't like this circular reference b
vadimt
2015/04/08 20:32:34
Beautiful, but perhaps suboptimal, as per previous
| |
| 190 | |
| 191 // Not touching fields for which a delta can be computed by comparing with a | |
| 192 // snapshot from previous phase. Resetting other fields. Sample values will be | |
| 193 // reset upon next death recording because sample_probability_count_ is set to | |
| 194 // 0. | |
| 195 sample_probability_count_ = 0; | |
| 166 run_duration_max_ = 0; | 196 run_duration_max_ = 0; |
| 167 run_duration_sample_ = 0; | |
| 168 queue_duration_sum_ = 0; | |
| 169 queue_duration_max_ = 0; | 197 queue_duration_max_ = 0; |
| 170 queue_duration_sample_ = 0; | |
| 171 } | 198 } |
| 172 | 199 |
| 173 //------------------------------------------------------------------------------ | 200 //------------------------------------------------------------------------------ |
| 174 DeathDataSnapshot::DeathDataSnapshot() | 201 DeathDataSnapshot::DeathDataSnapshot() |
| 175 : count(-1), | 202 : count(-1), |
| 176 run_duration_sum(-1), | 203 run_duration_sum(-1), |
| 177 run_duration_max(-1), | 204 run_duration_max(-1), |
| 178 run_duration_sample(-1), | 205 run_duration_sample(-1), |
| 179 queue_duration_sum(-1), | 206 queue_duration_sum(-1), |
| 180 queue_duration_max(-1), | 207 queue_duration_max(-1), |
| 181 queue_duration_sample(-1) { | 208 queue_duration_sample(-1) { |
| 182 } | 209 } |
| 183 | 210 |
| 184 DeathDataSnapshot::DeathDataSnapshot( | 211 DeathDataSnapshot::DeathDataSnapshot( |
| 185 const tracked_objects::DeathData& death_data) | 212 const tracked_objects::DeathData& death_data) |
|
Alexei Svitkine (slow)
2015/04/08 15:55:38
Nit: No need for tracked_objects:: namespace.
vadimt
2015/04/08 20:32:34
Done.
| |
| 186 : count(death_data.count()), | 213 : count(death_data.count()), |
| 187 run_duration_sum(death_data.run_duration_sum()), | 214 run_duration_sum(death_data.run_duration_sum()), |
| 188 run_duration_max(death_data.run_duration_max()), | 215 run_duration_max(death_data.run_duration_max()), |
| 189 run_duration_sample(death_data.run_duration_sample()), | 216 run_duration_sample(death_data.run_duration_sample()), |
| 190 queue_duration_sum(death_data.queue_duration_sum()), | 217 queue_duration_sum(death_data.queue_duration_sum()), |
| 191 queue_duration_max(death_data.queue_duration_max()), | 218 queue_duration_max(death_data.queue_duration_max()), |
| 192 queue_duration_sample(death_data.queue_duration_sample()) { | 219 queue_duration_sample(death_data.queue_duration_sample()) { |
| 193 } | 220 } |
| 194 | 221 |
| 195 DeathDataSnapshot::~DeathDataSnapshot() { | 222 DeathDataSnapshot::~DeathDataSnapshot() { |
| 196 } | 223 } |
| 197 | 224 |
| 225 void DeathDataSnapshot::CalculateDelta(const DeathDataSnapshot& older) { | |
| 226 count -= older.count; | |
| 227 run_duration_sum -= older.run_duration_sum; | |
| 228 queue_duration_sum -= older.queue_duration_sum; | |
| 229 } | |
| 230 | |
| 198 //------------------------------------------------------------------------------ | 231 //------------------------------------------------------------------------------ |
| 199 BirthOnThread::BirthOnThread(const Location& location, | 232 BirthOnThread::BirthOnThread(const Location& location, |
| 200 const ThreadData& current) | 233 const ThreadData& current) |
| 201 : location_(location), | 234 : location_(location), |
| 202 birth_thread_(¤t) { | 235 birth_thread_(¤t) { |
| 203 } | 236 } |
| 204 | 237 |
| 205 //------------------------------------------------------------------------------ | 238 //------------------------------------------------------------------------------ |
| 206 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { | 239 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { |
| 207 } | 240 } |
| (...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 378 return; | 411 return; |
| 379 } | 412 } |
| 380 // We must NOT do any allocations during this callback. | 413 // We must NOT do any allocations during this callback. |
| 381 // Using the simple linked lists avoids all allocations. | 414 // Using the simple linked lists avoids all allocations. |
| 382 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 415 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
| 383 this->next_retired_worker_ = first_retired_worker_; | 416 this->next_retired_worker_ = first_retired_worker_; |
| 384 first_retired_worker_ = this; | 417 first_retired_worker_ = this; |
| 385 } | 418 } |
| 386 | 419 |
| 387 // static | 420 // static |
| 388 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { | 421 void ThreadData::Snapshot(int current_profiling_phase, |
| 389 ThreadData::SnapshotCurrentPhase( | 422 ProcessDataSnapshot* process_data_snapshot) { |
| 390 &process_data_snapshot->phased_process_data_snapshots[0]); | 423 BirthCountMap birth_counts; |
| 424 | |
| 425 // Get an unchanging copy of a ThreadData list. | |
| 426 ThreadData* my_list = ThreadData::first(); | |
| 427 | |
| 428 // Gather data serially. | |
| 429 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 430 // grabbing values without the protection of a lock, but it has the advantage | |
| 431 // of working even with threads that don't have message loops. If a user | |
| 432 // sees any strangeness, they can always just run their stats gathering a | |
| 433 // second time. | |
| 434 for (ThreadData* thread_data = my_list; thread_data; | |
| 435 thread_data = thread_data->next()) { | |
| 436 thread_data->SnapshotExecutedTasks( | |
| 437 current_profiling_phase, | |
| 438 &process_data_snapshot->phased_process_data_snapshots, &birth_counts); | |
| 439 } | |
| 440 | |
| 441 // Add births that are still active -- i.e. objects that have tallied a birth, | |
| 442 // but have not yet tallied a matching death, and hence must be either | |
| 443 // running, queued up, or being held in limbo for future posting. | |
| 444 for (const auto& birth_count : birth_counts) { | |
| 445 if (birth_count.second > 0) { | |
| 446 process_data_snapshot | |
| 447 ->phased_process_data_snapshots[current_profiling_phase] | |
| 448 .tasks.push_back(TaskSnapshot( | |
| 449 BirthOnThreadSnapshot(*birth_count.first), | |
| 450 DeathDataSnapshot(DeathData(birth_count.second)), "Still_Alive")); | |
| 451 } | |
| 452 } | |
| 453 } | |
| 454 | |
| 455 // static | |
| 456 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { | |
| 457 // Get an unchanging copy of a ThreadData list. | |
| 458 ThreadData* my_list = ThreadData::first(); | |
| 459 | |
| 460 // Add snapshots for all death datas in all threads serially. | |
| 461 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 462 // grabbing values without the protection of a lock, but it has the advantage | |
| 463 // of working even with threads that don't have message loops. Any corruption | |
| 464 // shouldn't cause "cascading damage" to anything else (in later phases). | |
| 465 for (ThreadData* thread_data = my_list; thread_data; | |
| 466 thread_data = thread_data->next()) { | |
| 467 thread_data->OnProfilingPhaseCompletionOnThread(profiling_phase); | |
| 468 } | |
| 391 } | 469 } |
| 392 | 470 |
| 393 Births* ThreadData::TallyABirth(const Location& location) { | 471 Births* ThreadData::TallyABirth(const Location& location) { |
| 394 BirthMap::iterator it = birth_map_.find(location); | 472 BirthMap::iterator it = birth_map_.find(location); |
| 395 Births* child; | 473 Births* child; |
| 396 if (it != birth_map_.end()) { | 474 if (it != birth_map_.end()) { |
| 397 child = it->second; | 475 child = it->second; |
| 398 child->RecordBirth(); | 476 child->RecordBirth(); |
| 399 } else { | 477 } else { |
| 400 child = new Births(location, *this); // Leak this. | 478 child = new Births(location, *this); // Leak this. |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 412 // Lock since the map may get relocated now, and other threads sometimes | 490 // Lock since the map may get relocated now, and other threads sometimes |
| 413 // snapshot it (but they lock before copying it). | 491 // snapshot it (but they lock before copying it). |
| 414 base::AutoLock lock(map_lock_); | 492 base::AutoLock lock(map_lock_); |
| 415 parent_child_set_.insert(pair); | 493 parent_child_set_.insert(pair); |
| 416 } | 494 } |
| 417 } | 495 } |
| 418 | 496 |
| 419 return child; | 497 return child; |
| 420 } | 498 } |
| 421 | 499 |
| 422 void ThreadData::TallyADeath(const Births& birth, | 500 void ThreadData::TallyADeath(const Births& births, |
| 423 int32 queue_duration, | 501 int32 queue_duration, |
| 424 const TaskStopwatch& stopwatch) { | 502 const TaskStopwatch& stopwatch) { |
| 425 int32 run_duration = stopwatch.RunDurationMs(); | 503 int32 run_duration = stopwatch.RunDurationMs(); |
| 426 | 504 |
| 427 // Stir in some randomness, plus add constant in case durations are zero. | 505 // Stir in some randomness, plus add constant in case durations are zero. |
| 428 const uint32 kSomePrimeNumber = 2147483647; | 506 const uint32 kSomePrimeNumber = 2147483647; |
| 429 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 507 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
| 430 // An address is going to have some randomness to it as well ;-). | 508 // An address is going to have some randomness to it as well ;-). |
| 431 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); | 509 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0)); |
| 432 | 510 |
| 433 // We don't have queue durations without OS timer. OS timer is automatically | 511 // We don't have queue durations without OS timer. OS timer is automatically |
| 434 // used for task-post-timing, so the use of an alternate timer implies all | 512 // used for task-post-timing, so the use of an alternate timer implies all |
| 435 // queue times are invalid, unless it was explicitly said that we can trust | 513 // queue times are invalid, unless it was explicitly said that we can trust |
| 436 // the alternate timer. | 514 // the alternate timer. |
| 437 if (kAllowAlternateTimeSourceHandling && | 515 if (kAllowAlternateTimeSourceHandling && |
| 438 now_function_ && | 516 now_function_ && |
| 439 !now_function_is_time_) { | 517 !now_function_is_time_) { |
| 440 queue_duration = 0; | 518 queue_duration = 0; |
| 441 } | 519 } |
| 442 | 520 |
| 443 DeathMap::iterator it = death_map_.find(&birth); | 521 DeathMap::iterator it = death_map_.find(&births); |
| 444 DeathData* death_data; | 522 DeathData* death_data; |
| 445 if (it != death_map_.end()) { | 523 if (it != death_map_.end()) { |
| 446 death_data = &it->second; | 524 death_data = &it->second; |
| 447 } else { | 525 } else { |
| 448 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 526 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
| 449 death_data = &death_map_[&birth]; | 527 death_data = &death_map_[&births]; |
| 450 } // Release lock ASAP. | 528 } // Release lock ASAP. |
| 451 death_data->RecordDeath(queue_duration, run_duration, random_number_); | 529 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
| 452 | 530 |
| 453 if (!kTrackParentChildLinks) | 531 if (!kTrackParentChildLinks) |
| 454 return; | 532 return; |
| 455 if (!parent_stack_.empty()) { // We might get turned off. | 533 if (!parent_stack_.empty()) { // We might get turned off. |
| 456 DCHECK_EQ(parent_stack_.top(), &birth); | 534 DCHECK_EQ(parent_stack_.top(), &births); |
| 457 parent_stack_.pop(); | 535 parent_stack_.pop(); |
| 458 } | 536 } |
| 459 } | 537 } |
| 460 | 538 |
| 461 // static | 539 // static |
| 462 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 540 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
| 463 if (!TrackingStatus()) | 541 if (!TrackingStatus()) |
| 464 return NULL; | 542 return NULL; |
| 465 ThreadData* current_thread_data = Get(); | 543 ThreadData* current_thread_data = Get(); |
| 466 if (!current_thread_data) | 544 if (!current_thread_data) |
| 467 return NULL; | 545 return NULL; |
| 468 return current_thread_data->TallyABirth(location); | 546 return current_thread_data->TallyABirth(location); |
| 469 } | 547 } |
| 470 | 548 |
| 471 // static | 549 // static |
| 472 void ThreadData::TallyRunOnNamedThreadIfTracking( | 550 void ThreadData::TallyRunOnNamedThreadIfTracking( |
| 473 const base::TrackingInfo& completed_task, | 551 const base::TrackingInfo& completed_task, |
| 474 const TaskStopwatch& stopwatch) { | 552 const TaskStopwatch& stopwatch) { |
| 475 // Even if we have been DEACTIVATED, we will process any pending births so | 553 // Even if we have been DEACTIVATED, we will process any pending births so |
| 476 // that our data structures (which counted the outstanding births) remain | 554 // that our data structures (which counted the outstanding births) remain |
| 477 // consistent. | 555 // consistent. |
| 478 const Births* birth = completed_task.birth_tally; | 556 const Births* births = completed_task.birth_tally; |
| 479 if (!birth) | 557 if (!births) |
| 480 return; | 558 return; |
| 481 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 559 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 482 if (!current_thread_data) | 560 if (!current_thread_data) |
| 483 return; | 561 return; |
| 484 | 562 |
| 485 // Watch out for a race where status_ is changing, and hence one or both | 563 // Watch out for a race where status_ is changing, and hence one or both |
| 486 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 564 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
| 487 // get a time value since we "weren't tracking" and we were trying to be | 565 // get a time value since we "weren't tracking" and we were trying to be |
| 488 // efficient by not calling for a genuine time value. For simplicity, we'll | 566 // efficient by not calling for a genuine time value. For simplicity, we'll |
| 489 // use a default zero duration when we can't calculate a true value. | 567 // use a default zero duration when we can't calculate a true value. |
| 490 TrackedTime start_of_run = stopwatch.StartTime(); | 568 TrackedTime start_of_run = stopwatch.StartTime(); |
| 491 int32 queue_duration = 0; | 569 int32 queue_duration = 0; |
| 492 if (!start_of_run.is_null()) { | 570 if (!start_of_run.is_null()) { |
| 493 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 571 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
| 494 .InMilliseconds(); | 572 .InMilliseconds(); |
| 495 } | 573 } |
| 496 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 574 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 497 } | 575 } |
| 498 | 576 |
| 499 // static | 577 // static |
| 500 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 578 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
| 501 const Births* birth, | 579 const Births* births, |
| 502 const TrackedTime& time_posted, | 580 const TrackedTime& time_posted, |
| 503 const TaskStopwatch& stopwatch) { | 581 const TaskStopwatch& stopwatch) { |
| 504 // Even if we have been DEACTIVATED, we will process any pending births so | 582 // Even if we have been DEACTIVATED, we will process any pending births so |
| 505 // that our data structures (which counted the outstanding births) remain | 583 // that our data structures (which counted the outstanding births) remain |
| 506 // consistent. | 584 // consistent. |
| 507 if (!birth) | 585 if (!births) |
| 508 return; | 586 return; |
| 509 | 587 |
| 510 // TODO(jar): Support the option to coalesce all worker-thread activity under | 588 // TODO(jar): Support the option to coalesce all worker-thread activity under |
| 511 // one ThreadData instance that uses locks to protect *all* access. This will | 589 // one ThreadData instance that uses locks to protect *all* access. This will |
| 512 // reduce memory (making it provably bounded), but run incrementally slower | 590 // reduce memory (making it provably bounded), but run incrementally slower |
| 513 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 591 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
| 514 // that the locks on TallyADeath will be *after* the worker thread has run, | 592 // that the locks on TallyADeath will be *after* the worker thread has run, |
| 515 // and hence nothing will be waiting for the completion (... besides some | 593 // and hence nothing will be waiting for the completion (... besides some |
| 516 // other thread that might like to run). Also, the worker threads tasks are | 594 // other thread that might like to run). Also, the worker threads tasks are |
| 517 // generally longer, and hence the cost of the lock may perchance be amortized | 595 // generally longer, and hence the cost of the lock may perchance be amortized |
| 518 // over the long task's lifetime. | 596 // over the long task's lifetime. |
| 519 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 597 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 520 if (!current_thread_data) | 598 if (!current_thread_data) |
| 521 return; | 599 return; |
| 522 | 600 |
| 523 TrackedTime start_of_run = stopwatch.StartTime(); | 601 TrackedTime start_of_run = stopwatch.StartTime(); |
| 524 int32 queue_duration = 0; | 602 int32 queue_duration = 0; |
| 525 if (!start_of_run.is_null()) { | 603 if (!start_of_run.is_null()) { |
| 526 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 604 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
| 527 } | 605 } |
| 528 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 606 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 529 } | 607 } |
| 530 | 608 |
| 531 // static | 609 // static |
| 532 void ThreadData::TallyRunInAScopedRegionIfTracking( | 610 void ThreadData::TallyRunInAScopedRegionIfTracking( |
| 533 const Births* birth, | 611 const Births* births, |
| 534 const TaskStopwatch& stopwatch) { | 612 const TaskStopwatch& stopwatch) { |
| 535 // Even if we have been DEACTIVATED, we will process any pending births so | 613 // Even if we have been DEACTIVATED, we will process any pending births so |
| 536 // that our data structures (which counted the outstanding births) remain | 614 // that our data structures (which counted the outstanding births) remain |
| 537 // consistent. | 615 // consistent. |
| 538 if (!birth) | 616 if (!births) |
| 539 return; | 617 return; |
| 540 | 618 |
| 541 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 619 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 542 if (!current_thread_data) | 620 if (!current_thread_data) |
| 543 return; | 621 return; |
| 544 | 622 |
| 545 int32 queue_duration = 0; | 623 int32 queue_duration = 0; |
| 546 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 624 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 547 } | |
| 548 | |
| 549 // static | |
| 550 void ThreadData::SnapshotAllExecutedTasks( | |
| 551 ProcessDataPhaseSnapshot* process_data_phase, | |
| 552 BirthCountMap* birth_counts) { | |
| 553 // Get an unchanging copy of a ThreadData list. | |
| 554 ThreadData* my_list = ThreadData::first(); | |
| 555 | |
| 556 // Gather data serially. | |
| 557 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 558 // grabbing values without the protection of a lock, but it has the advantage | |
| 559 // of working even with threads that don't have message loops. If a user | |
| 560 // sees any strangeness, they can always just run their stats gathering a | |
| 561 // second time. | |
| 562 for (ThreadData* thread_data = my_list; | |
| 563 thread_data; | |
| 564 thread_data = thread_data->next()) { | |
| 565 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts); | |
| 566 } | |
| 567 } | |
| 568 | |
| 569 // static | |
| 570 void ThreadData::SnapshotCurrentPhase( | |
| 571 ProcessDataPhaseSnapshot* process_data_phase) { | |
| 572 // Add births that have run to completion to |collected_data|. | |
| 573 // |birth_counts| tracks the total number of births recorded at each location | |
| 574 // for which we have not seen a death count. | |
| 575 BirthCountMap birth_counts; | |
| 576 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts); | |
| 577 | |
| 578 // Add births that are still active -- i.e. objects that have tallied a birth, | |
| 579 // but have not yet tallied a matching death, and hence must be either | |
| 580 // running, queued up, or being held in limbo for future posting. | |
| 581 for (const auto& birth_count : birth_counts) { | |
| 582 if (birth_count.second > 0) { | |
| 583 process_data_phase->tasks.push_back(TaskSnapshot( | |
| 584 *birth_count.first, DeathData(birth_count.second), "Still_Alive")); | |
| 585 } | |
| 586 } | |
| 587 } | 625 } |
| 588 | 626 |
| 589 void ThreadData::SnapshotExecutedTasks( | 627 void ThreadData::SnapshotExecutedTasks( |
| 590 ProcessDataPhaseSnapshot* process_data_phase, | 628 int current_profiling_phase, |
| 629 PhasedProcessDataSnapshotMap* phased_process_data_snapshots, | |
| 591 BirthCountMap* birth_counts) { | 630 BirthCountMap* birth_counts) { |
| 592 // Get copy of data, so that the data will not change during the iterations | 631 // Get copy of data, so that the data will not change during the iterations |
| 593 // and processing. | 632 // and processing. |
| 594 ThreadData::BirthMap birth_map; | 633 BirthMap birth_map; |
| 595 ThreadData::DeathMap death_map; | 634 DeathsSnapshot deaths; |
| 596 ThreadData::ParentChildSet parent_child_set; | 635 ParentChildSet parent_child_set; |
| 597 SnapshotMaps(&birth_map, &death_map, &parent_child_set); | 636 SnapshotMaps(current_profiling_phase, &birth_map, &deaths, &parent_child_set); |
| 598 | |
| 599 for (const auto& death : death_map) { | |
| 600 process_data_phase->tasks.push_back( | |
| 601 TaskSnapshot(*death.first, death.second, thread_name())); | |
| 602 (*birth_counts)[death.first] -= death.first->birth_count(); | |
| 603 } | |
| 604 | 637 |
| 605 for (const auto& birth : birth_map) { | 638 for (const auto& birth : birth_map) { |
| 606 (*birth_counts)[birth.second] += birth.second->birth_count(); | 639 (*birth_counts)[birth.second] += birth.second->birth_count(); |
| 607 } | 640 } |
| 608 | 641 |
| 609 if (!kTrackParentChildLinks) | 642 for (const auto& death : deaths) { |
| 610 return; | 643 (*birth_counts)[death.first] -= death.first->birth_count(); |
| 611 | 644 |
| 612 for (const auto& parent_child : parent_child_set) { | 645 for (const DeathDataPhaseSnapshot* phase = &death.second; phase; |
| 613 process_data_phase->descendants.push_back( | 646 phase = phase->prev) { |
| 614 ParentChildPairSnapshot(parent_child)); | 647 DeathDataSnapshot death_data = phase->death_data; |
| 648 | |
| 649 if (phase->prev) | |
| 650 death_data.CalculateDelta(phase->prev->death_data); | |
| 651 | |
| 652 if (death_data.count > 0) | |
|
Alexei Svitkine (slow)
2015/04/08 15:55:39
Nit: {}
vadimt
2015/04/08 20:32:34
Done.
| |
| 653 (*phased_process_data_snapshots)[phase->profiling_phase] | |
| 654 .tasks.push_back(TaskSnapshot(BirthOnThreadSnapshot(*death.first), | |
| 655 death_data, thread_name())); | |
| 656 } | |
| 615 } | 657 } |
| 616 } | 658 } |
| 617 | 659 |
| 618 // This may be called from another thread. | 660 // This may be called from another thread. |
| 619 void ThreadData::SnapshotMaps(BirthMap* birth_map, | 661 void ThreadData::SnapshotMaps(int profiling_phase, |
| 620 DeathMap* death_map, | 662 BirthMap* birth_map, |
| 663 DeathsSnapshot* deaths, | |
| 621 ParentChildSet* parent_child_set) { | 664 ParentChildSet* parent_child_set) { |
| 622 base::AutoLock lock(map_lock_); | 665 base::AutoLock lock(map_lock_); |
| 666 | |
| 623 for (const auto& birth : birth_map_) | 667 for (const auto& birth : birth_map_) |
| 624 (*birth_map)[birth.first] = birth.second; | 668 (*birth_map)[birth.first] = birth.second; |
| 625 for (const auto& death : death_map_) | 669 |
| 626 (*death_map)[death.first] = death.second; | 670 for (const auto& death : death_map_) { |
| 671 deaths->push_back(DeathsSnapshot::value_type( | |
| 672 death.first, DeathDataPhaseSnapshot(profiling_phase, death.second))); | |
| 673 } | |
| 627 | 674 |
| 628 if (!kTrackParentChildLinks) | 675 if (!kTrackParentChildLinks) |
| 629 return; | 676 return; |
| 630 | 677 |
| 631 for (const auto& parent_child : parent_child_set_) | 678 for (const auto& parent_child : parent_child_set_) |
| 632 parent_child_set->insert(parent_child); | 679 parent_child_set->insert(parent_child); |
| 633 } | 680 } |
| 634 | 681 |
| 682 void ThreadData::OnProfilingPhaseCompletionOnThread(int profiling_phase) { | |
| 683 base::AutoLock lock(map_lock_); | |
| 684 | |
| 685 for (auto& death : death_map_) { | |
| 686 death.second.OnProfilingPhaseCompleted(profiling_phase); | |
| 687 } | |
| 688 } | |
| 689 | |
| 635 static void OptionallyInitializeAlternateTimer() { | 690 static void OptionallyInitializeAlternateTimer() { |
| 636 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 691 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
| 637 if (alternate_time_source) | 692 if (alternate_time_source) |
| 638 ThreadData::SetAlternateTimeSource(alternate_time_source); | 693 ThreadData::SetAlternateTimeSource(alternate_time_source); |
| 639 } | 694 } |
| 640 | 695 |
| 641 bool ThreadData::Initialize() { | 696 bool ThreadData::Initialize() { |
| 642 if (status_ >= DEACTIVATED) | 697 if (status_ >= DEACTIVATED) |
| 643 return true; // Someone else did the initialization. | 698 return true; // Someone else did the initialization. |
| 644 // Due to racy lazy initialization in tests, we'll need to recheck status_ | 699 // Due to racy lazy initialization in tests, we'll need to recheck status_ |
| (...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 902 | 957 |
| 903 ThreadData* TaskStopwatch::GetThreadData() const { | 958 ThreadData* TaskStopwatch::GetThreadData() const { |
| 904 #if DCHECK_IS_ON() | 959 #if DCHECK_IS_ON() |
| 905 DCHECK(state_ != CREATED); | 960 DCHECK(state_ != CREATED); |
| 906 #endif | 961 #endif |
| 907 | 962 |
| 908 return current_thread_data_; | 963 return current_thread_data_; |
| 909 } | 964 } |
| 910 | 965 |
| 911 //------------------------------------------------------------------------------ | 966 //------------------------------------------------------------------------------ |
| 967 // DeathDataPhaseSnapshot | |
| 968 | |
| 969 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(int profiling_phase, | |
| 970 const DeathData& death_data) | |
| 971 : profiling_phase(profiling_phase), | |
| 972 death_data(death_data), | |
| 973 prev(death_data.last_phase_snapshot()) { | |
| 974 } | |
| 975 | |
| 976 //------------------------------------------------------------------------------ | |
| 977 // TaskSnapshot | |
| 978 | |
| 912 TaskSnapshot::TaskSnapshot() { | 979 TaskSnapshot::TaskSnapshot() { |
| 913 } | 980 } |
| 914 | 981 |
| 915 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, | 982 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, |
| 916 const DeathData& death_data, | 983 const DeathDataSnapshot& death_data, |
| 917 const std::string& death_thread_name) | 984 const std::string& death_thread_name) |
| 918 : birth(birth), | 985 : birth(birth), |
| 919 death_data(death_data), | 986 death_data(death_data), |
| 920 death_thread_name(death_thread_name) { | 987 death_thread_name(death_thread_name) { |
| 921 } | 988 } |
| 922 | 989 |
| 923 TaskSnapshot::~TaskSnapshot() { | 990 TaskSnapshot::~TaskSnapshot() { |
| 924 } | 991 } |
| 925 | 992 |
| 926 //------------------------------------------------------------------------------ | 993 //------------------------------------------------------------------------------ |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 955 : process_id(base::GetCurrentProcId()) { | 1022 : process_id(base::GetCurrentProcId()) { |
| 956 #else | 1023 #else |
| 957 : process_id(base::kNullProcessId) { | 1024 : process_id(base::kNullProcessId) { |
| 958 #endif | 1025 #endif |
| 959 } | 1026 } |
| 960 | 1027 |
| 961 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1028 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 962 } | 1029 } |
| 963 | 1030 |
| 964 } // namespace tracked_objects | 1031 } // namespace tracked_objects |
| OLD | NEW |