OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <limits.h> | 7 #include <limits.h> |
8 #include <stdlib.h> | 8 #include <stdlib.h> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
11 #include "base/base_switches.h" | 11 #include "base/base_switches.h" |
12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
13 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
14 #include "base/debug/leak_annotations.h" | 14 #include "base/debug/leak_annotations.h" |
15 #include "base/logging.h" | 15 #include "base/logging.h" |
16 #include "base/process/process_handle.h" | 16 #include "base/process/process_handle.h" |
17 #include "base/profiler/alternate_timer.h" | 17 #include "base/profiler/alternate_timer.h" |
18 #include "base/stl_util.h" | |
18 #include "base/strings/stringprintf.h" | 19 #include "base/strings/stringprintf.h" |
19 #include "base/third_party/valgrind/memcheck.h" | 20 #include "base/third_party/valgrind/memcheck.h" |
20 #include "base/tracking_info.h" | 21 #include "base/tracking_info.h" |
21 | 22 |
22 using base::TimeDelta; | 23 using base::TimeDelta; |
23 | 24 |
24 namespace base { | 25 namespace base { |
25 class TimeDelta; | 26 class TimeDelta; |
26 } | 27 } |
27 | 28 |
(...skipping 58 matching lines...)
86 } | 87 } |
87 return current_timing_enabled == ENABLED_TIMING; | 88 return current_timing_enabled == ENABLED_TIMING; |
88 } | 89 } |
89 | 90 |
90 } // namespace | 91 } // namespace |
91 | 92 |
92 //------------------------------------------------------------------------------ | 93 //------------------------------------------------------------------------------ |
93 // DeathData tallies durations when a death takes place. | 94 // DeathData tallies durations when a death takes place. |
94 | 95 |
95 DeathData::DeathData() { | 96 DeathData::DeathData() { |
96 Clear(); | 97 count_ = 0; |
98 sample_probability_count_ = 0; | |
99 run_duration_sum_ = 0; | |
100 run_duration_max_ = 0; | |
101 run_duration_sample_ = 0; | |
102 queue_duration_sum_ = 0; | |
103 queue_duration_max_ = 0; | |
104 queue_duration_sample_ = 0; | |
105 last_phase_snapshot_ = nullptr; | |
97 } | 106 } |
98 | 107 |
99 DeathData::DeathData(int count) { | 108 DeathData::~DeathData() { |
100 Clear(); | 109 while (last_phase_snapshot_) { |
101 count_ = count; | 110 DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; |
111 last_phase_snapshot_ = snapshot->prev; | |
112 delete snapshot; | |
113 } | |
102 } | 114 } |
103 | 115 |
104 // TODO(jar): I need to see if this macro to optimize branching is worth using. | 116 // TODO(jar): I need to see if this macro to optimize branching is worth using. |
105 // | 117 // |
106 // This macro has no branching, so it is surely fast, and is equivalent to: | 118 // This macro has no branching, so it is surely fast, and is equivalent to: |
107 // if (assign_it) | 119 // if (assign_it) |
108 // target = source; | 120 // target = source; |
109 // We use a macro rather than a template to force this to inline. | 121 // We use a macro rather than a template to force this to inline. |
110 // Related code for calculating max is discussed on the web. | 122 // Related code for calculating max is discussed on the web. |
111 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ | 123 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
112 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) | 124 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) |
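A quick standalone check of that identity (a sketch with made-up function names, not part of the CL): -static_cast<int32>(assign_it) is all ones when assign_it is true and zero when it is false, so the masked XOR either copies source into target or leaves target untouched.

    #include <assert.h>
    #include <stdint.h>

    // Hypothetical demo: the branchless form gives the same result as the branch.
    int32_t AssignBranchless(bool assign_it, int32_t target, int32_t source) {
      target ^= (target ^ source) & -static_cast<int32_t>(assign_it);
      return target;
    }

    int32_t AssignWithBranch(bool assign_it, int32_t target, int32_t source) {
      if (assign_it)
        target = source;
      return target;
    }

    int main() {
      assert(AssignBranchless(true, 7, 42) == AssignWithBranch(true, 7, 42));    // 42
      assert(AssignBranchless(false, 7, 42) == AssignWithBranch(false, 7, 42));  // 7
      return 0;
    }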
113 | 125 |
114 void DeathData::RecordDeath(const int32 queue_duration, | 126 void DeathData::RecordDeath(const int32 queue_duration, |
115 const int32 run_duration, | 127 const int32 run_duration, |
116 const uint32 random_number) { | 128 const uint32 random_number) { |
117 // We'll just clamp at INT_MAX, but we should note this in the UI as such. | 129 // We'll just clamp at INT_MAX, but we should note this in the UI as such. |
118 if (count_ < INT_MAX) | 130 if (count_ < INT_MAX) |
119 ++count_; | 131 ++count_; |
132 if (sample_probability_count_ < INT_MAX) | |
133 ++sample_probability_count_; | |
120 queue_duration_sum_ += queue_duration; | 134 queue_duration_sum_ += queue_duration; |
121 run_duration_sum_ += run_duration; | 135 run_duration_sum_ += run_duration; |
122 | 136 |
123 if (queue_duration_max_ < queue_duration) | 137 if (queue_duration_max_ < queue_duration) |
124 queue_duration_max_ = queue_duration; | 138 queue_duration_max_ = queue_duration; |
125 if (run_duration_max_ < run_duration) | 139 if (run_duration_max_ < run_duration) |
126 run_duration_max_ = run_duration; | 140 run_duration_max_ = run_duration; |
127 | 141 |
128 // Take a uniformly distributed sample over all durations ever supplied. | 142 // Take a uniformly distributed sample over all durations ever supplied during |
129 // The probability that we (instead) use this new sample is 1/count_. This | 143 // the current profiling phase. |
130 // results in a completely uniform selection of the sample (at least when we | 144 // The probability that we (instead) use this new sample is |
131 // don't clamp count_... but that should be inconsequentially likely). | 145 // 1/sample_probability_count_. This results in a completely uniform selection |
132 // We ignore the fact that we correlated our selection of a sample to the run | 146 // of the sample (at least when we don't clamp sample_probability_count_... |
133 // and queue times (i.e., we used them to generate random_number). | 147 // but that should be inconsequentially likely). We ignore the fact that we |
134 CHECK_GT(count_, 0); | 148 // correlated our selection of a sample to the run and queue times (i.e., we |
135 if (0 == (random_number % count_)) { | 149 // used them to generate random_number). |
150 CHECK_GT(sample_probability_count_, 0); | |
151 if (0 == (random_number % sample_probability_count_)) { | |
136 queue_duration_sample_ = queue_duration; | 152 queue_duration_sample_ = queue_duration; |
137 run_duration_sample_ = run_duration; | 153 run_duration_sample_ = run_duration; |
138 } | 154 } |
139 } | 155 } |
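The comment above describes size-one reservoir sampling. A minimal sketch of the same selection rule (hypothetical names, and assuming the caller supplies a reasonably uniform random_number for each record):

    #include <stdint.h>

    class DurationSampler {
     public:
      // Keep the n-th recorded duration with probability 1/n; by induction every
      // recorded duration ends up with an equal chance of being the kept sample.
      void Record(int32_t duration, uint32_t random_number) {
        ++seen_count_;
        if (random_number % seen_count_ == 0)
          sample_ = duration;
      }
      int32_t sample() const { return sample_; }

     private:
      int seen_count_ = 0;
      int32_t sample_ = 0;
    };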
140 | 156 |
141 int DeathData::count() const { return count_; } | 157 int DeathData::count() const { return count_; } |
142 | 158 |
143 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } | 159 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } |
144 | 160 |
145 int32 DeathData::run_duration_max() const { return run_duration_max_; } | 161 int32 DeathData::run_duration_max() const { return run_duration_max_; } |
146 | 162 |
147 int32 DeathData::run_duration_sample() const { | 163 int32 DeathData::run_duration_sample() const { |
148 return run_duration_sample_; | 164 return run_duration_sample_; |
149 } | 165 } |
150 | 166 |
151 int32 DeathData::queue_duration_sum() const { | 167 int32 DeathData::queue_duration_sum() const { |
152 return queue_duration_sum_; | 168 return queue_duration_sum_; |
153 } | 169 } |
154 | 170 |
155 int32 DeathData::queue_duration_max() const { | 171 int32 DeathData::queue_duration_max() const { |
156 return queue_duration_max_; | 172 return queue_duration_max_; |
157 } | 173 } |
158 | 174 |
159 int32 DeathData::queue_duration_sample() const { | 175 int32 DeathData::queue_duration_sample() const { |
160 return queue_duration_sample_; | 176 return queue_duration_sample_; |
161 } | 177 } |
162 | 178 |
163 void DeathData::Clear() { | 179 DeathDataPhaseSnapshot* DeathData::last_phase_snapshot() const { |
164 count_ = 0; | 180 return last_phase_snapshot_; |
165 run_duration_sum_ = 0; | 181 } |
182 | |
183 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { | |
184 // Snapshotting and storing current state. | |
185 last_phase_snapshot_ = new DeathDataPhaseSnapshot( | |
186 profiling_phase, count_, run_duration_sum_, run_duration_max_, | |
187 run_duration_sample_, queue_duration_sum_, queue_duration_max_, | |
188 queue_duration_sample_, last_phase_snapshot_); | |
189 | |
190 // Not touching fields for which a delta can be computed by comparing with a | |
191 // snapshot from the previous phase. Resetting other fields. Sample values will be | |
192 // reset upon next death recording because sample_probability_count_ is set to | |
193 // 0. | |
Alexei Svitkine (slow), 2015/04/09 15:39:06: This comment needs to discuss *why* it's done this
vadimt, 2015/04/09 21:28:39: Done
194 sample_probability_count_ = 0; | |
166 run_duration_max_ = 0; | 195 run_duration_max_ = 0; |
167 run_duration_sample_ = 0; | |
168 queue_duration_sum_ = 0; | |
169 queue_duration_max_ = 0; | 196 queue_duration_max_ = 0; |
170 queue_duration_sample_ = 0; | |
171 } | 197 } |
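A simplified sketch of the pattern OnProfilingPhaseCompleted() uses (illustrative types, not the real DeathData): each completed phase pushes one node onto a singly linked list, cumulative counters are snapshotted rather than reset so deltas stay cheap, and only the non-cumulative fields get cleared.

    // Illustrative only; the real DeathData tracks several more fields.
    struct PhaseRecord {
      int phase;
      int cumulative_count;  // value of count_ when the phase closed
      PhaseRecord* prev;     // older phases, nullptr at the end of the chain
    };

    class PhasedCounter {
     public:
      void RecordEvent() { ++count_; }

      void OnPhaseCompleted(int phase) {
        // Snapshot the cumulative value; do not reset it.
        last_phase_ = new PhaseRecord{phase, count_, last_phase_};
      }

      int CountForLatestClosedPhase() const {
        if (!last_phase_)
          return 0;
        int older = last_phase_->prev ? last_phase_->prev->cumulative_count : 0;
        return last_phase_->cumulative_count - older;
      }

      ~PhasedCounter() {
        // Same walk-and-delete teardown as ~DeathData() above.
        while (last_phase_) {
          PhaseRecord* dead = last_phase_;
          last_phase_ = dead->prev;
          delete dead;
        }
      }

     private:
      int count_ = 0;
      PhaseRecord* last_phase_ = nullptr;
    };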
172 | 198 |
173 //------------------------------------------------------------------------------ | 199 //------------------------------------------------------------------------------ |
174 DeathDataSnapshot::DeathDataSnapshot() | 200 DeathDataSnapshot::DeathDataSnapshot() |
175 : count(-1), | 201 : count(-1), |
176 run_duration_sum(-1), | 202 run_duration_sum(-1), |
177 run_duration_max(-1), | 203 run_duration_max(-1), |
178 run_duration_sample(-1), | 204 run_duration_sample(-1), |
179 queue_duration_sum(-1), | 205 queue_duration_sum(-1), |
180 queue_duration_max(-1), | 206 queue_duration_max(-1), |
181 queue_duration_sample(-1) { | 207 queue_duration_sample(-1) { |
182 } | 208 } |
183 | 209 |
184 DeathDataSnapshot::DeathDataSnapshot( | 210 DeathDataSnapshot::DeathDataSnapshot(int count, |
185 const tracked_objects::DeathData& death_data) | 211 int32 run_duration_sum, |
186 : count(death_data.count()), | 212 int32 run_duration_max, |
187 run_duration_sum(death_data.run_duration_sum()), | 213 int32 run_duration_sample, |
188 run_duration_max(death_data.run_duration_max()), | 214 int32 queue_duration_sum, |
189 run_duration_sample(death_data.run_duration_sample()), | 215 int32 queue_duration_max, |
190 queue_duration_sum(death_data.queue_duration_sum()), | 216 int32 queue_duration_sample) |
191 queue_duration_max(death_data.queue_duration_max()), | 217 : count(count), |
192 queue_duration_sample(death_data.queue_duration_sample()) { | 218 run_duration_sum(run_duration_sum), |
219 run_duration_max(run_duration_max), | |
220 run_duration_sample(run_duration_sample), | |
221 queue_duration_sum(queue_duration_sum), | |
222 queue_duration_max(queue_duration_max), | |
223 queue_duration_sample(queue_duration_sample) { | |
193 } | 224 } |
194 | 225 |
195 DeathDataSnapshot::~DeathDataSnapshot() { | 226 DeathDataSnapshot::~DeathDataSnapshot() { |
196 } | 227 } |
197 | 228 |
229 void DeathDataSnapshot::CalculateDelta(const DeathDataSnapshot& older) { | |
Alexei Svitkine (slow), 2015/04/09 15:39:05: How about SubtractDelta()? Calculate doesn't make
vadimt, 2015/04/09 21:28:40: Done, but we are not subtracting the delta, we are
230 count -= older.count; | |
231 run_duration_sum -= older.run_duration_sum; | |
232 queue_duration_sum -= older.queue_duration_sum; | |
233 } | |
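A self-contained sketch of what the subtraction above achieves, using an illustrative subset of the fields (not the real class): cumulative fields become phase-local, while fields that are already per-phase, such as the max, are left alone.

    #include <assert.h>
    #include <stdint.h>

    // Illustrative field subset only.
    struct MiniSnapshot {
      int count;
      int32_t run_duration_sum;
      int32_t run_duration_max;  // reset at phase boundaries, so never subtracted
      void SubtractOlder(const MiniSnapshot& older) {
        count -= older.count;
        run_duration_sum -= older.run_duration_sum;
      }
    };

    int main() {
      MiniSnapshot end_of_phase_1 = {120, 5000, 90};  // cumulative, taken now
      MiniSnapshot end_of_phase_0 = {80, 3200, 75};   // stored when phase 0 closed
      end_of_phase_1.SubtractOlder(end_of_phase_0);
      assert(end_of_phase_1.count == 40);               // phase-local count
      assert(end_of_phase_1.run_duration_sum == 1800);  // phase-local sum
      assert(end_of_phase_1.run_duration_max == 90);    // untouched
      return 0;
    }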
234 | |
198 //------------------------------------------------------------------------------ | 235 //------------------------------------------------------------------------------ |
199 BirthOnThread::BirthOnThread(const Location& location, | 236 BirthOnThread::BirthOnThread(const Location& location, |
200 const ThreadData& current) | 237 const ThreadData& current) |
201 : location_(location), | 238 : location_(location), |
202 birth_thread_(¤t) { | 239 birth_thread_(¤t) { |
203 } | 240 } |
204 | 241 |
205 //------------------------------------------------------------------------------ | 242 //------------------------------------------------------------------------------ |
206 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { | 243 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { |
207 } | 244 } |
208 | 245 |
209 BirthOnThreadSnapshot::BirthOnThreadSnapshot( | 246 BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth) |
210 const tracked_objects::BirthOnThread& birth) | |
211 : location(birth.location()), | 247 : location(birth.location()), |
212 thread_name(birth.birth_thread()->thread_name()) { | 248 thread_name(birth.birth_thread()->thread_name()) { |
213 } | 249 } |
214 | 250 |
215 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { | 251 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { |
216 } | 252 } |
217 | 253 |
218 //------------------------------------------------------------------------------ | 254 //------------------------------------------------------------------------------ |
219 Births::Births(const Location& location, const ThreadData& current) | 255 Births::Births(const Location& location, const ThreadData& current) |
220 : BirthOnThread(location, current), | 256 : BirthOnThread(location, current), |
(...skipping 36 matching lines...)
257 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; | 293 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; |
258 | 294 |
259 // static | 295 // static |
260 ThreadData* ThreadData::first_retired_worker_ = NULL; | 296 ThreadData* ThreadData::first_retired_worker_ = NULL; |
261 | 297 |
262 // static | 298 // static |
263 base::LazyInstance<base::Lock>::Leaky | 299 base::LazyInstance<base::Lock>::Leaky |
264 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; | 300 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; |
265 | 301 |
266 // static | 302 // static |
303 base::ThreadChecker ThreadData::snapshot_thread_checker_; | |
Alexei Svitkine (slow), 2015/04/09 15:39:05: Hmm, this isn't correct actually. I think this cau
vadimt, 2015/04/09 21:28:39: Fixed the lazy instance initialization. I think,
304 | |
305 // static | |
267 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; | 306 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; |
268 | 307 |
269 ThreadData::ThreadData(const std::string& suggested_name) | 308 ThreadData::ThreadData(const std::string& suggested_name) |
270 : next_(NULL), | 309 : next_(NULL), |
271 next_retired_worker_(NULL), | 310 next_retired_worker_(NULL), |
272 worker_thread_number_(0), | 311 worker_thread_number_(0), |
273 incarnation_count_for_pool_(-1), | 312 incarnation_count_for_pool_(-1), |
274 current_stopwatch_(NULL) { | 313 current_stopwatch_(NULL) { |
275 DCHECK_GE(suggested_name.size(), 0u); | 314 DCHECK_GE(suggested_name.size(), 0u); |
276 thread_name_ = suggested_name; | 315 thread_name_ = suggested_name; |
(...skipping 101 matching lines...)
378 return; | 417 return; |
379 } | 418 } |
380 // We must NOT do any allocations during this callback. | 419 // We must NOT do any allocations during this callback. |
381 // Using the simple linked lists avoids all allocations. | 420 // Using the simple linked lists avoids all allocations. |
382 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 421 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
383 this->next_retired_worker_ = first_retired_worker_; | 422 this->next_retired_worker_ = first_retired_worker_; |
384 first_retired_worker_ = this; | 423 first_retired_worker_ = this; |
385 } | 424 } |
386 | 425 |
387 // static | 426 // static |
388 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { | 427 void ThreadData::Snapshot(int current_profiling_phase, |
389 ThreadData::SnapshotCurrentPhase( | 428 ProcessDataSnapshot* process_data_snapshot) { |
390 &process_data_snapshot->phased_process_data_snapshots[0]); | 429 DCHECK(snapshot_thread_checker_.CalledOnValidThread()); |
430 BirthCountMap birth_counts; | |
431 | |
432 // Get an unchanging copy of a ThreadData list. | |
433 ThreadData* my_list = ThreadData::first(); | |
434 | |
435 // Gather data serially. | |
436 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
437 // grabbing values without the protection of a lock, but it has the advantage | |
438 // of working even with threads that don't have message loops. If a user | |
439 // sees any strangeness, they can always just run their stats gathering a | |
440 // second time. | |
441 for (ThreadData* thread_data = my_list; thread_data; | |
442 thread_data = thread_data->next()) { | |
443 thread_data->SnapshotExecutedTasks( | |
444 current_profiling_phase, | |
445 &process_data_snapshot->phased_process_data_snapshots, &birth_counts); | |
446 } | |
447 | |
448 // Add births that are still active -- i.e. objects that have tallied a birth, | |
449 // but have not yet tallied a matching death, and hence must be either | |
450 // running, queued up, or being held in limbo for future posting. | |
451 for (const auto& birth_count : birth_counts) { | |
452 if (birth_count.second > 0) { | |
453 process_data_snapshot | |
454 ->phased_process_data_snapshots[current_profiling_phase] | |
455 .tasks.push_back(TaskSnapshot( | |
Alexei Svitkine (slow), 2015/04/09 15:39:05: Nit: Can you make a local variable pointer outside
vadimt, 2015/04/09 21:28:39: Done.
456 BirthOnThreadSnapshot(*birth_count.first), | |
457 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), | |
458 "Still_Alive")); | |
459 } | |
460 } | |
461 } | |
462 | |
463 // static | |
464 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { | |
465 DCHECK(snapshot_thread_checker_.CalledOnValidThread()); | |
466 // Get an unchanging copy of a ThreadData list. | |
467 ThreadData* my_list = ThreadData::first(); | |
468 | |
469 // Add snapshots for all death data in all threads serially. | |
470 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
471 // grabbing values without the protection of a lock, but it has the advantage | |
472 // of working even with threads that don't have message loops. Any corruption | |
473 // shouldn't cause "cascading damage" to anything else (in later phases). | |
474 for (ThreadData* thread_data = my_list; thread_data; | |
475 thread_data = thread_data->next()) { | |
476 thread_data->OnProfilingPhaseCompletionOnThread(profiling_phase); | |
477 } | |
391 } | 478 } |
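A hedged usage sketch of the two static entry points above; the controller function and the phase numbering are assumptions for illustration, not actual Chromium call sites. Both calls are expected on the same thread, per snapshot_thread_checker_.

    #include "base/tracked_objects.h"

    // Hypothetical controller for one profiling phase.
    void CollectAndClosePhase(int phase) {
      tracked_objects::ProcessDataSnapshot snapshot;
      // Fills phased_process_data_snapshots for completed phases and the current
      // one, plus "Still_Alive" entries for births without a matching death.
      tracked_objects::ThreadData::Snapshot(phase, &snapshot);

      // ... report |snapshot| somewhere ...

      // Close the phase so subsequent deaths are attributed to phase + 1.
      tracked_objects::ThreadData::OnProfilingPhaseCompleted(phase);
    }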
392 | 479 |
393 Births* ThreadData::TallyABirth(const Location& location) { | 480 Births* ThreadData::TallyABirth(const Location& location) { |
394 BirthMap::iterator it = birth_map_.find(location); | 481 BirthMap::iterator it = birth_map_.find(location); |
395 Births* child; | 482 Births* child; |
396 if (it != birth_map_.end()) { | 483 if (it != birth_map_.end()) { |
397 child = it->second; | 484 child = it->second; |
398 child->RecordBirth(); | 485 child->RecordBirth(); |
399 } else { | 486 } else { |
400 child = new Births(location, *this); // Leak this. | 487 child = new Births(location, *this); // Leak this. |
(...skipping 11 matching lines...)
412 // Lock since the map may get relocated now, and other threads sometimes | 499 // Lock since the map may get relocated now, and other threads sometimes |
413 // snapshot it (but they lock before copying it). | 500 // snapshot it (but they lock before copying it). |
414 base::AutoLock lock(map_lock_); | 501 base::AutoLock lock(map_lock_); |
415 parent_child_set_.insert(pair); | 502 parent_child_set_.insert(pair); |
416 } | 503 } |
417 } | 504 } |
418 | 505 |
419 return child; | 506 return child; |
420 } | 507 } |
421 | 508 |
422 void ThreadData::TallyADeath(const Births& birth, | 509 void ThreadData::TallyADeath(const Births& births, |
423 int32 queue_duration, | 510 int32 queue_duration, |
424 const TaskStopwatch& stopwatch) { | 511 const TaskStopwatch& stopwatch) { |
425 int32 run_duration = stopwatch.RunDurationMs(); | 512 int32 run_duration = stopwatch.RunDurationMs(); |
426 | 513 |
427 // Stir in some randomness, plus add constant in case durations are zero. | 514 // Stir in some randomness, plus add constant in case durations are zero. |
428 const uint32 kSomePrimeNumber = 2147483647; | 515 const uint32 kSomePrimeNumber = 2147483647; |
429 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 516 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
430 // An address is going to have some randomness to it as well ;-). | 517 // An address is going to have some randomness to it as well ;-). |
431 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); | 518 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0)); |
432 | 519 |
433 // We don't have queue durations without OS timer. OS timer is automatically | 520 // We don't have queue durations without OS timer. OS timer is automatically |
434 // used for task-post-timing, so the use of an alternate timer implies all | 521 // used for task-post-timing, so the use of an alternate timer implies all |
435 // queue times are invalid, unless it was explicitly said that we can trust | 522 // queue times are invalid, unless it was explicitly said that we can trust |
436 // the alternate timer. | 523 // the alternate timer. |
437 if (kAllowAlternateTimeSourceHandling && | 524 if (kAllowAlternateTimeSourceHandling && |
438 now_function_ && | 525 now_function_ && |
439 !now_function_is_time_) { | 526 !now_function_is_time_) { |
440 queue_duration = 0; | 527 queue_duration = 0; |
441 } | 528 } |
442 | 529 |
443 DeathMap::iterator it = death_map_.find(&birth); | 530 DeathMap::iterator it = death_map_.find(&births); |
444 DeathData* death_data; | 531 DeathData* death_data; |
445 if (it != death_map_.end()) { | 532 if (it != death_map_.end()) { |
446 death_data = &it->second; | 533 death_data = &it->second; |
447 } else { | 534 } else { |
448 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 535 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
449 death_data = &death_map_[&birth]; | 536 death_data = &death_map_[&births]; |
450 } // Release lock ASAP. | 537 } // Release lock ASAP. |
451 death_data->RecordDeath(queue_duration, run_duration, random_number_); | 538 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
452 | 539 |
453 if (!kTrackParentChildLinks) | 540 if (!kTrackParentChildLinks) |
454 return; | 541 return; |
455 if (!parent_stack_.empty()) { // We might get turned off. | 542 if (!parent_stack_.empty()) { // We might get turned off. |
456 DCHECK_EQ(parent_stack_.top(), &birth); | 543 DCHECK_EQ(parent_stack_.top(), &births); |
457 parent_stack_.pop(); | 544 parent_stack_.pop(); |
458 } | 545 } |
459 } | 546 } |
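The randomness recipe above (durations plus a large prime, XORed with the Births address) can be read as a cheap per-thread pseudo-random update; a standalone sketch of the same idea, with hypothetical names:

    #include <stdint.h>

    // Mix durations into the running state; the large prime keeps the state
    // moving even when both durations are zero, and the object address adds a
    // little per-call-site entropy.
    uint32_t StirRandom(uint32_t state,
                        int32_t queue_duration,
                        int32_t run_duration,
                        const void* births_address) {
      const uint32_t kSomePrimeNumber = 2147483647;  // 2^31 - 1, as in the CL
      state += static_cast<uint32_t>(queue_duration) +
               static_cast<uint32_t>(run_duration) + kSomePrimeNumber;
      state ^= static_cast<uint32_t>(reinterpret_cast<uintptr_t>(births_address));
      return state;
    }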
460 | 547 |
461 // static | 548 // static |
462 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 549 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
463 if (!TrackingStatus()) | 550 if (!TrackingStatus()) |
464 return NULL; | 551 return NULL; |
465 ThreadData* current_thread_data = Get(); | 552 ThreadData* current_thread_data = Get(); |
466 if (!current_thread_data) | 553 if (!current_thread_data) |
467 return NULL; | 554 return NULL; |
468 return current_thread_data->TallyABirth(location); | 555 return current_thread_data->TallyABirth(location); |
469 } | 556 } |
470 | 557 |
471 // static | 558 // static |
472 void ThreadData::TallyRunOnNamedThreadIfTracking( | 559 void ThreadData::TallyRunOnNamedThreadIfTracking( |
473 const base::TrackingInfo& completed_task, | 560 const base::TrackingInfo& completed_task, |
474 const TaskStopwatch& stopwatch) { | 561 const TaskStopwatch& stopwatch) { |
475 // Even if we have been DEACTIVATED, we will process any pending births so | 562 // Even if we have been DEACTIVATED, we will process any pending births so |
476 // that our data structures (which counted the outstanding births) remain | 563 // that our data structures (which counted the outstanding births) remain |
477 // consistent. | 564 // consistent. |
478 const Births* birth = completed_task.birth_tally; | 565 const Births* births = completed_task.birth_tally; |
479 if (!birth) | 566 if (!births) |
480 return; | 567 return; |
481 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 568 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
482 if (!current_thread_data) | 569 if (!current_thread_data) |
483 return; | 570 return; |
484 | 571 |
485 // Watch out for a race where status_ is changing, and hence one or both | 572 // Watch out for a race where status_ is changing, and hence one or both |
486 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 573 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
487 // get a time value since we "weren't tracking" and we were trying to be | 574 // get a time value since we "weren't tracking" and we were trying to be |
488 // efficient by not calling for a genuine time value. For simplicity, we'll | 575 // efficient by not calling for a genuine time value. For simplicity, we'll |
489 // use a default zero duration when we can't calculate a true value. | 576 // use a default zero duration when we can't calculate a true value. |
490 TrackedTime start_of_run = stopwatch.StartTime(); | 577 TrackedTime start_of_run = stopwatch.StartTime(); |
491 int32 queue_duration = 0; | 578 int32 queue_duration = 0; |
492 if (!start_of_run.is_null()) { | 579 if (!start_of_run.is_null()) { |
493 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 580 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
494 .InMilliseconds(); | 581 .InMilliseconds(); |
495 } | 582 } |
496 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 583 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
497 } | 584 } |
498 | 585 |
499 // static | 586 // static |
500 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 587 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
501 const Births* birth, | 588 const Births* births, |
502 const TrackedTime& time_posted, | 589 const TrackedTime& time_posted, |
503 const TaskStopwatch& stopwatch) { | 590 const TaskStopwatch& stopwatch) { |
504 // Even if we have been DEACTIVATED, we will process any pending births so | 591 // Even if we have been DEACTIVATED, we will process any pending births so |
505 // that our data structures (which counted the outstanding births) remain | 592 // that our data structures (which counted the outstanding births) remain |
506 // consistent. | 593 // consistent. |
507 if (!birth) | 594 if (!births) |
508 return; | 595 return; |
509 | 596 |
510 // TODO(jar): Support the option to coalesce all worker-thread activity under | 597 // TODO(jar): Support the option to coalesce all worker-thread activity under |
511 // one ThreadData instance that uses locks to protect *all* access. This will | 598 // one ThreadData instance that uses locks to protect *all* access. This will |
512 // reduce memory (making it provably bounded), but run incrementally slower | 599 // reduce memory (making it provably bounded), but run incrementally slower |
513 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 600 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
514 // that the locks on TallyADeath will be *after* the worker thread has run, | 601 // that the locks on TallyADeath will be *after* the worker thread has run, |
515 // and hence nothing will be waiting for the completion (... besides some | 602 // and hence nothing will be waiting for the completion (... besides some |
516 // other thread that might like to run). Also, the worker threads' tasks are | 603 // other thread that might like to run). Also, the worker threads' tasks are |
517 // generally longer, and hence the cost of the lock may perchance be amortized | 604 // generally longer, and hence the cost of the lock may perchance be amortized |
518 // over the long task's lifetime. | 605 // over the long task's lifetime. |
519 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 606 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
520 if (!current_thread_data) | 607 if (!current_thread_data) |
521 return; | 608 return; |
522 | 609 |
523 TrackedTime start_of_run = stopwatch.StartTime(); | 610 TrackedTime start_of_run = stopwatch.StartTime(); |
524 int32 queue_duration = 0; | 611 int32 queue_duration = 0; |
525 if (!start_of_run.is_null()) { | 612 if (!start_of_run.is_null()) { |
526 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 613 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
527 } | 614 } |
528 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 615 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
529 } | 616 } |
530 | 617 |
531 // static | 618 // static |
532 void ThreadData::TallyRunInAScopedRegionIfTracking( | 619 void ThreadData::TallyRunInAScopedRegionIfTracking( |
533 const Births* birth, | 620 const Births* births, |
534 const TaskStopwatch& stopwatch) { | 621 const TaskStopwatch& stopwatch) { |
535 // Even if we have been DEACTIVATED, we will process any pending births so | 622 // Even if we have been DEACTIVATED, we will process any pending births so |
536 // that our data structures (which counted the outstanding births) remain | 623 // that our data structures (which counted the outstanding births) remain |
537 // consistent. | 624 // consistent. |
538 if (!birth) | 625 if (!births) |
539 return; | 626 return; |
540 | 627 |
541 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 628 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
542 if (!current_thread_data) | 629 if (!current_thread_data) |
543 return; | 630 return; |
544 | 631 |
545 int32 queue_duration = 0; | 632 int32 queue_duration = 0; |
546 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 633 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
547 } | |
548 | |
549 // static | |
550 void ThreadData::SnapshotAllExecutedTasks( | |
551 ProcessDataPhaseSnapshot* process_data_phase, | |
552 BirthCountMap* birth_counts) { | |
553 // Get an unchanging copy of a ThreadData list. | |
554 ThreadData* my_list = ThreadData::first(); | |
555 | |
556 // Gather data serially. | |
557 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
558 // grabbing values without the protection of a lock, but it has the advantage | |
559 // of working even with threads that don't have message loops. If a user | |
560 // sees any strangeness, they can always just run their stats gathering a | |
561 // second time. | |
562 for (ThreadData* thread_data = my_list; | |
563 thread_data; | |
564 thread_data = thread_data->next()) { | |
565 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts); | |
566 } | |
567 } | |
568 | |
569 // static | |
570 void ThreadData::SnapshotCurrentPhase( | |
571 ProcessDataPhaseSnapshot* process_data_phase) { | |
572 // Add births that have run to completion to |collected_data|. | |
573 // |birth_counts| tracks the total number of births recorded at each location | |
574 // for which we have not seen a death count. | |
575 BirthCountMap birth_counts; | |
576 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts); | |
577 | |
578 // Add births that are still active -- i.e. objects that have tallied a birth, | |
579 // but have not yet tallied a matching death, and hence must be either | |
580 // running, queued up, or being held in limbo for future posting. | |
581 for (const auto& birth_count : birth_counts) { | |
582 if (birth_count.second > 0) { | |
583 process_data_phase->tasks.push_back(TaskSnapshot( | |
584 *birth_count.first, DeathData(birth_count.second), "Still_Alive")); | |
585 } | |
586 } | |
587 } | 634 } |
588 | 635 |
589 void ThreadData::SnapshotExecutedTasks( | 636 void ThreadData::SnapshotExecutedTasks( |
590 ProcessDataPhaseSnapshot* process_data_phase, | 637 int current_profiling_phase, |
638 PhasedProcessDataSnapshotMap* phased_process_data_snapshots, | |
591 BirthCountMap* birth_counts) { | 639 BirthCountMap* birth_counts) { |
592 // Get copy of data, so that the data will not change during the iterations | 640 // Get copy of data, so that the data will not change during the iterations |
593 // and processing. | 641 // and processing. |
594 ThreadData::BirthMap birth_map; | 642 BirthMap birth_map; |
595 ThreadData::DeathMap death_map; | 643 DeathsSnapshot deaths; |
596 ThreadData::ParentChildSet parent_child_set; | 644 ParentChildSet parent_child_set; |
597 SnapshotMaps(&birth_map, &death_map, &parent_child_set); | 645 SnapshotMaps(current_profiling_phase, &birth_map, &deaths, &parent_child_set); |
598 | |
599 for (const auto& death : death_map) { | |
600 process_data_phase->tasks.push_back( | |
601 TaskSnapshot(*death.first, death.second, thread_name())); | |
602 (*birth_counts)[death.first] -= death.first->birth_count(); | |
603 } | |
604 | 646 |
605 for (const auto& birth : birth_map) { | 647 for (const auto& birth : birth_map) { |
606 (*birth_counts)[birth.second] += birth.second->birth_count(); | 648 (*birth_counts)[birth.second] += birth.second->birth_count(); |
607 } | 649 } |
608 | 650 |
609 if (!kTrackParentChildLinks) | 651 for (const auto& death : deaths) { |
Alexei Svitkine (slow), 2015/04/09 15:39:06: Add a short comment above this outlining what this
vadimt, 2015/04/09 21:28:39: Done.
610 return; | 652 (*birth_counts)[death.first] -= death.first->birth_count(); |
611 | 653 |
612 for (const auto& parent_child : parent_child_set) { | 654 for (const DeathDataPhaseSnapshot* phase = &death.second; phase; |
Alexei Svitkine (slow), 2015/04/09 15:39:05: Is it intentional that this feature is being remov
vadimt, 2015/04/09 21:28:39: Not quite; fell through cracks :) I'd remove it th
613 process_data_phase->descendants.push_back( | 655 phase = phase->prev) { |
614 ParentChildPairSnapshot(parent_child)); | 656 DeathDataSnapshot death_data = phase->death_data; |
Alexei Svitkine (slow), 2015/04/09 15:39:06: I'm guessing it's important that you do this on th
vadimt, 2015/04/09 21:28:39: Done.
657 | |
658 if (phase->prev) | |
659 death_data.CalculateDelta(phase->prev->death_data); | |
660 | |
661 if (death_data.count > 0) { | |
662 (*phased_process_data_snapshots)[phase->profiling_phase] | |
663 .tasks.push_back(TaskSnapshot(BirthOnThreadSnapshot(*death.first), | |
664 death_data, thread_name())); | |
665 } | |
666 } | |
615 } | 667 } |
616 } | 668 } |
617 | 669 |
618 // This may be called from another thread. | 670 // This may be called from another thread. |
619 void ThreadData::SnapshotMaps(BirthMap* birth_map, | 671 void ThreadData::SnapshotMaps(int profiling_phase, |
620 DeathMap* death_map, | 672 BirthMap* birth_map, |
673 DeathsSnapshot* deaths, | |
621 ParentChildSet* parent_child_set) { | 674 ParentChildSet* parent_child_set) { |
622 base::AutoLock lock(map_lock_); | 675 base::AutoLock lock(map_lock_); |
676 | |
623 for (const auto& birth : birth_map_) | 677 for (const auto& birth : birth_map_) |
624 (*birth_map)[birth.first] = birth.second; | 678 (*birth_map)[birth.first] = birth.second; |
625 for (const auto& death : death_map_) | 679 |
626 (*death_map)[death.first] = death.second; | 680 for (const auto& death : death_map_) { |
681 deaths->push_back(DeathsSnapshot::value_type( | |
682 death.first, | |
683 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), | |
Alexei Svitkine (slow), 2015/04/09 15:39:06: To make this cleaner, how about adding a Snapshot(
vadimt, 2015/04/09 21:28:39: I'm not super-thrilled with the additional amount
Alexei Svitkine (slow), 2015/04/09 22:19:44: I believe modern compilers will actually optimize
vadimt, 2015/04/09 22:42:20: Given these not-quite-pleasant choices, the curren
Alexei Svitkine (slow), 2015/04/10 15:27:26: All right, though probably worth recording the rat
vadimt, 2015/04/14 15:52:05: Done. Also reordered declarations to avoid forward
684 death.second.run_duration_sum(), | |
685 death.second.run_duration_max(), | |
686 death.second.run_duration_sample(), | |
687 death.second.queue_duration_sum(), | |
688 death.second.queue_duration_max(), | |
689 death.second.queue_duration_sample(), | |
690 death.second.last_phase_snapshot()))); | |
691 } | |
627 | 692 |
628 if (!kTrackParentChildLinks) | 693 if (!kTrackParentChildLinks) |
629 return; | 694 return; |
630 | 695 |
631 for (const auto& parent_child : parent_child_set_) | 696 for (const auto& parent_child : parent_child_set_) |
632 parent_child_set->insert(parent_child); | 697 parent_child_set->insert(parent_child); |
633 } | 698 } |
634 | 699 |
700 void ThreadData::OnProfilingPhaseCompletionOnThread(int profiling_phase) { | |
701 base::AutoLock lock(map_lock_); | |
702 | |
703 for (auto& death : death_map_) { | |
704 death.second.OnProfilingPhaseCompleted(profiling_phase); | |
705 } | |
706 } | |
707 | |
635 static void OptionallyInitializeAlternateTimer() { | 708 static void OptionallyInitializeAlternateTimer() { |
636 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 709 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
637 if (alternate_time_source) | 710 if (alternate_time_source) |
638 ThreadData::SetAlternateTimeSource(alternate_time_source); | 711 ThreadData::SetAlternateTimeSource(alternate_time_source); |
639 } | 712 } |
640 | 713 |
641 bool ThreadData::Initialize() { | 714 bool ThreadData::Initialize() { |
642 if (status_ >= DEACTIVATED) | 715 if (status_ >= DEACTIVATED) |
643 return true; // Someone else did the initialization. | 716 return true; // Someone else did the initialization. |
644 // Due to racy lazy initialization in tests, we'll need to recheck status_ | 717 // Due to racy lazy initialization in tests, we'll need to recheck status_ |
(...skipping 257 matching lines...)
902 | 975 |
903 ThreadData* TaskStopwatch::GetThreadData() const { | 976 ThreadData* TaskStopwatch::GetThreadData() const { |
904 #if DCHECK_IS_ON() | 977 #if DCHECK_IS_ON() |
905 DCHECK(state_ != CREATED); | 978 DCHECK(state_ != CREATED); |
906 #endif | 979 #endif |
907 | 980 |
908 return current_thread_data_; | 981 return current_thread_data_; |
909 } | 982 } |
910 | 983 |
911 //------------------------------------------------------------------------------ | 984 //------------------------------------------------------------------------------ |
985 // DeathDataPhaseSnapshot | |
986 | |
987 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(int profiling_phase, | |
988 int count, | |
989 int32 run_duration_sum, | |
990 int32 run_duration_max, | |
991 int32 run_duration_sample, | |
992 int32 queue_duration_sum, | |
993 int32 queue_duration_max, | |
994 int32 queue_duration_sample, | |
995 DeathDataPhaseSnapshot* prev) | |
996 : profiling_phase(profiling_phase), | |
997 death_data(count, | |
998 run_duration_sum, | |
999 run_duration_max, | |
1000 run_duration_sample, | |
1001 queue_duration_sum, | |
1002 queue_duration_max, | |
1003 queue_duration_sample), | |
1004 prev(prev) { | |
1005 } | |
1006 | |
1007 //------------------------------------------------------------------------------ | |
1008 // TaskSnapshot | |
1009 | |
912 TaskSnapshot::TaskSnapshot() { | 1010 TaskSnapshot::TaskSnapshot() { |
913 } | 1011 } |
914 | 1012 |
915 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, | 1013 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, |
916 const DeathData& death_data, | 1014 const DeathDataSnapshot& death_data, |
917 const std::string& death_thread_name) | 1015 const std::string& death_thread_name) |
918 : birth(birth), | 1016 : birth(birth), |
919 death_data(death_data), | 1017 death_data(death_data), |
920 death_thread_name(death_thread_name) { | 1018 death_thread_name(death_thread_name) { |
921 } | 1019 } |
922 | 1020 |
923 TaskSnapshot::~TaskSnapshot() { | 1021 TaskSnapshot::~TaskSnapshot() { |
924 } | 1022 } |
925 | 1023 |
926 //------------------------------------------------------------------------------ | 1024 //------------------------------------------------------------------------------ |
(...skipping 28 matching lines...)
955 : process_id(base::GetCurrentProcId()) { | 1053 : process_id(base::GetCurrentProcId()) { |
956 #else | 1054 #else |
957 : process_id(base::kNullProcessId) { | 1055 : process_id(base::kNullProcessId) { |
958 #endif | 1056 #endif |
959 } | 1057 } |
960 | 1058 |
961 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1059 ProcessDataSnapshot::~ProcessDataSnapshot() { |
962 } | 1060 } |
963 | 1061 |
964 } // namespace tracked_objects | 1062 } // namespace tracked_objects |