OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <limits.h> | 7 #include <limits.h> |
8 #include <stdlib.h> | 8 #include <stdlib.h> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
(...skipping 19 matching lines...) | |
30 namespace { | 30 namespace { |
31 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is | 31 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is |
32 // negligible, enable by default. | 32 // negligible, enable by default. |
33 // Flag to compile out parent-child link recording. | 33 // Flag to compile out parent-child link recording. |
34 const bool kTrackParentChildLinks = false; | 34 const bool kTrackParentChildLinks = false; |
35 | 35 |
36 // When ThreadData is first initialized, should we start in an ACTIVE state to | 36 // When ThreadData is first initialized, should we start in an ACTIVE state to |
37 // record all of the startup-time tasks, or should we start up DEACTIVATED, so | 37 // record all of the startup-time tasks, or should we start up DEACTIVATED, so |
38 // that we only record after parsing the command line flag --enable-tracking. | 38 // that we only record after parsing the command line flag --enable-tracking. |
39 // Note that the flag may force either state, so this really controls only the | 39 // Note that the flag may force either state, so this really controls only the |
40 // period of time up until that flag is parsed. If there is no flag seen, then | 40 // period of time up until that flag is parsed. If there is no flag seen, then |
41 // this state may prevail for much or all of the process lifetime. | 41 // this state may prevail for much or all of the process lifetime. |
42 const ThreadData::Status kInitialStartupState = | 42 const ThreadData::Status kInitialStartupState = |
43 ThreadData::PROFILING_CHILDREN_ACTIVE; | 43 ThreadData::PROFILING_CHILDREN_ACTIVE; |
44 | 44 |
45 // Control whether an alternate time source (Now() function) is supported by | 45 // Control whether an alternate time source (Now() function) is supported by |
46 // the ThreadData class. This compile time flag should be set to true if we | 46 // the ThreadData class. This compile time flag should be set to true if we |
47 // want other modules (such as a memory allocator, or a thread-specific CPU time | 47 // want other modules (such as a memory allocator, or a thread-specific CPU time |
48 // clock) to be able to provide a thread-specific Now() function. Without this | 48 // clock) to be able to provide a thread-specific Now() function. Without this |
49 // compile-time flag, the code will only support the wall-clock time. This flag | 49 // compile-time flag, the code will only support the wall-clock time. This flag |
50 // can be flipped to efficiently disable this path (if there is a performance | 50 // can be flipped to efficiently disable this path (if there is a performance |
51 // problem with its presence). | 51 // problem with its presence). |
52 static const bool kAllowAlternateTimeSourceHandling = true; | 52 static const bool kAllowAlternateTimeSourceHandling = true; |
53 | 53 |
54 // Possible states of the profiler timing enabledness. | 54 // Possible states of the profiler timing enabledness. |
55 enum { | 55 enum { |
56 UNDEFINED_TIMING, | 56 UNDEFINED_TIMING, |
57 ENABLED_TIMING, | 57 ENABLED_TIMING, |
58 DISABLED_TIMING, | 58 DISABLED_TIMING, |
59 }; | 59 }; |
60 | 60 |
61 // State of the profiler timing enabledness. | 61 // State of the profiler timing enabledness. |
62 base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING; | 62 base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING; |
63 | 63 |
64 // Returns whether profiler timing is enabled. The default is true, but this may | 64 // Returns whether profiler timing is enabled. The default is true, but this |
65 // be overridden by a command-line flag. Some platforms may programmatically set | 65 // may be overridden by a command-line flag. Some platforms may |
66 // this command-line flag to the "off" value if it's not specified. | 66 // programmatically set this command-line flag to the "off" value if it's not |
67 // specified. | |
67 // This in turn can be overridden by explicitly calling | 68 // This in turn can be overridden by explicitly calling |
68 // ThreadData::EnableProfilerTiming, say, based on a field trial. | 69 // ThreadData::EnableProfilerTiming, say, based on a field trial. |
69 inline bool IsProfilerTimingEnabled() { | 70 inline bool IsProfilerTimingEnabled() { |
70 // Reading |g_profiler_timing_enabled| is done without barrier because | 71 // Reading |g_profiler_timing_enabled| is done without barrier because |
71 // multiple initialization is not an issue while the barrier can be relatively | 72 // multiple initialization is not an issue while the barrier can be relatively |
72 // costly given that this method is sometimes called in a tight loop. | 73 // costly given that this method is sometimes called in a tight loop. |
73 base::subtle::Atomic32 current_timing_enabled = | 74 base::subtle::Atomic32 current_timing_enabled = |
74 base::subtle::NoBarrier_Load(&g_profiler_timing_enabled); | 75 base::subtle::NoBarrier_Load(&g_profiler_timing_enabled); |
75 if (current_timing_enabled == UNDEFINED_TIMING) { | 76 if (current_timing_enabled == UNDEFINED_TIMING) { |
76 if (!base::CommandLine::InitializedForCurrentProcess()) | 77 if (!base::CommandLine::InitializedForCurrentProcess()) |
77 return true; | 78 return true; |
78 current_timing_enabled = | 79 current_timing_enabled = |
79 (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | 80 (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( |
80 switches::kProfilerTiming) == | 81 switches::kProfilerTiming) == |
81 switches::kProfilerTimingDisabledValue) | 82 switches::kProfilerTimingDisabledValue) |
82 ? DISABLED_TIMING | 83 ? DISABLED_TIMING |
83 : ENABLED_TIMING; | 84 : ENABLED_TIMING; |
84 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, | 85 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, |
85 current_timing_enabled); | 86 current_timing_enabled); |
86 } | 87 } |
87 return current_timing_enabled == ENABLED_TIMING; | 88 return current_timing_enabled == ENABLED_TIMING; |
88 } | 89 } |
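
The lazy, barrier-free initialization above is a deliberate "benign race": several threads may race to compute the flag, but they all compute the same value, so whichever store lands last is harmless. A minimal standalone sketch of the same pattern (ExpensiveComputeFlag is a hypothetical placeholder, not Chromium code):

    #include "base/atomicops.h"

    bool ExpensiveComputeFlag();  // Hypothetical; stands in for flag parsing.

    base::subtle::Atomic32 g_flag = 0;  // 0 == not yet computed.

    bool IsFlagSet() {
      // NoBarrier ops suffice: every racing thread computes and stores the
      // same value, so "losing" the race changes nothing observable.
      base::subtle::Atomic32 v = base::subtle::NoBarrier_Load(&g_flag);
      if (v == 0) {
        v = ExpensiveComputeFlag() ? 1 : 2;
        base::subtle::NoBarrier_Store(&g_flag, v);
      }
      return v == 1;
    }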
89 | 90 |
90 } // namespace | 91 } // namespace |
91 | 92 |
92 //------------------------------------------------------------------------------ | 93 //------------------------------------------------------------------------------ |
93 // DeathData tallies durations when a death takes place. | 94 // DeathData tallies durations when a death takes place. |
94 | 95 |
95 DeathData::DeathData() { | 96 DeathData::DeathData() |
96 Clear(); | 97 : count_(0), |
98 sample_probability_count_(0), | |
99 run_duration_sum_(0), | |
100 queue_duration_sum_(0), | |
101 run_duration_max_(0), | |
102 queue_duration_max_(0), | |
103 run_duration_sample_(0), | |
104 queue_duration_sample_(0), | |
105 last_phase_snapshot_(nullptr) { | |
97 } | 106 } |
98 | 107 |
99 DeathData::DeathData(int count) { | 108 DeathData::DeathData(const DeathData& other) |
100 Clear(); | 109 : count_(other.count_), |
101 count_ = count; | 110 sample_probability_count_(other.sample_probability_count_), |
111 run_duration_sum_(other.run_duration_sum_), | |
112 queue_duration_sum_(other.queue_duration_sum_), | |
113 run_duration_max_(other.run_duration_max_), | |
114 queue_duration_max_(other.queue_duration_max_), | |
115 run_duration_sample_(other.run_duration_sample_), | |
116 queue_duration_sample_(other.queue_duration_sample_), | |
117 last_phase_snapshot_(nullptr) { | |
118 // This constructor will be used by std::map when adding new DeathData values | |
119 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't | |
120 // need to worry about ownership transfer. | |
121 DCHECK(other.last_phase_snapshot_ == nullptr); | |
122 } | |
123 | |
124 DeathData::~DeathData() { | |
125 while (last_phase_snapshot_) { | |
126 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; | |
127 last_phase_snapshot_ = snapshot->prev; | |
128 delete snapshot; | |
129 } | |
102 } | 130 } |
103 | 131 |
104 // TODO(jar): I need to see if this macro to optimize branching is worth using. | 132 // TODO(jar): I need to see if this macro to optimize branching is worth using. |
105 // | 133 // |
106 // This macro has no branching, so it is surely fast, and is equivalent to: | 134 // This macro has no branching, so it is surely fast, and is equivalent to: |
107 // if (assign_it) | 135 // if (assign_it) |
108 // target = source; | 136 // target = source; |
109 // We use a macro rather than a template to force this to inline. | 137 // We use a macro rather than a template to force this to inline. |
110 // Related code for calculating max is discussed on the web. | 138 // Related code for calculating max is discussed on the web. |
111 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ | 139 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
112 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) | 140 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) |
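
The macro's behavior for both values of assign_it can be checked with a tiny standalone program (illustrative only, using int32_t in place of Chromium's int32):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t target = 5, source = 9;
      // assign_it != 0: the mask -1 is all ones, so the full XOR difference
      // is applied and target becomes source.
      target ^= (target ^ source) & -static_cast<int32_t>(1);
      assert(target == 9);
      // assign_it == 0: the mask is all zeros, so target is unchanged.
      target = 5;
      target ^= (target ^ source) & -static_cast<int32_t>(0);
      assert(target == 5);
      return 0;
    }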
113 | 141 |
114 void DeathData::RecordDeath(const int32 queue_duration, | 142 void DeathData::RecordDeath(const int32 queue_duration, |
115 const int32 run_duration, | 143 const int32 run_duration, |
116 const uint32 random_number) { | 144 const uint32 random_number) { |
117 // We'll just clamp at INT_MAX, but we should note this in the UI as such. | 145 // We'll just clamp at INT_MAX, but we should note this in the UI as such. |
118 if (count_ < INT_MAX) | 146 if (count_ < INT_MAX) |
119 ++count_; | 147 ++count_; |
148 | |
149 int sample_probability_count = sample_probability_count_; | |
Dmitry Vyukov 2015/04/28 04:14:15: This is still racy and can cause all the same effects…
vadimt 2015/04/28 15:15:37: Please note that we use local variable sample_probability_count…
Dmitry Vyukov 2015/04/28 15:24:26: This code contains a data race, sample_probability_count_…
| |
150 if (sample_probability_count < INT_MAX) | |
151 ++sample_probability_count; | |
152 sample_probability_count_ = sample_probability_count; | |
153 | |
120 queue_duration_sum_ += queue_duration; | 154 queue_duration_sum_ += queue_duration; |
121 run_duration_sum_ += run_duration; | 155 run_duration_sum_ += run_duration; |
122 | 156 |
123 if (queue_duration_max_ < queue_duration) | 157 if (queue_duration_max_ < queue_duration) |
124 queue_duration_max_ = queue_duration; | 158 queue_duration_max_ = queue_duration; |
125 if (run_duration_max_ < run_duration) | 159 if (run_duration_max_ < run_duration) |
126 run_duration_max_ = run_duration; | 160 run_duration_max_ = run_duration; |
127 | 161 |
128 // Take a uniformly distributed sample over all durations ever supplied. | 162 // Take a uniformly distributed sample over all durations ever supplied during |
129 // The probability that we (instead) use this new sample is 1/count_. This | 163 // the current profiling phase. |
130 // results in a completely uniform selection of the sample (at least when we | 164 // The probability that we (instead) use this new sample is |
131 // don't clamp count_... but that should be inconsequentially likely). | 165 // 1/sample_probability_count_. This results in a completely uniform selection |
132 // We ignore the fact that we correlated our selection of a sample to the run | 166 // of the sample (at least when we don't clamp sample_probability_count_... |
133 // and queue times (i.e., we used them to generate random_number). | 167 // but that should be inconsequentially likely). We ignore the fact that we |
134 CHECK_GT(count_, 0); | 168 // correlated our selection of a sample to the run and queue times (i.e., we |
135 if (0 == (random_number % count_)) { | 169 // used them to generate random_number). |
170 CHECK_GT(sample_probability_count, 0); | |
171 if (0 == (random_number % sample_probability_count)) { | |
136 queue_duration_sample_ = queue_duration; | 172 queue_duration_sample_ = queue_duration; |
137 run_duration_sample_ = run_duration; | 173 run_duration_sample_ = run_duration; |
138 } | 174 } |
139 } | 175 } |
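
The sampling scheme above is size-1 reservoir sampling: keeping the i-th observation with probability 1/i leaves every observation equally likely to be the one retained. A standalone sketch of the same idea (hypothetical class, not part of this CL):

    #include <cstdint>
    #include <cstdlib>

    class UniformSampler {
     public:
      void Record(int32_t value) {
        ++count_;
        if (std::rand() % count_ == 0)  // True with probability 1/count_.
          sample_ = value;
      }
      int32_t sample() const { return sample_; }

     private:
      int count_ = 0;
      int32_t sample_ = 0;
    };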
140 | 176 |
141 int DeathData::count() const { return count_; } | 177 int DeathData::count() const { return count_; } |
142 | 178 |
143 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } | 179 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } |
144 | 180 |
145 int32 DeathData::run_duration_max() const { return run_duration_max_; } | 181 int32 DeathData::run_duration_max() const { return run_duration_max_; } |
146 | 182 |
147 int32 DeathData::run_duration_sample() const { | 183 int32 DeathData::run_duration_sample() const { |
148 return run_duration_sample_; | 184 return run_duration_sample_; |
149 } | 185 } |
150 | 186 |
151 int32 DeathData::queue_duration_sum() const { | 187 int32 DeathData::queue_duration_sum() const { |
152 return queue_duration_sum_; | 188 return queue_duration_sum_; |
153 } | 189 } |
154 | 190 |
155 int32 DeathData::queue_duration_max() const { | 191 int32 DeathData::queue_duration_max() const { |
156 return queue_duration_max_; | 192 return queue_duration_max_; |
157 } | 193 } |
158 | 194 |
159 int32 DeathData::queue_duration_sample() const { | 195 int32 DeathData::queue_duration_sample() const { |
160 return queue_duration_sample_; | 196 return queue_duration_sample_; |
161 } | 197 } |
162 | 198 |
163 void DeathData::Clear() { | 199 const DeathDataPhaseSnapshot* DeathData::last_phase_snapshot() const { |
164 count_ = 0; | 200 return last_phase_snapshot_; |
165 run_duration_sum_ = 0; | 201 } |
202 | |
203 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { | |
204 // Snapshotting and storing current state. | |
205 last_phase_snapshot_ = new DeathDataPhaseSnapshot( | |
206 profiling_phase, count_, run_duration_sum_, run_duration_max_, | |
207 run_duration_sample_, queue_duration_sum_, queue_duration_max_, | |
208 queue_duration_sample_, last_phase_snapshot_); | |
209 | |
210 // Not touching fields for which a delta can be computed by comparing with a | |
211 // snapshot from the previous phase. Resetting other fields. Sample values | |
212 // will be reset upon next death recording because sample_probability_count_ | |
213 // is set to 0. | |
214 // We avoid resetting to 0 in favor of deltas whenever possible. The reason | |
215 // is that for incrementable fields, resetting to 0 from the snapshot thread | |
216 // potentially in parallel with incrementing in the death thread may result in | |
217 // significant data corruption that has a potential to grow with time. Not | |
218 // resetting incrementable fields and using deltas will cause any | |
219 // off-by-little corruptions to be likely fixed at the next snapshot. | |
220 // The max values are not incrementable, and cannot be deduced using deltas | |
221 // for a given phase. Hence, we have to reset them to 0. But the potential | |
222 // damage is limited to getting the previous phase's max to apply for the next | |
223 // phase, and the error doesn't have a potential to keep growing with new | |
224 // resets. | |
225 // sample_probability_count_ is incrementable, but must be reset to 0 at the | |
226 // phase end, so that we start a new uniformly randomized sample selection | |
227 // after the reset. Corruptions due to race conditions are possible, but the | |
228 // damage is limited to selecting a wrong sample, which is not something that | |
229 // can cause accumulating or cascading effects. | |
230 // If there were no corruptions caused by race conditions, we never send a | |
231 // sample for the previous phase in the next phase's snapshot because | |
232 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. | |
233 sample_probability_count_ = 0; | |
Dmitry Vyukov 2015/04/28 04:14:15: This store needs to be an atomic store.
vadimt 2015/04/28 15:15:37: We should try avoiding using barriers. Performance…
Dmitry Vyukov 2015/04/28 15:24:26: It is you who said about barriers. I did not.
| |
166 run_duration_max_ = 0; | 234 run_duration_max_ = 0; |
167 run_duration_sample_ = 0; | |
168 queue_duration_sum_ = 0; | |
169 queue_duration_max_ = 0; | 235 queue_duration_max_ = 0; |
170 queue_duration_sample_ = 0; | |
171 } | 236 } |
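
For reference only: the atomic store the reviewer asks for above would look roughly like the sketch below, under the assumption that sample_probability_count_ were retyped as base::subtle::Atomic32 (in this patch it is a plain int, which is what the thread is debating):

    #include "base/atomicops.h"

    // Hypothetical retyped field; in the patch it is a plain int.
    base::subtle::Atomic32 sample_probability_count_ = 0;

    void ResetSampleProbabilityCount() {
      // An atomic store keeps a concurrent reader in RecordDeath from
      // observing a torn value.
      base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
    }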
172 | 237 |
173 //------------------------------------------------------------------------------ | 238 //------------------------------------------------------------------------------ |
174 DeathDataSnapshot::DeathDataSnapshot() | 239 DeathDataSnapshot::DeathDataSnapshot() |
175 : count(-1), | 240 : count(-1), |
176 run_duration_sum(-1), | 241 run_duration_sum(-1), |
177 run_duration_max(-1), | 242 run_duration_max(-1), |
178 run_duration_sample(-1), | 243 run_duration_sample(-1), |
179 queue_duration_sum(-1), | 244 queue_duration_sum(-1), |
180 queue_duration_max(-1), | 245 queue_duration_max(-1), |
181 queue_duration_sample(-1) { | 246 queue_duration_sample(-1) { |
182 } | 247 } |
183 | 248 |
184 DeathDataSnapshot::DeathDataSnapshot( | 249 DeathDataSnapshot::DeathDataSnapshot(int count, |
185 const tracked_objects::DeathData& death_data) | 250 int32 run_duration_sum, |
186 : count(death_data.count()), | 251 int32 run_duration_max, |
187 run_duration_sum(death_data.run_duration_sum()), | 252 int32 run_duration_sample, |
188 run_duration_max(death_data.run_duration_max()), | 253 int32 queue_duration_sum, |
189 run_duration_sample(death_data.run_duration_sample()), | 254 int32 queue_duration_max, |
190 queue_duration_sum(death_data.queue_duration_sum()), | 255 int32 queue_duration_sample) |
191 queue_duration_max(death_data.queue_duration_max()), | 256 : count(count), |
192 queue_duration_sample(death_data.queue_duration_sample()) { | 257 run_duration_sum(run_duration_sum), |
258 run_duration_max(run_duration_max), | |
259 run_duration_sample(run_duration_sample), | |
260 queue_duration_sum(queue_duration_sum), | |
261 queue_duration_max(queue_duration_max), | |
262 queue_duration_sample(queue_duration_sample) { | |
193 } | 263 } |
194 | 264 |
195 DeathDataSnapshot::~DeathDataSnapshot() { | 265 DeathDataSnapshot::~DeathDataSnapshot() { |
196 } | 266 } |
197 | 267 |
268 DeathDataSnapshot DeathDataSnapshot::Delta( | |
269 const DeathDataSnapshot& older) const { | |
270 return DeathDataSnapshot(count - older.count, | |
271 run_duration_sum - older.run_duration_sum, | |
272 run_duration_max, run_duration_sample, | |
273 queue_duration_sum - older.queue_duration_sum, | |
274 queue_duration_max, queue_duration_sample); | |
275 } | |
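
A quick worked example of Delta(), using the DeathDataSnapshot constructor introduced above: additive fields subtract, while max and sample fields pass through from the newer snapshot (the numbers are made up for illustration):

    DeathDataSnapshot current(10, 500, 90, 45, 800, 120, 60);
    DeathDataSnapshot previous(4, 200, 80, 30, 300, 110, 50);
    DeathDataSnapshot delta = current.Delta(previous);
    // delta.count == 6 (10 - 4), delta.run_duration_sum == 300 (500 - 200),
    // delta.queue_duration_sum == 500 (800 - 300); run_duration_max (90),
    // run_duration_sample (45), queue_duration_max (120) and
    // queue_duration_sample (60) come straight from the newer snapshot.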
276 | |
198 //------------------------------------------------------------------------------ | 277 //------------------------------------------------------------------------------ |
199 BirthOnThread::BirthOnThread(const Location& location, | 278 BirthOnThread::BirthOnThread(const Location& location, |
200 const ThreadData& current) | 279 const ThreadData& current) |
201 : location_(location), | 280 : location_(location), |
202 birth_thread_(¤t) { | 281 birth_thread_(¤t) { |
203 } | 282 } |
204 | 283 |
205 //------------------------------------------------------------------------------ | 284 //------------------------------------------------------------------------------ |
206 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { | 285 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { |
207 } | 286 } |
208 | 287 |
209 BirthOnThreadSnapshot::BirthOnThreadSnapshot( | 288 BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth) |
210 const tracked_objects::BirthOnThread& birth) | |
211 : location(birth.location()), | 289 : location(birth.location()), |
212 thread_name(birth.birth_thread()->thread_name()) { | 290 thread_name(birth.birth_thread()->thread_name()) { |
213 } | 291 } |
214 | 292 |
215 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { | 293 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { |
216 } | 294 } |
217 | 295 |
218 //------------------------------------------------------------------------------ | 296 //------------------------------------------------------------------------------ |
219 Births::Births(const Location& location, const ThreadData& current) | 297 Births::Births(const Location& location, const ThreadData& current) |
220 : BirthOnThread(location, current), | 298 : BirthOnThread(location, current), |
(...skipping 10 matching lines...) | |
231 // TODO(jar): We should pull all these static vars together, into a struct, and | 309 // TODO(jar): We should pull all these static vars together, into a struct, and |
232 // optimize layout so that we benefit from locality of reference during accesses | 310 // optimize layout so that we benefit from locality of reference during accesses |
233 // to them. | 311 // to them. |
234 | 312 |
235 // static | 313 // static |
236 NowFunction* ThreadData::now_function_ = NULL; | 314 NowFunction* ThreadData::now_function_ = NULL; |
237 | 315 |
238 // static | 316 // static |
239 bool ThreadData::now_function_is_time_ = false; | 317 bool ThreadData::now_function_is_time_ = false; |
240 | 318 |
241 // A TLS slot which points to the ThreadData instance for the current thread. We | 319 // A TLS slot which points to the ThreadData instance for the current thread. |
242 // do a fake initialization here (zeroing out data), and then the real in-place | 320 // We do a fake initialization here (zeroing out data), and then the real |
243 // construction happens when we call tls_index_.Initialize(). | 321 // in-place construction happens when we call tls_index_.Initialize(). |
244 // static | 322 // static |
245 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER; | 323 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER; |
246 | 324 |
247 // static | 325 // static |
248 int ThreadData::worker_thread_data_creation_count_ = 0; | 326 int ThreadData::worker_thread_data_creation_count_ = 0; |
249 | 327 |
250 // static | 328 // static |
251 int ThreadData::cleanup_count_ = 0; | 329 int ThreadData::cleanup_count_ = 0; |
252 | 330 |
253 // static | 331 // static |
(...skipping 27 matching lines...) | |
281 : next_(NULL), | 359 : next_(NULL), |
282 next_retired_worker_(NULL), | 360 next_retired_worker_(NULL), |
283 worker_thread_number_(thread_number), | 361 worker_thread_number_(thread_number), |
284 incarnation_count_for_pool_(-1), | 362 incarnation_count_for_pool_(-1), |
285 current_stopwatch_(NULL) { | 363 current_stopwatch_(NULL) { |
286 CHECK_GT(thread_number, 0); | 364 CHECK_GT(thread_number, 0); |
287 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); | 365 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
288 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 366 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
289 } | 367 } |
290 | 368 |
291 ThreadData::~ThreadData() {} | 369 ThreadData::~ThreadData() { |
370 } | |
292 | 371 |
293 void ThreadData::PushToHeadOfList() { | 372 void ThreadData::PushToHeadOfList() { |
294 // Toss in a hint of randomness (atop the uninitialized value). | 373 // Toss in a hint of randomness (atop the uninitialized value). |
295 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, | 374 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, |
296 sizeof(random_number_)); | 375 sizeof(random_number_)); |
297 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); | 376 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); |
298 random_number_ += static_cast<uint32>(this - static_cast<ThreadData*>(0)); | 377 random_number_ += static_cast<uint32>(this - static_cast<ThreadData*>(0)); |
299 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); | 378 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); |
300 | 379 |
301 DCHECK(!next_); | 380 DCHECK(!next_); |
(...skipping 52 matching lines...) | |
354 } | 433 } |
355 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); | 434 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); |
356 | 435 |
357 tls_index_.Set(worker_thread_data); | 436 tls_index_.Set(worker_thread_data); |
358 return worker_thread_data; | 437 return worker_thread_data; |
359 } | 438 } |
360 | 439 |
361 // static | 440 // static |
362 void ThreadData::OnThreadTermination(void* thread_data) { | 441 void ThreadData::OnThreadTermination(void* thread_data) { |
363 DCHECK(thread_data); // TLS should *never* call us with a NULL. | 442 DCHECK(thread_data); // TLS should *never* call us with a NULL. |
364 // We must NOT do any allocations during this callback. There is a chance | 443 // We must NOT do any allocations during this callback. There is a chance |
365 // that the allocator is no longer active on this thread. | 444 // that the allocator is no longer active on this thread. |
366 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 445 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
367 } | 446 } |
368 | 447 |
369 void ThreadData::OnThreadTerminationCleanup() { | 448 void ThreadData::OnThreadTerminationCleanup() { |
370 // The list_lock_ was created when we registered the callback, so it won't be | 449 // The list_lock_ was created when we registered the callback, so it won't be |
371 // allocated here despite the lazy reference. | 450 // allocated here despite the lazy reference. |
372 base::AutoLock lock(*list_lock_.Pointer()); | 451 base::AutoLock lock(*list_lock_.Pointer()); |
373 if (incarnation_counter_ != incarnation_count_for_pool_) | 452 if (incarnation_counter_ != incarnation_count_for_pool_) |
374 return; // ThreadData was constructed in an earlier unit test. | 453 return; // ThreadData was constructed in an earlier unit test. |
375 ++cleanup_count_; | 454 ++cleanup_count_; |
376 // Only worker threads need to be retired and reused. | 455 // Only worker threads need to be retired and reused. |
377 if (!worker_thread_number_) { | 456 if (!worker_thread_number_) { |
378 return; | 457 return; |
379 } | 458 } |
380 // We must NOT do any allocations during this callback. | 459 // We must NOT do any allocations during this callback. |
381 // Using the simple linked lists avoids all allocations. | 460 // Using the simple linked lists avoids all allocations. |
382 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 461 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
383 this->next_retired_worker_ = first_retired_worker_; | 462 this->next_retired_worker_ = first_retired_worker_; |
384 first_retired_worker_ = this; | 463 first_retired_worker_ = this; |
385 } | 464 } |
386 | 465 |
387 // static | 466 // static |
388 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { | 467 void ThreadData::Snapshot(int current_profiling_phase, |
389 ThreadData::SnapshotCurrentPhase( | 468 ProcessDataSnapshot* process_data_snapshot) { |
390 &process_data_snapshot->phased_process_data_snapshots[0]); | 469 // Get an unchanging copy of a ThreadData list. |
470 ThreadData* my_list = ThreadData::first(); | |
471 | |
472 // Gather data serially. | |
473 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
474 // grabbing values without the protection of a lock, but it has the advantage | |
475 // of working even with threads that don't have message loops. If a user | |
476 // sees any strangeness, they can always just run their stats gathering a | |
477 // second time. | |
478 BirthCountMap birth_counts; | |
479 for (ThreadData* thread_data = my_list; thread_data; | |
480 thread_data = thread_data->next()) { | |
481 thread_data->SnapshotExecutedTasks(current_profiling_phase, | |
482 &process_data_snapshot->phased_snapshots, | |
483 &birth_counts); | |
484 } | |
485 | |
486 // Add births that are still active -- i.e. objects that have tallied a birth, | |
487 // but have not yet tallied a matching death, and hence must be either | |
488 // running, queued up, or being held in limbo for future posting. | |
489 auto* current_phase_tasks = | |
490 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks; | |
491 for (const auto& birth_count : birth_counts) { | |
492 if (birth_count.second > 0) { | |
493 current_phase_tasks->push_back( | |
494 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first), | |
495 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), | |
496 "Still_Alive")); | |
497 } | |
498 } | |
499 } | |
500 | |
501 // static | |
502 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { | |
503 // Get an unchanging copy of a ThreadData list. | |
504 ThreadData* my_list = ThreadData::first(); | |
505 | |
506 // Add snapshots for all instances of death data in all threads serially. | |
507 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
508 // grabbing values without the protection of a lock, but it has the advantage | |
509 // of working even with threads that don't have message loops. Any corruption | |
510 // shouldn't cause "cascading damage" to anything else (in later phases). | |
511 for (ThreadData* thread_data = my_list; thread_data; | |
512 thread_data = thread_data->next()) { | |
513 thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase); | |
514 } | |
391 } | 515 } |
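
Putting the two static entry points together, a hedged usage sketch (assumes tracking is active and that phase numbering starts at 0):

    #include "base/tracked_objects.h"

    void CollectPhasedProfile() {
      // Close out phase 0: every thread's death data is snapshotted into
      // its per-phase list and the resettable fields are cleared.
      tracked_objects::ThreadData::OnProfilingPhaseCompleted(0);

      // ... tasks belonging to phase 1 run here ...

      // Collect deltas for the completed phase plus the current one.
      tracked_objects::ProcessDataSnapshot snapshot;
      tracked_objects::ThreadData::Snapshot(1, &snapshot);
    }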
392 | 516 |
393 Births* ThreadData::TallyABirth(const Location& location) { | 517 Births* ThreadData::TallyABirth(const Location& location) { |
394 BirthMap::iterator it = birth_map_.find(location); | 518 BirthMap::iterator it = birth_map_.find(location); |
395 Births* child; | 519 Births* child; |
396 if (it != birth_map_.end()) { | 520 if (it != birth_map_.end()) { |
397 child = it->second; | 521 child = it->second; |
398 child->RecordBirth(); | 522 child->RecordBirth(); |
399 } else { | 523 } else { |
400 child = new Births(location, *this); // Leak this. | 524 child = new Births(location, *this); // Leak this. |
(...skipping 11 matching lines...) Expand all Loading... | |
412 // Lock since the map may get relocated now, and other threads sometimes | 536 // Lock since the map may get relocated now, and other threads sometimes |
413 // snapshot it (but they lock before copying it). | 537 // snapshot it (but they lock before copying it). |
414 base::AutoLock lock(map_lock_); | 538 base::AutoLock lock(map_lock_); |
415 parent_child_set_.insert(pair); | 539 parent_child_set_.insert(pair); |
416 } | 540 } |
417 } | 541 } |
418 | 542 |
419 return child; | 543 return child; |
420 } | 544 } |
421 | 545 |
422 void ThreadData::TallyADeath(const Births& birth, | 546 void ThreadData::TallyADeath(const Births& births, |
423 int32 queue_duration, | 547 int32 queue_duration, |
424 const TaskStopwatch& stopwatch) { | 548 const TaskStopwatch& stopwatch) { |
425 int32 run_duration = stopwatch.RunDurationMs(); | 549 int32 run_duration = stopwatch.RunDurationMs(); |
426 | 550 |
427 // Stir in some randomness, plus add constant in case durations are zero. | 551 // Stir in some randomness, plus add constant in case durations are zero. |
428 const uint32 kSomePrimeNumber = 2147483647; | 552 const uint32 kSomePrimeNumber = 2147483647; |
429 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 553 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
430 // An address is going to have some randomness to it as well ;-). | 554 // An address is going to have some randomness to it as well ;-). |
431 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); | 555 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0)); |
432 | 556 |
433 // We don't have queue durations without OS timer. OS timer is automatically | 557 // We don't have queue durations without OS timer. OS timer is automatically |
434 // used for task-post-timing, so the use of an alternate timer implies all | 558 // used for task-post-timing, so the use of an alternate timer implies all |
435 // queue times are invalid, unless it was explicitly said that we can trust | 559 // queue times are invalid, unless it was explicitly said that we can trust |
436 // the alternate timer. | 560 // the alternate timer. |
437 if (kAllowAlternateTimeSourceHandling && | 561 if (kAllowAlternateTimeSourceHandling && |
438 now_function_ && | 562 now_function_ && |
439 !now_function_is_time_) { | 563 !now_function_is_time_) { |
440 queue_duration = 0; | 564 queue_duration = 0; |
441 } | 565 } |
442 | 566 |
443 DeathMap::iterator it = death_map_.find(&birth); | 567 DeathMap::iterator it = death_map_.find(&births); |
444 DeathData* death_data; | 568 DeathData* death_data; |
445 if (it != death_map_.end()) { | 569 if (it != death_map_.end()) { |
446 death_data = &it->second; | 570 death_data = &it->second; |
447 } else { | 571 } else { |
448 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 572 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
449 death_data = &death_map_[&birth]; | 573 death_data = &death_map_[&births]; |
450 } // Release lock ASAP. | 574 } // Release lock ASAP. |
451 death_data->RecordDeath(queue_duration, run_duration, random_number_); | 575 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
452 | 576 |
453 if (!kTrackParentChildLinks) | 577 if (!kTrackParentChildLinks) |
454 return; | 578 return; |
455 if (!parent_stack_.empty()) { // We might get turned off. | 579 if (!parent_stack_.empty()) { // We might get turned off. |
456 DCHECK_EQ(parent_stack_.top(), &birth); | 580 DCHECK_EQ(parent_stack_.top(), &births); |
457 parent_stack_.pop(); | 581 parent_stack_.pop(); |
458 } | 582 } |
459 } | 583 } |
460 | 584 |
461 // static | 585 // static |
462 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 586 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
463 if (!TrackingStatus()) | 587 if (!TrackingStatus()) |
464 return NULL; | 588 return NULL; |
465 ThreadData* current_thread_data = Get(); | 589 ThreadData* current_thread_data = Get(); |
466 if (!current_thread_data) | 590 if (!current_thread_data) |
467 return NULL; | 591 return NULL; |
468 return current_thread_data->TallyABirth(location); | 592 return current_thread_data->TallyABirth(location); |
469 } | 593 } |
470 | 594 |
471 // static | 595 // static |
472 void ThreadData::TallyRunOnNamedThreadIfTracking( | 596 void ThreadData::TallyRunOnNamedThreadIfTracking( |
473 const base::TrackingInfo& completed_task, | 597 const base::TrackingInfo& completed_task, |
474 const TaskStopwatch& stopwatch) { | 598 const TaskStopwatch& stopwatch) { |
475 // Even if we have been DEACTIVATED, we will process any pending births so | 599 // Even if we have been DEACTIVATED, we will process any pending births so |
476 // that our data structures (which counted the outstanding births) remain | 600 // that our data structures (which counted the outstanding births) remain |
477 // consistent. | 601 // consistent. |
478 const Births* birth = completed_task.birth_tally; | 602 const Births* births = completed_task.birth_tally; |
479 if (!birth) | 603 if (!births) |
480 return; | 604 return; |
481 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 605 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
482 if (!current_thread_data) | 606 if (!current_thread_data) |
483 return; | 607 return; |
484 | 608 |
485 // Watch out for a race where status_ is changing, and hence one or both | 609 // Watch out for a race where status_ is changing, and hence one or both |
486 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 610 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
487 // get a time value since we "weren't tracking" and we were trying to be | 611 // get a time value since we "weren't tracking" and we were trying to be |
488 // efficient by not calling for a genuine time value. For simplicity, we'll | 612 // efficient by not calling for a genuine time value. For simplicity, we'll |
489 // use a default zero duration when we can't calculate a true value. | 613 // use a default zero duration when we can't calculate a true value. |
490 TrackedTime start_of_run = stopwatch.StartTime(); | 614 TrackedTime start_of_run = stopwatch.StartTime(); |
491 int32 queue_duration = 0; | 615 int32 queue_duration = 0; |
492 if (!start_of_run.is_null()) { | 616 if (!start_of_run.is_null()) { |
493 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 617 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
494 .InMilliseconds(); | 618 .InMilliseconds(); |
495 } | 619 } |
496 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 620 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
497 } | 621 } |
498 | 622 |
499 // static | 623 // static |
500 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 624 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
501 const Births* birth, | 625 const Births* births, |
502 const TrackedTime& time_posted, | 626 const TrackedTime& time_posted, |
503 const TaskStopwatch& stopwatch) { | 627 const TaskStopwatch& stopwatch) { |
504 // Even if we have been DEACTIVATED, we will process any pending births so | 628 // Even if we have been DEACTIVATED, we will process any pending births so |
505 // that our data structures (which counted the outstanding births) remain | 629 // that our data structures (which counted the outstanding births) remain |
506 // consistent. | 630 // consistent. |
507 if (!birth) | 631 if (!births) |
508 return; | 632 return; |
509 | 633 |
510 // TODO(jar): Support the option to coalesce all worker-thread activity under | 634 // TODO(jar): Support the option to coalesce all worker-thread activity under |
511 // one ThreadData instance that uses locks to protect *all* access. This will | 635 // one ThreadData instance that uses locks to protect *all* access. This will |
512 // reduce memory (making it provably bounded), but run incrementally slower | 636 // reduce memory (making it provably bounded), but run incrementally slower |
513 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 637 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
514 // that the locks on TallyADeath will be *after* the worker thread has run, | 638 // that the locks on TallyADeath will be *after* the worker thread has run, |
515 // and hence nothing will be waiting for the completion (... besides some | 639 // and hence nothing will be waiting for the completion (... besides some |
516 // other thread that might like to run). Also, the worker threads tasks are | 640 // other thread that might like to run). Also, the worker threads tasks are |
517 // generally longer, and hence the cost of the lock may perchance be amortized | 641 // generally longer, and hence the cost of the lock may perchance be amortized |
518 // over the long task's lifetime. | 642 // over the long task's lifetime. |
519 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 643 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
520 if (!current_thread_data) | 644 if (!current_thread_data) |
521 return; | 645 return; |
522 | 646 |
523 TrackedTime start_of_run = stopwatch.StartTime(); | 647 TrackedTime start_of_run = stopwatch.StartTime(); |
524 int32 queue_duration = 0; | 648 int32 queue_duration = 0; |
525 if (!start_of_run.is_null()) { | 649 if (!start_of_run.is_null()) { |
526 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 650 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
527 } | 651 } |
528 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 652 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
529 } | 653 } |
530 | 654 |
531 // static | 655 // static |
532 void ThreadData::TallyRunInAScopedRegionIfTracking( | 656 void ThreadData::TallyRunInAScopedRegionIfTracking( |
533 const Births* birth, | 657 const Births* births, |
534 const TaskStopwatch& stopwatch) { | 658 const TaskStopwatch& stopwatch) { |
535 // Even if we have been DEACTIVATED, we will process any pending births so | 659 // Even if we have been DEACTIVATED, we will process any pending births so |
536 // that our data structures (which counted the outstanding births) remain | 660 // that our data structures (which counted the outstanding births) remain |
537 // consistent. | 661 // consistent. |
538 if (!birth) | 662 if (!births) |
539 return; | 663 return; |
540 | 664 |
541 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 665 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
542 if (!current_thread_data) | 666 if (!current_thread_data) |
543 return; | 667 return; |
544 | 668 |
545 int32 queue_duration = 0; | 669 int32 queue_duration = 0; |
546 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 670 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
547 } | |
548 | |
549 // static | |
550 void ThreadData::SnapshotAllExecutedTasks( | |
551 ProcessDataPhaseSnapshot* process_data_phase, | |
552 BirthCountMap* birth_counts) { | |
553 // Get an unchanging copy of a ThreadData list. | |
554 ThreadData* my_list = ThreadData::first(); | |
555 | |
556 // Gather data serially. | |
557 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
558 // grabbing values without the protection of a lock, but it has the advantage | |
559 // of working even with threads that don't have message loops. If a user | |
560 // sees any strangeness, they can always just run their stats gathering a | |
561 // second time. | |
562 for (ThreadData* thread_data = my_list; | |
563 thread_data; | |
564 thread_data = thread_data->next()) { | |
565 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts); | |
566 } | |
567 } | |
568 | |
569 // static | |
570 void ThreadData::SnapshotCurrentPhase( | |
571 ProcessDataPhaseSnapshot* process_data_phase) { | |
572 // Add births that have run to completion to |collected_data|. | |
573 // |birth_counts| tracks the total number of births recorded at each location | |
574 // for which we have not seen a death count. | |
575 BirthCountMap birth_counts; | |
576 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts); | |
577 | |
578 // Add births that are still active -- i.e. objects that have tallied a birth, | |
579 // but have not yet tallied a matching death, and hence must be either | |
580 // running, queued up, or being held in limbo for future posting. | |
581 for (const auto& birth_count : birth_counts) { | |
582 if (birth_count.second > 0) { | |
583 process_data_phase->tasks.push_back(TaskSnapshot( | |
584 *birth_count.first, DeathData(birth_count.second), "Still_Alive")); | |
585 } | |
586 } | |
587 } | 671 } |
588 | 672 |
589 void ThreadData::SnapshotExecutedTasks( | 673 void ThreadData::SnapshotExecutedTasks( |
590 ProcessDataPhaseSnapshot* process_data_phase, | 674 int current_profiling_phase, |
675 PhasedProcessDataSnapshotMap* phased_snapshots, | |
591 BirthCountMap* birth_counts) { | 676 BirthCountMap* birth_counts) { |
592 // Get copy of data, so that the data will not change during the iterations | 677 // Get copy of data, so that the data will not change during the iterations |
593 // and processing. | 678 // and processing. |
594 ThreadData::BirthMap birth_map; | 679 BirthMap birth_map; |
595 ThreadData::DeathMap death_map; | 680 DeathsSnapshot deaths; |
596 ThreadData::ParentChildSet parent_child_set; | 681 ParentChildSet parent_child_set; |
597 SnapshotMaps(&birth_map, &death_map, &parent_child_set); | 682 SnapshotMaps(current_profiling_phase, &birth_map, &deaths, &parent_child_set); |
598 | |
599 for (const auto& death : death_map) { | |
600 process_data_phase->tasks.push_back( | |
601 TaskSnapshot(*death.first, death.second, thread_name())); | |
602 (*birth_counts)[death.first] -= death.first->birth_count(); | |
603 } | |
604 | 683 |
605 for (const auto& birth : birth_map) { | 684 for (const auto& birth : birth_map) { |
606 (*birth_counts)[birth.second] += birth.second->birth_count(); | 685 (*birth_counts)[birth.second] += birth.second->birth_count(); |
607 } | 686 } |
608 | 687 |
609 if (!kTrackParentChildLinks) | 688 for (const auto& death : deaths) { |
610 return; | 689 (*birth_counts)[death.first] -= death.first->birth_count(); |
611 | 690 |
612 for (const auto& parent_child : parent_child_set) { | 691 // For the current death data, walk through all its snapshots, starting from |
613 process_data_phase->descendants.push_back( | 692 // the current one, then from the previous profiling phase etc., and for |
614 ParentChildPairSnapshot(parent_child)); | 693 // each snapshot calculate the delta between the snapshot and the previous |
694 // phase, if any. Store the deltas in the result. | |
695 for (const DeathDataPhaseSnapshot* phase = &death.second; phase; | |
696 phase = phase->prev) { | |
697 const DeathDataSnapshot& death_data = | |
698 phase->prev ? phase->death_data.Delta(phase->prev->death_data) | |
699 : phase->death_data; | |
700 | |
701 if (death_data.count > 0) { | |
702 (*phased_snapshots)[phase->profiling_phase].tasks.push_back( | |
703 TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data, | |
704 thread_name())); | |
705 } | |
706 } | |
615 } | 707 } |
616 } | 708 } |
617 | 709 |
618 // This may be called from another thread. | 710 // This may be called from another thread. |
619 void ThreadData::SnapshotMaps(BirthMap* birth_map, | 711 void ThreadData::SnapshotMaps(int profiling_phase, |
620 DeathMap* death_map, | 712 BirthMap* birth_map, |
713 DeathsSnapshot* deaths, | |
621 ParentChildSet* parent_child_set) { | 714 ParentChildSet* parent_child_set) { |
622 base::AutoLock lock(map_lock_); | 715 base::AutoLock lock(map_lock_); |
716 | |
623 for (const auto& birth : birth_map_) | 717 for (const auto& birth : birth_map_) |
624 (*birth_map)[birth.first] = birth.second; | 718 (*birth_map)[birth.first] = birth.second; |
625 for (const auto& death : death_map_) | 719 |
626 (*death_map)[death.first] = death.second; | 720 for (const auto& death : death_map_) { |
721 deaths->push_back(std::make_pair( | |
722 death.first, | |
723 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), | |
724 death.second.run_duration_sum(), | |
725 death.second.run_duration_max(), | |
726 death.second.run_duration_sample(), | |
727 death.second.queue_duration_sum(), | |
728 death.second.queue_duration_max(), | |
729 death.second.queue_duration_sample(), | |
730 death.second.last_phase_snapshot()))); | |
731 } | |
627 | 732 |
628 if (!kTrackParentChildLinks) | 733 if (!kTrackParentChildLinks) |
629 return; | 734 return; |
630 | 735 |
631 for (const auto& parent_child : parent_child_set_) | 736 for (const auto& parent_child : parent_child_set_) |
632 parent_child_set->insert(parent_child); | 737 parent_child_set->insert(parent_child); |
633 } | 738 } |
634 | 739 |
740 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) { | |
741 base::AutoLock lock(map_lock_); | |
742 | |
743 for (auto& death : death_map_) { | |
744 death.second.OnProfilingPhaseCompleted(profiling_phase); | |
745 } | |
746 } | |
747 | |
635 static void OptionallyInitializeAlternateTimer() { | 748 static void OptionallyInitializeAlternateTimer() { |
636 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 749 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
637 if (alternate_time_source) | 750 if (alternate_time_source) |
638 ThreadData::SetAlternateTimeSource(alternate_time_source); | 751 ThreadData::SetAlternateTimeSource(alternate_time_source); |
639 } | 752 } |
640 | 753 |
641 bool ThreadData::Initialize() { | 754 bool ThreadData::Initialize() { |
642 if (status_ >= DEACTIVATED) | 755 if (status_ >= DEACTIVATED) |
643 return true; // Someone else did the initialization. | 756 return true; // Someone else did the initialization. |
644 // Due to racy lazy initialization in tests, we'll need to recheck status_ | 757 // Due to racy lazy initialization in tests, we'll need to recheck status_ |
(...skipping 22 matching lines...) | |
667 return false; | 780 return false; |
668 } else { | 781 } else { |
669 // TLS was initialized for us earlier. | 782 // TLS was initialized for us earlier. |
670 DCHECK_EQ(status_, DORMANT_DURING_TESTS); | 783 DCHECK_EQ(status_, DORMANT_DURING_TESTS); |
671 } | 784 } |
672 | 785 |
673 // Incarnation counter is only significant to testing, as it otherwise will | 786 // Incarnation counter is only significant to testing, as it otherwise will |
674 // never again change in this process. | 787 // never again change in this process. |
675 ++incarnation_counter_; | 788 ++incarnation_counter_; |
676 | 789 |
677 // The lock is not critical for setting status_, but it doesn't hurt. It also | 790 // The lock is not critical for setting status_, but it doesn't hurt. It also |
678 // ensures that if we have a racy initialization, that we'll bail as soon as | 791 // ensures that if we have a racy initialization, that we'll bail as soon as |
679 // we get the lock earlier in this method. | 792 // we get the lock earlier in this method. |
680 status_ = kInitialStartupState; | 793 status_ = kInitialStartupState; |
681 if (!kTrackParentChildLinks && | 794 if (!kTrackParentChildLinks && |
682 kInitialStartupState == PROFILING_CHILDREN_ACTIVE) | 795 kInitialStartupState == PROFILING_CHILDREN_ACTIVE) |
683 status_ = PROFILING_ACTIVE; | 796 status_ = PROFILING_ACTIVE; |
684 DCHECK(status_ != UNINITIALIZED); | 797 DCHECK(status_ != UNINITIALIZED); |
685 return true; | 798 return true; |
686 } | 799 } |
687 | 800 |
(...skipping 214 matching lines...) | |
902 | 1015 |
903 ThreadData* TaskStopwatch::GetThreadData() const { | 1016 ThreadData* TaskStopwatch::GetThreadData() const { |
904 #if DCHECK_IS_ON() | 1017 #if DCHECK_IS_ON() |
905 DCHECK(state_ != CREATED); | 1018 DCHECK(state_ != CREATED); |
906 #endif | 1019 #endif |
907 | 1020 |
908 return current_thread_data_; | 1021 return current_thread_data_; |
909 } | 1022 } |
910 | 1023 |
911 //------------------------------------------------------------------------------ | 1024 //------------------------------------------------------------------------------ |
1025 // DeathDataPhaseSnapshot | |
1026 | |
1027 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( | |
1028 int profiling_phase, | |
1029 int count, | |
1030 int32 run_duration_sum, | |
1031 int32 run_duration_max, | |
1032 int32 run_duration_sample, | |
1033 int32 queue_duration_sum, | |
1034 int32 queue_duration_max, | |
1035 int32 queue_duration_sample, | |
1036 const DeathDataPhaseSnapshot* prev) | |
1037 : profiling_phase(profiling_phase), | |
1038 death_data(count, | |
1039 run_duration_sum, | |
1040 run_duration_max, | |
1041 run_duration_sample, | |
1042 queue_duration_sum, | |
1043 queue_duration_max, | |
1044 queue_duration_sample), | |
1045 prev(prev) { | |
1046 } | |
1047 | |
1048 //------------------------------------------------------------------------------ | |
1049 // TaskSnapshot | |
1050 | |
912 TaskSnapshot::TaskSnapshot() { | 1051 TaskSnapshot::TaskSnapshot() { |
913 } | 1052 } |
914 | 1053 |
915 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, | 1054 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, |
916 const DeathData& death_data, | 1055 const DeathDataSnapshot& death_data, |
917 const std::string& death_thread_name) | 1056 const std::string& death_thread_name) |
918 : birth(birth), | 1057 : birth(birth), |
919 death_data(death_data), | 1058 death_data(death_data), |
920 death_thread_name(death_thread_name) { | 1059 death_thread_name(death_thread_name) { |
921 } | 1060 } |
922 | 1061 |
923 TaskSnapshot::~TaskSnapshot() { | 1062 TaskSnapshot::~TaskSnapshot() { |
924 } | 1063 } |
925 | 1064 |
926 //------------------------------------------------------------------------------ | 1065 //------------------------------------------------------------------------------ |
(...skipping 28 matching lines...) | |
955 : process_id(base::GetCurrentProcId()) { | 1094 : process_id(base::GetCurrentProcId()) { |
956 #else | 1095 #else |
957 : process_id(base::kNullProcessId) { | 1096 : process_id(base::kNullProcessId) { |
958 #endif | 1097 #endif |
959 } | 1098 } |
960 | 1099 |
961 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1100 ProcessDataSnapshot::~ProcessDataSnapshot() { |
962 } | 1101 } |
963 | 1102 |
964 } // namespace tracked_objects | 1103 } // namespace tracked_objects |