| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <limits.h> | 7 #include <limits.h> |
| 8 #include <stdlib.h> | 8 #include <stdlib.h> |
| 9 | 9 |
| 10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
| (...skipping 10 matching lines...) |
| 21 | 21 |
| 22 using base::TimeDelta; | 22 using base::TimeDelta; |
| 23 | 23 |
| 24 namespace base { | 24 namespace base { |
| 25 class TimeDelta; | 25 class TimeDelta; |
| 26 } | 26 } |
| 27 | 27 |
| 28 namespace tracked_objects { | 28 namespace tracked_objects { |
| 29 | 29 |
| 30 namespace { | 30 namespace { |
| 31 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is | |
| 32 // negligible, enable by default. | |
| 33 // Flag to compile out parent-child link recording. | |
| 34 const bool kTrackParentChildLinks = false; | |
| 35 | |
| 36 // When ThreadData is first initialized, should we start in an ACTIVE state to | 31 // When ThreadData is first initialized, should we start in an ACTIVE state to |
| 37 // record all of the startup-time tasks, or should we start up DEACTIVATED, so | 32 // record all of the startup-time tasks, or should we start up DEACTIVATED, so |
| 38 // that we only record after parsing the command line flag --enable-tracking. | 33 // that we only record after parsing the command line flag --enable-tracking. |
| 39 // Note that the flag may force either state, so this really controls only the | 34 // Note that the flag may force either state, so this really controls only the |
| 40 // period of time up until that flag is parsed. If there is no flag seen, then | 35 // period of time up until that flag is parsed. If there is no flag seen, then |
| 41 // this state may prevail for much or all of the process lifetime. | 36 // this state may prevail for much or all of the process lifetime. |
| 42 const ThreadData::Status kInitialStartupState = | 37 const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE; |
| 43 ThreadData::PROFILING_CHILDREN_ACTIVE; | |
| 44 | 38 |
| 45 // Control whether an alternate time source (Now() function) is supported by | 39 // Control whether an alternate time source (Now() function) is supported by |
| 46 // the ThreadData class. This compile time flag should be set to true if we | 40 // the ThreadData class. This compile time flag should be set to true if we |
| 47 // want other modules (such as a memory allocator, or a thread-specific CPU time | 41 // want other modules (such as a memory allocator, or a thread-specific CPU time |
| 48 // clock) to be able to provide a thread-specific Now() function. Without this | 42 // clock) to be able to provide a thread-specific Now() function. Without this |
| 49 // compile-time flag, the code will only support the wall-clock time. This flag | 43 // compile-time flag, the code will only support the wall-clock time. This flag |
| 50 // can be flipped to efficiently disable this path (if there is a performance | 44 // can be flipped to efficiently disable this path (if there is a performance |
| 51 // problem with its presence). | 45 // problem with its presence). |
| 52 static const bool kAllowAlternateTimeSourceHandling = true; | 46 static const bool kAllowAlternateTimeSourceHandling = true; |
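
A hedged aside on how such an alternate source might be plugged in, assuming NowFunction is a no-argument function returning TrackedTime (as the now_function_ declaration below suggests); ThreadCpuNow is a hypothetical name:

```cpp
#include "base/time/time.h"
#include "base/tracked_objects.h"

// Hypothetical thread-specific clock. The TrackedTime(TimeTicks) conversion
// is assumed to match the one used by this file's default Now() path.
tracked_objects::TrackedTime ThreadCpuNow() {
  return tracked_objects::TrackedTime(base::TimeTicks::ThreadNow());
}

void InstallThreadCpuClock() {
  // Must run early in startup, before tasks are tracked.
  tracked_objects::ThreadData::SetAlternateTimeSource(&ThreadCpuNow);
}
```
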
| 53 | 47 |
| 54 // Possible states of the profiler timing enabledness. | 48 // Possible states of the profiler timing enabledness. |
| 55 enum { | 49 enum { |
| 56 UNDEFINED_TIMING, | 50 UNDEFINED_TIMING, |
| 57 ENABLED_TIMING, | 51 ENABLED_TIMING, |
| 58 DISABLED_TIMING, | 52 DISABLED_TIMING, |
| 59 }; | 53 }; |
| 60 | 54 |
| 61 // State of the profiler timing enabledness. | 55 // State of the profiler timing enabledness. |
| 62 base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING; | 56 base::subtle::Atomic32 g_profiler_timing_enabled = UNDEFINED_TIMING; |
| 63 | 57 |
| 64 // Returns whether profiler timing is enabled. The default is true, but this may | 58 // Returns whether profiler timing is enabled. The default is true, but this |
| 65 // be overridden by a command-line flag. Some platforms may programmatically set | 59 // may be overridden by a command-line flag. Some platforms may |
| 66 // this command-line flag to the "off" value if it's not specified. | 60 // programmatically set this command-line flag to the "off" value if it's not |
| | 61 // specified. |
| 67 // This in turn can be overridden by explicitly calling | 62 // This in turn can be overridden by explicitly calling |
| 68 // ThreadData::EnableProfilerTiming, say, based on a field trial. | 63 // ThreadData::EnableProfilerTiming, say, based on a field trial. |
| 69 inline bool IsProfilerTimingEnabled() { | 64 inline bool IsProfilerTimingEnabled() { |
| 70 // Reading |g_profiler_timing_enabled| is done without barrier because | 65 // Reading |g_profiler_timing_enabled| is done without barrier because |
| 71 // multiple initialization is not an issue while the barrier can be relatively | 66 // multiple initialization is not an issue while the barrier can be relatively |
| 72 // costly given that this method is sometimes called in a tight loop. | 67 // costly given that this method is sometimes called in a tight loop. |
| 73 base::subtle::Atomic32 current_timing_enabled = | 68 base::subtle::Atomic32 current_timing_enabled = |
| 74 base::subtle::NoBarrier_Load(&g_profiler_timing_enabled); | 69 base::subtle::NoBarrier_Load(&g_profiler_timing_enabled); |
| 75 if (current_timing_enabled == UNDEFINED_TIMING) { | 70 if (current_timing_enabled == UNDEFINED_TIMING) { |
| 76 if (!base::CommandLine::InitializedForCurrentProcess()) | 71 if (!base::CommandLine::InitializedForCurrentProcess()) |
| 77 return true; | 72 return true; |
| 78 current_timing_enabled = | 73 current_timing_enabled = |
| 79 (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | 74 (base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( |
| 80 switches::kProfilerTiming) == | 75 switches::kProfilerTiming) == |
| 81 switches::kProfilerTimingDisabledValue) | 76 switches::kProfilerTimingDisabledValue) |
| 82 ? DISABLED_TIMING | 77 ? DISABLED_TIMING |
| 83 : ENABLED_TIMING; | 78 : ENABLED_TIMING; |
| 84 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, | 79 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, |
| 85 current_timing_enabled); | 80 current_timing_enabled); |
| 86 } | 81 } |
| 87 return current_timing_enabled == ENABLED_TIMING; | 82 return current_timing_enabled == ENABLED_TIMING; |
| 88 } | 83 } |
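
As the comment above notes, the cached command-line decision can be overridden later; a minimal sketch, assuming a field trial whose name and group ("ProfilerTiming" / "Enabled") are hypothetical:

```cpp
#include "base/metrics/field_trial.h"
#include "base/tracked_objects.h"

void MaybeEnableProfilerTimingFromTrial() {
  // Force timing on for clients in the enabled group, regardless of what
  // IsProfilerTimingEnabled() cached from the command line.
  if (base::FieldTrialList::FindFullName("ProfilerTiming") == "Enabled")
    tracked_objects::ThreadData::EnableProfilerTiming();
}
```
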
| 89 | 84 |
| 90 } // namespace | 85 } // namespace |
| 91 | 86 |
| 92 //------------------------------------------------------------------------------ | 87 //------------------------------------------------------------------------------ |
| 93 // DeathData tallies durations when a death takes place. | 88 // DeathData tallies durations when a death takes place. |
| 94 | 89 |
| 95 DeathData::DeathData() { | 90 DeathData::DeathData() |
| 96 Clear(); | 91 : count_(0), |
| 92 sample_probability_count_(0), |
| 93 run_duration_sum_(0), |
| 94 queue_duration_sum_(0), |
| 95 run_duration_max_(0), |
| 96 queue_duration_max_(0), |
| 97 run_duration_sample_(0), |
| 98 queue_duration_sample_(0), |
| 99 last_phase_snapshot_(nullptr) { |
| 97 } | 100 } |
| 98 | 101 |
| 99 DeathData::DeathData(int count) { | 102 DeathData::DeathData(const DeathData& other) |
| 100 Clear(); | 103 : count_(other.count_), |
| 101 count_ = count; | 104 sample_probability_count_(other.sample_probability_count_), |
| 105 run_duration_sum_(other.run_duration_sum_), |
| 106 queue_duration_sum_(other.queue_duration_sum_), |
| 107 run_duration_max_(other.run_duration_max_), |
| 108 queue_duration_max_(other.queue_duration_max_), |
| 109 run_duration_sample_(other.run_duration_sample_), |
| 110 queue_duration_sample_(other.queue_duration_sample_), |
| 111 last_phase_snapshot_(nullptr) { |
| 112 // This constructor will be used by std::map when adding new DeathData values |
| 113 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't |
| 114 // need to worry about ownership transfer. |
| 115 DCHECK(other.last_phase_snapshot_ == nullptr); |
| 116 } |
| 117 |
| 118 DeathData::~DeathData() { |
| 119 while (last_phase_snapshot_) { |
| 120 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; |
| 121 last_phase_snapshot_ = snapshot->prev; |
| 122 delete snapshot; |
| 123 } |
| 102 } | 124 } |
| 103 | 125 |
| 104 // TODO(jar): I need to see if this macro to optimize branching is worth using. | 126 // TODO(jar): I need to see if this macro to optimize branching is worth using. |
| 105 // | 127 // |
| 106 // This macro has no branching, so it is surely fast, and is equivalent to: | 128 // This macro has no branching, so it is surely fast, and is equivalent to: |
| 107 // if (assign_it) | 129 // if (assign_it) |
| 108 // target = source; | 130 // target = source; |
| 109 // We use a macro rather than a template to force this to inline. | 131 // We use a macro rather than a template to force this to inline. |
| 110 // Related code for calculating max is discussed on the web. | 132 // Related code for calculating max is discussed on the web. |
| 111 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ | 133 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
| 112 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) | 134 ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it)) |
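
A quick worked check of the branchless form, with values chosen for illustration:

```cpp
int32 target = 5;
int32 source = 9;
// assign_it == true:  the mask is all ones, so target ^= (target ^ source),
//                     i.e. 5 ^ (5 ^ 9) == 9.
// assign_it == false: the mask is zero, so target is XORed with 0 and is
//                     left unchanged.
CONDITIONAL_ASSIGN(true, target, source);   // target is now 9.
CONDITIONAL_ASSIGN(false, target, source);  // target stays 9.
```
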
| 113 | 135 |
| 114 void DeathData::RecordDeath(const int32 queue_duration, | 136 void DeathData::RecordDeath(const int32 queue_duration, |
| 115 const int32 run_duration, | 137 const int32 run_duration, |
| 116 const uint32 random_number) { | 138 const uint32 random_number) { |
| 117 // We'll just clamp at INT_MAX, but we should note this in the UI as such. | 139 // We'll just clamp at INT_MAX, but we should note this in the UI as such. |
| 118 if (count_ < INT_MAX) | 140 if (count_ < INT_MAX) |
| 119 ++count_; | 141 ++count_; |
| 142 |
| 143 int sample_probability_count = sample_probability_count_; |
| 144 if (sample_probability_count < INT_MAX) |
| 145 ++sample_probability_count; |
| 146 sample_probability_count_ = sample_probability_count; |
| 147 |
| 120 queue_duration_sum_ += queue_duration; | 148 queue_duration_sum_ += queue_duration; |
| 121 run_duration_sum_ += run_duration; | 149 run_duration_sum_ += run_duration; |
| 122 | 150 |
| 123 if (queue_duration_max_ < queue_duration) | 151 if (queue_duration_max_ < queue_duration) |
| 124 queue_duration_max_ = queue_duration; | 152 queue_duration_max_ = queue_duration; |
| 125 if (run_duration_max_ < run_duration) | 153 if (run_duration_max_ < run_duration) |
| 126 run_duration_max_ = run_duration; | 154 run_duration_max_ = run_duration; |
| 127 | 155 |
| 128 // Take a uniformly distributed sample over all durations ever supplied. | 156 // Take a uniformly distributed sample over all durations ever supplied during |
| 129 // The probability that we (instead) use this new sample is 1/count_. This | 157 // the current profiling phase. |
| 130 // results in a completely uniform selection of the sample (at least when we | 158 // The probability that we (instead) use this new sample is |
| 131 // don't clamp count_... but that should be inconsequentially likely). | 159 // 1/sample_probability_count_. This results in a completely uniform selection |
| 132 // We ignore the fact that we correlated our selection of a sample to the run | 160 // of the sample (at least when we don't clamp sample_probability_count_... |
| 133 // and queue times (i.e., we used them to generate random_number). | 161 // but that should be inconsequentially likely). We ignore the fact that we |
| 134 CHECK_GT(count_, 0); | 162 // correlated our selection of a sample to the run and queue times (i.e., we |
| 135 if (0 == (random_number % count_)) { | 163 // used them to generate random_number). |
| 164 CHECK_GT(sample_probability_count, 0); |
| 165 if (0 == (random_number % sample_probability_count)) { |
| 136 queue_duration_sample_ = queue_duration; | 166 queue_duration_sample_ = queue_duration; |
| 137 run_duration_sample_ = run_duration; | 167 run_duration_sample_ = run_duration; |
| 138 } | 168 } |
| 139 } | 169 } |
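
The sampling above is single-element reservoir sampling; a standalone sketch of the same idea, using rand() where the production code uses the caller-supplied random_number:

```cpp
#include <cstdint>
#include <cstdlib>

struct SampleReservoir {
  int count = 0;
  int32_t sample = 0;
  void Offer(int32_t value) {
    ++count;
    if (rand() % count == 0)  // Probability 1/count of replacing the sample.
      sample = value;
  }
};
// After n calls to Offer(), each offered value has had an equal 1/n chance
// of being the retained sample.
```
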
| 140 | 170 |
| 141 int DeathData::count() const { return count_; } | 171 int DeathData::count() const { return count_; } |
| 142 | 172 |
| 143 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } | 173 int32 DeathData::run_duration_sum() const { return run_duration_sum_; } |
| 144 | 174 |
| 145 int32 DeathData::run_duration_max() const { return run_duration_max_; } | 175 int32 DeathData::run_duration_max() const { return run_duration_max_; } |
| 146 | 176 |
| 147 int32 DeathData::run_duration_sample() const { | 177 int32 DeathData::run_duration_sample() const { |
| 148 return run_duration_sample_; | 178 return run_duration_sample_; |
| 149 } | 179 } |
| 150 | 180 |
| 151 int32 DeathData::queue_duration_sum() const { | 181 int32 DeathData::queue_duration_sum() const { |
| 152 return queue_duration_sum_; | 182 return queue_duration_sum_; |
| 153 } | 183 } |
| 154 | 184 |
| 155 int32 DeathData::queue_duration_max() const { | 185 int32 DeathData::queue_duration_max() const { |
| 156 return queue_duration_max_; | 186 return queue_duration_max_; |
| 157 } | 187 } |
| 158 | 188 |
| 159 int32 DeathData::queue_duration_sample() const { | 189 int32 DeathData::queue_duration_sample() const { |
| 160 return queue_duration_sample_; | 190 return queue_duration_sample_; |
| 161 } | 191 } |
| 162 | 192 |
| 163 void DeathData::Clear() { | 193 const DeathDataPhaseSnapshot* DeathData::last_phase_snapshot() const { |
| 164 count_ = 0; | 194 return last_phase_snapshot_; |
| 165 run_duration_sum_ = 0; | 195 } |
| 196 |
| 197 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { |
| 198 // Snapshotting and storing current state. |
| 199 last_phase_snapshot_ = new DeathDataPhaseSnapshot( |
| 200 profiling_phase, count_, run_duration_sum_, run_duration_max_, |
| 201 run_duration_sample_, queue_duration_sum_, queue_duration_max_, |
| 202 queue_duration_sample_, last_phase_snapshot_); |
| 203 |
| 204 // Not touching fields for which a delta can be computed by comparing with a |
| 205 // snapshot from the previous phase. Resetting other fields. Sample values |
| 206 // will be reset upon next death recording because sample_probability_count_ |
| 207 // is set to 0. |
| 208 // We avoid resetting to 0 in favor of deltas whenever possible. The reason |
| 209 // is that for incrementable fields, resetting to 0 from the snapshot thread |
| 210 // potentially in parallel with incrementing in the death thread may result in |
| 211 // significant data corruption that has a potential to grow with time. Not |
| 212 // resetting incrementable fields and using deltas will cause any |
| 213 // off-by-little corruptions to be likely fixed at the next snapshot. |
| 214 // The max values are not incrementable, and cannot be deduced using deltas |
| 215 // for a given phase. Hence, we have to reset them to 0. But the potential |
| 216 // damage is limited to getting the previous phase's max to apply for the next |
| 217 // phase, and the error doesn't have a potential to keep growing with new |
| 218 // resets. |
| 219 // sample_probability_count_ is incrementable, but must be reset to 0 at the |
| 220 // phase end, so that we start a new uniformly randomized sample selection |
| 221 // after the reset. Corruptions due to race conditions are possible, but the |
| 222 // damage is limited to selecting a wrong sample, which is not something that |
| 223 // can cause accumulating or cascading effects. |
| 224 // If there were no corruptions caused by race conditions, we never send a |
| 225 // sample for the previous phase in the next phase's snapshot because |
| 226 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. |
| 227 sample_probability_count_ = 0; |
| 166 run_duration_max_ = 0; | 228 run_duration_max_ = 0; |
| 167 run_duration_sample_ = 0; | |
| 168 queue_duration_sum_ = 0; | |
| 169 queue_duration_max_ = 0; | 229 queue_duration_max_ = 0; |
| 170 queue_duration_sample_ = 0; | |
| 171 } | 230 } |
| 172 | 231 |
| 173 //------------------------------------------------------------------------------ | 232 //------------------------------------------------------------------------------ |
| 174 DeathDataSnapshot::DeathDataSnapshot() | 233 DeathDataSnapshot::DeathDataSnapshot() |
| 175 : count(-1), | 234 : count(-1), |
| 176 run_duration_sum(-1), | 235 run_duration_sum(-1), |
| 177 run_duration_max(-1), | 236 run_duration_max(-1), |
| 178 run_duration_sample(-1), | 237 run_duration_sample(-1), |
| 179 queue_duration_sum(-1), | 238 queue_duration_sum(-1), |
| 180 queue_duration_max(-1), | 239 queue_duration_max(-1), |
| 181 queue_duration_sample(-1) { | 240 queue_duration_sample(-1) { |
| 182 } | 241 } |
| 183 | 242 |
| 184 DeathDataSnapshot::DeathDataSnapshot( | 243 DeathDataSnapshot::DeathDataSnapshot(int count, |
| 185 const tracked_objects::DeathData& death_data) | 244 int32 run_duration_sum, |
| 186 : count(death_data.count()), | 245 int32 run_duration_max, |
| 187 run_duration_sum(death_data.run_duration_sum()), | 246 int32 run_duration_sample, |
| 188 run_duration_max(death_data.run_duration_max()), | 247 int32 queue_duration_sum, |
| 189 run_duration_sample(death_data.run_duration_sample()), | 248 int32 queue_duration_max, |
| 190 queue_duration_sum(death_data.queue_duration_sum()), | 249 int32 queue_duration_sample) |
| 191 queue_duration_max(death_data.queue_duration_max()), | 250 : count(count), |
| 192 queue_duration_sample(death_data.queue_duration_sample()) { | 251 run_duration_sum(run_duration_sum), |
| 252 run_duration_max(run_duration_max), |
| 253 run_duration_sample(run_duration_sample), |
| 254 queue_duration_sum(queue_duration_sum), |
| 255 queue_duration_max(queue_duration_max), |
| 256 queue_duration_sample(queue_duration_sample) { |
| 193 } | 257 } |
| 194 | 258 |
| 195 DeathDataSnapshot::~DeathDataSnapshot() { | 259 DeathDataSnapshot::~DeathDataSnapshot() { |
| 196 } | 260 } |
| 197 | 261 |
| 262 DeathDataSnapshot DeathDataSnapshot::Delta( |
| 263 const DeathDataSnapshot& older) const { |
| 264 return DeathDataSnapshot(count - older.count, |
| 265 run_duration_sum - older.run_duration_sum, |
| 266 run_duration_max, run_duration_sample, |
| 267 queue_duration_sum - older.queue_duration_sum, |
| 268 queue_duration_max, queue_duration_sample); |
| 269 } |
| 270 |
| 198 //------------------------------------------------------------------------------ | 271 //------------------------------------------------------------------------------ |
| 199 BirthOnThread::BirthOnThread(const Location& location, | 272 BirthOnThread::BirthOnThread(const Location& location, |
| 200 const ThreadData& current) | 273 const ThreadData& current) |
| 201 : location_(location), | 274 : location_(location), |
| 202 birth_thread_(&current) { | 275 birth_thread_(&current) { |
| 203 } | 276 } |
| 204 | 277 |
| 205 //------------------------------------------------------------------------------ | 278 //------------------------------------------------------------------------------ |
| 206 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { | 279 BirthOnThreadSnapshot::BirthOnThreadSnapshot() { |
| 207 } | 280 } |
| 208 | 281 |
| 209 BirthOnThreadSnapshot::BirthOnThreadSnapshot( | 282 BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth) |
| 210 const tracked_objects::BirthOnThread& birth) | |
| 211 : location(birth.location()), | 283 : location(birth.location()), |
| 212 thread_name(birth.birth_thread()->thread_name()) { | 284 thread_name(birth.birth_thread()->thread_name()) { |
| 213 } | 285 } |
| 214 | 286 |
| 215 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { | 287 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() { |
| 216 } | 288 } |
| 217 | 289 |
| 218 //------------------------------------------------------------------------------ | 290 //------------------------------------------------------------------------------ |
| 219 Births::Births(const Location& location, const ThreadData& current) | 291 Births::Births(const Location& location, const ThreadData& current) |
| 220 : BirthOnThread(location, current), | 292 : BirthOnThread(location, current), |
| (...skipping 10 matching lines...) |
| 231 // TODO(jar): We should pull all these static vars together, into a struct, and | 303 // TODO(jar): We should pull all these static vars together, into a struct, and |
| 232 // optimize layout so that we benefit from locality of reference during accesses | 304 // optimize layout so that we benefit from locality of reference during accesses |
| 233 // to them. | 305 // to them. |
| 234 | 306 |
| 235 // static | 307 // static |
| 236 NowFunction* ThreadData::now_function_ = NULL; | 308 NowFunction* ThreadData::now_function_ = NULL; |
| 237 | 309 |
| 238 // static | 310 // static |
| 239 bool ThreadData::now_function_is_time_ = false; | 311 bool ThreadData::now_function_is_time_ = false; |
| 240 | 312 |
| 241 // A TLS slot which points to the ThreadData instance for the current thread. We | 313 // A TLS slot which points to the ThreadData instance for the current thread. |
| 242 // do a fake initialization here (zeroing out data), and then the real in-place | 314 // We do a fake initialization here (zeroing out data), and then the real |
| 243 // construction happens when we call tls_index_.Initialize(). | 315 // in-place construction happens when we call tls_index_.Initialize(). |
| 244 // static | 316 // static |
| 245 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER; | 317 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER; |
| 246 | 318 |
| 247 // static | 319 // static |
| 248 int ThreadData::worker_thread_data_creation_count_ = 0; | 320 int ThreadData::worker_thread_data_creation_count_ = 0; |
| 249 | 321 |
| 250 // static | 322 // static |
| 251 int ThreadData::cleanup_count_ = 0; | 323 int ThreadData::cleanup_count_ = 0; |
| 252 | 324 |
| 253 // static | 325 // static |
| (...skipping 27 matching lines...) |
| 281 : next_(NULL), | 353 : next_(NULL), |
| 282 next_retired_worker_(NULL), | 354 next_retired_worker_(NULL), |
| 283 worker_thread_number_(thread_number), | 355 worker_thread_number_(thread_number), |
| 284 incarnation_count_for_pool_(-1), | 356 incarnation_count_for_pool_(-1), |
| 285 current_stopwatch_(NULL) { | 357 current_stopwatch_(NULL) { |
| 286 CHECK_GT(thread_number, 0); | 358 CHECK_GT(thread_number, 0); |
| 287 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); | 359 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
| 288 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 360 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
| 289 } | 361 } |
| 290 | 362 |
| 291 ThreadData::~ThreadData() {} | 363 ThreadData::~ThreadData() { |
| 364 } |
| 292 | 365 |
| 293 void ThreadData::PushToHeadOfList() { | 366 void ThreadData::PushToHeadOfList() { |
| 294 // Toss in a hint of randomness (atop the uninitialized value). | 367 // Toss in a hint of randomness (atop the uninitialized value). |
| 295 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, | 368 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, |
| 296 sizeof(random_number_)); | 369 sizeof(random_number_)); |
| 297 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); | 370 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); |
| 298 random_number_ += static_cast<uint32>(this - static_cast<ThreadData*>(0)); | 371 random_number_ += static_cast<uint32>(this - static_cast<ThreadData*>(0)); |
| 299 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); | 372 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); |
| 300 | 373 |
| 301 DCHECK(!next_); | 374 DCHECK(!next_); |
| (...skipping 52 matching lines...) |
| 354 } | 427 } |
| 355 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); | 428 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); |
| 356 | 429 |
| 357 tls_index_.Set(worker_thread_data); | 430 tls_index_.Set(worker_thread_data); |
| 358 return worker_thread_data; | 431 return worker_thread_data; |
| 359 } | 432 } |
| 360 | 433 |
| 361 // static | 434 // static |
| 362 void ThreadData::OnThreadTermination(void* thread_data) { | 435 void ThreadData::OnThreadTermination(void* thread_data) { |
| 363 DCHECK(thread_data); // TLS should *never* call us with a NULL. | 436 DCHECK(thread_data); // TLS should *never* call us with a NULL. |
| 364 // We must NOT do any allocations during this callback. There is a chance | 437 // We must NOT do any allocations during this callback. There is a chance |
| 365 // that the allocator is no longer active on this thread. | 438 // that the allocator is no longer active on this thread. |
| 366 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 439 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
| 367 } | 440 } |
| 368 | 441 |
| 369 void ThreadData::OnThreadTerminationCleanup() { | 442 void ThreadData::OnThreadTerminationCleanup() { |
| 370 // The list_lock_ was created when we registered the callback, so it won't be | 443 // The list_lock_ was created when we registered the callback, so it won't be |
| 371 // allocated here despite the lazy reference. | 444 // allocated here despite the lazy reference. |
| 372 base::AutoLock lock(*list_lock_.Pointer()); | 445 base::AutoLock lock(*list_lock_.Pointer()); |
| 373 if (incarnation_counter_ != incarnation_count_for_pool_) | 446 if (incarnation_counter_ != incarnation_count_for_pool_) |
| 374 return; // ThreadData was constructed in an earlier unit test. | 447 return; // ThreadData was constructed in an earlier unit test. |
| 375 ++cleanup_count_; | 448 ++cleanup_count_; |
| 376 // Only worker threads need to be retired and reused. | 449 // Only worker threads need to be retired and reused. |
| 377 if (!worker_thread_number_) { | 450 if (!worker_thread_number_) { |
| 378 return; | 451 return; |
| 379 } | 452 } |
| 380 // We must NOT do any allocations during this callback. | 453 // We must NOT do any allocations during this callback. |
| 381 // Using the simple linked lists avoids all allocations. | 454 // Using the simple linked lists avoids all allocations. |
| 382 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 455 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
| 383 this->next_retired_worker_ = first_retired_worker_; | 456 this->next_retired_worker_ = first_retired_worker_; |
| 384 first_retired_worker_ = this; | 457 first_retired_worker_ = this; |
| 385 } | 458 } |
| 386 | 459 |
| 387 // static | 460 // static |
| 388 void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) { | 461 void ThreadData::Snapshot(int current_profiling_phase, |
| 389 ThreadData::SnapshotCurrentPhase( | 462 ProcessDataSnapshot* process_data_snapshot) { |
| 390 &process_data_snapshot->phased_process_data_snapshots[0]); | 463 // Get an unchanging copy of a ThreadData list. |
| 464 ThreadData* my_list = ThreadData::first(); |
| 465 |
| 466 // Gather data serially. |
| 467 // This hackish approach *can* get some slightly corrupt tallies, as we are |
| 468 // grabbing values without the protection of a lock, but it has the advantage |
| 469 // of working even with threads that don't have message loops. If a user |
| 470 // sees any strangeness, they can always just run their stats gathering a |
| 471 // second time. |
| 472 BirthCountMap birth_counts; |
| 473 for (ThreadData* thread_data = my_list; thread_data; |
| 474 thread_data = thread_data->next()) { |
| 475 thread_data->SnapshotExecutedTasks(current_profiling_phase, |
| 476 &process_data_snapshot->phased_snapshots, |
| 477 &birth_counts); |
| 478 } |
| 479 |
| 480 // Add births that are still active -- i.e. objects that have tallied a birth, |
| 481 // but have not yet tallied a matching death, and hence must be either |
| 482 // running, queued up, or being held in limbo for future posting. |
| 483 auto* current_phase_tasks = |
| 484 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks; |
| 485 for (const auto& birth_count : birth_counts) { |
| 486 if (birth_count.second > 0) { |
| 487 current_phase_tasks->push_back( |
| 488 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first), |
| 489 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), |
| 490 "Still_Alive")); |
| 491 } |
| 492 } |
| 493 } |
| 494 |
| 495 // static |
| 496 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { |
| 497 // Get an unchanging copy of a ThreadData list. |
| 498 ThreadData* my_list = ThreadData::first(); |
| 499 |
| 500 // Add snapshots for all instances of death data in all threads serially. |
| 501 // This hackish approach *can* get some slightly corrupt tallies, as we are |
| 502 // grabbing values without the protection of a lock, but it has the advantage |
| 503 // of working even with threads that don't have message loops. Any corruption |
| 504 // shouldn't cause "cascading damage" to anything else (in later phases). |
| 505 for (ThreadData* thread_data = my_list; thread_data; |
| 506 thread_data = thread_data->next()) { |
| 507 thread_data->OnProfilingPhaseCompletedOnThread(profiling_phase); |
| 508 } |
| 391 } | 509 } |
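
Taken together, these two statics define the phase lifecycle; a hedged end-to-end sketch (phase numbers are simply the caller's bookkeeping, starting at 0):

```cpp
using tracked_objects::ThreadData;

ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
// ... tasks run and are tallied during phase 0 ...
ThreadData::OnProfilingPhaseCompleted(0);  // Freeze phase 0 into snapshots.
// ... more tasks run during phase 1 ...
tracked_objects::ProcessDataSnapshot snapshot;
ThreadData::Snapshot(/*current_profiling_phase=*/1, &snapshot);
// snapshot.phased_snapshots now holds separate per-phase task tallies for
// phases 0 and 1.
```
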
| 392 | 510 |
| 393 Births* ThreadData::TallyABirth(const Location& location) { | 511 Births* ThreadData::TallyABirth(const Location& location) { |
| 394 BirthMap::iterator it = birth_map_.find(location); | 512 BirthMap::iterator it = birth_map_.find(location); |
| 395 Births* child; | 513 Births* child; |
| 396 if (it != birth_map_.end()) { | 514 if (it != birth_map_.end()) { |
| 397 child = it->second; | 515 child = it->second; |
| 398 child->RecordBirth(); | 516 child->RecordBirth(); |
| 399 } else { | 517 } else { |
| 400 child = new Births(location, *this); // Leak this. | 518 child = new Births(location, *this); // Leak this. |
| 401 // Lock since the map may get relocated now, and other threads sometimes | 519 // Lock since the map may get relocated now, and other threads sometimes |
| 402 // snapshot it (but they lock before copying it). | 520 // snapshot it (but they lock before copying it). |
| 403 base::AutoLock lock(map_lock_); | 521 base::AutoLock lock(map_lock_); |
| 404 birth_map_[location] = child; | 522 birth_map_[location] = child; |
| 405 } | 523 } |
| 406 | 524 |
| 407 if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE && | |
| 408 !parent_stack_.empty()) { | |
| 409 const Births* parent = parent_stack_.top(); | |
| 410 ParentChildPair pair(parent, child); | |
| 411 if (parent_child_set_.find(pair) == parent_child_set_.end()) { | |
| 412 // Lock since the map may get relocated now, and other threads sometimes | |
| 413 // snapshot it (but they lock before copying it). | |
| 414 base::AutoLock lock(map_lock_); | |
| 415 parent_child_set_.insert(pair); | |
| 416 } | |
| 417 } | |
| 418 | |
| 419 return child; | 525 return child; |
| 420 } | 526 } |
| 421 | 527 |
| 422 void ThreadData::TallyADeath(const Births& birth, | 528 void ThreadData::TallyADeath(const Births& births, |
| 423 int32 queue_duration, | 529 int32 queue_duration, |
| 424 const TaskStopwatch& stopwatch) { | 530 const TaskStopwatch& stopwatch) { |
| 425 int32 run_duration = stopwatch.RunDurationMs(); | 531 int32 run_duration = stopwatch.RunDurationMs(); |
| 426 | 532 |
| 427 // Stir in some randomness, plus add constant in case durations are zero. | 533 // Stir in some randomness, plus add constant in case durations are zero. |
| 428 const uint32 kSomePrimeNumber = 2147483647; | 534 const uint32 kSomePrimeNumber = 2147483647; |
| 429 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 535 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
| 430 // An address is going to have some randomness to it as well ;-). | 536 // An address is going to have some randomness to it as well ;-). |
| 431 random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0)); | 537 random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0)); |
| 432 | 538 |
| 433 // We don't have queue durations without OS timer. OS timer is automatically | 539 // We don't have queue durations without OS timer. OS timer is automatically |
| 434 // used for task-post-timing, so the use of an alternate timer implies all | 540 // used for task-post-timing, so the use of an alternate timer implies all |
| 435 // queue times are invalid, unless it was explicitly said that we can trust | 541 // queue times are invalid, unless it was explicitly said that we can trust |
| 436 // the alternate timer. | 542 // the alternate timer. |
| 437 if (kAllowAlternateTimeSourceHandling && | 543 if (kAllowAlternateTimeSourceHandling && |
| 438 now_function_ && | 544 now_function_ && |
| 439 !now_function_is_time_) { | 545 !now_function_is_time_) { |
| 440 queue_duration = 0; | 546 queue_duration = 0; |
| 441 } | 547 } |
| 442 | 548 |
| 443 DeathMap::iterator it = death_map_.find(&birth); | 549 DeathMap::iterator it = death_map_.find(&births); |
| 444 DeathData* death_data; | 550 DeathData* death_data; |
| 445 if (it != death_map_.end()) { | 551 if (it != death_map_.end()) { |
| 446 death_data = &it->second; | 552 death_data = &it->second; |
| 447 } else { | 553 } else { |
| 448 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 554 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
| 449 death_data = &death_map_[&birth]; | 555 death_data = &death_map_[&births]; |
| 450 } // Release lock ASAP. | 556 } // Release lock ASAP. |
| 451 death_data->RecordDeath(queue_duration, run_duration, random_number_); | 557 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
| 452 | |
| 453 if (!kTrackParentChildLinks) | |
| 454 return; | |
| 455 if (!parent_stack_.empty()) { // We might get turned off. | |
| 456 DCHECK_EQ(parent_stack_.top(), &birth); | |
| 457 parent_stack_.pop(); | |
| 458 } | |
| 459 } | 558 } |
| 460 | 559 |
| 461 // static | 560 // static |
| 462 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 561 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
| 463 if (!TrackingStatus()) | 562 if (!TrackingStatus()) |
| 464 return NULL; | 563 return NULL; |
| 465 ThreadData* current_thread_data = Get(); | 564 ThreadData* current_thread_data = Get(); |
| 466 if (!current_thread_data) | 565 if (!current_thread_data) |
| 467 return NULL; | 566 return NULL; |
| 468 return current_thread_data->TallyABirth(location); | 567 return current_thread_data->TallyABirth(location); |
| 469 } | 568 } |
| 470 | 569 |
| 471 // static | 570 // static |
| 472 void ThreadData::TallyRunOnNamedThreadIfTracking( | 571 void ThreadData::TallyRunOnNamedThreadIfTracking( |
| 473 const base::TrackingInfo& completed_task, | 572 const base::TrackingInfo& completed_task, |
| 474 const TaskStopwatch& stopwatch) { | 573 const TaskStopwatch& stopwatch) { |
| 475 // Even if we have been DEACTIVATED, we will process any pending births so | 574 // Even if we have been DEACTIVATED, we will process any pending births so |
| 476 // that our data structures (which counted the outstanding births) remain | 575 // that our data structures (which counted the outstanding births) remain |
| 477 // consistent. | 576 // consistent. |
| 478 const Births* birth = completed_task.birth_tally; | 577 const Births* births = completed_task.birth_tally; |
| 479 if (!birth) | 578 if (!births) |
| 480 return; | 579 return; |
| 481 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 580 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 482 if (!current_thread_data) | 581 if (!current_thread_data) |
| 483 return; | 582 return; |
| 484 | 583 |
| 485 // Watch out for a race where status_ is changing, and hence one or both | 584 // Watch out for a race where status_ is changing, and hence one or both |
| 486 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 585 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
| 487 // get a time value since we "weren't tracking" and we were trying to be | 586 // get a time value since we "weren't tracking" and we were trying to be |
| 488 // efficient by not calling for a genuine time value. For simplicity, we'll | 587 // efficient by not calling for a genuine time value. For simplicity, we'll |
| 489 // use a default zero duration when we can't calculate a true value. | 588 // use a default zero duration when we can't calculate a true value. |
| 490 TrackedTime start_of_run = stopwatch.StartTime(); | 589 TrackedTime start_of_run = stopwatch.StartTime(); |
| 491 int32 queue_duration = 0; | 590 int32 queue_duration = 0; |
| 492 if (!start_of_run.is_null()) { | 591 if (!start_of_run.is_null()) { |
| 493 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 592 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
| 494 .InMilliseconds(); | 593 .InMilliseconds(); |
| 495 } | 594 } |
| 496 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 595 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 497 } | 596 } |
| 498 | 597 |
| 499 // static | 598 // static |
| 500 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 599 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
| 501 const Births* birth, | 600 const Births* births, |
| 502 const TrackedTime& time_posted, | 601 const TrackedTime& time_posted, |
| 503 const TaskStopwatch& stopwatch) { | 602 const TaskStopwatch& stopwatch) { |
| 504 // Even if we have been DEACTIVATED, we will process any pending births so | 603 // Even if we have been DEACTIVATED, we will process any pending births so |
| 505 // that our data structures (which counted the outstanding births) remain | 604 // that our data structures (which counted the outstanding births) remain |
| 506 // consistent. | 605 // consistent. |
| 507 if (!birth) | 606 if (!births) |
| 508 return; | 607 return; |
| 509 | 608 |
| 510 // TODO(jar): Support the option to coalesce all worker-thread activity under | 609 // TODO(jar): Support the option to coalesce all worker-thread activity under |
| 511 // one ThreadData instance that uses locks to protect *all* access. This will | 610 // one ThreadData instance that uses locks to protect *all* access. This will |
| 512 // reduce memory (making it provably bounded), but run incrementally slower | 611 // reduce memory (making it provably bounded), but run incrementally slower |
| 513 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 612 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
| 514 // that the locks on TallyADeath will be *after* the worker thread has run, | 613 // that the locks on TallyADeath will be *after* the worker thread has run, |
| 515 // and hence nothing will be waiting for the completion (... besides some | 614 // and hence nothing will be waiting for the completion (... besides some |
| 516 // other thread that might like to run). Also, the worker threads tasks are | 615 // other thread that might like to run). Also, the worker threads tasks are |
| 517 // generally longer, and hence the cost of the lock may perchance be amortized | 616 // generally longer, and hence the cost of the lock may perchance be amortized |
| 518 // over the long task's lifetime. | 617 // over the long task's lifetime. |
| 519 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 618 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 520 if (!current_thread_data) | 619 if (!current_thread_data) |
| 521 return; | 620 return; |
| 522 | 621 |
| 523 TrackedTime start_of_run = stopwatch.StartTime(); | 622 TrackedTime start_of_run = stopwatch.StartTime(); |
| 524 int32 queue_duration = 0; | 623 int32 queue_duration = 0; |
| 525 if (!start_of_run.is_null()) { | 624 if (!start_of_run.is_null()) { |
| 526 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 625 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
| 527 } | 626 } |
| 528 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 627 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 529 } | 628 } |
| 530 | 629 |
| 531 // static | 630 // static |
| 532 void ThreadData::TallyRunInAScopedRegionIfTracking( | 631 void ThreadData::TallyRunInAScopedRegionIfTracking( |
| 533 const Births* birth, | 632 const Births* births, |
| 534 const TaskStopwatch& stopwatch) { | 633 const TaskStopwatch& stopwatch) { |
| 535 // Even if we have been DEACTIVATED, we will process any pending births so | 634 // Even if we have been DEACTIVATED, we will process any pending births so |
| 536 // that our data structures (which counted the outstanding births) remain | 635 // that our data structures (which counted the outstanding births) remain |
| 537 // consistent. | 636 // consistent. |
| 538 if (!birth) | 637 if (!births) |
| 539 return; | 638 return; |
| 540 | 639 |
| 541 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 640 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
| 542 if (!current_thread_data) | 641 if (!current_thread_data) |
| 543 return; | 642 return; |
| 544 | 643 |
| 545 int32 queue_duration = 0; | 644 int32 queue_duration = 0; |
| 546 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 645 current_thread_data->TallyADeath(*births, queue_duration, stopwatch); |
| 547 } | |
| 548 | |
| 549 // static | |
| 550 void ThreadData::SnapshotAllExecutedTasks( | |
| 551 ProcessDataPhaseSnapshot* process_data_phase, | |
| 552 BirthCountMap* birth_counts) { | |
| 553 // Get an unchanging copy of a ThreadData list. | |
| 554 ThreadData* my_list = ThreadData::first(); | |
| 555 | |
| 556 // Gather data serially. | |
| 557 // This hackish approach *can* get some slightly corrupt tallies, as we are | |
| 558 // grabbing values without the protection of a lock, but it has the advantage | |
| 559 // of working even with threads that don't have message loops. If a user | |
| 560 // sees any strangeness, they can always just run their stats gathering a | |
| 561 // second time. | |
| 562 for (ThreadData* thread_data = my_list; | |
| 563 thread_data; | |
| 564 thread_data = thread_data->next()) { | |
| 565 thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts); | |
| 566 } | |
| 567 } | |
| 568 | |
| 569 // static | |
| 570 void ThreadData::SnapshotCurrentPhase( | |
| 571 ProcessDataPhaseSnapshot* process_data_phase) { | |
| 572 // Add births that have run to completion to |collected_data|. | |
| 573 // |birth_counts| tracks the total number of births recorded at each location | |
| 574 // for which we have not seen a death count. | |
| 575 BirthCountMap birth_counts; | |
| 576 ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts); | |
| 577 | |
| 578 // Add births that are still active -- i.e. objects that have tallied a birth, | |
| 579 // but have not yet tallied a matching death, and hence must be either | |
| 580 // running, queued up, or being held in limbo for future posting. | |
| 581 for (const auto& birth_count : birth_counts) { | |
| 582 if (birth_count.second > 0) { | |
| 583 process_data_phase->tasks.push_back(TaskSnapshot( | |
| 584 *birth_count.first, DeathData(birth_count.second), "Still_Alive")); | |
| 585 } | |
| 586 } | |
| 587 } | 646 } |
| 588 | 647 |
| 589 void ThreadData::SnapshotExecutedTasks( | 648 void ThreadData::SnapshotExecutedTasks( |
| 590 ProcessDataPhaseSnapshot* process_data_phase, | 649 int current_profiling_phase, |
| 650 PhasedProcessDataSnapshotMap* phased_snapshots, |
| 591 BirthCountMap* birth_counts) { | 651 BirthCountMap* birth_counts) { |
| 592 // Get copy of data, so that the data will not change during the iterations | 652 // Get copy of data, so that the data will not change during the iterations |
| 593 // and processing. | 653 // and processing. |
| 594 ThreadData::BirthMap birth_map; | 654 BirthMap birth_map; |
| 595 ThreadData::DeathMap death_map; | 655 DeathsSnapshot deaths; |
| 596 ThreadData::ParentChildSet parent_child_set; | 656 SnapshotMaps(current_profiling_phase, &birth_map, &deaths); |
| 597 SnapshotMaps(&birth_map, &death_map, &parent_child_set); | |
| 598 | |
| 599 for (const auto& death : death_map) { | |
| 600 process_data_phase->tasks.push_back( | |
| 601 TaskSnapshot(*death.first, death.second, thread_name())); | |
| 602 (*birth_counts)[death.first] -= death.first->birth_count(); | |
| 603 } | |
| 604 | 657 |
| 605 for (const auto& birth : birth_map) { | 658 for (const auto& birth : birth_map) { |
| 606 (*birth_counts)[birth.second] += birth.second->birth_count(); | 659 (*birth_counts)[birth.second] += birth.second->birth_count(); |
| 607 } | 660 } |
| 608 | 661 |
| 609 if (!kTrackParentChildLinks) | 662 for (const auto& death : deaths) { |
| 610 return; | 663 (*birth_counts)[death.first] -= death.first->birth_count(); |
| 611 | 664 |
| 612 for (const auto& parent_child : parent_child_set) { | 665 // For the current death data, walk through all its snapshots, starting from |
| 613 process_data_phase->descendants.push_back( | 666 // the current one, then from the previous profiling phase etc., and for |
| 614 ParentChildPairSnapshot(parent_child)); | 667 // each snapshot calculate the delta between the snapshot and the previous |
| 668 // phase, if any. Store the deltas in the result. |
| 669 for (const DeathDataPhaseSnapshot* phase = &death.second; phase; |
| 670 phase = phase->prev) { |
| 671 const DeathDataSnapshot& death_data = |
| 672 phase->prev ? phase->death_data.Delta(phase->prev->death_data) |
| 673 : phase->death_data; |
| 674 |
| 675 if (death_data.count > 0) { |
| 676 (*phased_snapshots)[phase->profiling_phase].tasks.push_back( |
| 677 TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data, |
| 678 thread_name())); |
| 679 } |
| 680 } |
| 615 } | 681 } |
| 616 } | 682 } |
| 617 | 683 |
| 618 // This may be called from another thread. | 684 // This may be called from another thread. |
| 619 void ThreadData::SnapshotMaps(BirthMap* birth_map, | 685 void ThreadData::SnapshotMaps(int profiling_phase, |
| 620 DeathMap* death_map, | 686 BirthMap* birth_map, |
| 621 ParentChildSet* parent_child_set) { | 687 DeathsSnapshot* deaths) { |
| 622 base::AutoLock lock(map_lock_); | 688 base::AutoLock lock(map_lock_); |
| 689 |
| 623 for (const auto& birth : birth_map_) | 690 for (const auto& birth : birth_map_) |
| 624 (*birth_map)[birth.first] = birth.second; | 691 (*birth_map)[birth.first] = birth.second; |
| 625 for (const auto& death : death_map_) | |
| 626 (*death_map)[death.first] = death.second; | |
| 627 | 692 |
| 628 if (!kTrackParentChildLinks) | 693 for (const auto& death : death_map_) { |
| 629 return; | 694 deaths->push_back(std::make_pair( |
| 695 death.first, |
| 696 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), |
| 697 death.second.run_duration_sum(), |
| 698 death.second.run_duration_max(), |
| 699 death.second.run_duration_sample(), |
| 700 death.second.queue_duration_sum(), |
| 701 death.second.queue_duration_max(), |
| 702 death.second.queue_duration_sample(), |
| 703 death.second.last_phase_snapshot()))); |
| 704 } |
| 705 } |
| 630 | 706 |
| 631 for (const auto& parent_child : parent_child_set_) | 707 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) { |
| 632 parent_child_set->insert(parent_child); | 708 base::AutoLock lock(map_lock_); |
| 709 |
| 710 for (auto& death : death_map_) { |
| 711 death.second.OnProfilingPhaseCompleted(profiling_phase); |
| 712 } |
| 633 } | 713 } |
| 634 | 714 |
| 635 static void OptionallyInitializeAlternateTimer() { | 715 static void OptionallyInitializeAlternateTimer() { |
| 636 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 716 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
| 637 if (alternate_time_source) | 717 if (alternate_time_source) |
| 638 ThreadData::SetAlternateTimeSource(alternate_time_source); | 718 ThreadData::SetAlternateTimeSource(alternate_time_source); |
| 639 } | 719 } |
| 640 | 720 |
| 641 bool ThreadData::Initialize() { | 721 bool ThreadData::Initialize() { |
| 642 if (status_ >= DEACTIVATED) | 722 if (status_ >= DEACTIVATED) |
| (...skipping 24 matching lines...) |
| 667 return false; | 747 return false; |
| 668 } else { | 748 } else { |
| 669 // TLS was initialized for us earlier. | 749 // TLS was initialized for us earlier. |
| 670 DCHECK_EQ(status_, DORMANT_DURING_TESTS); | 750 DCHECK_EQ(status_, DORMANT_DURING_TESTS); |
| 671 } | 751 } |
| 672 | 752 |
| 673 // Incarnation counter is only significant to testing, as it otherwise will | 753 // Incarnation counter is only significant to testing, as it otherwise will |
| 674 // never again change in this process. | 754 // never again change in this process. |
| 675 ++incarnation_counter_; | 755 ++incarnation_counter_; |
| 676 | 756 |
| 677 // The lock is not critical for setting status_, but it doesn't hurt. It also | 757 // The lock is not critical for setting status_, but it doesn't hurt. It also |
| 678 // ensures that if we have a racy initialization, that we'll bail as soon as | 758 // ensures that if we have a racy initialization, that we'll bail as soon as |
| 679 // we get the lock earlier in this method. | 759 // we get the lock earlier in this method. |
| 680 status_ = kInitialStartupState; | 760 status_ = kInitialStartupState; |
| 681 if (!kTrackParentChildLinks && | |
| 682 kInitialStartupState == PROFILING_CHILDREN_ACTIVE) | |
| 683 status_ = PROFILING_ACTIVE; | |
| 684 DCHECK(status_ != UNINITIALIZED); | 761 DCHECK(status_ != UNINITIALIZED); |
| 685 return true; | 762 return true; |
| 686 } | 763 } |
| 687 | 764 |
| 688 // static | 765 // static |
| 689 bool ThreadData::InitializeAndSetTrackingStatus(Status status) { | 766 bool ThreadData::InitializeAndSetTrackingStatus(Status status) { |
| 690 DCHECK_GE(status, DEACTIVATED); | 767 DCHECK_GE(status, DEACTIVATED); |
| 691 DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE); | 768 DCHECK_LE(status, PROFILING_ACTIVE); |
| 692 | 769 |
| 693 if (!Initialize()) // No-op if already initialized. | 770 if (!Initialize()) // No-op if already initialized. |
| 694 return false; // Not compiled in. | 771 return false; // Not compiled in. |
| 695 | 772 |
| 696 if (!kTrackParentChildLinks && status > DEACTIVATED) | 773 if (status > DEACTIVATED) |
| 697 status = PROFILING_ACTIVE; | 774 status = PROFILING_ACTIVE; |
| 698 status_ = status; | 775 status_ = status; |
| 699 return true; | 776 return true; |
| 700 } | 777 } |
| 701 | 778 |
| 702 // static | 779 // static |
| 703 ThreadData::Status ThreadData::status() { | 780 ThreadData::Status ThreadData::status() { |
| 704 return status_; | 781 return status_; |
| 705 } | 782 } |
| 706 | 783 |
| 707 // static | 784 // static |
| 708 bool ThreadData::TrackingStatus() { | 785 bool ThreadData::TrackingStatus() { |
| 709 return status_ > DEACTIVATED; | 786 return status_ > DEACTIVATED; |
| 710 } | 787 } |
| 711 | 788 |
| 712 // static | 789 // static |
| 713 bool ThreadData::TrackingParentChildStatus() { | |
| 714 return status_ >= PROFILING_CHILDREN_ACTIVE; | |
| 715 } | |
| 716 | |
| 717 // static | |
| 718 void ThreadData::PrepareForStartOfRun(const Births* parent) { | |
| 719 if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) { | |
| 720 ThreadData* current_thread_data = Get(); | |
| 721 if (current_thread_data) | |
| 722 current_thread_data->parent_stack_.push(parent); | |
| 723 } | |
| 724 } | |
| 725 | |
| 726 // static | |
| 727 void ThreadData::SetAlternateTimeSource(NowFunction* now_function) { | 790 void ThreadData::SetAlternateTimeSource(NowFunction* now_function) { |
| 728 DCHECK(now_function); | 791 DCHECK(now_function); |
| 729 if (kAllowAlternateTimeSourceHandling) | 792 if (kAllowAlternateTimeSourceHandling) |
| 730 now_function_ = now_function; | 793 now_function_ = now_function; |
| 731 } | 794 } |
| 732 | 795 |
| 733 // static | 796 // static |
| 734 void ThreadData::EnableProfilerTiming() { | 797 void ThreadData::EnableProfilerTiming() { |
| 735 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING); | 798 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING); |
| 736 } | 799 } |
| (...skipping 165 matching lines...) |
| 902 | 965 |
| 903 ThreadData* TaskStopwatch::GetThreadData() const { | 966 ThreadData* TaskStopwatch::GetThreadData() const { |
| 904 #if DCHECK_IS_ON() | 967 #if DCHECK_IS_ON() |
| 905 DCHECK(state_ != CREATED); | 968 DCHECK(state_ != CREATED); |
| 906 #endif | 969 #endif |
| 907 | 970 |
| 908 return current_thread_data_; | 971 return current_thread_data_; |
| 909 } | 972 } |
| 910 | 973 |
| 911 //------------------------------------------------------------------------------ | 974 //------------------------------------------------------------------------------ |
| 975 // DeathDataPhaseSnapshot |
| 976 |
| 977 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( |
| 978 int profiling_phase, |
| 979 int count, |
| 980 int32 run_duration_sum, |
| 981 int32 run_duration_max, |
| 982 int32 run_duration_sample, |
| 983 int32 queue_duration_sum, |
| 984 int32 queue_duration_max, |
| 985 int32 queue_duration_sample, |
| 986 const DeathDataPhaseSnapshot* prev) |
| 987 : profiling_phase(profiling_phase), |
| 988 death_data(count, |
| 989 run_duration_sum, |
| 990 run_duration_max, |
| 991 run_duration_sample, |
| 992 queue_duration_sum, |
| 993 queue_duration_max, |
| 994 queue_duration_sample), |
| 995 prev(prev) { |
| 996 } |
| 997 |
| 998 //------------------------------------------------------------------------------ |
| 999 // TaskSnapshot |
| 1000 |
| 912 TaskSnapshot::TaskSnapshot() { | 1001 TaskSnapshot::TaskSnapshot() { |
| 913 } | 1002 } |
| 914 | 1003 |
| 915 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, | 1004 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, |
| 916 const DeathData& death_data, | 1005 const DeathDataSnapshot& death_data, |
| 917 const std::string& death_thread_name) | 1006 const std::string& death_thread_name) |
| 918 : birth(birth), | 1007 : birth(birth), |
| 919 death_data(death_data), | 1008 death_data(death_data), |
| 920 death_thread_name(death_thread_name) { | 1009 death_thread_name(death_thread_name) { |
| 921 } | 1010 } |
| 922 | 1011 |
| 923 TaskSnapshot::~TaskSnapshot() { | 1012 TaskSnapshot::~TaskSnapshot() { |
| 924 } | 1013 } |
| 925 | 1014 |
| 926 //------------------------------------------------------------------------------ | 1015 //------------------------------------------------------------------------------ |
| 927 // ParentChildPairSnapshot | |
| 928 | |
| 929 ParentChildPairSnapshot::ParentChildPairSnapshot() { | |
| 930 } | |
| 931 | |
| 932 ParentChildPairSnapshot::ParentChildPairSnapshot( | |
| 933 const ThreadData::ParentChildPair& parent_child) | |
| 934 : parent(*parent_child.first), | |
| 935 child(*parent_child.second) { | |
| 936 } | |
| 937 | |
| 938 ParentChildPairSnapshot::~ParentChildPairSnapshot() { | |
| 939 } | |
| 940 | |
| 941 //------------------------------------------------------------------------------ | |
| 942 // ProcessDataPhaseSnapshot | 1016 // ProcessDataPhaseSnapshot |
| 943 | 1017 |
| 944 ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() { | 1018 ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() { |
| 945 } | 1019 } |
| 946 | 1020 |
| 947 ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() { | 1021 ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() { |
| 948 } | 1022 } |
| 949 | 1023 |
| 950 //------------------------------------------------------------------------------ | 1024 //------------------------------------------------------------------------------ |
| 951 // ProcessDataSnapshot | 1025 // ProcessDataSnapshot |
| 952 | 1026 |
| 953 ProcessDataSnapshot::ProcessDataSnapshot() | 1027 ProcessDataSnapshot::ProcessDataSnapshot() |
| 954 #if !defined(OS_NACL) | 1028 #if !defined(OS_NACL) |
| 955 : process_id(base::GetCurrentProcId()) { | 1029 : process_id(base::GetCurrentProcId()) { |
| 956 #else | 1030 #else |
| 957 : process_id(base::kNullProcessId) { | 1031 : process_id(base::kNullProcessId) { |
| 958 #endif | 1032 #endif |
| 959 } | 1033 } |
| 960 | 1034 |
| 961 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1035 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 962 } | 1036 } |
| 963 | 1037 |
| 964 } // namespace tracked_objects | 1038 } // namespace tracked_objects |