OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
(...skipping 15 matching lines...) |
26 // that we only record after parsing the command line flag --enable-tracking. | 26 // that we only record after parsing the command line flag --enable-tracking. |
27 // Note that the flag may force either state, so this really controls only the | 27 // Note that the flag may force either state, so this really controls only the |
28 // period of time up until that flag is parsed. If there is no flag seen, then | 28 // period of time up until that flag is parsed. If there is no flag seen, then |
29 // this state may prevail for much or all of the process lifetime. | 29 // this state may prevail for much or all of the process lifetime. |
30 static const ThreadData::Status kInitialStartupState = ThreadData::ACTIVE; | 30 static const ThreadData::Status kInitialStartupState = ThreadData::ACTIVE; |
31 } // anonymous namespace. | 31 } // anonymous namespace. |
32 | 32 |
33 //------------------------------------------------------------------------------ | 33 //------------------------------------------------------------------------------ |
34 // DeathData tallies durations when a death takes place. | 34 // DeathData tallies durations when a death takes place. |
35 | 35 |
36 void DeathData::RecordDeath(DurationInt queue_duration, | 36 DeathData::DeathData() { |
37 DurationInt run_duration) { | 37 Clear(); |
38 ++count_; | |
39 queue_time_.AddDuration(queue_duration); | |
40 run_time_.AddDuration(run_duration); | |
41 } | 38 } |
42 | 39 |
43 DurationInt DeathData::AverageMsRunDuration() const { | 40 DeathData::DeathData(int count) { |
44 return run_time_.AverageMsDuration(count_); | 41 Clear(); |
| 42 count_ = count; |
45 } | 43 } |
46 | 44 |
47 DurationInt DeathData::AverageMsQueueDuration() const { | 45 // TODO(jar): I need to see whether this branch-avoiding macro is worth it. |
48 return queue_time_.AverageMsDuration(count_); | 46 // |
| 47 // This macro has no branching, so it is surely fast, and is equivalent to: |
| 48 // if (assign_it) |
| 49 // target = source; |
| 50 // We use a macro rather than a template to force this to inline. |
| 51 // Related code for calculating max is discussed on the web. |
| 52 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ |
| 53 ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it)) |
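The XOR/mask expression above is easy to verify in isolation. Here is a
minimal standalone sketch (plain int32_t in place of DurationInt; the names
are illustrative and not part of this patch) showing that it reduces to
"if (assign_it) target = source;":

  #include <cassert>
  #include <stdint.h>

  // -static_cast<int32_t>(assign_it) is all ones when assign_it is true and
  // zero when false, so the masked XOR either swaps in source or is a no-op.
  static int32_t BranchlessAssign(bool assign_it, int32_t target,
                                  int32_t source) {
    int32_t mask = -static_cast<int32_t>(assign_it);
    return target ^ ((target ^ source) & mask);
  }

  int main() {
    assert(BranchlessAssign(true, 5, 9) == 9);   // Acts as target = source.
    assert(BranchlessAssign(false, 5, 9) == 5);  // Leaves target unchanged.
    return 0;
  }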
| 54 |
| 55 void DeathData::RecordDeath(const DurationInt queue_duration, |
| 56 const DurationInt run_duration, |
| 57 int32 random_number) { |
| 58 queue_duration_sum_ += queue_duration; |
| 59 run_duration_sum_ += run_duration; |
| 60 ++count_; |
| 61 |
| 62 // Take a uniformly distributed sample over all durations ever supplied. |
| 63 // The probability that we (instead) use this new sample is 1/count_. This |
| 64 // results in a completely uniform selection of the sample. |
| 65 // We ignore the fact that the run and queue samples are correlated |
| 66 // (both come from the same task). |
| 67 bool take_sample = 0 == (random_number % count_); |
| 68 CONDITIONAL_ASSIGN(take_sample, queue_duration_sample_, queue_duration); |
| 69 CONDITIONAL_ASSIGN(take_sample, run_duration_sample_, run_duration); |
| 70 |
| 71 CONDITIONAL_ASSIGN(queue_duration_max_ < queue_duration, queue_duration_max_, |
| 72 queue_duration); |
| 73 CONDITIONAL_ASSIGN(run_duration_max_ < run_duration, run_duration_max_, |
| 74 run_duration); |
| 75 // Ensure we got the macros right. |
| 76 DCHECK_GE(queue_duration_max_, queue_duration); |
| 77 DCHECK_GE(run_duration_max_, run_duration); |
| 78 DCHECK(!take_sample || run_duration_sample_ == run_duration); |
| 79 DCHECK(!take_sample || queue_duration_sample_ == queue_duration); |
49 } | 80 } |
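The sampling step in RecordDeath() is size-one reservoir sampling. A
self-contained sketch of the same scheme (hypothetical names, independent of
DeathData) makes the uniformity argument concrete: item i is selected with
probability 1/i and survives each later step j with probability (j-1)/j, so
the product telescopes and after n items every item is the retained sample
with probability 1/n.

  #include <cstdlib>

  // Keeps one uniformly chosen value from a stream without storing the stream.
  struct Reservoir {
    int count;
    int sample;
    Reservoir() : count(0), sample(0) {}
    void Add(int value) {
      ++count;
      if (rand() % count == 0)  // True with probability 1/count.
        sample = value;
    }
  };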
50 | 81 |
51 void DeathData::AddDeathData(const DeathData& other) { | 82 int DeathData::count() const { return count_; } |
52 count_ += other.count_; | 83 |
53 queue_time_.AddData(other.queue_time_); | 84 DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; } |
54 run_time_.AddData(other.run_time_); | 85 |
| 86 DurationInt DeathData::run_duration_max() const { return run_duration_max_; } |
| 87 |
| 88 DurationInt DeathData::run_duration_sample() const { |
| 89 return run_duration_sample_; |
55 } | 90 } |
56 | 91 |
| 92 DurationInt DeathData::queue_duration_sum() const { |
| 93 return queue_duration_sum_; |
| 94 } |
| 95 |
| 96 DurationInt DeathData::queue_duration_max() const { |
| 97 return queue_duration_max_; |
| 98 } |
| 99 |
| 100 DurationInt DeathData::queue_duration_sample() const { |
| 101 return queue_duration_sample_; |
| 102 } |
| 103 |
| 104 |
57 base::DictionaryValue* DeathData::ToValue() const { | 105 base::DictionaryValue* DeathData::ToValue() const { |
58 base::DictionaryValue* dictionary = new base::DictionaryValue; | 106 base::DictionaryValue* dictionary = new base::DictionaryValue; |
59 dictionary->Set("count", base::Value::CreateIntegerValue(count_)); | 107 dictionary->Set("count", base::Value::CreateIntegerValue(count_)); |
60 dictionary->Set("run_ms", | 108 dictionary->Set("run_ms", |
61 base::Value::CreateIntegerValue(run_time_.duration())); | 109 base::Value::CreateIntegerValue(run_duration_sum())); |
| 110 dictionary->Set("run_ms_max", |
| 111 base::Value::CreateIntegerValue(run_duration_max())); |
| 112 dictionary->Set("run_ms_sample", |
| 113 base::Value::CreateIntegerValue(run_duration_sample())); |
62 dictionary->Set("queue_ms", | 114 dictionary->Set("queue_ms", |
63 base::Value::CreateIntegerValue(queue_time_.duration())); | 115 base::Value::CreateIntegerValue(queue_duration_sum())); |
64 dictionary->Set("run_ms_max", | |
65 base::Value::CreateIntegerValue(run_time_.max())); | |
66 dictionary->Set("queue_ms_max", | 116 dictionary->Set("queue_ms_max", |
67 base::Value::CreateIntegerValue(queue_time_.max())); | 117 base::Value::CreateIntegerValue(queue_duration_max())); |
| 118 dictionary->Set("queue_ms_sample", |
| 119 base::Value::CreateIntegerValue(queue_duration_sample())); |
68 return dictionary; | 120 return dictionary; |
69 } | 121 } |
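For reference, a sketch of how a caller might render one of these
dictionaries as JSON (assumes base/json/json_writer.h and the JSONWriter
signature of this era; the values shown are purely illustrative):

  scoped_ptr<base::Value> value(death_data.ToValue());
  std::string json;
  base::JSONWriter::Write(value.get(), false /* pretty_print */, &json);
  // json now looks something like:
  //   {"count":3,"queue_ms":12,"queue_ms_max":9,"queue_ms_sample":2,
  //    "run_ms":40,"run_ms_max":25,"run_ms_sample":11}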
70 | 122 |
| 123 void DeathData::ResetMax() { |
| 124 run_duration_max_ = 0; |
| 125 queue_duration_max_ = 0; |
| 126 } |
| 127 |
71 void DeathData::Clear() { | 128 void DeathData::Clear() { |
72 count_ = 0; | 129 count_ = 0; |
73 run_time_.Clear(); | 130 run_duration_sum_ = 0; |
74 queue_time_.Clear(); | 131 run_duration_max_ = 0; |
| 132 run_duration_sample_ = 0; |
| 133 queue_duration_sum_ = 0; |
| 134 queue_duration_max_ = 0; |
| 135 queue_duration_sample_ = 0; |
75 } | 136 } |
76 | 137 |
77 //------------------------------------------------------------------------------ | 138 //------------------------------------------------------------------------------ |
78 | |
79 void DeathData::Data::AddData(const Data& other) { | |
80 duration_ += other.duration_; | |
81 if (max_ > other.max_) | |
82 return; | |
83 max_ = other.max_; | |
84 } | |
85 | |
86 void DeathData::Data::AddDuration(DurationInt duration) { | |
87 duration_ += duration; | |
88 if (max_ > duration) | |
89 return; | |
90 max_ = duration; | |
91 } | |
92 | |
93 DurationInt DeathData::Data::AverageMsDuration(int count) const { | |
94 if (duration_ == 0 || !count) | |
95 return 0; | |
96 return (duration_ + count / 2) / count; | |
97 } | |
98 | |
99 void DeathData::Data::Clear() { | |
100 duration_ = 0; | |
101 max_ = 0; | |
102 } | |
103 //------------------------------------------------------------------------------ | |
104 BirthOnThread::BirthOnThread(const Location& location, | 139 BirthOnThread::BirthOnThread(const Location& location, |
105 const ThreadData& current) | 140 const ThreadData& current) |
106 : location_(location), | 141 : location_(location), |
107 birth_thread_(&current) {} | 142 birth_thread_(&current) {
| 143 } |
| 144 |
| 145 const Location BirthOnThread::location() const { return location_; } |
| 146 const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; } |
108 | 147 |
109 //------------------------------------------------------------------------------ | 148 //------------------------------------------------------------------------------ |
110 Births::Births(const Location& location, const ThreadData& current) | 149 Births::Births(const Location& location, const ThreadData& current) |
111 : BirthOnThread(location, current), | 150 : BirthOnThread(location, current), |
112 birth_count_(1) { } | 151 birth_count_(1) { } |
113 | 152 |
| 153 int Births::birth_count() const { return birth_count_; } |
| 154 |
| 155 void Births::RecordBirth() { ++birth_count_; } |
| 156 |
| 157 void Births::ForgetBirth() { --birth_count_; } |
| 158 |
| 159 void Births::Clear() { birth_count_ = 0; } |
| 160 |
114 //------------------------------------------------------------------------------ | 161 //------------------------------------------------------------------------------ |
115 // ThreadData maintains the central data for all births and deaths. | 162 // ThreadData maintains the central data for all births and deaths on a single |
| 163 // thread. |
116 | 164 |
117 // TODO(jar): We should pull all these static vars together, into a struct, and | 165 // TODO(jar): We should pull all these static vars together, into a struct, and |
118 // optimize layout so that we benefit from locality of reference during accesses | 166 // optimize layout so that we benefit from locality of reference during accesses |
119 // to them. | 167 // to them. |
120 | 168 |
121 // A TLS slot which points to the ThreadData instance for the current thread. We | 169 // A TLS slot which points to the ThreadData instance for the current thread. We |
122 // do a fake initialization here (zeroing out data), and then the real in-place | 170 // do a fake initialization here (zeroing out data), and then the real in-place |
123 // construction happens when we call tls_index_.Initialize(). | 171 // construction happens when we call tls_index_.Initialize(). |
124 // static | 172 // static |
125 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); | 173 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); |
(...skipping 37 matching lines...) |
163 next_retired_worker_(NULL), | 211 next_retired_worker_(NULL), |
164 worker_thread_number_(thread_number) { | 212 worker_thread_number_(thread_number) { |
165 CHECK_GT(thread_number, 0); | 213 CHECK_GT(thread_number, 0); |
166 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); | 214 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
167 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 215 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
168 } | 216 } |
169 | 217 |
170 ThreadData::~ThreadData() {} | 218 ThreadData::~ThreadData() {} |
171 | 219 |
172 void ThreadData::PushToHeadOfList() { | 220 void ThreadData::PushToHeadOfList() { |
| 221 // Toss in a hint of randomness (atop the uninitialized value). |
| 222 random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0)); |
| 223 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); |
| 224 |
173 DCHECK(!next_); | 225 DCHECK(!next_); |
174 base::AutoLock lock(*list_lock_.Pointer()); | 226 base::AutoLock lock(*list_lock_.Pointer()); |
175 incarnation_count_for_pool_ = incarnation_counter_; | 227 incarnation_count_for_pool_ = incarnation_counter_; |
176 next_ = all_thread_data_list_head_; | 228 next_ = all_thread_data_list_head_; |
177 all_thread_data_list_head_ = this; | 229 all_thread_data_list_head_ = this; |
178 } | 230 } |
179 | 231 |
180 // static | 232 // static |
| 233 ThreadData* ThreadData::first() { |
| 234 base::AutoLock lock(*list_lock_.Pointer()); |
| 235 return all_thread_data_list_head_; |
| 236 } |
| 237 |
| 238 ThreadData* ThreadData::next() const { return next_; } |
| 239 |
| 240 // static |
181 void ThreadData::InitializeThreadContext(const std::string& suggested_name) { | 241 void ThreadData::InitializeThreadContext(const std::string& suggested_name) { |
182 if (!Initialize()) // Always initialize if needed. | 242 if (!Initialize()) // Always initialize if needed. |
183 return; | 243 return; |
184 ThreadData* current_thread_data = | 244 ThreadData* current_thread_data = |
185 reinterpret_cast<ThreadData*>(tls_index_.Get()); | 245 reinterpret_cast<ThreadData*>(tls_index_.Get()); |
186 if (current_thread_data) | 246 if (current_thread_data) |
187 return; // Browser tests instigate this. | 247 return; // Browser tests instigate this. |
188 current_thread_data = new ThreadData(suggested_name); | 248 current_thread_data = new ThreadData(suggested_name); |
189 tls_index_.Set(current_thread_data); | 249 tls_index_.Set(current_thread_data); |
190 } | 250 } |
(...skipping 54 matching lines...) |
245 return; | 305 return; |
246 } | 306 } |
247 // We must NOT do any allocations during this callback. | 307 // We must NOT do any allocations during this callback. |
248 // Using the simple linked lists avoids all allocations. | 308 // Using the simple linked lists avoids all allocations. |
249 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 309 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
250 this->next_retired_worker_ = first_retired_worker_; | 310 this->next_retired_worker_ = first_retired_worker_; |
251 first_retired_worker_ = this; | 311 first_retired_worker_ = this; |
252 } | 312 } |
253 | 313 |
254 // static | 314 // static |
255 base::DictionaryValue* ThreadData::ToValue() { | 315 base::DictionaryValue* ThreadData::ToValue(bool reset_max) { |
256 DataCollector collected_data; // Gather data. | 316 DataCollector collected_data; // Gather data. |
| 317 // Have SendAllMaps() call collected_data.Append() once per thread. |
| 318 SendAllMaps(reset_max, &collected_data); |
257 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 319 collected_data.AddListOfLivingObjects(); // Add births that are still alive. |
258 base::ListValue* list = collected_data.ToValue(); | 320 base::ListValue* list = collected_data.ToValue(); |
259 base::DictionaryValue* dictionary = new base::DictionaryValue(); | 321 base::DictionaryValue* dictionary = new base::DictionaryValue(); |
260 dictionary->Set("list", list); | 322 dictionary->Set("list", list); |
261 return dictionary; | 323 return dictionary; |
262 } | 324 } |
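A sketch of caller-side usage (the caller names here are hypothetical):
passing reset_max = true clears each DeathData's max fields during
collection, so successive snapshots report per-interval peaks rather than
process-lifetime peaks.

  // Snapshot all tracked births/deaths and reset the recorded maxima.
  scoped_ptr<base::DictionaryValue> stats(ThreadData::ToValue(true));
  // stats holds {"list": [...one entry per (birth, death thread) pair...]}.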
263 | 325 |
264 Births* ThreadData::TallyABirth(const Location& location) { | 326 Births* ThreadData::TallyABirth(const Location& location) { |
265 BirthMap::iterator it = birth_map_.find(location); | 327 BirthMap::iterator it = birth_map_.find(location); |
266 if (it != birth_map_.end()) { | 328 if (it != birth_map_.end()) { |
267 it->second->RecordBirth(); | 329 it->second->RecordBirth(); |
268 return it->second; | 330 return it->second; |
269 } | 331 } |
270 | 332 |
271 Births* tracker = new Births(location, *this); | 333 Births* tracker = new Births(location, *this); |
272 // Lock since the map may get relocated now, and other threads sometimes | 334 // Lock since the map may get relocated now, and other threads sometimes |
273 // snapshot it (but they lock before copying it). | 335 // snapshot it (but they lock before copying it). |
274 base::AutoLock lock(map_lock_); | 336 base::AutoLock lock(map_lock_); |
275 birth_map_[location] = tracker; | 337 birth_map_[location] = tracker; |
276 return tracker; | 338 return tracker; |
277 } | 339 } |
278 | 340 |
279 void ThreadData::TallyADeath(const Births& birth, | 341 void ThreadData::TallyADeath(const Births& birth, |
280 DurationInt queue_duration, | 342 DurationInt queue_duration, |
281 DurationInt run_duration) { | 343 DurationInt run_duration) { |
| 344 // Stir in some randomness, plus add a constant in case durations are zero. |
| 345 const DurationInt kSomePrimeNumber = 2147483647;  // 2^31 - 1; fits in int32. |
| 346 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
| 347 // An address is going to have some randomness to it as well ;-). |
| 348 random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0)); |
| 349 |
282 DeathMap::iterator it = death_map_.find(&birth); | 350 DeathMap::iterator it = death_map_.find(&birth); |
283 DeathData* death_data; | 351 DeathData* death_data; |
284 if (it != death_map_.end()) { | 352 if (it != death_map_.end()) { |
285 death_data = &it->second; | 353 death_data = &it->second; |
286 } else { | 354 } else { |
287 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. | 355 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
288 death_data = &death_map_[&birth]; | 356 death_data = &death_map_[&birth]; |
289 } // Release lock ASAP. | 357 } // Release lock ASAP. |
290 death_data->RecordDeath(queue_duration, run_duration); | 358 death_data->RecordDeath(queue_duration, run_duration, random_number_); |
291 } | 359 } |
292 | 360 |
293 // static | 361 // static |
294 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 362 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
295 if (!kTrackAllTaskObjects) | 363 if (!kTrackAllTaskObjects) |
296 return NULL; // Not compiled in. | 364 return NULL; // Not compiled in. |
297 | 365 |
298 if (!tracking_status()) | 366 if (!tracking_status()) |
299 return NULL; | 367 return NULL; |
300 ThreadData* current_thread_data = Get(); | 368 ThreadData* current_thread_data = Get(); |
(...skipping 101 matching lines...) |
402 if (!current_thread_data) | 470 if (!current_thread_data) |
403 return; | 471 return; |
404 | 472 |
405 DurationInt queue_duration = 0; | 473 DurationInt queue_duration = 0; |
406 DurationInt run_duration = 0; | 474 DurationInt run_duration = 0; |
407 if (!start_of_run.is_null() && !end_of_run.is_null()) | 475 if (!start_of_run.is_null() && !end_of_run.is_null()) |
408 run_duration = (end_of_run - start_of_run).InMilliseconds(); | 476 run_duration = (end_of_run - start_of_run).InMilliseconds(); |
409 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | 477 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); |
410 } | 478 } |
411 | 479 |
412 // static | 480 const std::string ThreadData::thread_name() const { return thread_name_; } |
413 ThreadData* ThreadData::first() { | |
414 base::AutoLock lock(*list_lock_.Pointer()); | |
415 return all_thread_data_list_head_; | |
416 } | |
417 | 481 |
418 // This may be called from another thread. | 482 // This may be called from another thread. |
419 void ThreadData::SnapshotBirthMap(BirthMap *output) const { | 483 void ThreadData::SnapshotMaps(bool reset_max, |
| 484 BirthMap* birth_map, |
| 485 DeathMap* death_map) { |
420 base::AutoLock lock(map_lock_); | 486 base::AutoLock lock(map_lock_); |
421 for (BirthMap::const_iterator it = birth_map_.begin(); | 487 for (BirthMap::const_iterator it = birth_map_.begin(); |
422 it != birth_map_.end(); ++it) | 488 it != birth_map_.end(); ++it) |
423 (*output)[it->first] = it->second; | 489 (*birth_map)[it->first] = it->second; |
424 } | 490 for (DeathMap::iterator it = death_map_.begin(); |
425 | 491 it != death_map_.end(); ++it) { |
426 // This may be called from another thread. | 492 (*death_map)[it->first] = it->second; |
427 void ThreadData::SnapshotDeathMap(DeathMap *output) const { | 493 if (reset_max) |
428 base::AutoLock lock(map_lock_); | 494 it->second.ResetMax(); |
429 for (DeathMap::const_iterator it = death_map_.begin(); | 495 } |
430 it != death_map_.end(); ++it) | |
431 (*output)[it->first] = it->second; | |
432 } | 496 } |
433 | 497 |
434 // static | 498 // static |
| 499 void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) { |
| 500 if (!kTrackAllTaskObjects) |
| 501 return; // Not compiled in. |
| 502 // Get an unchanging copy of a ThreadData list. |
| 503 ThreadData* my_list = ThreadData::first(); |
| 504 |
| 505 // Gather data serially. |
| 506 // This hackish approach *can* get some slightly corrupt tallies, as we are |
| 507 // grabbing values without the protection of a lock, but it has the advantage |
| 508 // of working even with threads that don't have message loops. If a user |
| 509 // sees any strangeness, they can always just run their stats gathering a |
| 510 // second time. |
| 511 for (ThreadData* thread_data = my_list; |
| 512 thread_data; |
| 513 thread_data = thread_data->next()) { |
| 514 // Get copy of data. |
| 515 ThreadData::BirthMap birth_map; |
| 516 ThreadData::DeathMap death_map; |
| 517 thread_data->SnapshotMaps(reset_max, &birth_map, &death_map); |
| 518 target->Append(*thread_data, birth_map, death_map); |
| 519 } |
| 520 } |
| 521 |
| 522 // static |
435 void ThreadData::ResetAllThreadData() { | 523 void ThreadData::ResetAllThreadData() { |
436 ThreadData* my_list = first(); | 524 ThreadData* my_list = first(); |
437 | 525 |
438 for (ThreadData* thread_data = my_list; | 526 for (ThreadData* thread_data = my_list; |
439 thread_data; | 527 thread_data; |
440 thread_data = thread_data->next()) | 528 thread_data = thread_data->next()) |
441 thread_data->Reset(); | 529 thread_data->Reset(); |
442 } | 530 } |
443 | 531 |
444 void ThreadData::Reset() { | 532 void ThreadData::Reset() { |
(...skipping 91 matching lines...) |
536 // We must be single threaded... but be careful anyway. | 624 // We must be single threaded... but be careful anyway. |
537 if (!InitializeAndSetTrackingStatus(false)) | 625 if (!InitializeAndSetTrackingStatus(false)) |
538 return; | 626 return; |
539 ThreadData* thread_data_list; | 627 ThreadData* thread_data_list; |
540 { | 628 { |
541 base::AutoLock lock(*list_lock_.Pointer()); | 629 base::AutoLock lock(*list_lock_.Pointer()); |
542 thread_data_list = all_thread_data_list_head_; | 630 thread_data_list = all_thread_data_list_head_; |
543 all_thread_data_list_head_ = NULL; | 631 all_thread_data_list_head_ = NULL; |
544 ++incarnation_counter_; | 632 ++incarnation_counter_; |
545 // To be clean, break apart the retired worker list (though we leak them). | 633 // To be clean, break apart the retired worker list (though we leak them). |
546 while(first_retired_worker_) { | 634 while (first_retired_worker_) { |
547 ThreadData* worker = first_retired_worker_; | 635 ThreadData* worker = first_retired_worker_; |
548 CHECK_GT(worker->worker_thread_number_, 0); | 636 CHECK_GT(worker->worker_thread_number_, 0); |
549 first_retired_worker_ = worker->next_retired_worker_; | 637 first_retired_worker_ = worker->next_retired_worker_; |
550 worker->next_retired_worker_ = NULL; | 638 worker->next_retired_worker_ = NULL; |
551 } | 639 } |
552 } | 640 } |
553 | 641 |
554 // Put most global statics back in pristine shape. | 642 // Put most global statics back in pristine shape. |
555 worker_thread_data_creation_count_ = 0; | 643 worker_thread_data_creation_count_ = 0; |
556 cleanup_count_ = 0; | 644 cleanup_count_ = 0; |
(...skipping 53 matching lines...) |
610 base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); | 698 base::Value::CreateStringValue(birth_->birth_thread()->thread_name())); |
611 dictionary->Set("death_thread", | 699 dictionary->Set("death_thread", |
612 base::Value::CreateStringValue(DeathThreadName())); | 700 base::Value::CreateStringValue(DeathThreadName())); |
613 dictionary->Set("location", birth_->location().ToValue()); | 701 dictionary->Set("location", birth_->location().ToValue()); |
614 return dictionary; | 702 return dictionary; |
615 } | 703 } |
616 | 704 |
617 //------------------------------------------------------------------------------ | 705 //------------------------------------------------------------------------------ |
618 // DataCollector | 706 // DataCollector |
619 | 707 |
620 DataCollector::DataCollector() { | 708 DataCollector::DataCollector() {} |
621 if (!kTrackAllTaskObjects) | |
622 return; // Not compiled in. | |
623 | |
624 // Get an unchanging copy of a ThreadData list. | |
625 ThreadData* my_list = ThreadData::first(); | |
626 | |
627 // Gather data serially. | |
628 // This hackish approach *can* get some slightly corrupt tallies, as we are |
629 // grabbing values without the protection of a lock, but it has the advantage | |
630 // of working even with threads that don't have message loops. If a user | |
631 // sees any strangeness, they can always just run their stats gathering a | |
632 // second time. | |
633 for (ThreadData* thread_data = my_list; | |
634 thread_data; | |
635 thread_data = thread_data->next()) { | |
636 Append(*thread_data); | |
637 } | |
638 } | |
639 | 709 |
640 DataCollector::~DataCollector() { | 710 DataCollector::~DataCollector() { |
641 } | 711 } |
642 | 712 |
643 void DataCollector::Append(const ThreadData& thread_data) { | 713 void DataCollector::Append(const ThreadData& thread_data, |
644 // Get copy of data. | 714 const ThreadData::BirthMap& birth_map, |
645 ThreadData::BirthMap birth_map; | 715 const ThreadData::DeathMap& death_map) { |
646 thread_data.SnapshotBirthMap(&birth_map); | |
647 ThreadData::DeathMap death_map; | |
648 thread_data.SnapshotDeathMap(&death_map); | |
649 | |
650 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); | 716 for (ThreadData::DeathMap::const_iterator it = death_map.begin(); |
651 it != death_map.end(); ++it) { | 717 it != death_map.end(); ++it) { |
652 collection_.push_back(Snapshot(*it->first, thread_data, it->second)); | 718 collection_.push_back(Snapshot(*it->first, thread_data, it->second)); |
653 global_birth_count_[it->first] -= it->first->birth_count(); | 719 global_birth_count_[it->first] -= it->first->birth_count(); |
654 } | 720 } |
655 | 721 |
656 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); | 722 for (ThreadData::BirthMap::const_iterator it = birth_map.begin(); |
657 it != birth_map.end(); ++it) { | 723 it != birth_map.end(); ++it) { |
658 global_birth_count_[it->second] += it->second->birth_count(); | 724 global_birth_count_[it->second] += it->second->birth_count(); |
659 } | 725 } |
(...skipping 13 matching lines...) |
673 | 739 |
674 base::ListValue* DataCollector::ToValue() const { | 740 base::ListValue* DataCollector::ToValue() const { |
675 base::ListValue* list = new base::ListValue; | 741 base::ListValue* list = new base::ListValue; |
676 for (size_t i = 0; i < collection_.size(); ++i) { | 742 for (size_t i = 0; i < collection_.size(); ++i) { |
677 list->Append(collection_[i].ToValue()); | 743 list->Append(collection_[i].ToValue()); |
678 } | 744 } |
679 return list; | 745 return list; |
680 } | 746 } |
681 | 747 |
682 } // namespace tracked_objects | 748 } // namespace tracked_objects |