OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
(...skipping 106 matching lines...) | |
117 // TODO(jar): We should pull all these static vars together, into a struct, and | 117 // TODO(jar): We should pull all these static vars together, into a struct, and |
118 // optimize layout so that we benefit from locality of reference during accesses | 118 // optimize layout so that we benefit from locality of reference during accesses |
119 // to them. | 119 // to them. |
120 | 120 |
121 // A TLS slot which points to the ThreadData instance for the current thread. We | 121 // A TLS slot which points to the ThreadData instance for the current thread. We |
122 // do a fake initialization here (zeroing out data), and then the real in-place | 122 // do a fake initialization here (zeroing out data), and then the real in-place |
123 // construction happens when we call tls_index_.Initialize(). | 123 // construction happens when we call tls_index_.Initialize(). |
124 // static | 124 // static |
125 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); | 125 base::ThreadLocalStorage::Slot ThreadData::tls_index_(base::LINKER_INITIALIZED); |
126 | 126 |
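
For readers unfamiliar with the LINKER_INITIALIZED idiom the comment above relies on, here is a minimal illustrative sketch of such a slot, using POSIX TLS. This is a simplification, not the real base::ThreadLocalStorage::Slot: the constructor deliberately does nothing, so static zero-initialization is the valid initial state, and Initialize() performs the real setup later.

    #include <pthread.h>
    #include <cstddef>

    class SketchSlot {
     public:
      enum LinkerInitialized { LINKER_INITIALIZED };
      // No-op constructor: a static instance relies on the zeroed BSS image,
      // so it is usable (as "uninitialized") before static constructors run.
      explicit SketchSlot(LinkerInitialized) {}
      bool Initialize(void (*destructor)(void*)) {
        initialized_ = (pthread_key_create(&key_, destructor) == 0);
        return initialized_;
      }
      bool initialized() const { return initialized_; }
      void* Get() const {
        return initialized_ ? pthread_getspecific(key_) : NULL;
      }
      void Set(void* value) { pthread_setspecific(key_, value); }

     private:
      pthread_key_t key_;  // Stays zero until Initialize().
      bool initialized_;   // false via zero-initialization; true after init.
    };
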
127 // A lock-protected counter to assign sequence number to threads. | |
128 // static | 127 // static |
129 int ThreadData::thread_number_counter_ = 0; | 128 int ThreadData::worker_thread_data_creation_count_ = 0; |
129 | |
130 // static | |
131 int ThreadData::cleanup_count_ = 0; | |
130 | 132 |
131 // static | 133 // static |
132 int ThreadData::incarnation_counter_ = 0; | 134 int ThreadData::incarnation_counter_ = 0; |
133 | 135 |
134 // static | 136 // static |
135 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; | 137 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; |
136 | 138 |
137 // static | 139 // static |
138 ThreadData* ThreadData::first_retired_worker_ = NULL; | 140 ThreadData* ThreadData::first_retired_worker_ = NULL; |
139 | 141 |
(...skipping 50 matching lines...) | |
190 // static | 192 // static |
191 ThreadData* ThreadData::Get() { | 193 ThreadData* ThreadData::Get() { |
192 if (!tls_index_.initialized()) | 194 if (!tls_index_.initialized()) |
193 return NULL; // For unittests only. | 195 return NULL; // For unittests only. |
194 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get()); | 196 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get()); |
195 if (registered) | 197 if (registered) |
196 return registered; | 198 return registered; |
197 | 199 |
198 // We must be a worker thread, since we didn't pre-register. | 200 // We must be a worker thread, since we didn't pre-register. |
199 ThreadData* worker_thread_data = NULL; | 201 ThreadData* worker_thread_data = NULL; |
200 int thread_number = 0; | 202 int worker_thread_number = 0; |
201 { | 203 { |
202 base::AutoLock lock(*list_lock_.Pointer()); | 204 base::AutoLock lock(*list_lock_.Pointer()); |
203 if (first_retired_worker_) { | 205 if (first_retired_worker_) { |
204 worker_thread_data = first_retired_worker_; | 206 worker_thread_data = first_retired_worker_; |
205 first_retired_worker_ = first_retired_worker_->next_retired_worker_; | 207 first_retired_worker_ = first_retired_worker_->next_retired_worker_; |
206 worker_thread_data->next_retired_worker_ = NULL; | 208 worker_thread_data->next_retired_worker_ = NULL; |
207 } else { | 209 } else { |
208 thread_number = ++thread_number_counter_; | 210 worker_thread_number = ++worker_thread_data_creation_count_; |
209 } | 211 } |
210 } | 212 } |
211 | 213 |
212 // If we can't find a previously used instance, then we have to create one. | 214 // If we can't find a previously used instance, then we have to create one. |
213 if (!worker_thread_data) | 215 if (!worker_thread_data) { |
214 worker_thread_data = new ThreadData(thread_number); | 216 DCHECK_GT(worker_thread_number, 0); |
217 worker_thread_data = new ThreadData(worker_thread_number); | |
218 } | |
215 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); | 219 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); |
216 | 220 |
217 tls_index_.Set(worker_thread_data); | 221 tls_index_.Set(worker_thread_data); |
218 return worker_thread_data; | 222 return worker_thread_data; |
219 } | 223 } |
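
The retire/reuse scheme in Get() can be distilled into a few lines. The sketch below uses hypothetical Node/AcquireNode names and omits the lock for brevity (the real code takes list_lock_ around both the list and the counter); its point is that reusing a retired instance costs no allocation, and only genuinely new workers bump the creation count.

    struct Node {
      explicit Node(int number) : worker_number(number), next_retired(NULL) {}
      int worker_number;
      Node* next_retired;  // Intrusive link; non-NULL only while retired.
    };

    static Node* first_retired = NULL;  // Lock-protected in the real code.
    static int creation_count = 0;      // Likewise lock-protected.

    Node* AcquireNode() {
      if (first_retired) {              // Pop a retired instance: no allocation.
        Node* node = first_retired;
        first_retired = node->next_retired;
        node->next_retired = NULL;
        return node;
      }
      return new Node(++creation_count);  // First use of this worker number.
    }
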
220 | 224 |
221 // static | 225 // static |
222 void ThreadData::OnThreadTermination(void* thread_data) { | 226 void ThreadData::OnThreadTermination(void* thread_data) { |
227 // We must NOT do any allocations during this callback. There is a chance | |
228 // that the allocator is no longer active on this thread. | |
223 if (!kTrackAllTaskObjects) | 229 if (!kTrackAllTaskObjects) |
224 return; // Not compiled in. | 230 return; // Not compiled in. |
225 if (!thread_data) | 231 if (!thread_data) |
226 return; | 232 return; |
227 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 233 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
228 } | 234 } |
229 | 235 |
230 void ThreadData::OnThreadTerminationCleanup() { | 236 void ThreadData::OnThreadTerminationCleanup() { |
231 if (!worker_thread_number_) | 237 // The list_lock_ was created when we registered the callback, so it won't be |
232 return; | 238 // allocated here despite the lazy reference. |
233 base::AutoLock lock(*list_lock_.Pointer()); | 239 base::AutoLock lock(*list_lock_.Pointer()); |
234 if (incarnation_counter_ != incarnation_count_for_pool_) | 240 if (incarnation_counter_ != incarnation_count_for_pool_) |
235 return; // ThreadData was constructed in an earlier unit test. | 241 return; // ThreadData was constructed in an earlier unit test. |
242 ++cleanup_count_; | |
243 // Only worker threads need to be retired and reused. | |
244 if (!worker_thread_number_) { | |
245 return; | |
246 } | |
236 // We must NOT do any allocations during this callback. | 247 // We must NOT do any allocations during this callback. |
237 // Using the simple linked lists avoids all allocations. | 248 // Using the simple linked lists avoids all allocations. |
238 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); | 249 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
239 this->next_retired_worker_ = first_retired_worker_; | 250 this->next_retired_worker_ = first_retired_worker_; |
240 first_retired_worker_ = this; | 251 first_retired_worker_ = this; |
241 } | 252 } |
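
The matching retire step, continuing the sketch above (same hypothetical Node and first_retired): a single splice onto the intrusive list head, which is what lets OnThreadTerminationCleanup() honor its no-allocation constraint.

    void RetireNode(Node* node) {
      // Push onto the free list; no allocation, just two pointer writes.
      node->next_retired = first_retired;
      first_retired = node;
    }
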
242 | 253 |
243 // static | 254 // static |
244 base::DictionaryValue* ThreadData::ToValue() { | 255 base::DictionaryValue* ThreadData::ToValue() { |
245 DataCollector collected_data; // Gather data. | 256 DataCollector collected_data; // Gather data. |
246 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 257 collected_data.AddListOfLivingObjects(); // Add births that are still alive. |
247 base::ListValue* list = collected_data.ToValue(); | 258 base::ListValue* list = collected_data.ToValue(); |
248 base::DictionaryValue* dictionary = new base::DictionaryValue(); | 259 base::DictionaryValue* dictionary = new base::DictionaryValue(); |
249 dictionary->Set("list", list); | 260 dictionary->Set("list", list); |
250 return dictionary; | 261 return dictionary; |
251 } | 262 } |
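
A hedged usage sketch for ToValue(): serializing the snapshot to JSON, assuming the 2011-era base::JSONWriter::Write(const Value*, bool, std::string*) signature (check the header before relying on it).

    #include <string>
    #include "base/json/json_writer.h"
    #include "base/memory/scoped_ptr.h"
    #include "base/tracked_objects.h"

    // Returns the tracker snapshot as pretty-printed JSON text, e.g. for an
    // about:profiler style page. scoped_ptr reclaims the DictionaryValue
    // that ToValue() handed over.
    std::string TrackerStateAsJson() {
      scoped_ptr<base::DictionaryValue> snapshot(
          tracked_objects::ThreadData::ToValue());
      std::string json;
      base::JSONWriter::Write(snapshot.get(), true /* pretty_print */, &json);
      return json;
    }
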
252 | 263 |
253 Births* ThreadData::TallyABirth(const Location& location) { | 264 Births* ThreadData::TallyABirth(const Location& location) { |
254 BirthMap::iterator it = birth_map_.find(location); | 265 BirthMap::iterator it = birth_map_.find(location); |
255 if (it != birth_map_.end()) { | 266 if (it != birth_map_.end()) { |
256 it->second->RecordBirth(); | 267 it->second->RecordBirth(); |
257 return it->second; | 268 return it->second; |
258 } | 269 } |
259 | 270 |
260 Births* tracker = new Births(location, *this); | 271 Births* tracker = new Births(location, *this); |
261 // Lock since the map may get relocated now, and other threads sometimes | 272 // Lock since the map may get relocated now, and other threads sometimes |
262 // snapshot it (but they lock before copying it). | 273 // snapshot it (but they lock before copying it). |
263 base::AutoLock lock(lock_); | 274 base::AutoLock lock(map_lock_); |
264 birth_map_[location] = tracker; | 275 birth_map_[location] = tracker; |
265 return tracker; | 276 return tracker; |
266 } | 277 } |
267 | 278 |
268 void ThreadData::TallyADeath(const Births& birth, | 279 void ThreadData::TallyADeath(const Births& birth, |
269 DurationInt queue_duration, | 280 DurationInt queue_duration, |
270 DurationInt run_duration) { | 281 DurationInt run_duration) { |
271 DeathMap::iterator it = death_map_.find(&birth); | 282 DeathMap::iterator it = death_map_.find(&birth); |
272 DeathData* death_data; | 283 DeathData* death_data; |
273 if (it != death_map_.end()) { | 284 if (it != death_map_.end()) { |
274 death_data = &it->second; | 285 death_data = &it->second; |
275 } else { | 286 } else { |
276 base::AutoLock lock(lock_); // Lock since the map may get relocated now. | 287 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. |
277 death_data = &death_map_[&birth]; | 288 death_data = &death_map_[&birth]; |
278 } // Release lock ASAP. | 289 } // Release lock ASAP. |
279 death_data->RecordDeath(queue_duration, run_duration); | 290 death_data->RecordDeath(queue_duration, run_duration); |
280 } | 291 } |
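
TallyABirth() and TallyADeath() share one locking discipline: the owning thread is the only writer, so it may search its map without the lock, but it must hold map_lock_ while inserting, because insertion can relocate the storage that other threads copy (under the same lock) in the Snapshot* methods below. A minimal sketch of that discipline, with hypothetical names:

    #include <cstddef>
    #include <map>
    #include "base/synchronization/lock.h"

    template <typename K, typename V>
    class OwnerWritableMap {
     public:
      // Owning thread only. Safe without the lock: no other thread mutates.
      V* Find(const K& key) {
        typename std::map<K, V>::iterator it = map_.find(key);
        return it == map_.end() ? NULL : &it->second;
      }
      // Owning thread only, but locked: insertion may relocate storage that
      // a concurrent Snapshot() is copying.
      V* Insert(const K& key) {
        base::AutoLock lock(lock_);
        return &map_[key];
      }
      // Any thread: copy the whole map under the lock.
      void Snapshot(std::map<K, V>* out) const {
        base::AutoLock lock(lock_);
        *out = map_;
      }

     private:
      mutable base::Lock lock_;
      std::map<K, V> map_;
    };
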
281 | 292 |
282 // static | 293 // static |
283 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 294 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
284 if (!kTrackAllTaskObjects) | 295 if (!kTrackAllTaskObjects) |
285 return NULL; // Not compiled in. | 296 return NULL; // Not compiled in. |
286 | 297 |
(...skipping 112 matching lines...) | |
399 } | 410 } |
400 | 411 |
401 // static | 412 // static |
402 ThreadData* ThreadData::first() { | 413 ThreadData* ThreadData::first() { |
403 base::AutoLock lock(*list_lock_.Pointer()); | 414 base::AutoLock lock(*list_lock_.Pointer()); |
404 return all_thread_data_list_head_; | 415 return all_thread_data_list_head_; |
405 } | 416 } |
406 | 417 |
407 // This may be called from another thread. | 418 // This may be called from another thread. |
408 void ThreadData::SnapshotBirthMap(BirthMap *output) const { | 419 void ThreadData::SnapshotBirthMap(BirthMap *output) const { |
409 base::AutoLock lock(lock_); | 420 base::AutoLock lock(map_lock_); |
410 for (BirthMap::const_iterator it = birth_map_.begin(); | 421 for (BirthMap::const_iterator it = birth_map_.begin(); |
411 it != birth_map_.end(); ++it) | 422 it != birth_map_.end(); ++it) |
412 (*output)[it->first] = it->second; | 423 (*output)[it->first] = it->second; |
413 } | 424 } |
414 | 425 |
415 // This may be called from another thread. | 426 // This may be called from another thread. |
416 void ThreadData::SnapshotDeathMap(DeathMap *output) const { | 427 void ThreadData::SnapshotDeathMap(DeathMap *output) const { |
417 base::AutoLock lock(lock_); | 428 base::AutoLock lock(map_lock_); |
418 for (DeathMap::const_iterator it = death_map_.begin(); | 429 for (DeathMap::const_iterator it = death_map_.begin(); |
419 it != death_map_.end(); ++it) | 430 it != death_map_.end(); ++it) |
420 (*output)[it->first] = it->second; | 431 (*output)[it->first] = it->second; |
421 } | 432 } |
422 | 433 |
423 // static | 434 // static |
424 void ThreadData::ResetAllThreadData() { | 435 void ThreadData::ResetAllThreadData() { |
425 ThreadData* my_list = first(); | 436 ThreadData* my_list = first(); |
426 | 437 |
427 for (ThreadData* thread_data = my_list; | 438 for (ThreadData* thread_data = my_list; |
428 thread_data; | 439 thread_data; |
429 thread_data = thread_data->next()) | 440 thread_data = thread_data->next()) |
430 thread_data->Reset(); | 441 thread_data->Reset(); |
431 } | 442 } |
432 | 443 |
433 void ThreadData::Reset() { | 444 void ThreadData::Reset() { |
434 base::AutoLock lock(lock_); | 445 base::AutoLock lock(map_lock_); |
435 for (DeathMap::iterator it = death_map_.begin(); | 446 for (DeathMap::iterator it = death_map_.begin(); |
436 it != death_map_.end(); ++it) | 447 it != death_map_.end(); ++it) |
437 it->second.Clear(); | 448 it->second.Clear(); |
438 for (BirthMap::iterator it = birth_map_.begin(); | 449 for (BirthMap::iterator it = birth_map_.begin(); |
439 it != birth_map_.end(); ++it) | 450 it != birth_map_.end(); ++it) |
440 it->second->Clear(); | 451 it->second->Clear(); |
441 } | 452 } |
442 | 453 |
443 bool ThreadData::Initialize() { | 454 bool ThreadData::Initialize() { |
444 if (!kTrackAllTaskObjects) | 455 if (!kTrackAllTaskObjects) |
(...skipping 55 matching lines...) | |
500 } | 511 } |
501 | 512 |
502 // static | 513 // static |
503 TrackedTime ThreadData::Now() { | 514 TrackedTime ThreadData::Now() { |
504 if (kTrackAllTaskObjects && tracking_status()) | 515 if (kTrackAllTaskObjects && tracking_status()) |
505 return TrackedTime::Now(); | 516 return TrackedTime::Now(); |
506 return TrackedTime(); // Super fast when disabled, or not compiled. | 517 return TrackedTime(); // Super fast when disabled, or not compiled. |
507 } | 518 } |
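
The comment on Now() is worth spelling out: kTrackAllTaskObjects is a compile-time constant, so when tracking is compiled out the optimizer folds the whole function down to returning a null TrackedTime, and when compiled in, one runtime flag check guards the comparatively expensive clock read. In sketch form (stand-in constant, the file's own TrackedTime type):

    static const bool kSketchCompiledIn = true;  // Stand-in for kTrackAllTaskObjects.

    TrackedTime SketchNow(bool runtime_enabled) {
      if (kSketchCompiledIn && runtime_enabled)
        return TrackedTime::Now();  // Real clock read, only when tracking.
      return TrackedTime();         // Null time: cheap constant when disabled.
    }
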
508 | 519 |
509 // static | 520 // static |
521 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { | |
522 base::AutoLock lock(*list_lock_.Pointer()); | |
523 if (worker_thread_data_creation_count_ == 0) | |
524 return; // We haven't really run much, and couldn't have leaked. | |
525 // Verify that we've at least shut down/cleaned up the major named threads. The | |
526 // caller should tell us how many thread shutdowns should have takes place by | |

ramant (doing other things) 2011/11/22 03:59:45
nit: takes -> taken

jar (doing other things) 2011/11/22 17:15:43
Done.
527 // now. | |
528 CHECK_GT(cleanup_count_, major_threads_shutdown_count); | |
529 } | |
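
A hypothetical call site for the new check, e.g. in browser-shutdown test teardown; the thread count here is illustrative, not taken from real shutdown code:

    // After joining the major named threads, verify that at least that many
    // per-thread TLS cleanup callbacks actually ran (i.e. nothing leaked).
    static const int kMajorNamedThreadCount = 7;  // Illustrative value only.
    tracked_objects::ThreadData::EnsureCleanupWasCalled(kMajorNamedThreadCount);
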
530 | |
531 // static | |
510 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { | 532 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { |
511 // This is only called from test code, where we need to clean up so that | 533 // This is only called from test code, where we need to clean up so that |
512 // additional tests can be run. | 534 // additional tests can be run. |
513 // We must be single threaded... but be careful anyway. | 535 // We must be single threaded... but be careful anyway. |
514 if (!InitializeAndSetTrackingStatus(false)) | 536 if (!InitializeAndSetTrackingStatus(false)) |
515 return; | 537 return; |
516 ThreadData* thread_data_list; | 538 ThreadData* thread_data_list; |
517 { | 539 { |
518 base::AutoLock lock(*list_lock_.Pointer()); | 540 base::AutoLock lock(*list_lock_.Pointer()); |
519 thread_data_list = all_thread_data_list_head_; | 541 thread_data_list = all_thread_data_list_head_; |
520 all_thread_data_list_head_ = NULL; | 542 all_thread_data_list_head_ = NULL; |
521 ++incarnation_counter_; | 543 ++incarnation_counter_; |
522 // To be clean, break apart the retired worker list (though we leak them). | 544 // To be clean, break apart the retired worker list (though we leak them). |
523 while(first_retired_worker_) { | 545 while(first_retired_worker_) { |
524 ThreadData* worker = first_retired_worker_; | 546 ThreadData* worker = first_retired_worker_; |
525 CHECK_GT(worker->worker_thread_number_, 0); | 547 CHECK_GT(worker->worker_thread_number_, 0); |
526 first_retired_worker_ = worker->next_retired_worker_; | 548 first_retired_worker_ = worker->next_retired_worker_; |
527 worker->next_retired_worker_ = NULL; | 549 worker->next_retired_worker_ = NULL; |
528 } | 550 } |
529 } | 551 } |
530 | 552 |
531 // Put most global statics back in pristine shape. | 553 // Put most global statics back in pristine shape. |
532 thread_number_counter_ = 0; | 554 worker_thread_data_creation_count_ = 0; |
555 cleanup_count_ = 0; | |
533 tls_index_.Set(NULL); | 556 tls_index_.Set(NULL); |
534 status_ = UNINITIALIZED; | 557 status_ = UNINITIALIZED; |
535 | 558 |
536 // To avoid any chance of racing in unit tests, which is the only place we | 559 // To avoid any chance of racing in unit tests, which is the only place we |
537 // call this function, we may sometimes leak all the data structures we | 560 // call this function, we may sometimes leak all the data structures we |
538 // recovered, as they may still be in use on threads from prior tests! | 561 // recovered, as they may still be in use on threads from prior tests! |
539 if (leak) | 562 if (leak) |
540 return; | 563 return; |
541 | 564 |
542 // When we want to cleanup (on a single thread), here is what we do. | 565 // When we want to cleanup (on a single thread), here is what we do. |
(...skipping 106 matching lines...) | |
649 | 672 |
650 base::ListValue* DataCollector::ToValue() const { | 673 base::ListValue* DataCollector::ToValue() const { |
651 base::ListValue* list = new base::ListValue; | 674 base::ListValue* list = new base::ListValue; |
652 for (size_t i = 0; i < collection_.size(); ++i) { | 675 for (size_t i = 0; i < collection_.size(); ++i) { |
653 list->Append(collection_[i].ToValue()); | 676 list->Append(collection_[i].ToValue()); |
654 } | 677 } |
655 return list; | 678 return list; |
656 } | 679 } |
657 | 680 |
658 } // namespace tracked_objects | 681 } // namespace tracked_objects |