OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
128 // static | 128 // static |
129 int ThreadData::thread_number_counter_ = 0; | 129 int ThreadData::thread_number_counter_ = 0; |
130 | 130 |
131 // static | 131 // static |
132 int ThreadData::incarnation_counter_ = 0; | 132 int ThreadData::incarnation_counter_ = 0; |
133 | 133 |
134 // static | 134 // static |
135 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; | 135 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; |
136 | 136 |
137 // static | 137 // static |
138 ThreadData::ThreadDataPool* ThreadData::unregistered_thread_data_pool_ = NULL; | 138 ThreadData* ThreadData::first_retired_worker_ = NULL; |
139 | 139 |
140 // static | 140 // static |
141 base::LazyInstance<base::Lock, | 141 base::LazyInstance<base::Lock, |
142 base::LeakyLazyInstanceTraits<base::Lock> > | 142 base::LeakyLazyInstanceTraits<base::Lock> > |
143 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; | 143 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; |
144 | 144 |
145 // static | 145 // static |
146 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; | 146 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; |
147 | 147 |
148 ThreadData::ThreadData(const std::string& suggested_name) | 148 ThreadData::ThreadData(const std::string& suggested_name) |
149 : incarnation_count_for_pool_(-1), | 149 : incarnation_count_for_pool_(-1), |
150 next_(NULL), | 150 next_(NULL), |
| 151 next_retired_worker_(NULL), |
151 worker_thread_number_(0) { | 152 worker_thread_number_(0) { |
152 DCHECK_GE(suggested_name.size(), 0u); | 153 DCHECK_GE(suggested_name.size(), 0u); |
153 thread_name_ = suggested_name; | 154 thread_name_ = suggested_name; |
154 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 155 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
155 } | 156 } |
156 | 157 |
157 ThreadData::ThreadData(size_t thread_number) | 158 ThreadData::ThreadData(int thread_number) |
158 : incarnation_count_for_pool_(-1), | 159 : incarnation_count_for_pool_(-1), |
159 next_(NULL), | 160 next_(NULL), |
| 161 next_retired_worker_(NULL), |
160 worker_thread_number_(thread_number) { | 162 worker_thread_number_(thread_number) { |
161 CHECK_NE(thread_number, 0u); | 163 CHECK_GT(thread_number, 0); |
162 base::StringAppendF(&thread_name_, "WorkerThread-%"PRIuS, thread_number); | 164 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
163 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 165 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
164 } | 166 } |
165 | 167 |
166 ThreadData::~ThreadData() {} | 168 ThreadData::~ThreadData() {} |
167 | 169 |
168 void ThreadData::PushToHeadOfList() { | 170 void ThreadData::PushToHeadOfList() { |
169 DCHECK(!next_); | 171 DCHECK(!next_); |
170 base::AutoLock lock(*list_lock_.Pointer()); | 172 base::AutoLock lock(*list_lock_.Pointer()); |
171 incarnation_count_for_pool_ = incarnation_counter_; | 173 incarnation_count_for_pool_ = incarnation_counter_; |
172 next_ = all_thread_data_list_head_; | 174 next_ = all_thread_data_list_head_; |
(...skipping 15 matching lines...) Expand all Loading... |
188 // static | 190 // static |
189 ThreadData* ThreadData::Get() { | 191 ThreadData* ThreadData::Get() { |
190 if (!tls_index_.initialized()) | 192 if (!tls_index_.initialized()) |
191 return NULL; // For unittests only. | 193 return NULL; // For unittests only. |
192 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get()); | 194 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get()); |
193 if (registered) | 195 if (registered) |
194 return registered; | 196 return registered; |
195 | 197 |
196 // We must be a worker thread, since we didn't pre-register. | 198 // We must be a worker thread, since we didn't pre-register. |
197 ThreadData* worker_thread_data = NULL; | 199 ThreadData* worker_thread_data = NULL; |
198 size_t thread_number = 0; | 200 int thread_number = 0; |
199 { | 201 { |
200 base::AutoLock lock(*list_lock_.Pointer()); | 202 base::AutoLock lock(*list_lock_.Pointer()); |
201 if (!unregistered_thread_data_pool_) | 203 if (first_retired_worker_) { |
202 unregistered_thread_data_pool_ = new ThreadDataPool; | 204 worker_thread_data = first_retired_worker_; |
203 if (!unregistered_thread_data_pool_->empty()) { | 205 first_retired_worker_ = first_retired_worker_->next_retired_worker_; |
204 worker_thread_data = | 206 worker_thread_data->next_retired_worker_ = NULL; |
205 const_cast<ThreadData*>(unregistered_thread_data_pool_->top()); | |
206 unregistered_thread_data_pool_->pop(); | |
207 } else { | 207 } else { |
208 thread_number = ++thread_number_counter_; | 208 thread_number = ++thread_number_counter_; |
209 unregistered_thread_data_pool_->reserve(thread_number); | |
210 } | 209 } |
211 } | 210 } |
212 | 211 |
213 // If we can't find a previously used instance, then we have to create one. | 212 // If we can't find a previously used instance, then we have to create one. |
214 if (!worker_thread_data) | 213 if (!worker_thread_data) |
215 worker_thread_data = new ThreadData(thread_number); | 214 worker_thread_data = new ThreadData(thread_number); |
216 DCHECK_GT(worker_thread_data->worker_thread_number_, 0u); | 215 DCHECK_GT(worker_thread_data->worker_thread_number_, 0); |
217 | 216 |
218 tls_index_.Set(worker_thread_data); | 217 tls_index_.Set(worker_thread_data); |
219 return worker_thread_data; | 218 return worker_thread_data; |
220 } | 219 } |
221 | 220 |
222 // static | 221 // static |
223 void ThreadData::OnThreadTermination(void* thread_data) { | 222 void ThreadData::OnThreadTermination(void* thread_data) { |
224 if (!kTrackAllTaskObjects) | 223 if (!kTrackAllTaskObjects) |
225 return; // Not compiled in. | 224 return; // Not compiled in. |
226 if (!thread_data) | 225 if (!thread_data) |
227 return; | 226 return; |
228 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 227 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
229 } | 228 } |
230 | 229 |
231 void ThreadData::OnThreadTerminationCleanup() const { | 230 void ThreadData::OnThreadTerminationCleanup() { |
232 if (!worker_thread_number_) | 231 if (!worker_thread_number_) |
233 return; | 232 return; |
234 base::AutoLock lock(*list_lock_.Pointer()); | 233 base::AutoLock lock(*list_lock_.Pointer()); |
235 if (incarnation_counter_ != incarnation_count_for_pool_) | 234 if (incarnation_counter_ != incarnation_count_for_pool_) |
236 return; // ThreadData was constructed in an earlier unit test. | 235 return; // ThreadData was constructed in an earlier unit test. |
237 // The following will never have to do an allocation. | 236 // We must NOT do any allocations during this callback. |
238 unregistered_thread_data_pool_->push(this); | 237 // Using the simple linked lists avoids all allocations. |
| 238 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL)); |
| 239 this->next_retired_worker_ = first_retired_worker_; |
| 240 first_retired_worker_ = this; |
239 } | 241 } |
240 | 242 |
241 // static | 243 // static |
242 base::DictionaryValue* ThreadData::ToValue() { | 244 base::DictionaryValue* ThreadData::ToValue() { |
243 DataCollector collected_data; // Gather data. | 245 DataCollector collected_data; // Gather data. |
244 collected_data.AddListOfLivingObjects(); // Add births that are still alive. | 246 collected_data.AddListOfLivingObjects(); // Add births that are still alive. |
245 base::ListValue* list = collected_data.ToValue(); | 247 base::ListValue* list = collected_data.ToValue(); |
246 base::DictionaryValue* dictionary = new base::DictionaryValue(); | 248 base::DictionaryValue* dictionary = new base::DictionaryValue(); |
247 dictionary->Set("list", list); | 249 dictionary->Set("list", list); |
248 return dictionary; | 250 return dictionary; |
(...skipping 256 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
505 } | 507 } |
506 | 508 |
507 // static | 509 // static |
508 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { | 510 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { |
509 // This is only called from test code, where we need to cleanup so that | 511 // This is only called from test code, where we need to cleanup so that |
510 // additional tests can be run. | 512 // additional tests can be run. |
511 // We must be single threaded... but be careful anyway. | 513 // We must be single threaded... but be careful anyway. |
512 if (!InitializeAndSetTrackingStatus(false)) | 514 if (!InitializeAndSetTrackingStatus(false)) |
513 return; | 515 return; |
514 ThreadData* thread_data_list; | 516 ThreadData* thread_data_list; |
515 ThreadDataPool* final_pool; | |
516 { | 517 { |
517 base::AutoLock lock(*list_lock_.Pointer()); | 518 base::AutoLock lock(*list_lock_.Pointer()); |
518 thread_data_list = all_thread_data_list_head_; | 519 thread_data_list = all_thread_data_list_head_; |
519 all_thread_data_list_head_ = NULL; | 520 all_thread_data_list_head_ = NULL; |
520 final_pool = unregistered_thread_data_pool_; | |
521 unregistered_thread_data_pool_ = NULL; | |
522 ++incarnation_counter_; | 521 ++incarnation_counter_; |
 | 522     // To be clean, break apart the retired worker list (though the workers themselves are leaked). |
 | 523     while (first_retired_worker_) { |
| 524 ThreadData* worker = first_retired_worker_; |
| 525 CHECK_GT(worker->worker_thread_number_, 0); |
| 526 first_retired_worker_ = worker->next_retired_worker_; |
| 527 worker->next_retired_worker_ = NULL; |
| 528 } |
523 } | 529 } |
524 | 530 |
525 // Put most global static back in pristine shape. | 531 // Put most global static back in pristine shape. |
526 thread_number_counter_ = 0; | 532 thread_number_counter_ = 0; |
527 tls_index_.Set(NULL); | 533 tls_index_.Set(NULL); |
528 status_ = UNINITIALIZED; | 534 status_ = UNINITIALIZED; |
529 | 535 |
530 // To avoid any chance of racing in unit tests, which is the only place we | 536 // To avoid any chance of racing in unit tests, which is the only place we |
531 // call this function, we may sometimes leak all the data structures we | 537 // call this function, we may sometimes leak all the data structures we |
532 // recovered, as they may still be in use on threads from prior tests! | 538 // recovered, as they may still be in use on threads from prior tests! |
533 if (leak) | 539 if (leak) |
534 return; | 540 return; |
535 | 541 |
536 // When we want to cleanup (on a single thread), here is what we do. | 542 // When we want to cleanup (on a single thread), here is what we do. |
537 | 543 |
538 if (final_pool) { | |
539 // The thread_data_list contains *all* the instances, and we'll use it to | |
540 // delete them. This pool has pointers to some instances, and we just | |
541 // have to drop those pointers (and not do the deletes here). | |
542 while (!final_pool->empty()) | |
543 final_pool->pop(); | |
544 delete final_pool; | |
545 } | |
546 | |
547 // Do actual recursive delete in all ThreadData instances. | 544 // Do actual recursive delete in all ThreadData instances. |
548 while (thread_data_list) { | 545 while (thread_data_list) { |
549 ThreadData* next_thread_data = thread_data_list; | 546 ThreadData* next_thread_data = thread_data_list; |
550 thread_data_list = thread_data_list->next(); | 547 thread_data_list = thread_data_list->next(); |
551 | 548 |
552 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 549 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
553 next_thread_data->birth_map_.end() != it; ++it) | 550 next_thread_data->birth_map_.end() != it; ++it) |
554 delete it->second; // Delete the Birth Records. | 551 delete it->second; // Delete the Birth Records. |
555 next_thread_data->birth_map_.clear(); | 552 next_thread_data->birth_map_.clear(); |
556 next_thread_data->death_map_.clear(); | 553 next_thread_data->death_map_.clear(); |
557 delete next_thread_data; // Includes all Death Records. | 554 delete next_thread_data; // Includes all Death Records. |
558 } | 555 } |
559 } | 556 } |
560 | 557 |
561 //------------------------------------------------------------------------------ | 558 //------------------------------------------------------------------------------ |
562 // Small partial implementation of a stack that never has to allocate during a | |
563 // push() operation, because it is always prepared to accept the maximum number | |
564 // of ThreadData instances (all the worker thread related instances). | |
565 | |
566 ThreadData::ThreadDataPool::ThreadDataPool() : empty_slot_(0) {}; | |
567 ThreadData::ThreadDataPool::~ThreadDataPool() {}; | |
568 | |
569 bool ThreadData::ThreadDataPool::empty() const { return empty_slot_ == 0; } | |
570 | |
571 void ThreadData::ThreadDataPool::reserve(size_t largest_worker_pool_number) { | |
572 // Worker pool numbers start at 1, and exclude 0, so the number is exactly | |
573 // the least size needed. | |
574 // Due to asynchronous construction of worker-pool numbers (and associated | |
575 // ThreadData), we might not hear about the numbers sequentially. | |
576 if (largest_worker_pool_number > stack_.size()) | |
577 stack_.resize(largest_worker_pool_number); | |
578 } | |
579 | |
580 const ThreadData* ThreadData::ThreadDataPool::top() const { | |
581 if (empty_slot_ > 0) | |
582 return stack_[empty_slot_ - 1]; | |
583 NOTREACHED(); | |
584 return NULL; | |
585 } | |
586 | |
587 void ThreadData::ThreadDataPool::push(const ThreadData* thread_data) { | |
588 if (empty_slot_ < stack_.size()) { | |
589 stack_[empty_slot_] = thread_data; | |
590 ++empty_slot_; | |
591 return; | |
592 } | |
593 NOTREACHED(); | |
594 } | |
595 | |
596 void ThreadData::ThreadDataPool::pop() { | |
597 if (empty_slot_ > 0) { | |
598 --empty_slot_; | |
599 return; | |
600 } | |
601 NOTREACHED(); | |
602 } | |
603 | |
604 //------------------------------------------------------------------------------ | |
605 // Individual 3-tuple of birth (place and thread) along with death thread, and | 559 // Individual 3-tuple of birth (place and thread) along with death thread, and |
606 // the accumulated stats for instances (DeathData). | 560 // the accumulated stats for instances (DeathData). |
607 | 561 |
608 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, | 562 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, |
609 const ThreadData& death_thread, | 563 const ThreadData& death_thread, |
610 const DeathData& death_data) | 564 const DeathData& death_data) |
611 : birth_(&birth_on_thread), | 565 : birth_(&birth_on_thread), |
612 death_thread_(&death_thread), | 566 death_thread_(&death_thread), |
613 death_data_(death_data) { | 567 death_data_(death_data) { |
614 } | 568 } |
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
695 | 649 |
696 base::ListValue* DataCollector::ToValue() const { | 650 base::ListValue* DataCollector::ToValue() const { |
697 base::ListValue* list = new base::ListValue; | 651 base::ListValue* list = new base::ListValue; |
698 for (size_t i = 0; i < collection_.size(); ++i) { | 652 for (size_t i = 0; i < collection_.size(); ++i) { |
699 list->Append(collection_[i].ToValue()); | 653 list->Append(collection_[i].ToValue()); |
700 } | 654 } |
701 return list; | 655 return list; |
702 } | 656 } |
703 | 657 |
704 } // namespace tracked_objects | 658 } // namespace tracked_objects |
OLD | NEW |