OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <ctype.h> | 7 #include <ctype.h> |
8 #include <limits.h> | 8 #include <limits.h> |
9 #include <stdlib.h> | 9 #include <stdlib.h> |
10 | 10 |
(...skipping 376 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
387 // static | 387 // static |
388 int ThreadData::incarnation_counter_ = 0; | 388 int ThreadData::incarnation_counter_ = 0; |
389 | 389 |
390 // static | 390 // static |
391 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; | 391 ThreadData* ThreadData::all_thread_data_list_head_ = NULL; |
392 | 392 |
393 // static | 393 // static |
394 ThreadData* ThreadData::first_retired_thread_data_ = NULL; | 394 ThreadData* ThreadData::first_retired_thread_data_ = NULL; |
395 | 395 |
396 // static | 396 // static |
397 base::LazyInstance<base::Lock>::Leaky | |
398 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; | |
399 | |
400 // static | |
401 base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED; | 397 base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED; |
402 | 398 |
403 ThreadData::ThreadData(const std::string& sanitized_thread_name) | 399 ThreadData::ThreadData(const std::string& sanitized_thread_name) |
404 : next_(NULL), | 400 : next_(NULL), |
405 next_retired_thread_data_(NULL), | 401 next_retired_thread_data_(NULL), |
406 sanitized_thread_name_(sanitized_thread_name), | 402 sanitized_thread_name_(sanitized_thread_name), |
407 incarnation_count_for_pool_(-1), | 403 incarnation_count_for_pool_(-1), |
408 current_stopwatch_(NULL) { | 404 current_stopwatch_(NULL) { |
409 DCHECK(sanitized_thread_name_.empty() || | 405 DCHECK(sanitized_thread_name_.empty() || |
410 !isdigit(sanitized_thread_name_.back())); | 406 !isdigit(sanitized_thread_name_.back())); |
411 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 407 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
412 } | 408 } |
413 | 409 |
414 ThreadData::~ThreadData() { | 410 ThreadData::~ThreadData() { |
415 } | 411 } |
416 | 412 |
417 void ThreadData::PushToHeadOfList() { | 413 void ThreadData::PushToHeadOfList() { |
418 // Toss in a hint of randomness (atop the uninitialized value). | 414 // Toss in a hint of randomness (atop the uninitialized value). |
419 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, | 415 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, |
420 sizeof(random_number_)); | 416 sizeof(random_number_)); |
421 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); | 417 MSAN_UNPOISON(&random_number_, sizeof(random_number_)); |
422 random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0)); | 418 random_number_ += static_cast<uint32_t>(this - static_cast<ThreadData*>(0)); |
423 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); | 419 random_number_ ^= (Now() - TrackedTime()).InMilliseconds(); |
424 | 420 |
425 DCHECK(!next_); | 421 DCHECK(!next_); |
426 base::AutoLock lock(*list_lock_.Pointer()); | 422 base::AutoLock lock(*ListLock()); |
427 incarnation_count_for_pool_ = incarnation_counter_; | 423 incarnation_count_for_pool_ = incarnation_counter_; |
428 next_ = all_thread_data_list_head_; | 424 next_ = all_thread_data_list_head_; |
429 all_thread_data_list_head_ = this; | 425 all_thread_data_list_head_ = this; |
430 } | 426 } |
431 | 427 |
432 // static | 428 // static |
433 ThreadData* ThreadData::first() { | 429 ThreadData* ThreadData::first() { |
434 base::AutoLock lock(*list_lock_.Pointer()); | 430 base::AutoLock lock(*ListLock()); |
435 return all_thread_data_list_head_; | 431 return all_thread_data_list_head_; |
436 } | 432 } |
437 | 433 |
438 ThreadData* ThreadData::next() const { return next_; } | 434 ThreadData* ThreadData::next() const { return next_; } |
439 | 435 |
440 // static | 436 // static |
441 void ThreadData::InitializeThreadContext(const std::string& thread_name) { | 437 void ThreadData::InitializeThreadContext(const std::string& thread_name) { |
442 if (base::WorkerPool::RunsTasksOnCurrentThread()) | 438 if (base::WorkerPool::RunsTasksOnCurrentThread()) |
443 return; | 439 return; |
444 DCHECK_NE(thread_name, kWorkerThreadSanitizedName); | 440 DCHECK_NE(thread_name, kWorkerThreadSanitizedName); |
(...skipping 27 matching lines...) Expand all Loading... |
472 DCHECK(thread_data); // TLS should *never* call us with a NULL. | 468 DCHECK(thread_data); // TLS should *never* call us with a NULL. |
473 // We must NOT do any allocations during this callback. There is a chance | 469 // We must NOT do any allocations during this callback. There is a chance |
474 // that the allocator is no longer active on this thread. | 470 // that the allocator is no longer active on this thread. |
475 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 471 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
476 } | 472 } |
477 | 473 |
478 void ThreadData::OnThreadTerminationCleanup() { | 474 void ThreadData::OnThreadTerminationCleanup() { |
479 // We must NOT do any allocations during this callback. There is a chance that | 475 // We must NOT do any allocations during this callback. There is a chance that |
480 // the allocator is no longer active on this thread. | 476 // the allocator is no longer active on this thread. |
481 | 477 |
482 // The list_lock_ was created when we registered the callback, so it won't be | 478 // The list lock was created when we registered the callback, so it won't be |
483 // allocated here despite the lazy reference. | 479 // allocated here despite the lazy reference. |
484 base::AutoLock lock(*list_lock_.Pointer()); | 480 base::AutoLock lock(*ListLock()); |
485 if (incarnation_counter_ != incarnation_count_for_pool_) | 481 if (incarnation_counter_ != incarnation_count_for_pool_) |
486 return; // ThreadData was constructed in an earlier unit test. | 482 return; // ThreadData was constructed in an earlier unit test. |
487 ++cleanup_count_; | 483 ++cleanup_count_; |
488 | 484 |
489 // Add this ThreadData to a retired list so that it can be reused by a thread | 485 // Add this ThreadData to a retired list so that it can be reused by a thread |
490 // with the same sanitized name in the future. | 486 // with the same sanitized name in the future. |
491 // |next_retired_thread_data_| is expected to be nullptr for a ThreadData | 487 // |next_retired_thread_data_| is expected to be nullptr for a ThreadData |
492 // associated with an active thread. | 488 // associated with an active thread. |
493 DCHECK(!next_retired_thread_data_); | 489 DCHECK(!next_retired_thread_data_); |
494 next_retired_thread_data_ = first_retired_thread_data_; | 490 next_retired_thread_data_ = first_retired_thread_data_; |
(...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
752 | 748 |
753 void ThreadData::EnsureTlsInitialization() { | 749 void ThreadData::EnsureTlsInitialization() { |
754 if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED) | 750 if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED) |
755 return; // Someone else did the initialization. | 751 return; // Someone else did the initialization. |
756 // Due to racy lazy initialization in tests, we'll need to recheck status_ | 752 // Due to racy lazy initialization in tests, we'll need to recheck status_ |
757 // after we acquire the lock. | 753 // after we acquire the lock. |
758 | 754 |
759 // Ensure that we don't double initialize tls. We are called when single | 755 // Ensure that we don't double initialize tls. We are called when single |
760 // threaded in the product, but some tests may be racy and lazy about our | 756 // threaded in the product, but some tests may be racy and lazy about our |
761 // initialization. | 757 // initialization. |
762 base::AutoLock lock(*list_lock_.Pointer()); | 758 base::AutoLock lock(*ListLock()); |
763 if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED) | 759 if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED) |
764 return; // Someone raced in here and beat us. | 760 return; // Someone raced in here and beat us. |
765 | 761 |
766 // Perform the "real" TLS initialization now, and leave it intact through | 762 // Perform the "real" TLS initialization now, and leave it intact through |
767 // process termination. | 763 // process termination. |
768 if (!tls_index_.initialized()) { // Testing may have initialized this. | 764 if (!tls_index_.initialized()) { // Testing may have initialized this. |
769 DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED); | 765 DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED); |
770 tls_index_.Initialize(&ThreadData::OnThreadTermination); | 766 tls_index_.Initialize(&ThreadData::OnThreadTermination); |
771 DCHECK(tls_index_.initialized()); | 767 DCHECK(tls_index_.initialized()); |
772 } else { | 768 } else { |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
830 TrackedTime ThreadData::Now() { | 826 TrackedTime ThreadData::Now() { |
831 if (now_function_for_testing_) | 827 if (now_function_for_testing_) |
832 return TrackedTime::FromMilliseconds((*now_function_for_testing_)()); | 828 return TrackedTime::FromMilliseconds((*now_function_for_testing_)()); |
833 if (IsProfilerTimingEnabled() && TrackingStatus()) | 829 if (IsProfilerTimingEnabled() && TrackingStatus()) |
834 return TrackedTime::Now(); | 830 return TrackedTime::Now(); |
835 return TrackedTime(); // Super fast when disabled, or not compiled. | 831 return TrackedTime(); // Super fast when disabled, or not compiled. |
836 } | 832 } |
837 | 833 |
838 // static | 834 // static |
839 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { | 835 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { |
840 base::AutoLock lock(*list_lock_.Pointer()); | 836 base::AutoLock lock(*ListLock()); |
841 | 837 |
842 // TODO(jar): until this is working on XP, don't run the real test. | 838 // TODO(jar): until this is working on XP, don't run the real test. |
843 #if 0 | 839 #if 0 |
844 // Verify that we've at least shutdown/cleanup the major named threads. The | 840 // Verify that we've at least shutdown/cleanup the major named threads. The |
845 // caller should tell us how many thread shutdowns should have taken place by | 841 // caller should tell us how many thread shutdowns should have taken place by |
846 // now. | 842 // now. |
847 CHECK_GT(cleanup_count_, major_threads_shutdown_count); | 843 CHECK_GT(cleanup_count_, major_threads_shutdown_count); |
848 #endif | 844 #endif |
849 } | 845 } |
850 | 846 |
851 // static | 847 // static |
852 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { | 848 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { |
853 // This is only called from test code, where we need to cleanup so that | 849 // This is only called from test code, where we need to cleanup so that |
854 // additional tests can be run. | 850 // additional tests can be run. |
855 // We must be single threaded... but be careful anyway. | 851 // We must be single threaded... but be careful anyway. |
856 InitializeAndSetTrackingStatus(DEACTIVATED); | 852 InitializeAndSetTrackingStatus(DEACTIVATED); |
857 | 853 |
858 ThreadData* thread_data_list; | 854 ThreadData* thread_data_list; |
859 { | 855 { |
860 base::AutoLock lock(*list_lock_.Pointer()); | 856 base::AutoLock lock(*ListLock()); |
861 thread_data_list = all_thread_data_list_head_; | 857 thread_data_list = all_thread_data_list_head_; |
862 all_thread_data_list_head_ = NULL; | 858 all_thread_data_list_head_ = NULL; |
863 ++incarnation_counter_; | 859 ++incarnation_counter_; |
864 // To be clean, break apart the retired worker list (though we leak them). | 860 // To be clean, break apart the retired worker list (though we leak them). |
865 while (first_retired_thread_data_) { | 861 while (first_retired_thread_data_) { |
866 ThreadData* thread_data = first_retired_thread_data_; | 862 ThreadData* thread_data = first_retired_thread_data_; |
867 first_retired_thread_data_ = thread_data->next_retired_thread_data_; | 863 first_retired_thread_data_ = thread_data->next_retired_thread_data_; |
868 thread_data->next_retired_thread_data_ = nullptr; | 864 thread_data->next_retired_thread_data_ = nullptr; |
869 } | 865 } |
870 } | 866 } |
(...skipping 29 matching lines...) Expand all Loading... |
900 delete next_thread_data; // Includes all Death Records. | 896 delete next_thread_data; // Includes all Death Records. |
901 } | 897 } |
902 } | 898 } |
903 | 899 |
904 // static | 900 // static |
905 ThreadData* ThreadData::GetRetiredOrCreateThreadData( | 901 ThreadData* ThreadData::GetRetiredOrCreateThreadData( |
906 const std::string& sanitized_thread_name) { | 902 const std::string& sanitized_thread_name) { |
907 SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData"); | 903 SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData"); |
908 | 904 |
909 { | 905 { |
910 base::AutoLock lock(*list_lock_.Pointer()); | 906 base::AutoLock lock(*ListLock()); |
911 ThreadData** pcursor = &first_retired_thread_data_; | 907 ThreadData** pcursor = &first_retired_thread_data_; |
912 ThreadData* cursor = first_retired_thread_data_; | 908 ThreadData* cursor = first_retired_thread_data_; |
913 | 909 |
914 // Assuming that there aren't more than a few tens of retired ThreadData | 910 // Assuming that there aren't more than a few tens of retired ThreadData |
915 // instances, this lookup should be quick compared to the thread creation | 911 // instances, this lookup should be quick compared to the thread creation |
916 // time. Retired ThreadData instances cannot be stored in a map because | 912 // time. Retired ThreadData instances cannot be stored in a map because |
917 // insertions are done from OnThreadTerminationCleanup() where allocations | 913 // insertions are done from OnThreadTerminationCleanup() where allocations |
918 // are not allowed. | 914 // are not allowed. |
919 // | 915 // |
920 // Note: Test processes may have more than a few tens of retired ThreadData | 916 // Note: Test processes may have more than a few tens of retired ThreadData |
(...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1089 #endif | 1085 #endif |
1090 } | 1086 } |
1091 | 1087 |
1092 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = | 1088 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = |
1093 default; | 1089 default; |
1094 | 1090 |
1095 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1091 ProcessDataSnapshot::~ProcessDataSnapshot() { |
1096 } | 1092 } |
1097 | 1093 |
1098 } // namespace tracked_objects | 1094 } // namespace tracked_objects |
OLD | NEW |