OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <limits.h> | 7 #include <limits.h> |
8 #include <stdlib.h> | 8 #include <stdlib.h> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
(...skipping 251 matching lines...) | |
262 base::LazyInstance<base::Lock>::Leaky | 262 base::LazyInstance<base::Lock>::Leaky |
263 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; | 263 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER; |
264 | 264 |
265 // static | 265 // static |
266 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; | 266 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; |
267 | 267 |
268 ThreadData::ThreadData(const std::string& suggested_name) | 268 ThreadData::ThreadData(const std::string& suggested_name) |
269 : next_(NULL), | 269 : next_(NULL), |
270 next_retired_worker_(NULL), | 270 next_retired_worker_(NULL), |
271 worker_thread_number_(0), | 271 worker_thread_number_(0), |
272 incarnation_count_for_pool_(-1) { | 272 incarnation_count_for_pool_(-1), |
273 current_stopwatch_(NULL) { | |
273 DCHECK_GE(suggested_name.size(), 0u); | 274 DCHECK_GE(suggested_name.size(), 0u); |
274 thread_name_ = suggested_name; | 275 thread_name_ = suggested_name; |
275 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 276 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
276 } | 277 } |
277 | 278 |
278 ThreadData::ThreadData(int thread_number) | 279 ThreadData::ThreadData(int thread_number) |
279 : next_(NULL), | 280 : next_(NULL), |
280 next_retired_worker_(NULL), | 281 next_retired_worker_(NULL), |
281 worker_thread_number_(thread_number), | 282 worker_thread_number_(thread_number), |
282 incarnation_count_for_pool_(-1) { | 283 incarnation_count_for_pool_(-1), |
284 current_stopwatch_(NULL) { | |
283 CHECK_GT(thread_number, 0); | 285 CHECK_GT(thread_number, 0); |
284 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); | 286 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); |
285 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. | 287 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_. |
286 } | 288 } |
287 | 289 |
288 ThreadData::~ThreadData() {} | 290 ThreadData::~ThreadData() {} |
289 | 291 |
290 void ThreadData::PushToHeadOfList() { | 292 void ThreadData::PushToHeadOfList() { |
291 // Toss in a hint of randomness (atop the uninitialized value). | 293 // Toss in a hint of randomness (atop the uninitialized value). |
292 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, | 294 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_, |
(...skipping 134 matching lines...) | |
427 base::AutoLock lock(map_lock_); | 429 base::AutoLock lock(map_lock_); |
428 parent_child_set_.insert(pair); | 430 parent_child_set_.insert(pair); |
429 } | 431 } |
430 } | 432 } |
431 | 433 |
432 return child; | 434 return child; |
433 } | 435 } |
434 | 436 |
435 void ThreadData::TallyADeath(const Births& birth, | 437 void ThreadData::TallyADeath(const Births& birth, |
436 int32 queue_duration, | 438 int32 queue_duration, |
437 int32 run_duration) { | 439 const TaskStopwatch& stopwatch) { |
440 int32 run_duration = stopwatch.RunDurationMs(); | |
441 | |
438 // Stir in some randomness, plus add constant in case durations are zero. | 442 // Stir in some randomness, plus add constant in case durations are zero. |
439 const int32 kSomePrimeNumber = 2147483647; | 443 const int32 kSomePrimeNumber = 2147483647; |
440 random_number_ += queue_duration + run_duration + kSomePrimeNumber; | 444 random_number_ += queue_duration + run_duration + kSomePrimeNumber; |
441 // An address is going to have some randomness to it as well ;-). | 445 // An address is going to have some randomness to it as well ;-). |
442 random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0)); | 446 random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0)); |
443 | 447 |
444 // We don't have queue durations without OS timer. OS timer is automatically | 448 // We don't have queue durations without OS timer. OS timer is automatically |
445 // used for task-post-timing, so the use of an alternate timer implies all | 449 // used for task-post-timing, so the use of an alternate timer implies all |
446 // queue times are invalid. | 450 // queue times are invalid. |
447 if (kAllowAlternateTimeSourceHandling && now_function_) | 451 if (kAllowAlternateTimeSourceHandling && now_function_) |
(...skipping 26 matching lines...) | |
474 return NULL; | 478 return NULL; |
475 ThreadData* current_thread_data = Get(); | 479 ThreadData* current_thread_data = Get(); |
476 if (!current_thread_data) | 480 if (!current_thread_data) |
477 return NULL; | 481 return NULL; |
478 return current_thread_data->TallyABirth(location); | 482 return current_thread_data->TallyABirth(location); |
479 } | 483 } |
480 | 484 |
481 // static | 485 // static |
482 void ThreadData::TallyRunOnNamedThreadIfTracking( | 486 void ThreadData::TallyRunOnNamedThreadIfTracking( |
483 const base::TrackingInfo& completed_task, | 487 const base::TrackingInfo& completed_task, |
484 const TrackedTime& start_of_run, | 488 const TaskStopwatch& stopwatch) { |
485 const TrackedTime& end_of_run) { | |
486 if (!kTrackAllTaskObjects) | 489 if (!kTrackAllTaskObjects) |
487 return; // Not compiled in. | 490 return; // Not compiled in. |
488 | 491 |
489 // Even if we have been DEACTIVATED, we will process any pending births so | 492 // Even if we have been DEACTIVATED, we will process any pending births so |
490 // that our data structures (which counted the outstanding births) remain | 493 // that our data structures (which counted the outstanding births) remain |
491 // consistent. | 494 // consistent. |
492 const Births* birth = completed_task.birth_tally; | 495 const Births* birth = completed_task.birth_tally; |
493 if (!birth) | 496 if (!birth) |
494 return; | 497 return; |
495 ThreadData* current_thread_data = Get(); | 498 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
496 if (!current_thread_data) | 499 if (!current_thread_data) |
497 return; | 500 return; |
498 | 501 |
499 // Watch out for a race where status_ is changing, and hence one or both | 502 // Watch out for a race where status_ is changing, and hence one or both |
500 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 503 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
501 // get a time value since we "weren't tracking" and we were trying to be | 504 // get a time value since we "weren't tracking" and we were trying to be |
502 // efficient by not calling for a genuine time value. For simplicity, we'll | 505 // efficient by not calling for a genuine time value. For simplicity, we'll |
503 // use a default zero duration when we can't calculate a true value. | 506 // use a default zero duration when we can't calculate a true value. |
507 TrackedTime start_of_run = stopwatch.StartTime(); | |
504 int32 queue_duration = 0; | 508 int32 queue_duration = 0; |
505 int32 run_duration = 0; | |
506 if (!start_of_run.is_null()) { | 509 if (!start_of_run.is_null()) { |
507 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 510 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
508 .InMilliseconds(); | 511 .InMilliseconds(); |
509 if (!end_of_run.is_null()) | |
510 run_duration = (end_of_run - start_of_run).InMilliseconds(); | |
511 } | 512 } |
512 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | 513 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
513 } | 514 } |
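A minimal usage sketch of the refactored interface, in which the stopwatch replaces the old start_of_run/end_of_run pair. The wrapper function, its base::Closure parameter, and the use of ThreadData::Now() as the time source are illustrative assumptions; only the TaskStopwatch and TallyRunOnNamedThreadIfTracking signatures come from this patch.

```cpp
#include "base/callback.h"
#include "base/tracked_objects.h"
#include "base/tracking_info.h"

// Hypothetical run site; not the actual MessageLoop code.
void RunAndTallyTask(const base::TrackingInfo& pending_task,
                     const base::Closure& task) {
  tracked_objects::TaskStopwatch stopwatch;
  stopwatch.Start(tracked_objects::ThreadData::Now());
  task.Run();  // Execute the tracked task.
  stopwatch.Stop(tracked_objects::ThreadData::Now());
  // The stopwatch now carries the start time, the owning ThreadData, and the
  // exclusive run duration, so no separate end_of_run value is threaded
  // through the tallying call.
  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
                                                               stopwatch);
}
```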
514 | 515 |
515 // static | 516 // static |
516 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 517 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
517 const Births* birth, | 518 const Births* birth, |
518 const TrackedTime& time_posted, | 519 const TrackedTime& time_posted, |
519 const TrackedTime& start_of_run, | 520 const TaskStopwatch& stopwatch) { |
520 const TrackedTime& end_of_run) { | |
521 if (!kTrackAllTaskObjects) | 521 if (!kTrackAllTaskObjects) |
522 return; // Not compiled in. | 522 return; // Not compiled in. |
523 | 523 |
524 // Even if we have been DEACTIVATED, we will process any pending births so | 524 // Even if we have been DEACTIVATED, we will process any pending births so |
525 // that our data structures (which counted the outstanding births) remain | 525 // that our data structures (which counted the outstanding births) remain |
526 // consistent. | 526 // consistent. |
527 if (!birth) | 527 if (!birth) |
528 return; | 528 return; |
529 | 529 |
530 // TODO(jar): Support the option to coalesce all worker-thread activity under | 530 // TODO(jar): Support the option to coalesce all worker-thread activity under |
531 // one ThreadData instance that uses locks to protect *all* access. This will | 531 // one ThreadData instance that uses locks to protect *all* access. This will |
532 // reduce memory (making it provably bounded), but run incrementally slower | 532 // reduce memory (making it provably bounded), but run incrementally slower |
533 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 533 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
534 // that the locks on TallyADeath will be *after* the worker thread has run, | 534 // that the locks on TallyADeath will be *after* the worker thread has run, |
535 // and hence nothing will be waiting for the completion (... besides some | 535 // and hence nothing will be waiting for the completion (... besides some |
536 // other thread that might like to run). Also, the worker threads' tasks are | 536 // other thread that might like to run). Also, the worker threads' tasks are |
537 // generally longer, and hence the cost of the lock may perchance be amortized | 537 // generally longer, and hence the cost of the lock may perchance be amortized |
538 // over the long task's lifetime. | 538 // over the long task's lifetime. |
539 ThreadData* current_thread_data = Get(); | 539 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
540 if (!current_thread_data) | 540 if (!current_thread_data) |
541 return; | 541 return; |
542 | 542 |
543 TrackedTime start_of_run = stopwatch.StartTime(); | |
543 int32 queue_duration = 0; | 544 int32 queue_duration = 0; |
544 int32 run_duration = 0; | |
545 if (!start_of_run.is_null()) { | 545 if (!start_of_run.is_null()) { |
546 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 546 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
547 if (!end_of_run.is_null()) | |
548 run_duration = (end_of_run - start_of_run).InMilliseconds(); | |
549 } | 547 } |
550 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | 548 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
551 } | 549 } |
552 | 550 |
553 // static | 551 // static |
554 void ThreadData::TallyRunInAScopedRegionIfTracking( | 552 void ThreadData::TallyRunInAScopedRegionIfTracking( |
555 const Births* birth, | 553 const Births* birth, |
556 const TrackedTime& start_of_run, | 554 const TaskStopwatch& stopwatch) { |
557 const TrackedTime& end_of_run) { | |
558 if (!kTrackAllTaskObjects) | 555 if (!kTrackAllTaskObjects) |
559 return; // Not compiled in. | 556 return; // Not compiled in. |
560 | 557 |
561 // Even if we have been DEACTIVATED, we will process any pending births so | 558 // Even if we have been DEACTIVATED, we will process any pending births so |
562 // that our data structures (which counted the outstanding births) remain | 559 // that our data structures (which counted the outstanding births) remain |
563 // consistent. | 560 // consistent. |
564 if (!birth) | 561 if (!birth) |
565 return; | 562 return; |
566 | 563 |
567 ThreadData* current_thread_data = Get(); | 564 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
568 if (!current_thread_data) | 565 if (!current_thread_data) |
569 return; | 566 return; |
570 | 567 |
571 int32 queue_duration = 0; | 568 int32 queue_duration = 0; |
572 int32 run_duration = 0; | 569 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
573 if (!start_of_run.is_null() && !end_of_run.is_null()) | |
574 run_duration = (end_of_run - start_of_run).InMilliseconds(); | |
575 current_thread_data->TallyADeath(*birth, queue_duration, run_duration); | |
576 } | 570 } |
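A hedged sketch of the scoped-region path: an RAII helper that starts the stopwatch on construction and tallies on destruction. ScopedRegionTimer is a hypothetical stand-in (the real caller is the scoped-profile machinery, which is outside this hunk), and ThreadData::Now() is assumed as the time source.

```cpp
#include "base/tracked_objects.h"

// Hypothetical RAII helper illustrating the intended call pattern only.
class ScopedRegionTimer {
 public:
  explicit ScopedRegionTimer(const tracked_objects::Births* birth)
      : birth_(birth) {
    stopwatch_.Start(tracked_objects::ThreadData::Now());
  }
  ~ScopedRegionTimer() {
    stopwatch_.Stop(tracked_objects::ThreadData::Now());
    // Queue duration is always reported as zero on this path, matching the
    // function above.
    tracked_objects::ThreadData::TallyRunInAScopedRegionIfTracking(birth_,
                                                                   stopwatch_);
  }

 private:
  const tracked_objects::Births* birth_;
  tracked_objects::TaskStopwatch stopwatch_;
};
```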
577 | 571 |
578 // static | 572 // static |
579 void ThreadData::SnapshotAllExecutedTasks(bool reset_max, | 573 void ThreadData::SnapshotAllExecutedTasks(bool reset_max, |
580 ProcessDataSnapshot* process_data, | 574 ProcessDataSnapshot* process_data, |
581 BirthCountMap* birth_counts) { | 575 BirthCountMap* birth_counts) { |
582 if (!kTrackAllTaskObjects) | 576 if (!kTrackAllTaskObjects) |
583 return; // Not compiled in. | 577 return; // Not compiled in. |
584 | 578 |
585 // Get an unchanging copy of a ThreadData list. | 579 // Get an unchanging copy of a ThreadData list. |
(...skipping 265 matching lines...) | |
851 thread_data_list = thread_data_list->next(); | 845 thread_data_list = thread_data_list->next(); |
852 | 846 |
853 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); | 847 for (BirthMap::iterator it = next_thread_data->birth_map_.begin(); |
854 next_thread_data->birth_map_.end() != it; ++it) | 848 next_thread_data->birth_map_.end() != it; ++it) |
855 delete it->second; // Delete the Birth Records. | 849 delete it->second; // Delete the Birth Records. |
856 delete next_thread_data; // Includes all Death Records. | 850 delete next_thread_data; // Includes all Death Records. |
857 } | 851 } |
858 } | 852 } |
859 | 853 |
860 //------------------------------------------------------------------------------ | 854 //------------------------------------------------------------------------------ |
855 TaskStopwatch::TaskStopwatch() | |
856 : current_thread_data_(NULL), | |
857 nested_stopwatches_duration_ms_(0), | |
858 parent_stopwatch_(NULL) { | |
859 #ifndef NDEBUG | |
860 state_ = CREATED; | |
861 running_child_ = NULL; | |
862 #endif | |
863 } | |
864 | |
865 TaskStopwatch::~TaskStopwatch() { | |
866 #ifndef NDEBUG | |
867 DCHECK(state_ != RUNNING); | |
jar (doing other things) 2014/08/26 04:09:09: This is adding a lot of conditional test code, whi…
vadimt 2014/08/26 19:15:04: I believe it's still worth having this code, becau…
| |
868 DCHECK(running_child_ == NULL); | |
869 #endif | |
870 } | |
871 | |
872 void TaskStopwatch::Start(const TrackedTime& start_time) { | |
873 #ifndef NDEBUG | |
874 DCHECK(state_ != RUNNING); | |
875 state_ = RUNNING; | |
876 DCHECK(running_child_ == NULL); | |
877 #endif | |
878 | |
879 nested_stopwatches_duration_ms_ = 0; | |
880 start_time_ = start_time; | |
881 wallclock_duration_ms_ = 0; | |
882 current_thread_data_ = ThreadData::Get(); | |
jar (doing other things) 2014/08/26 04:09:09: This is a nice example of something that probably…
vadimt 2014/08/26 19:15:04: See my first answer about Start and Stop calls. No…
| |
883 if (current_thread_data_) { | |
jar (doing other things) 2014/08/26 04:09:09: nit: Personal preference: Earlier returns make cod…
vadimt 2014/08/26 19:15:04: Done.
| |
884 parent_stopwatch_ = current_thread_data_->current_stopwatch_; | |
885 #ifndef NDEBUG | |
886 if (parent_stopwatch_) { | |
887 DCHECK(parent_stopwatch_->state_ == RUNNING); | |
888 DCHECK(parent_stopwatch_->running_child_ == NULL); | |
889 parent_stopwatch_->running_child_ = this; | |
890 } | |
891 #endif | |
892 current_thread_data_->current_stopwatch_ = this; | |
893 } | |
894 } | |
895 | |
896 void TaskStopwatch::Stop(const TrackedTime& end_time) { | |
897 #ifndef NDEBUG | |
898 DCHECK(state_ == RUNNING); | |
899 state_ = STOPPED; | |
900 DCHECK(running_child_ == NULL); | |
901 #endif | |
902 | |
903 if (!start_time_.is_null() && !end_time.is_null()) { | |
904 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); | |
905 } | |
906 | |
907 if (current_thread_data_) { | |
jar (doing other things) 2014/08/26 04:09:09: nit: early return is probably cleaner.
vadimt 2014/08/26 19:15:04: Done.
| |
908 DCHECK(current_thread_data_->current_stopwatch_ == this); | |
909 current_thread_data_->current_stopwatch_ = parent_stopwatch_; | |
910 | |
911 if (parent_stopwatch_) { | |
jar (doing other things) 2014/08/26 04:09:09: nit: early return again.
vadimt 2014/08/26 19:15:04: Done.
| |
912 #ifndef NDEBUG | |
913 DCHECK(parent_stopwatch_->state_ == RUNNING); | |
914 DCHECK(parent_stopwatch_->running_child_ == this); | |
915 parent_stopwatch_->running_child_ = NULL; | |
916 #endif | |
917 parent_stopwatch_->nested_stopwatches_duration_ms_ += | |
918 wallclock_duration_ms_; | |
jar (doing other things) 2014/08/26 04:09:09: I'm pretty sure this is the correct way to calcula…
vadimt 2014/08/26 19:15:04: This code calculates as described in the header, i…
| |
919 parent_stopwatch_ = NULL; | |
920 } | |
921 } | |
922 } | |
923 | |
924 TrackedTime TaskStopwatch::StartTime() const { | |
925 #ifndef NDEBUG | |
926 DCHECK(state_ != CREATED); | |
927 #endif | |
928 return start_time_; | |
929 } | |
930 | |
931 int32 TaskStopwatch::RunDurationMs() const { | |
932 #ifndef NDEBUG | |
933 DCHECK(state_ == STOPPED); | |
934 #endif | |
935 | |
936 return wallclock_duration_ms_ - nested_stopwatches_duration_ms_; | |
jar (doing other things) 2014/08/26 04:09:09: This is the more common (by far) use of wallclock_…
vadimt 2014/08/26 19:15:04: Note that we don't store end time, so by eliminati…
| |
937 } | |
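To make the exclusive-time arithmetic concrete, a sketch with invented 10/40/100 ms timings. It assumes tracking is active on the calling thread (so the parent/child linkage in Start()/Stop() takes effect) and that Duration::FromMilliseconds() and TrackedTime::operator+ from tracked_time.h are available as time helpers.

```cpp
#include "base/profiler/tracked_time.h"
#include "base/tracked_objects.h"

void IllustrateNestedExclusion() {
  using tracked_objects::Duration;
  using tracked_objects::TaskStopwatch;
  using tracked_objects::TrackedTime;

  TrackedTime t0 = tracked_objects::ThreadData::Now();

  TaskStopwatch outer;
  outer.Start(t0);
  {
    TaskStopwatch inner;  // e.g. a nested loop or scoped region
    inner.Start(t0 + Duration::FromMilliseconds(10));
    inner.Stop(t0 + Duration::FromMilliseconds(40));
    // inner's wallclock duration is 30 ms; Stop() folds it into outer's
    // nested_stopwatches_duration_ms_.
  }
  outer.Stop(t0 + Duration::FromMilliseconds(100));
  // outer: wallclock 100 ms, nested 30 ms, so RunDurationMs() == 100 - 30 == 70.
  // The child's time is excluded from the parent's exclusive run duration.
}
```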
938 | |
939 ThreadData* TaskStopwatch::GetThreadData() const { |
940 #ifndef NDEBUG | |
941 DCHECK(state_ != CREATED); | |
942 #endif | |
943 | |
944 return current_thread_data_; | |
945 } | |
946 | |
947 //------------------------------------------------------------------------------ | |
861 TaskSnapshot::TaskSnapshot() { | 948 TaskSnapshot::TaskSnapshot() { |
862 } | 949 } |
863 | 950 |
864 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, | 951 TaskSnapshot::TaskSnapshot(const BirthOnThread& birth, |
865 const DeathData& death_data, | 952 const DeathData& death_data, |
866 const std::string& death_thread_name) | 953 const std::string& death_thread_name) |
867 : birth(birth), | 954 : birth(birth), |
868 death_data(death_data), | 955 death_data(death_data), |
869 death_thread_name(death_thread_name) { | 956 death_thread_name(death_thread_name) { |
870 } | 957 } |
(...skipping 24 matching lines...) | |
895 : process_id(base::GetCurrentProcId()) { | 982 : process_id(base::GetCurrentProcId()) { |
896 #else | 983 #else |
897 : process_id(0) { | 984 : process_id(0) { |
898 #endif | 985 #endif |
899 } | 986 } |
900 | 987 |
901 ProcessDataSnapshot::~ProcessDataSnapshot() { | 988 ProcessDataSnapshot::~ProcessDataSnapshot() { |
902 } | 989 } |
903 | 990 |
904 } // namespace tracked_objects | 991 } // namespace tracked_objects |