OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
6 | 6 |
7 #include <limits.h> | 7 #include <limits.h> |
8 #include <stdlib.h> | 8 #include <stdlib.h> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
(...skipping 10 matching lines...) Expand all Loading... |
21 | 21 |
22 using base::TimeDelta; | 22 using base::TimeDelta; |
23 | 23 |
24 namespace base { | 24 namespace base { |
25 class TimeDelta; | 25 class TimeDelta; |
26 } | 26 } |
27 | 27 |
28 namespace tracked_objects { | 28 namespace tracked_objects { |
29 | 29 |
30 namespace { | 30 namespace { |
31 // Flag to compile out almost all of the task tracking code. | |
32 const bool kTrackAllTaskObjects = true; | |
33 | |
34 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is | 31 // TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is |
35 // negligible, enable by default. | 32 // negligible, enable by default. |
36 // Flag to compile out parent-child link recording. | 33 // Flag to compile out parent-child link recording. |
37 const bool kTrackParentChildLinks = false; | 34 const bool kTrackParentChildLinks = false; |
38 | 35 |
39 // When ThreadData is first initialized, should we start in an ACTIVE state to | 36 // When ThreadData is first initialized, should we start in an ACTIVE state to |
40 // record all of the startup-time tasks, or should we start up DEACTIVATED, so | 37 // record all of the startup-time tasks, or should we start up DEACTIVATED, so |
41 // that we only record after parsing the command line flag --enable-tracking. | 38 // that we only record after parsing the command line flag --enable-tracking. |
42 // Note that the flag may force either state, so this really controls only the | 39 // Note that the flag may force either state, so this really controls only the |
43 // period of time up until that flag is parsed. If there is no flag seen, then | 40 // period of time up until that flag is parsed. If there is no flag seen, then |
(...skipping 315 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
359 | 356 |
360 tls_index_.Set(worker_thread_data); | 357 tls_index_.Set(worker_thread_data); |
361 return worker_thread_data; | 358 return worker_thread_data; |
362 } | 359 } |
363 | 360 |
364 // static | 361 // static |
365 void ThreadData::OnThreadTermination(void* thread_data) { | 362 void ThreadData::OnThreadTermination(void* thread_data) { |
366 DCHECK(thread_data); // TLS should *never* call us with a NULL. | 363 DCHECK(thread_data); // TLS should *never* call us with a NULL. |
367 // We must NOT do any allocations during this callback. There is a chance | 364 // We must NOT do any allocations during this callback. There is a chance |
368 // that the allocator is no longer active on this thread. | 365 // that the allocator is no longer active on this thread. |
369 if (!kTrackAllTaskObjects) | |
370 return; // Not compiled in. | |
371 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); | 366 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup(); |
372 } | 367 } |
373 | 368 |
374 void ThreadData::OnThreadTerminationCleanup() { | 369 void ThreadData::OnThreadTerminationCleanup() { |
375 // The list_lock_ was created when we registered the callback, so it won't be | 370 // The list_lock_ was created when we registered the callback, so it won't be |
376 // allocated here despite the lazy reference. | 371 // allocated here despite the lazy reference. |
377 base::AutoLock lock(*list_lock_.Pointer()); | 372 base::AutoLock lock(*list_lock_.Pointer()); |
378 if (incarnation_counter_ != incarnation_count_for_pool_) | 373 if (incarnation_counter_ != incarnation_count_for_pool_) |
379 return; // ThreadData was constructed in an earlier unit test. | 374 return; // ThreadData was constructed in an earlier unit test. |
380 ++cleanup_count_; | 375 ++cleanup_count_; |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
458 if (!kTrackParentChildLinks) | 453 if (!kTrackParentChildLinks) |
459 return; | 454 return; |
460 if (!parent_stack_.empty()) { // We might get turned off. | 455 if (!parent_stack_.empty()) { // We might get turned off. |
461 DCHECK_EQ(parent_stack_.top(), &birth); | 456 DCHECK_EQ(parent_stack_.top(), &birth); |
462 parent_stack_.pop(); | 457 parent_stack_.pop(); |
463 } | 458 } |
464 } | 459 } |
465 | 460 |
466 // static | 461 // static |
467 Births* ThreadData::TallyABirthIfActive(const Location& location) { | 462 Births* ThreadData::TallyABirthIfActive(const Location& location) { |
468 if (!kTrackAllTaskObjects) | |
469 return NULL; // Not compiled in. | |
470 | |
471 if (!TrackingStatus()) | 463 if (!TrackingStatus()) |
472 return NULL; | 464 return NULL; |
473 ThreadData* current_thread_data = Get(); | 465 ThreadData* current_thread_data = Get(); |
474 if (!current_thread_data) | 466 if (!current_thread_data) |
475 return NULL; | 467 return NULL; |
476 return current_thread_data->TallyABirth(location); | 468 return current_thread_data->TallyABirth(location); |
477 } | 469 } |
478 | 470 |
479 // static | 471 // static |
480 void ThreadData::TallyRunOnNamedThreadIfTracking( | 472 void ThreadData::TallyRunOnNamedThreadIfTracking( |
481 const base::TrackingInfo& completed_task, | 473 const base::TrackingInfo& completed_task, |
482 const TaskStopwatch& stopwatch) { | 474 const TaskStopwatch& stopwatch) { |
483 if (!kTrackAllTaskObjects) | |
484 return; // Not compiled in. | |
485 | |
486 // Even if we have been DEACTIVATED, we will process any pending births so | 475 // Even if we have been DEACTIVATED, we will process any pending births so |
487 // that our data structures (which counted the outstanding births) remain | 476 // that our data structures (which counted the outstanding births) remain |
488 // consistent. | 477 // consistent. |
489 const Births* birth = completed_task.birth_tally; | 478 const Births* birth = completed_task.birth_tally; |
490 if (!birth) | 479 if (!birth) |
491 return; | 480 return; |
492 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 481 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
493 if (!current_thread_data) | 482 if (!current_thread_data) |
494 return; | 483 return; |
495 | 484 |
496 // Watch out for a race where status_ is changing, and hence one or both | 485 // Watch out for a race where status_ is changing, and hence one or both |
497 // of start_of_run or end_of_run is zero. In that case, we didn't bother to | 486 // of start_of_run or end_of_run is zero. In that case, we didn't bother to |
498 // get a time value since we "weren't tracking" and we were trying to be | 487 // get a time value since we "weren't tracking" and we were trying to be |
499 // efficient by not calling for a genuine time value. For simplicity, we'll | 488 // efficient by not calling for a genuine time value. For simplicity, we'll |
500 // use a default zero duration when we can't calculate a true value. | 489 // use a default zero duration when we can't calculate a true value. |
501 TrackedTime start_of_run = stopwatch.StartTime(); | 490 TrackedTime start_of_run = stopwatch.StartTime(); |
502 int32 queue_duration = 0; | 491 int32 queue_duration = 0; |
503 if (!start_of_run.is_null()) { | 492 if (!start_of_run.is_null()) { |
504 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) | 493 queue_duration = (start_of_run - completed_task.EffectiveTimePosted()) |
505 .InMilliseconds(); | 494 .InMilliseconds(); |
506 } | 495 } |
507 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 496 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
508 } | 497 } |
509 | 498 |
510 // static | 499 // static |
511 void ThreadData::TallyRunOnWorkerThreadIfTracking( | 500 void ThreadData::TallyRunOnWorkerThreadIfTracking( |
512 const Births* birth, | 501 const Births* birth, |
513 const TrackedTime& time_posted, | 502 const TrackedTime& time_posted, |
514 const TaskStopwatch& stopwatch) { | 503 const TaskStopwatch& stopwatch) { |
515 if (!kTrackAllTaskObjects) | |
516 return; // Not compiled in. | |
517 | |
518 // Even if we have been DEACTIVATED, we will process any pending births so | 504 // Even if we have been DEACTIVATED, we will process any pending births so |
519 // that our data structures (which counted the outstanding births) remain | 505 // that our data structures (which counted the outstanding births) remain |
520 // consistent. | 506 // consistent. |
521 if (!birth) | 507 if (!birth) |
522 return; | 508 return; |
523 | 509 |
524 // TODO(jar): Support the option to coalesce all worker-thread activity under | 510 // TODO(jar): Support the option to coalesce all worker-thread activity under |
525 // one ThreadData instance that uses locks to protect *all* access. This will | 511 // one ThreadData instance that uses locks to protect *all* access. This will |
526 // reduce memory (making it provably bounded), but run incrementally slower | 512 // reduce memory (making it provably bounded), but run incrementally slower |
527 // (since we'll use locks on TallyABirth and TallyADeath). The good news is | 513 // (since we'll use locks on TallyABirth and TallyADeath). The good news is |
(...skipping 11 matching lines...) Expand all Loading... |
539 if (!start_of_run.is_null()) { | 525 if (!start_of_run.is_null()) { |
540 queue_duration = (start_of_run - time_posted).InMilliseconds(); | 526 queue_duration = (start_of_run - time_posted).InMilliseconds(); |
541 } | 527 } |
542 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 528 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
543 } | 529 } |
544 | 530 |
545 // static | 531 // static |
546 void ThreadData::TallyRunInAScopedRegionIfTracking( | 532 void ThreadData::TallyRunInAScopedRegionIfTracking( |
547 const Births* birth, | 533 const Births* birth, |
548 const TaskStopwatch& stopwatch) { | 534 const TaskStopwatch& stopwatch) { |
549 if (!kTrackAllTaskObjects) | |
550 return; // Not compiled in. | |
551 | |
552 // Even if we have been DEACTIVATED, we will process any pending births so | 535 // Even if we have been DEACTIVATED, we will process any pending births so |
553 // that our data structures (which counted the outstanding births) remain | 536 // that our data structures (which counted the outstanding births) remain |
554 // consistent. | 537 // consistent. |
555 if (!birth) | 538 if (!birth) |
556 return; | 539 return; |
557 | 540 |
558 ThreadData* current_thread_data = stopwatch.GetThreadData(); | 541 ThreadData* current_thread_data = stopwatch.GetThreadData(); |
559 if (!current_thread_data) | 542 if (!current_thread_data) |
560 return; | 543 return; |
561 | 544 |
562 int32 queue_duration = 0; | 545 int32 queue_duration = 0; |
563 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); | 546 current_thread_data->TallyADeath(*birth, queue_duration, stopwatch); |
564 } | 547 } |
565 | 548 |
566 // static | 549 // static |
567 void ThreadData::SnapshotAllExecutedTasks( | 550 void ThreadData::SnapshotAllExecutedTasks( |
568 ProcessDataPhaseSnapshot* process_data_phase, | 551 ProcessDataPhaseSnapshot* process_data_phase, |
569 BirthCountMap* birth_counts) { | 552 BirthCountMap* birth_counts) { |
570 if (!kTrackAllTaskObjects) | |
571 return; // Not compiled in. | |
572 | |
573 // Get an unchanging copy of a ThreadData list. | 553 // Get an unchanging copy of a ThreadData list. |
574 ThreadData* my_list = ThreadData::first(); | 554 ThreadData* my_list = ThreadData::first(); |
575 | 555 |
576 // Gather data serially. | 556 // Gather data serially. |
577 // This hackish approach *can* get some slighly corrupt tallies, as we are | 557 // This hackish approach *can* get some slighly corrupt tallies, as we are |
578 // grabbing values without the protection of a lock, but it has the advantage | 558 // grabbing values without the protection of a lock, but it has the advantage |
579 // of working even with threads that don't have message loops. If a user | 559 // of working even with threads that don't have message loops. If a user |
580 // sees any strangeness, they can always just run their stats gathering a | 560 // sees any strangeness, they can always just run their stats gathering a |
581 // second time. | 561 // second time. |
582 for (ThreadData* thread_data = my_list; | 562 for (ThreadData* thread_data = my_list; |
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
652 parent_child_set->insert(parent_child); | 632 parent_child_set->insert(parent_child); |
653 } | 633 } |
654 | 634 |
655 static void OptionallyInitializeAlternateTimer() { | 635 static void OptionallyInitializeAlternateTimer() { |
656 NowFunction* alternate_time_source = GetAlternateTimeSource(); | 636 NowFunction* alternate_time_source = GetAlternateTimeSource(); |
657 if (alternate_time_source) | 637 if (alternate_time_source) |
658 ThreadData::SetAlternateTimeSource(alternate_time_source); | 638 ThreadData::SetAlternateTimeSource(alternate_time_source); |
659 } | 639 } |
660 | 640 |
661 bool ThreadData::Initialize() { | 641 bool ThreadData::Initialize() { |
662 if (!kTrackAllTaskObjects) | |
663 return false; // Not compiled in. | |
664 if (status_ >= DEACTIVATED) | 642 if (status_ >= DEACTIVATED) |
665 return true; // Someone else did the initialization. | 643 return true; // Someone else did the initialization. |
666 // Due to racy lazy initialization in tests, we'll need to recheck status_ | 644 // Due to racy lazy initialization in tests, we'll need to recheck status_ |
667 // after we acquire the lock. | 645 // after we acquire the lock. |
668 | 646 |
669 // Ensure that we don't double initialize tls. We are called when single | 647 // Ensure that we don't double initialize tls. We are called when single |
670 // threaded in the product, but some tests may be racy and lazy about our | 648 // threaded in the product, but some tests may be racy and lazy about our |
671 // initialization. | 649 // initialization. |
672 base::AutoLock lock(*list_lock_.Pointer()); | 650 base::AutoLock lock(*list_lock_.Pointer()); |
673 if (status_ >= DEACTIVATED) | 651 if (status_ >= DEACTIVATED) |
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
754 | 732 |
// static
void ThreadData::EnableProfilerTiming() {
  // Lock-free (no-barrier) store of the global timing flag; presumably read
  // via IsProfilerTimingEnabled() as in Now() -- reader not visible here.
  base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
}
759 | 737 |
// static
// Returns the current time for tracking purposes.  Prefers an alternate
// embedder-supplied clock when one is installed; otherwise uses the real
// clock only when profiler timing and tracking are both enabled.
TrackedTime ThreadData::Now() {
  if (kAllowAlternateTimeSourceHandling && now_function_)
    return TrackedTime::FromMilliseconds((*now_function_)());
  if (IsProfilerTimingEnabled() && TrackingStatus())
    return TrackedTime::Now();
  // Stale "or not compiled" wording removed along with the compile-out flag.
  return TrackedTime();  // Super fast when disabled.
}
768 | 746 |
769 // static | 747 // static |
770 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { | 748 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) { |
771 base::AutoLock lock(*list_lock_.Pointer()); | 749 base::AutoLock lock(*list_lock_.Pointer()); |
772 if (worker_thread_data_creation_count_ == 0) | 750 if (worker_thread_data_creation_count_ == 0) |
773 return; // We haven't really run much, and couldn't have leaked. | 751 return; // We haven't really run much, and couldn't have leaked. |
774 | 752 |
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
977 : process_id(base::GetCurrentProcId()) { | 955 : process_id(base::GetCurrentProcId()) { |
978 #else | 956 #else |
979 : process_id(base::kNullProcessId) { | 957 : process_id(base::kNullProcessId) { |
980 #endif | 958 #endif |
981 } | 959 } |
982 | 960 |
// Out-of-line destructor; members clean themselves up (no manual teardown).
ProcessDataSnapshot::~ProcessDataSnapshot() {
}
985 | 963 |
986 } // namespace tracked_objects | 964 } // namespace tracked_objects |
OLD | NEW |