OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/threading/sequenced_worker_pool.h" | 5 #include "base/threading/sequenced_worker_pool.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <list> | 9 #include <list> |
10 #include <map> | 10 #include <map> |
11 #include <memory> | 11 #include <memory> |
12 #include <set> | 12 #include <set> |
13 #include <unordered_map> | 13 #include <unordered_map> |
14 #include <utility> | 14 #include <utility> |
15 #include <vector> | 15 #include <vector> |
16 | 16 |
17 #include "base/atomic_sequence_num.h" | 17 #include "base/atomic_sequence_num.h" |
| 18 #include "base/atomicops.h" |
18 #include "base/callback.h" | 19 #include "base/callback.h" |
19 #include "base/compiler_specific.h" | 20 #include "base/compiler_specific.h" |
20 #include "base/critical_closure.h" | 21 #include "base/critical_closure.h" |
21 #include "base/debug/dump_without_crashing.h" | |
22 #include "base/lazy_instance.h" | 22 #include "base/lazy_instance.h" |
23 #include "base/logging.h" | 23 #include "base/logging.h" |
24 #include "base/macros.h" | 24 #include "base/macros.h" |
25 #include "base/memory/ptr_util.h" | 25 #include "base/memory/ptr_util.h" |
26 #include "base/stl_util.h" | 26 #include "base/stl_util.h" |
27 #include "base/strings/stringprintf.h" | 27 #include "base/strings/stringprintf.h" |
28 #include "base/synchronization/condition_variable.h" | 28 #include "base/synchronization/condition_variable.h" |
29 #include "base/synchronization/lock.h" | 29 #include "base/synchronization/lock.h" |
30 #include "base/task_scheduler/post_task.h" | 30 #include "base/task_scheduler/post_task.h" |
31 #include "base/task_scheduler/task_scheduler.h" | 31 #include "base/task_scheduler/task_scheduler.h" |
(...skipping 15 matching lines...) Expand all Loading... |
47 #endif | 47 #endif |
48 | 48 |
49 #if !defined(OS_NACL) | 49 #if !defined(OS_NACL) |
50 #include "base/metrics/histogram_macros.h" | 50 #include "base/metrics/histogram_macros.h" |
51 #endif | 51 #endif |
52 | 52 |
53 namespace base { | 53 namespace base { |
54 | 54 |
55 namespace { | 55 namespace { |
56 | 56 |
57 // An enum representing the state of all pools. A non-test process should only | 57 // An enum representing the state of all pools. Any given non-test process |
58 // ever transition from POST_TASK_DISABLED to one of the active states. A test | 58 // should only ever transition from NONE_ACTIVE to one of the active states. |
59 // process may transition from one of the active states to POST_TASK_DISABLED | 59 // Transitions between actives states are unexpected. The |
60 // when DisableForProcessForTesting() is called. | 60 // REDIRECTED_TO_TASK_SCHEDULER transition occurs when |
| 61 // RedirectToTaskSchedulerForProcess() is called. The WORKER_CREATED transition |
| 62 // occurs when a Worker needs to be created because the first task was posted |
| 63 // and the state is still NONE_ACTIVE. In a test process, a transition to |
| 64 // NONE_ACTIVE occurs when ResetRedirectToTaskSchedulerForProcessForTesting() is |
| 65 // called. |
61 // | 66 // |
62 // External memory synchronization is required to call a method that reads | 67 // |g_all_pools_state| uses relaxed atomic operations to ensure no data race |
63 // |g_all_pools_state| after calling a method that modifies it. | 68 // between reads/writes, strict memory ordering isn't required per no other |
| 69 // state being inferred from its value. Explicit synchronization (e.g. locks or |
| 70 // events) would be overkill (it's fine for other threads to still see |
| 71 // NONE_ACTIVE after the first Worker was created -- this is not possible for |
| 72 // REDIRECTED_TO_TASK_SCHEDULER per its API requesting to be invoked while no |
| 73 // other threads are active). |
64 // | 74 // |
65 // TODO(gab): Remove this if http://crbug.com/622400 fails (SequencedWorkerPool | 75 // TODO(gab): Remove this if http://crbug.com/622400 fails (SequencedWorkerPool |
66 // will be phased out completely otherwise). | 76 // will be phased out completely otherwise). |
67 enum class AllPoolsState { | 77 enum AllPoolsState : subtle::Atomic32 { |
68 POST_TASK_DISABLED, | 78 NONE_ACTIVE, |
69 USE_WORKER_POOL, | 79 WORKER_CREATED, |
70 REDIRECTED_TO_TASK_SCHEDULER, | 80 REDIRECTED_TO_TASK_SCHEDULER, |
71 }; | 81 }; |
72 AllPoolsState g_all_pools_state = AllPoolsState::POST_TASK_DISABLED; | 82 subtle::Atomic32 g_all_pools_state = AllPoolsState::NONE_ACTIVE; |
73 | 83 |
74 struct SequencedTask : public TrackingInfo { | 84 struct SequencedTask : public TrackingInfo { |
75 SequencedTask() | 85 SequencedTask() |
76 : sequence_token_id(0), | 86 : sequence_token_id(0), |
77 trace_id(0), | 87 trace_id(0), |
78 sequence_task_number(0), | 88 sequence_task_number(0), |
79 shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {} | 89 shutdown_behavior(SequencedWorkerPool::BLOCK_SHUTDOWN) {} |
80 | 90 |
81 explicit SequencedTask(const tracked_objects::Location& from_here) | 91 explicit SequencedTask(const tracked_objects::Location& from_here) |
82 : base::TrackingInfo(from_here, TimeTicks()), | 92 : base::TrackingInfo(from_here, TimeTicks()), |
(...skipping 482 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
565 // Worker definitions --------------------------------------------------------- | 575 // Worker definitions --------------------------------------------------------- |
566 | 576 |
567 SequencedWorkerPool::Worker::Worker( | 577 SequencedWorkerPool::Worker::Worker( |
568 scoped_refptr<SequencedWorkerPool> worker_pool, | 578 scoped_refptr<SequencedWorkerPool> worker_pool, |
569 int thread_number, | 579 int thread_number, |
570 const std::string& prefix) | 580 const std::string& prefix) |
571 : SimpleThread(prefix + StringPrintf("Worker%d", thread_number)), | 581 : SimpleThread(prefix + StringPrintf("Worker%d", thread_number)), |
572 worker_pool_(std::move(worker_pool)), | 582 worker_pool_(std::move(worker_pool)), |
573 task_shutdown_behavior_(BLOCK_SHUTDOWN), | 583 task_shutdown_behavior_(BLOCK_SHUTDOWN), |
574 is_processing_task_(false) { | 584 is_processing_task_(false) { |
575 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 585 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 586 subtle::NoBarrier_Load(&g_all_pools_state)); |
576 Start(); | 587 Start(); |
577 } | 588 } |
578 | 589 |
579 SequencedWorkerPool::Worker::~Worker() { | 590 SequencedWorkerPool::Worker::~Worker() { |
580 } | 591 } |
581 | 592 |
582 void SequencedWorkerPool::Worker::Run() { | 593 void SequencedWorkerPool::Worker::Run() { |
583 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 594 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 595 subtle::NoBarrier_Load(&g_all_pools_state)); |
584 | 596 |
585 #if defined(OS_WIN) | 597 #if defined(OS_WIN) |
586 win::ScopedCOMInitializer com_initializer; | 598 win::ScopedCOMInitializer com_initializer; |
587 #endif | 599 #endif |
588 | 600 |
589 // Store a pointer to this worker in thread local storage for static function | 601 // Store a pointer to this worker in thread local storage for static function |
590 // access. | 602 // access. |
591 DCHECK(!lazy_tls_ptr_.Get().Get()); | 603 DCHECK(!lazy_tls_ptr_.Get().Get()); |
592 lazy_tls_ptr_.Get().Set(this); | 604 lazy_tls_ptr_.Get().Set(this); |
593 | 605 |
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
672 return SequenceToken(LockedGetNamedTokenID(name)); | 684 return SequenceToken(LockedGetNamedTokenID(name)); |
673 } | 685 } |
674 | 686 |
675 bool SequencedWorkerPool::Inner::PostTask( | 687 bool SequencedWorkerPool::Inner::PostTask( |
676 const std::string* optional_token_name, | 688 const std::string* optional_token_name, |
677 SequenceToken sequence_token, | 689 SequenceToken sequence_token, |
678 WorkerShutdown shutdown_behavior, | 690 WorkerShutdown shutdown_behavior, |
679 const tracked_objects::Location& from_here, | 691 const tracked_objects::Location& from_here, |
680 const Closure& task, | 692 const Closure& task, |
681 TimeDelta delay) { | 693 TimeDelta delay) { |
682 // TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a | |
683 // revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the | |
684 // waterfall. https://crbug.com/622400 | |
685 // DCHECK_NE(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state); | |
686 | |
687 if (g_all_pools_state == AllPoolsState::POST_TASK_DISABLED) | |
688 debug::DumpWithoutCrashing(); | |
689 | |
690 DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN); | 694 DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN); |
691 SequencedTask sequenced(from_here); | 695 SequencedTask sequenced(from_here); |
692 sequenced.sequence_token_id = sequence_token.id_; | 696 sequenced.sequence_token_id = sequence_token.id_; |
693 sequenced.shutdown_behavior = shutdown_behavior; | 697 sequenced.shutdown_behavior = shutdown_behavior; |
694 sequenced.posted_from = from_here; | 698 sequenced.posted_from = from_here; |
695 sequenced.task = | 699 sequenced.task = |
696 shutdown_behavior == BLOCK_SHUTDOWN ? | 700 shutdown_behavior == BLOCK_SHUTDOWN ? |
697 base::MakeCriticalClosure(task) : task; | 701 base::MakeCriticalClosure(task) : task; |
698 sequenced.time_to_run = TimeTicks::Now() + delay; | 702 sequenced.time_to_run = TimeTicks::Now() + delay; |
699 | 703 |
(...skipping 29 matching lines...) Expand all Loading... |
729 "SequencedWorkerPool::Inner::PostTask", | 733 "SequencedWorkerPool::Inner::PostTask", |
730 TRACE_ID_MANGLE(GetTaskTraceID(sequenced, static_cast<void*>(this))), | 734 TRACE_ID_MANGLE(GetTaskTraceID(sequenced, static_cast<void*>(this))), |
731 TRACE_EVENT_FLAG_FLOW_OUT); | 735 TRACE_EVENT_FLAG_FLOW_OUT); |
732 | 736 |
733 sequenced.sequence_task_number = LockedGetNextSequenceTaskNumber(); | 737 sequenced.sequence_task_number = LockedGetNextSequenceTaskNumber(); |
734 | 738 |
735 // Now that we have the lock, apply the named token rules. | 739 // Now that we have the lock, apply the named token rules. |
736 if (optional_token_name) | 740 if (optional_token_name) |
737 sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name); | 741 sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name); |
738 | 742 |
739 if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | 743 if (subtle::NoBarrier_Load(&g_all_pools_state) == |
| 744 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
740 if (!PostTaskToTaskScheduler(sequenced, delay)) | 745 if (!PostTaskToTaskScheduler(sequenced, delay)) |
741 return false; | 746 return false; |
742 } else { | 747 } else { |
743 pending_tasks_.insert(sequenced); | 748 pending_tasks_.insert(sequenced); |
744 | 749 |
745 if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN) | 750 if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN) |
746 blocking_shutdown_pending_task_count_++; | 751 blocking_shutdown_pending_task_count_++; |
747 | 752 |
748 create_thread_id = PrepareToStartAdditionalThreadIfHelpful(); | 753 create_thread_id = PrepareToStartAdditionalThreadIfHelpful(); |
749 } | 754 } |
750 } | 755 } |
751 | 756 |
752 // Use != REDIRECTED_TO_TASK_SCHEDULER instead of == USE_WORKER_POOL to ensure | 757 if (subtle::NoBarrier_Load(&g_all_pools_state) != |
753 // correct behavior if a task is posted to a SequencedWorkerPool before | 758 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
754 // Enable(WithRedirectionToTaskScheduler)ForProcess() in a non-DCHECK build. | |
755 if (g_all_pools_state != AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | |
756 // Actually start the additional thread or signal an existing one outside | 759 // Actually start the additional thread or signal an existing one outside |
757 // the lock. | 760 // the lock. |
758 if (create_thread_id) | 761 if (create_thread_id) |
759 FinishStartingAdditionalThread(create_thread_id); | 762 FinishStartingAdditionalThread(create_thread_id); |
760 else | 763 else |
761 SignalHasWork(); | 764 SignalHasWork(); |
762 } | 765 } |
763 | 766 |
764 #if DCHECK_IS_ON() | 767 #if DCHECK_IS_ON() |
765 { | 768 { |
766 AutoLock lock_for_dcheck(lock_); | 769 AutoLock lock_for_dcheck(lock_); |
767 // Some variables are exposed in both modes for convenience but only really | 770 // Some variables are exposed in both modes for convenience but only really |
768 // intended for one of them at runtime, confirm exclusive usage here. | 771 // intended for one of them at runtime, confirm exclusive usage here. |
769 if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | 772 if (subtle::NoBarrier_Load(&g_all_pools_state) == |
| 773 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
770 DCHECK(pending_tasks_.empty()); | 774 DCHECK(pending_tasks_.empty()); |
771 DCHECK_EQ(0, create_thread_id); | 775 DCHECK_EQ(0, create_thread_id); |
772 } else { | 776 } else { |
773 DCHECK(sequenced_task_runner_map_.empty()); | 777 DCHECK(sequenced_task_runner_map_.empty()); |
774 } | 778 } |
775 } | 779 } |
776 #endif // DCHECK_IS_ON() | 780 #endif // DCHECK_IS_ON() |
777 | 781 |
778 return true; | 782 return true; |
779 } | 783 } |
780 | 784 |
781 bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler( | 785 bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler( |
782 const SequencedTask& sequenced, | 786 const SequencedTask& sequenced, |
783 const TimeDelta& delay) { | 787 const TimeDelta& delay) { |
784 DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state); | 788 DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 789 subtle::NoBarrier_Load(&g_all_pools_state)); |
785 | 790 |
786 lock_.AssertAcquired(); | 791 lock_.AssertAcquired(); |
787 | 792 |
788 // Confirm that the TaskScheduler's shutdown behaviors use the same | 793 // Confirm that the TaskScheduler's shutdown behaviors use the same |
789 // underlying values as SequencedWorkerPool. | 794 // underlying values as SequencedWorkerPool. |
790 static_assert( | 795 static_assert( |
791 static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == | 796 static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == |
792 static_cast<int>(CONTINUE_ON_SHUTDOWN), | 797 static_cast<int>(CONTINUE_ON_SHUTDOWN), |
793 "TaskShutdownBehavior and WorkerShutdown enum mismatch for " | 798 "TaskShutdownBehavior and WorkerShutdown enum mismatch for " |
794 "CONTINUE_ON_SHUTDOWN."); | 799 "CONTINUE_ON_SHUTDOWN."); |
(...skipping 13 matching lines...) Expand all Loading... |
808 .WithPriority(task_priority_) | 813 .WithPriority(task_priority_) |
809 .WithShutdownBehavior(task_shutdown_behavior); | 814 .WithShutdownBehavior(task_shutdown_behavior); |
810 return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits) | 815 return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits) |
811 ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay); | 816 ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay); |
812 } | 817 } |
813 | 818 |
814 scoped_refptr<TaskRunner> | 819 scoped_refptr<TaskRunner> |
815 SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner( | 820 SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner( |
816 int sequence_token_id, | 821 int sequence_token_id, |
817 const TaskTraits& traits) { | 822 const TaskTraits& traits) { |
818 DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state); | 823 DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 824 subtle::NoBarrier_Load(&g_all_pools_state)); |
819 | 825 |
820 lock_.AssertAcquired(); | 826 lock_.AssertAcquired(); |
821 | 827 |
822 static_assert( | 828 static_assert( |
823 static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == 0, | 829 static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == 0, |
824 "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN must be equal to 0 to be " | 830 "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN must be equal to 0 to be " |
825 "used as an index in |unsequenced_task_runners_|."); | 831 "used as an index in |unsequenced_task_runners_|."); |
826 static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) == 1, | 832 static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) == 1, |
827 "TaskShutdownBehavior::SKIP_ON_SHUTDOWN must be equal to 1 to " | 833 "TaskShutdownBehavior::SKIP_ON_SHUTDOWN must be equal to 1 to " |
828 "be used as an index in |unsequenced_task_runners_|."); | 834 "be used as an index in |unsequenced_task_runners_|."); |
(...skipping 16 matching lines...) Expand all Loading... |
845 task_runner = sequence_token_id | 851 task_runner = sequence_token_id |
846 ? CreateSequencedTaskRunnerWithTraits(traits) | 852 ? CreateSequencedTaskRunnerWithTraits(traits) |
847 : CreateTaskRunnerWithTraits(traits); | 853 : CreateTaskRunnerWithTraits(traits); |
848 } | 854 } |
849 | 855 |
850 return task_runner; | 856 return task_runner; |
851 } | 857 } |
852 | 858 |
853 bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const { | 859 bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const { |
854 AutoLock lock(lock_); | 860 AutoLock lock(lock_); |
855 if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | 861 if (subtle::NoBarrier_Load(&g_all_pools_state) == |
| 862 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
856 if (!runs_tasks_on_verifier_) { | 863 if (!runs_tasks_on_verifier_) { |
857 runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits( | 864 runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits( |
858 TaskTraits().WithFileIO().WithPriority(task_priority_)); | 865 TaskTraits().WithFileIO().WithPriority(task_priority_)); |
859 } | 866 } |
860 return runs_tasks_on_verifier_->RunsTasksOnCurrentThread(); | 867 return runs_tasks_on_verifier_->RunsTasksOnCurrentThread(); |
861 } else { | 868 } else { |
862 return ContainsKey(threads_, PlatformThread::CurrentId()); | 869 return ContainsKey(threads_, PlatformThread::CurrentId()); |
863 } | 870 } |
864 } | 871 } |
865 | 872 |
866 bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread( | 873 bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread( |
867 SequenceToken sequence_token) const { | 874 SequenceToken sequence_token) const { |
868 DCHECK(sequence_token.IsValid()); | 875 DCHECK(sequence_token.IsValid()); |
869 | 876 |
870 AutoLock lock(lock_); | 877 AutoLock lock(lock_); |
871 | 878 |
872 if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | 879 if (subtle::NoBarrier_Load(&g_all_pools_state) == |
| 880 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
873 const auto sequenced_task_runner_it = | 881 const auto sequenced_task_runner_it = |
874 sequenced_task_runner_map_.find(sequence_token.id_); | 882 sequenced_task_runner_map_.find(sequence_token.id_); |
875 return sequenced_task_runner_it != sequenced_task_runner_map_.end() && | 883 return sequenced_task_runner_it != sequenced_task_runner_map_.end() && |
876 sequenced_task_runner_it->second->RunsTasksOnCurrentThread(); | 884 sequenced_task_runner_it->second->RunsTasksOnCurrentThread(); |
877 } else { | 885 } else { |
878 ThreadMap::const_iterator found = | 886 ThreadMap::const_iterator found = |
879 threads_.find(PlatformThread::CurrentId()); | 887 threads_.find(PlatformThread::CurrentId()); |
880 return found != threads_.end() && found->second->is_processing_task() && | 888 return found != threads_.end() && found->second->is_processing_task() && |
881 sequence_token.Equals(found->second->task_sequence_token()); | 889 sequence_token.Equals(found->second->task_sequence_token()); |
882 } | 890 } |
883 } | 891 } |
884 | 892 |
885 // See https://code.google.com/p/chromium/issues/detail?id=168415 | 893 // See https://code.google.com/p/chromium/issues/detail?id=168415 |
886 void SequencedWorkerPool::Inner::CleanupForTesting() { | 894 void SequencedWorkerPool::Inner::CleanupForTesting() { |
887 DCHECK_NE(g_all_pools_state, AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER); | 895 DCHECK_NE(subtle::NoBarrier_Load(&g_all_pools_state), |
| 896 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER); |
888 AutoLock lock(lock_); | 897 AutoLock lock(lock_); |
889 CHECK_EQ(CLEANUP_DONE, cleanup_state_); | 898 CHECK_EQ(CLEANUP_DONE, cleanup_state_); |
890 if (shutdown_called_) | 899 if (shutdown_called_) |
891 return; | 900 return; |
892 if (pending_tasks_.empty() && waiting_thread_count_ == threads_.size()) | 901 if (pending_tasks_.empty() && waiting_thread_count_ == threads_.size()) |
893 return; | 902 return; |
894 cleanup_state_ = CLEANUP_REQUESTED; | 903 cleanup_state_ = CLEANUP_REQUESTED; |
895 cleanup_idlers_ = 0; | 904 cleanup_idlers_ = 0; |
896 has_work_cv_.Signal(); | 905 has_work_cv_.Signal(); |
897 while (cleanup_state_ != CLEANUP_DONE) | 906 while (cleanup_state_ != CLEANUP_DONE) |
(...skipping 10 matching lines...) Expand all Loading... |
908 { | 917 { |
909 AutoLock lock(lock_); | 918 AutoLock lock(lock_); |
910 // Cleanup and Shutdown should not be called concurrently. | 919 // Cleanup and Shutdown should not be called concurrently. |
911 CHECK_EQ(CLEANUP_DONE, cleanup_state_); | 920 CHECK_EQ(CLEANUP_DONE, cleanup_state_); |
912 if (shutdown_called_) | 921 if (shutdown_called_) |
913 return; | 922 return; |
914 shutdown_called_ = true; | 923 shutdown_called_ = true; |
915 | 924 |
916 max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown; | 925 max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown; |
917 | 926 |
918 if (g_all_pools_state != AllPoolsState::USE_WORKER_POOL) | 927 if (subtle::NoBarrier_Load(&g_all_pools_state) != |
| 928 AllPoolsState::WORKER_CREATED) { |
919 return; | 929 return; |
| 930 } |
920 | 931 |
921 // Tickle the threads. This will wake up a waiting one so it will know that | 932 // Tickle the threads. This will wake up a waiting one so it will know that |
922 // it can exit, which in turn will wake up any other waiting ones. | 933 // it can exit, which in turn will wake up any other waiting ones. |
923 SignalHasWork(); | 934 SignalHasWork(); |
924 | 935 |
925 // There are no pending or running tasks blocking shutdown, we're done. | 936 // There are no pending or running tasks blocking shutdown, we're done. |
926 if (CanShutdown()) | 937 if (CanShutdown()) |
927 return; | 938 return; |
928 } | 939 } |
929 | 940 |
(...skipping 18 matching lines...) Expand all Loading... |
948 TimeTicks::Now() - shutdown_wait_begin); | 959 TimeTicks::Now() - shutdown_wait_begin); |
949 #endif | 960 #endif |
950 } | 961 } |
951 | 962 |
952 bool SequencedWorkerPool::Inner::IsShutdownInProgress() { | 963 bool SequencedWorkerPool::Inner::IsShutdownInProgress() { |
953 AutoLock lock(lock_); | 964 AutoLock lock(lock_); |
954 return shutdown_called_; | 965 return shutdown_called_; |
955 } | 966 } |
956 | 967 |
957 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) { | 968 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) { |
958 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 969 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 970 subtle::NoBarrier_Load(&g_all_pools_state)); |
959 { | 971 { |
960 AutoLock lock(lock_); | 972 AutoLock lock(lock_); |
961 DCHECK(thread_being_created_); | 973 DCHECK(thread_being_created_); |
962 thread_being_created_ = false; | 974 thread_being_created_ = false; |
963 auto result = threads_.insert( | 975 auto result = threads_.insert( |
964 std::make_pair(this_worker->tid(), WrapUnique(this_worker))); | 976 std::make_pair(this_worker->tid(), WrapUnique(this_worker))); |
965 DCHECK(result.second); | 977 DCHECK(result.second); |
966 | 978 |
967 while (true) { | 979 while (true) { |
968 #if defined(OS_MACOSX) | 980 #if defined(OS_MACOSX) |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1080 | 1092 |
1081 // We noticed we should exit. Wake up the next worker so it knows it should | 1093 // We noticed we should exit. Wake up the next worker so it knows it should |
1082 // exit as well (because the Shutdown() code only signals once). | 1094 // exit as well (because the Shutdown() code only signals once). |
1083 SignalHasWork(); | 1095 SignalHasWork(); |
1084 | 1096 |
1085 // Possibly unblock shutdown. | 1097 // Possibly unblock shutdown. |
1086 can_shutdown_cv_.Signal(); | 1098 can_shutdown_cv_.Signal(); |
1087 } | 1099 } |
1088 | 1100 |
1089 void SequencedWorkerPool::Inner::HandleCleanup() { | 1101 void SequencedWorkerPool::Inner::HandleCleanup() { |
1090 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1102 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 1103 subtle::NoBarrier_Load(&g_all_pools_state)); |
1091 | 1104 |
1092 lock_.AssertAcquired(); | 1105 lock_.AssertAcquired(); |
1093 if (cleanup_state_ == CLEANUP_DONE) | 1106 if (cleanup_state_ == CLEANUP_DONE) |
1094 return; | 1107 return; |
1095 if (cleanup_state_ == CLEANUP_REQUESTED) { | 1108 if (cleanup_state_ == CLEANUP_REQUESTED) { |
1096 // We win, we get to do the cleanup as soon as the others wise up and idle. | 1109 // We win, we get to do the cleanup as soon as the others wise up and idle. |
1097 cleanup_state_ = CLEANUP_STARTING; | 1110 cleanup_state_ = CLEANUP_STARTING; |
1098 while (thread_being_created_ || | 1111 while (thread_being_created_ || |
1099 cleanup_idlers_ != threads_.size() - 1) { | 1112 cleanup_idlers_ != threads_.size() - 1) { |
1100 has_work_cv_.Signal(); | 1113 has_work_cv_.Signal(); |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1147 int64_t SequencedWorkerPool::Inner::LockedGetNextSequenceTaskNumber() { | 1160 int64_t SequencedWorkerPool::Inner::LockedGetNextSequenceTaskNumber() { |
1148 lock_.AssertAcquired(); | 1161 lock_.AssertAcquired(); |
1149 // We assume that we never create enough tasks to wrap around. | 1162 // We assume that we never create enough tasks to wrap around. |
1150 return next_sequence_task_number_++; | 1163 return next_sequence_task_number_++; |
1151 } | 1164 } |
1152 | 1165 |
1153 SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork( | 1166 SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork( |
1154 SequencedTask* task, | 1167 SequencedTask* task, |
1155 TimeDelta* wait_time, | 1168 TimeDelta* wait_time, |
1156 std::vector<Closure>* delete_these_outside_lock) { | 1169 std::vector<Closure>* delete_these_outside_lock) { |
1157 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1170 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 1171 subtle::NoBarrier_Load(&g_all_pools_state)); |
1158 | 1172 |
1159 lock_.AssertAcquired(); | 1173 lock_.AssertAcquired(); |
1160 | 1174 |
1161 // Find the next task with a sequence token that's not currently in use. | 1175 // Find the next task with a sequence token that's not currently in use. |
1162 // If the token is in use, that means another thread is running something | 1176 // If the token is in use, that means another thread is running something |
1163 // in that sequence, and we can't run it without going out-of-order. | 1177 // in that sequence, and we can't run it without going out-of-order. |
1164 // | 1178 // |
1165 // This algorithm is simple and fair, but inefficient in some cases. For | 1179 // This algorithm is simple and fair, but inefficient in some cases. For |
1166 // example, say somebody schedules 1000 slow tasks with the same sequence | 1180 // example, say somebody schedules 1000 slow tasks with the same sequence |
1167 // number. We'll have to go through all those tasks each time we feel like | 1181 // number. We'll have to go through all those tasks each time we feel like |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1235 } | 1249 } |
1236 | 1250 |
1237 status = GET_WORK_FOUND; | 1251 status = GET_WORK_FOUND; |
1238 break; | 1252 break; |
1239 } | 1253 } |
1240 | 1254 |
1241 return status; | 1255 return status; |
1242 } | 1256 } |
1243 | 1257 |
1244 int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) { | 1258 int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) { |
1245 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1259 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 1260 subtle::NoBarrier_Load(&g_all_pools_state)); |
1246 | 1261 |
1247 lock_.AssertAcquired(); | 1262 lock_.AssertAcquired(); |
1248 | 1263 |
1249 // Mark the task's sequence number as in use. | 1264 // Mark the task's sequence number as in use. |
1250 if (task.sequence_token_id) | 1265 if (task.sequence_token_id) |
1251 current_sequences_.insert(task.sequence_token_id); | 1266 current_sequences_.insert(task.sequence_token_id); |
1252 | 1267 |
1253 // Ensure that threads running tasks posted with either SKIP_ON_SHUTDOWN | 1268 // Ensure that threads running tasks posted with either SKIP_ON_SHUTDOWN |
1254 // or BLOCK_SHUTDOWN will prevent shutdown until that task or thread | 1269 // or BLOCK_SHUTDOWN will prevent shutdown until that task or thread |
1255 // completes. | 1270 // completes. |
(...skipping 12 matching lines...) Expand all Loading... |
1268 // if there is one waiting to pick up the next task. | 1283 // if there is one waiting to pick up the next task. |
1269 // | 1284 // |
1270 // Note that we really need to do this *before* running the task, not | 1285 // Note that we really need to do this *before* running the task, not |
1271 // after. Otherwise, if more than one task is posted, the creation of the | 1286 // after. Otherwise, if more than one task is posted, the creation of the |
1272 // second thread (since we only create one at a time) will be blocked by | 1287 // second thread (since we only create one at a time) will be blocked by |
1273 // the execution of the first task, which could be arbitrarily long. | 1288 // the execution of the first task, which could be arbitrarily long. |
1274 return PrepareToStartAdditionalThreadIfHelpful(); | 1289 return PrepareToStartAdditionalThreadIfHelpful(); |
1275 } | 1290 } |
1276 | 1291 |
1277 void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) { | 1292 void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) { |
1278 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1293 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 1294 subtle::NoBarrier_Load(&g_all_pools_state)); |
1279 | 1295 |
1280 lock_.AssertAcquired(); | 1296 lock_.AssertAcquired(); |
1281 | 1297 |
1282 if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN) { | 1298 if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN) { |
1283 DCHECK_GT(blocking_shutdown_thread_count_, 0u); | 1299 DCHECK_GT(blocking_shutdown_thread_count_, 0u); |
1284 blocking_shutdown_thread_count_--; | 1300 blocking_shutdown_thread_count_--; |
1285 } | 1301 } |
1286 | 1302 |
1287 if (task.sequence_token_id) | 1303 if (task.sequence_token_id) |
1288 current_sequences_.erase(task.sequence_token_id); | 1304 current_sequences_.erase(task.sequence_token_id); |
1289 } | 1305 } |
1290 | 1306 |
1291 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( | 1307 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( |
1292 int sequence_token_id) const { | 1308 int sequence_token_id) const { |
1293 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state); | 1309 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 1310 subtle::NoBarrier_Load(&g_all_pools_state)); |
1294 | 1311 |
1295 lock_.AssertAcquired(); | 1312 lock_.AssertAcquired(); |
1296 return !sequence_token_id || | 1313 return !sequence_token_id || |
1297 current_sequences_.find(sequence_token_id) == | 1314 current_sequences_.find(sequence_token_id) == |
1298 current_sequences_.end(); | 1315 current_sequences_.end(); |
1299 } | 1316 } |
1300 | 1317 |
1301 int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() { | 1318 int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() { |
1302 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state); | 1319 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 1320 subtle::NoBarrier_Load(&g_all_pools_state)); |
1303 | 1321 |
1304 lock_.AssertAcquired(); | 1322 lock_.AssertAcquired(); |
1305 // How thread creation works: | 1323 // How thread creation works: |
1306 // | 1324 // |
1307 // We'd like to avoid creating threads with the lock held. However, we | 1325 // We'd like to avoid creating threads with the lock held. However, we |
1308 // need to be sure that we have an accurate accounting of the threads for | 1326 // need to be sure that we have an accurate accounting of the threads for |
1309 // proper Joining and deletion on shutdown. | 1327 // proper Joining and deletion on shutdown. |
1310 // | 1328 // |
1311 // We need to figure out if we need another thread with the lock held, which | 1329 // We need to figure out if we need another thread with the lock held, which |
1312 // is what this function does. It then marks us as in the process of creating | 1330 // is what this function does. It then marks us as in the process of creating |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1344 thread_being_created_ = true; | 1362 thread_being_created_ = true; |
1345 return static_cast<int>(threads_.size() + 1); | 1363 return static_cast<int>(threads_.size() + 1); |
1346 } | 1364 } |
1347 } | 1365 } |
1348 } | 1366 } |
1349 return 0; | 1367 return 0; |
1350 } | 1368 } |
1351 | 1369 |
1352 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread( | 1370 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread( |
1353 int thread_number) { | 1371 int thread_number) { |
1354 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1372 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 1373 subtle::NoBarrier_Load(&g_all_pools_state)); |
1355 | 1374 |
1356 // Called outside of the lock. | 1375 // Called outside of the lock. |
1357 DCHECK_GT(thread_number, 0); | 1376 DCHECK_GT(thread_number, 0); |
1358 | 1377 |
| 1378 if (subtle::NoBarrier_Load(&g_all_pools_state) != |
| 1379 AllPoolsState::WORKER_CREATED) { |
| 1380 DCHECK_EQ(AllPoolsState::NONE_ACTIVE, |
| 1381 subtle::NoBarrier_Load(&g_all_pools_state)); |
| 1382 subtle::NoBarrier_Store(&g_all_pools_state, AllPoolsState::WORKER_CREATED); |
| 1383 } |
| 1384 |
1359 // The worker is assigned to the list when the thread actually starts, which | 1385 // The worker is assigned to the list when the thread actually starts, which |
1360 // will manage the memory of the pointer. | 1386 // will manage the memory of the pointer. |
1361 new Worker(worker_pool_, thread_number, thread_name_prefix_); | 1387 new Worker(worker_pool_, thread_number, thread_name_prefix_); |
1362 } | 1388 } |
1363 | 1389 |
1364 void SequencedWorkerPool::Inner::SignalHasWork() { | 1390 void SequencedWorkerPool::Inner::SignalHasWork() { |
1365 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state); | 1391 DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, |
| 1392 subtle::NoBarrier_Load(&g_all_pools_state)); |
1366 | 1393 |
1367 has_work_cv_.Signal(); | 1394 has_work_cv_.Signal(); |
1368 if (testing_observer_) { | 1395 if (testing_observer_) { |
1369 testing_observer_->OnHasWork(); | 1396 testing_observer_->OnHasWork(); |
1370 } | 1397 } |
1371 } | 1398 } |
1372 | 1399 |
1373 bool SequencedWorkerPool::Inner::CanShutdown() const { | 1400 bool SequencedWorkerPool::Inner::CanShutdown() const { |
1374 DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state); | 1401 DCHECK_EQ(AllPoolsState::WORKER_CREATED, |
| 1402 subtle::NoBarrier_Load(&g_all_pools_state)); |
1375 lock_.AssertAcquired(); | 1403 lock_.AssertAcquired(); |
1376 // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works. | 1404 // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works. |
1377 return !thread_being_created_ && | 1405 return !thread_being_created_ && |
1378 blocking_shutdown_thread_count_ == 0 && | 1406 blocking_shutdown_thread_count_ == 0 && |
1379 blocking_shutdown_pending_task_count_ == 0; | 1407 blocking_shutdown_pending_task_count_ == 0; |
1380 } | 1408 } |
1381 | 1409 |
1382 base::StaticAtomicSequenceNumber | 1410 base::StaticAtomicSequenceNumber |
1383 SequencedWorkerPool::Inner::g_last_sequence_number_; | 1411 SequencedWorkerPool::Inner::g_last_sequence_number_; |
1384 | 1412 |
(...skipping 17 matching lines...) Expand all Loading... |
1402 scoped_refptr<SequencedWorkerPool> | 1430 scoped_refptr<SequencedWorkerPool> |
1403 SequencedWorkerPool::GetWorkerPoolForCurrentThread() { | 1431 SequencedWorkerPool::GetWorkerPoolForCurrentThread() { |
1404 Worker* worker = Worker::GetForCurrentThread(); | 1432 Worker* worker = Worker::GetForCurrentThread(); |
1405 if (!worker) | 1433 if (!worker) |
1406 return nullptr; | 1434 return nullptr; |
1407 | 1435 |
1408 return worker->worker_pool(); | 1436 return worker->worker_pool(); |
1409 } | 1437 } |
1410 | 1438 |
1411 // static | 1439 // static |
1412 void SequencedWorkerPool::EnableForProcess() { | 1440 void SequencedWorkerPool::RedirectToTaskSchedulerForProcess() { |
1413 DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state); | 1441 DCHECK(TaskScheduler::GetInstance()); |
1414 g_all_pools_state = AllPoolsState::USE_WORKER_POOL; | 1442 // Hitting this DCHECK indicates that a task was posted to a |
| 1443 // SequencedWorkerPool before the TaskScheduler was initialized and |
| 1444 // redirected, posting tasks to SequencedWorkerPools needs to at least be |
| 1445 // delayed until after that point. |
| 1446 DCHECK_EQ(AllPoolsState::NONE_ACTIVE, |
| 1447 subtle::NoBarrier_Load(&g_all_pools_state)); |
| 1448 subtle::NoBarrier_Store(&g_all_pools_state, |
| 1449 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER); |
1415 } | 1450 } |
1416 | 1451 |
1417 // static | 1452 // static |
1418 void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess() { | 1453 void SequencedWorkerPool::ResetRedirectToTaskSchedulerForProcessForTesting() { |
1419 DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state); | 1454 // This can be called when the current state is REDIRECTED_TO_TASK_SCHEDULER |
1420 DCHECK(TaskScheduler::GetInstance()); | 1455 // to stop redirecting tasks. It can also be called when the current state is |
1421 g_all_pools_state = AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER; | 1456 // WORKER_CREATED to allow RedirectToTaskSchedulerForProcess() to be called |
1422 } | 1457 // (RedirectToTaskSchedulerForProcess() cannot be called after a worker has |
1423 | 1458 // been created if this isn't called). |
1424 // static | 1459 subtle::NoBarrier_Store(&g_all_pools_state, AllPoolsState::NONE_ACTIVE); |
1425 void SequencedWorkerPool::DisableForProcessForTesting() { | |
1426 g_all_pools_state = AllPoolsState::POST_TASK_DISABLED; | |
1427 } | |
1428 | |
1429 // static | |
1430 bool SequencedWorkerPool::IsEnabled() { | |
1431 return g_all_pools_state != AllPoolsState::POST_TASK_DISABLED; | |
1432 } | 1460 } |
1433 | 1461 |
1434 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads, | 1462 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads, |
1435 const std::string& thread_name_prefix, | 1463 const std::string& thread_name_prefix, |
1436 base::TaskPriority task_priority) | 1464 base::TaskPriority task_priority) |
1437 : constructor_task_runner_(SequencedTaskRunnerHandle::Get()), | 1465 : constructor_task_runner_(SequencedTaskRunnerHandle::Get()), |
1438 inner_(new Inner(this, | 1466 inner_(new Inner(this, |
1439 max_threads, | 1467 max_threads, |
1440 thread_name_prefix, | 1468 thread_name_prefix, |
1441 task_priority, | 1469 task_priority, |
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1560 return PostDelayedWorkerTask(from_here, task, delay); | 1588 return PostDelayedWorkerTask(from_here, task, delay); |
1561 } | 1589 } |
1562 | 1590 |
1563 bool SequencedWorkerPool::RunsTasksOnCurrentThread() const { | 1591 bool SequencedWorkerPool::RunsTasksOnCurrentThread() const { |
1564 return inner_->RunsTasksOnCurrentThread(); | 1592 return inner_->RunsTasksOnCurrentThread(); |
1565 } | 1593 } |
1566 | 1594 |
1567 void SequencedWorkerPool::FlushForTesting() { | 1595 void SequencedWorkerPool::FlushForTesting() { |
1568 DCHECK(!RunsTasksOnCurrentThread()); | 1596 DCHECK(!RunsTasksOnCurrentThread()); |
1569 base::ThreadRestrictions::ScopedAllowWait allow_wait; | 1597 base::ThreadRestrictions::ScopedAllowWait allow_wait; |
1570 if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { | 1598 if (subtle::NoBarrier_Load(&g_all_pools_state) == |
| 1599 AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) { |
1571 // TODO(gab): Remove this if http://crbug.com/622400 fails. | 1600 // TODO(gab): Remove this if http://crbug.com/622400 fails. |
1572 TaskScheduler::GetInstance()->FlushForTesting(); | 1601 TaskScheduler::GetInstance()->FlushForTesting(); |
1573 } else { | 1602 } else { |
1574 inner_->CleanupForTesting(); | 1603 inner_->CleanupForTesting(); |
1575 } | 1604 } |
1576 } | 1605 } |
1577 | 1606 |
1578 void SequencedWorkerPool::SignalHasWorkForTesting() { | 1607 void SequencedWorkerPool::SignalHasWorkForTesting() { |
1579 inner_->SignalHasWorkForTesting(); | 1608 inner_->SignalHasWorkForTesting(); |
1580 } | 1609 } |
1581 | 1610 |
1582 void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) { | 1611 void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) { |
1583 DCHECK(constructor_task_runner_->RunsTasksOnCurrentThread()); | 1612 DCHECK(constructor_task_runner_->RunsTasksOnCurrentThread()); |
1584 inner_->Shutdown(max_new_blocking_tasks_after_shutdown); | 1613 inner_->Shutdown(max_new_blocking_tasks_after_shutdown); |
1585 } | 1614 } |
1586 | 1615 |
1587 bool SequencedWorkerPool::IsShutdownInProgress() { | 1616 bool SequencedWorkerPool::IsShutdownInProgress() { |
1588 return inner_->IsShutdownInProgress(); | 1617 return inner_->IsShutdownInProgress(); |
1589 } | 1618 } |
1590 | 1619 |
1591 bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread( | 1620 bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread( |
1592 SequenceToken sequence_token) const { | 1621 SequenceToken sequence_token) const { |
1593 return inner_->IsRunningSequenceOnCurrentThread(sequence_token); | 1622 return inner_->IsRunningSequenceOnCurrentThread(sequence_token); |
1594 } | 1623 } |
1595 | 1624 |
1596 } // namespace base | 1625 } // namespace base |
OLD | NEW |