OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ | 5 #ifndef THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ |
6 #define THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ | 6 #define THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
| 11 #include "base/cancelable_callback.h" |
11 #include "base/debug/task_annotator.h" | 12 #include "base/debug/task_annotator.h" |
12 #include "base/macros.h" | 13 #include "base/macros.h" |
13 #include "base/memory/weak_ptr.h" | 14 #include "base/memory/weak_ptr.h" |
14 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
15 #include "base/pending_task.h" | 16 #include "base/pending_task.h" |
16 #include "base/synchronization/lock.h" | 17 #include "base/synchronization/lock.h" |
17 #include "base/threading/thread_checker.h" | 18 #include "base/threading/thread_checker.h" |
18 #include "platform/scheduler/base/enqueue_order.h" | 19 #include "platform/scheduler/base/enqueue_order.h" |
| 20 #include "platform/scheduler/base/moveable_auto_lock.h" |
19 #include "platform/scheduler/base/task_queue_impl.h" | 21 #include "platform/scheduler/base/task_queue_impl.h" |
20 #include "platform/scheduler/base/task_queue_selector.h" | 22 #include "platform/scheduler/base/task_queue_selector.h" |
21 | 23 |
22 namespace base { | 24 namespace base { |
23 namespace trace_event { | 25 namespace trace_event { |
24 class ConvertableToTraceFormat; | 26 class ConvertableToTraceFormat; |
25 } // namespace trace_event | 27 } // namespace trace_event |
26 } // namespace base | 28 } // namespace base |
27 | 29 |
28 namespace blink { | 30 namespace blink { |
(...skipping 123 matching lines...) |
152 friend class internal::TaskQueueImpl; | 154 friend class internal::TaskQueueImpl; |
153 friend class TaskQueueManagerTest; | 155 friend class TaskQueueManagerTest; |
154 | 156 |
155 class DeletionSentinel : public base::RefCounted<DeletionSentinel> { | 157 class DeletionSentinel : public base::RefCounted<DeletionSentinel> { |
156 private: | 158 private: |
157 friend class base::RefCounted<DeletionSentinel>; | 159 friend class base::RefCounted<DeletionSentinel>; |
158 ~DeletionSentinel() {} | 160 ~DeletionSentinel() {} |
159 }; | 161 }; |
160 | 162 |
161 // Unregisters a TaskQueue previously created by |NewTaskQueue()|. | 163 // Unregisters a TaskQueue previously created by |NewTaskQueue()|. |
162 // NOTE we have to flush the queue from |newly_updatable_| which means as a | |
163 // side effect MoveNewlyUpdatableQueuesIntoUpdatableQueueSet is called by this | |
164 // function. | |
165 void UnregisterTaskQueue(scoped_refptr<internal::TaskQueueImpl> task_queue); | 164 void UnregisterTaskQueue(scoped_refptr<internal::TaskQueueImpl> task_queue); |
166 | 165 |
167 // TaskQueueSelector::Observer implementation: | 166 // TaskQueueSelector::Observer implementation: |
168 void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override; | 167 void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override; |
169 void OnTriedToSelectBlockedWorkQueue( | 168 void OnTriedToSelectBlockedWorkQueue( |
170 internal::WorkQueue* work_queue) override; | 169 internal::WorkQueue* work_queue) override; |
171 | 170 |
172 // base::MessageLoop::NestingObserver implementation: | 171 // base::MessageLoop::NestingObserver implementation: |
173 void OnBeginNestedMessageLoop() override; | 172 void OnBeginNestedMessageLoop() override; |
174 | 173 |
175 // Called by the task queue to register a new pending task. | 174 // Called by the task queue to register a new pending task. |
176 void DidQueueTask(const internal::TaskQueueImpl::Task& pending_task); | 175 void DidQueueTask(const internal::TaskQueueImpl::Task& pending_task); |
177 | 176 |
178 // Use the selector to choose a pending task and run it. | 177 // Use the selector to choose a pending task and run it. |
179 void DoWork(base::TimeTicks run_time, bool from_main_thread); | 178 void DoWork(bool delayed); |
| 179 |
| 180 // Post a DoWork continuation if |next_delay| is not empty. |
| 181 void PostDoWorkContinuationLocked(base::Optional<base::TimeDelta> next_delay, |
| 182 LazyNow* lazy_now, |
| 183 MoveableAutoLock&& lock); |
180 | 184 |
181 // Delayed tasks with run_times <= Now() are enqueued onto the work queue, | 185 // Delayed tasks with run_times <= Now() are enqueued onto the work queue, |
182 // and any empty work queues are reloaded. | 186 // and any empty work queues are reloaded. |
183 void WakeupReadyDelayedQueues(LazyNow* lazy_now); | 187 void WakeupReadyDelayedQueues(LazyNow* lazy_now); |
184 | 188 |
185 // Chooses the next work queue to service. Returns true if |out_work_queue| | 189 // Chooses the next work queue to service. Returns true if |out_work_queue| |
186 // indicates the queue from which the next task should be run, false to | 190 // indicates the queue from which the next task should be run, false to |
187 // avoid running any tasks. | 191 // avoid running any tasks. |
188 bool SelectWorkQueueToService(internal::WorkQueue** out_work_queue); | 192 bool SelectWorkQueueToService(internal::WorkQueue** out_work_queue); |
189 | 193 |
(...skipping 15 matching lines...) |
205 | 209 |
206 bool RunsTasksOnCurrentThread() const; | 210 bool RunsTasksOnCurrentThread() const; |
207 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here, | 211 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here, |
208 const base::Closure& task, | 212 const base::Closure& task, |
209 base::TimeDelta delay); | 213 base::TimeDelta delay); |
210 | 214 |
211 internal::EnqueueOrder GetNextSequenceNumber(); | 215 internal::EnqueueOrder GetNextSequenceNumber(); |
212 | 216 |
213 // Calls DelayTillNextTask on all time domains and returns the smallest delay | 217 // Calls DelayTillNextTask on all time domains and returns the smallest delay |
214 // requested if any. | 218 // requested if any. |
215 base::Optional<base::TimeDelta> ComputeDelayTillNextTask(LazyNow* lazy_now); | 219 base::Optional<base::TimeDelta> ComputeDelayTillNextTaskLocked( |
| 220 LazyNow* lazy_now); |
216 | 221 |
217 void MaybeRecordTaskDelayHistograms( | 222 void MaybeRecordTaskDelayHistograms( |
218 const internal::TaskQueueImpl::Task& pending_task, | 223 const internal::TaskQueueImpl::Task& pending_task, |
219 const internal::TaskQueueImpl* queue); | 224 const internal::TaskQueueImpl* queue); |
220 | 225 |
221 std::unique_ptr<base::trace_event::ConvertableToTraceFormat> | 226 std::unique_ptr<base::trace_event::ConvertableToTraceFormat> |
222 AsValueWithSelectorResult(bool should_run, | 227 AsValueWithSelectorResult(bool should_run, |
223 internal::WorkQueue* selected_work_queue) const; | 228 internal::WorkQueue* selected_work_queue) const; |
224 | 229 |
| 230 void MaybeScheduleImmediateWorkLocked( |
| 231 const tracked_objects::Location& from_here, |
| 232 MoveableAutoLock&& lock); |
| 233 |
225 // Adds |queue| to |any_thread().has_incoming_immediate_work| and, if | 234 // Adds |queue| to |any_thread().has_incoming_immediate_work| and, if |
226 // |queue_is_blocked| is false, makes sure a DoWork is posted. | 235 // |queue_is_blocked| is false, makes sure a DoWork is posted. |
227 // Can be called from any thread. | 236 // Can be called from any thread. |
228 void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue, | 237 void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue, |
| 238 internal::EnqueueOrder enqueue_order, |
229 bool queue_is_blocked); | 239 bool queue_is_blocked); |
230 | 240 |
| 241 using IncomingImmediateWorkMap = |
| 242 std::unordered_map<internal::TaskQueueImpl*, internal::EnqueueOrder>; |
| 243 |
231 // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in | 244 // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in |
232 // |queues_to_reload|. | 245 // |queues_to_reload|. |
233 void ReloadEmptyWorkQueues(const std::unordered_set<internal::TaskQueueImpl*>& | 246 void ReloadEmptyWorkQueues( |
234 queues_to_reload) const; | 247 const IncomingImmediateWorkMap& queues_to_reload) const; |
235 | 248 |
236 std::set<TimeDomain*> time_domains_; | 249 std::set<TimeDomain*> time_domains_; |
237 std::unique_ptr<RealTimeDomain> real_time_domain_; | 250 std::unique_ptr<RealTimeDomain> real_time_domain_; |
238 | 251 |
239 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_; | 252 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_; |
240 | 253 |
241 // We have to be careful when deleting a queue because some of the code uses | 254 // We have to be careful when deleting a queue because some of the code uses |
242 // raw pointers and doesn't expect the rug to be pulled out from underneath. | 255 // raw pointers and doesn't expect the rug to be pulled out from underneath. |
243 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_to_delete_; | 256 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_to_delete_; |
244 | 257 |
245 internal::EnqueueOrderGenerator enqueue_order_generator_; | 258 internal::EnqueueOrderGenerator enqueue_order_generator_; |
246 base::debug::TaskAnnotator task_annotator_; | 259 base::debug::TaskAnnotator task_annotator_; |
247 | 260 |
248 base::ThreadChecker main_thread_checker_; | 261 base::ThreadChecker main_thread_checker_; |
249 scoped_refptr<TaskQueueManagerDelegate> delegate_; | 262 scoped_refptr<TaskQueueManagerDelegate> delegate_; |
250 internal::TaskQueueSelector selector_; | 263 internal::TaskQueueSelector selector_; |
251 | 264 |
252 base::Closure from_main_thread_immediate_do_work_closure_; | 265 base::Closure immediate_do_work_closure_; |
253 base::Closure from_other_thread_immediate_do_work_closure_; | 266 base::Closure delayed_do_work_closure_; |
| 267 base::CancelableClosure cancelable_delayed_do_work_closure_; |
254 | 268 |
255 bool task_was_run_on_quiescence_monitored_queue_; | 269 bool task_was_run_on_quiescence_monitored_queue_; |
256 | 270 |
257 // To reduce locking overhead we track pending calls to DoWork separately for | |
258 // the main thread and other threads. | |
259 std::set<base::TimeTicks> main_thread_pending_wakeups_; | |
260 | |
261 struct AnyThread { | 271 struct AnyThread { |
262 AnyThread(); | 272 AnyThread(); |
263 | 273 |
264 // Set of task queues with newly available work on the incoming queue. | 274 // Task queues with newly available work on the incoming queue. |
265 std::unordered_set<internal::TaskQueueImpl*> has_incoming_immediate_work; | 275 IncomingImmediateWorkMap has_incoming_immediate_work; |
266 | 276 |
267 bool other_thread_pending_wakeup; | 277 int do_work_running_count; |
| 278 int immediate_do_work_posted_count; |
| 279 bool is_nested; // Whether or not the message loop is currently nested. |
268 }; | 280 }; |
269 | 281 |
270 // TODO(alexclarke): Add a MainThreadOnly struct too. | 282 // TODO(alexclarke): Add a MainThreadOnly struct too. |
271 | 283 |
272 mutable base::Lock any_thread_lock_; | 284 mutable base::Lock any_thread_lock_; |
273 AnyThread any_thread_; | 285 AnyThread any_thread_; |
274 | 286 |
275 struct AnyThread& any_thread() { | 287 struct AnyThread& any_thread() { |
276 any_thread_lock_.AssertAcquired(); | 288 any_thread_lock_.AssertAcquired(); |
277 return any_thread_; | 289 return any_thread_; |
278 } | 290 } |
279 const struct AnyThread& any_thread() const { | 291 const struct AnyThread& any_thread() const { |
280 any_thread_lock_.AssertAcquired(); | 292 any_thread_lock_.AssertAcquired(); |
281 return any_thread_; | 293 return any_thread_; |
282 } | 294 } |
283 | 295 |
| 296 base::TimeTicks next_scheduled_delayed_do_work_time_; |
| 297 |
284 bool record_task_delay_histograms_; | 298 bool record_task_delay_histograms_; |
285 | 299 |
286 int work_batch_size_; | 300 int work_batch_size_; |
287 size_t task_count_; | 301 size_t task_count_; |
288 | 302 |
289 base::ObserverList<base::MessageLoop::TaskObserver> task_observers_; | 303 base::ObserverList<base::MessageLoop::TaskObserver> task_observers_; |
290 | 304 |
291 base::ObserverList<TaskTimeObserver> task_time_observers_; | 305 base::ObserverList<TaskTimeObserver> task_time_observers_; |
292 | 306 |
293 const char* tracing_category_; | 307 const char* tracing_category_; |
294 const char* disabled_by_default_tracing_category_; | 308 const char* disabled_by_default_tracing_category_; |
295 const char* disabled_by_default_verbose_tracing_category_; | 309 const char* disabled_by_default_verbose_tracing_category_; |
296 | 310 |
297 internal::TaskQueueImpl* currently_executing_task_queue_; // NOT OWNED | 311 internal::TaskQueueImpl* currently_executing_task_queue_; // NOT OWNED |
298 | 312 |
299 Observer* observer_; // NOT OWNED | 313 Observer* observer_; // NOT OWNED |
300 scoped_refptr<DeletionSentinel> deletion_sentinel_; | 314 scoped_refptr<DeletionSentinel> deletion_sentinel_; |
301 base::WeakPtrFactory<TaskQueueManager> weak_factory_; | 315 base::WeakPtrFactory<TaskQueueManager> weak_factory_; |
302 | 316 |
303 DISALLOW_COPY_AND_ASSIGN(TaskQueueManager); | 317 DISALLOW_COPY_AND_ASSIGN(TaskQueueManager); |
304 }; | 318 }; |
305 | 319 |
306 } // namespace scheduler | 320 } // namespace scheduler |
307 } // namespace blink | 321 } // namespace blink |
308 | 322 |
309 #endif // THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ | 323 #endif // THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ |
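
The new *Locked() methods (PostDoWorkContinuationLocked, MaybeScheduleImmediateWorkLocked) take a MoveableAutoLock&&: the caller acquires |any_thread_lock_| once and hands the held lock to the callee. The self-contained sketch below illustrates that pattern only; the MoveableAutoLock definition here is an assumption for illustration (the real class lives in platform/scheduler/base/moveable_auto_lock.h and is not shown in this diff), and LockTransferExample is a hypothetical stand-in, not the CL's code.

    #include <utility>
    #include "base/synchronization/lock.h"

    // Assumed-for-illustration movable RAII holder over base::Lock; only the
    // last holder in the move chain releases the lock.
    class MoveableAutoLock {
     public:
      explicit MoveableAutoLock(base::Lock& lock) : lock_(lock), moved_from_(false) {
        lock_.Acquire();
      }
      MoveableAutoLock(MoveableAutoLock&& other)
          : lock_(other.lock_), moved_from_(other.moved_from_) {
        other.moved_from_ = true;  // Transfer ownership of the held lock.
      }
      ~MoveableAutoLock() {
        if (!moved_from_)
          lock_.Release();
      }

     private:
      base::Lock& lock_;
      bool moved_from_;
    };

    // Hypothetical caller/callee pair: acquire once, then pass the held lock
    // into a *Locked() helper so it can touch guarded state without
    // re-acquiring (base::Lock is not re-entrant).
    class LockTransferExample {
     public:
      void RequestImmediateWork() {
        MoveableAutoLock lock(any_thread_lock_);
        MaybeScheduleImmediateWorkLocked(std::move(lock));
      }

     private:
      void MaybeScheduleImmediateWorkLocked(MoveableAutoLock&& lock) {
        MoveableAutoLock auto_lock(std::move(lock));
        ++immediate_do_work_posted_count_;  // Guarded by |any_thread_lock_|.
      }  // Lock released here, once, by the final holder.

      base::Lock any_thread_lock_;
      int immediate_do_work_posted_count_ = 0;
    };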
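
The AnyThread struct together with the any_thread() accessors that call AssertAcquired() is the usual Chromium idiom for state touched from several threads: the fields are only reachable through an accessor that, in debug builds, checks the lock is held. A small self-contained sketch of the idiom with simplified stand-in fields (not the real AnyThread members):

    #include <set>
    #include "base/synchronization/lock.h"

    class CrossThreadState {
     public:
      void MarkIncomingWork(int queue_id) {
        base::AutoLock lock(lock_);                        // Hold the lock first...
        any_thread().pending_queue_ids.insert(queue_id);   // ...then touch state.
      }

      size_t PendingCount() {
        base::AutoLock lock(lock_);
        return any_thread().pending_queue_ids.size();
      }

     private:
      struct AnyThread {
        std::set<int> pending_queue_ids;
      };

      // Any unlocked access trips AssertAcquired() in debug builds, which is
      // what makes this safer than scattering raw field accesses around.
      AnyThread& any_thread() {
        lock_.AssertAcquired();
        return any_thread_;
      }

      base::Lock lock_;
      AnyThread any_thread_;
    };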
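
The delayed-DoWork members (delayed_do_work_closure_, cancelable_delayed_do_work_closure_, next_scheduled_delayed_do_work_time_) suggest the familiar cancel-and-repost pattern: when a new wake-up is earlier than the one already posted, the stale delayed task is cancelled via base::CancelableClosure::Reset() and a fresh one is posted. The sketch below is an illustration of that pattern under that assumption; the class and member names are hypothetical stand-ins, not the CL's actual logic.

    #include <utility>
    #include "base/bind.h"
    #include "base/cancelable_callback.h"
    #include "base/location.h"
    #include "base/single_thread_task_runner.h"
    #include "base/time/time.h"

    class DelayedWorkScheduler {
     public:
      explicit DelayedWorkScheduler(
          scoped_refptr<base::SingleThreadTaskRunner> task_runner)
          : task_runner_(std::move(task_runner)) {}

      // Posts a delayed DoWork unless an earlier (or equal) wake-up is already
      // pending. Reset() cancels the previously posted callback, so at most one
      // delayed DoWork is outstanding at a time.
      void ScheduleDelayedDoWork(base::TimeTicks now, base::TimeDelta delay) {
        base::TimeTicks run_time = now + delay;
        if (!next_delayed_do_work_time_.is_null() &&
            run_time >= next_delayed_do_work_time_)
          return;
        next_delayed_do_work_time_ = run_time;
        cancelable_delayed_do_work_closure_.Reset(base::Bind(
            &DelayedWorkScheduler::DoDelayedWork, base::Unretained(this)));
        task_runner_->PostDelayedTask(
            FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
      }

     private:
      void DoDelayedWork() {
        next_delayed_do_work_time_ = base::TimeTicks();
        // ... run whatever delayed tasks are now ready ...
      }

      scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
      base::CancelableClosure cancelable_delayed_do_work_closure_;
      base::TimeTicks next_delayed_do_work_time_;
    };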