Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.h

Issue 2546423002: [Try # 3] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Rebase it's even smaller now! Created 3 years, 10 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ 5 #ifndef THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_
6 #define THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ 6 #define THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_
7 7
8 #include <map> 8 #include <map>
9 9
10 #include "base/atomic_sequence_num.h" 10 #include "base/atomic_sequence_num.h"
11 #include "base/cancelable_callback.h"
11 #include "base/debug/task_annotator.h" 12 #include "base/debug/task_annotator.h"
12 #include "base/macros.h" 13 #include "base/macros.h"
13 #include "base/memory/weak_ptr.h" 14 #include "base/memory/weak_ptr.h"
14 #include "base/message_loop/message_loop.h" 15 #include "base/message_loop/message_loop.h"
15 #include "base/pending_task.h" 16 #include "base/pending_task.h"
16 #include "base/synchronization/lock.h" 17 #include "base/synchronization/lock.h"
17 #include "base/threading/thread_checker.h" 18 #include "base/threading/thread_checker.h"
18 #include "platform/scheduler/base/enqueue_order.h" 19 #include "platform/scheduler/base/enqueue_order.h"
20 #include "platform/scheduler/base/moveable_auto_lock.h"
19 #include "platform/scheduler/base/task_queue_impl.h" 21 #include "platform/scheduler/base/task_queue_impl.h"
20 #include "platform/scheduler/base/task_queue_selector.h" 22 #include "platform/scheduler/base/task_queue_selector.h"
21 23
22 namespace base { 24 namespace base {
23 namespace trace_event { 25 namespace trace_event {
24 class ConvertableToTraceFormat; 26 class ConvertableToTraceFormat;
25 } // namespace trace_event 27 } // namespace trace_event
26 } // namespace base 28 } // namespace base
27 29
28 namespace blink { 30 namespace blink {
(...skipping 123 matching lines...)
152 friend class internal::TaskQueueImpl; 154 friend class internal::TaskQueueImpl;
153 friend class TaskQueueManagerTest; 155 friend class TaskQueueManagerTest;
154 156
155 class DeletionSentinel : public base::RefCounted<DeletionSentinel> { 157 class DeletionSentinel : public base::RefCounted<DeletionSentinel> {
156 private: 158 private:
157 friend class base::RefCounted<DeletionSentinel>; 159 friend class base::RefCounted<DeletionSentinel>;
158 ~DeletionSentinel() {} 160 ~DeletionSentinel() {}
159 }; 161 };
160 162
161 // Unregisters a TaskQueue previously created by |NewTaskQueue()|. 163 // Unregisters a TaskQueue previously created by |NewTaskQueue()|.
162 // NOTE we have to flush the queue from |newly_updatable_| which means as a
163 // side effect MoveNewlyUpdatableQueuesIntoUpdatableQueueSet is called by this
164 // function.
165 void UnregisterTaskQueue(scoped_refptr<internal::TaskQueueImpl> task_queue); 164 void UnregisterTaskQueue(scoped_refptr<internal::TaskQueueImpl> task_queue);
166 165
167 // TaskQueueSelector::Observer implementation: 166 // TaskQueueSelector::Observer implementation:
168 void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override; 167 void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
169 void OnTriedToSelectBlockedWorkQueue( 168 void OnTriedToSelectBlockedWorkQueue(
170 internal::WorkQueue* work_queue) override; 169 internal::WorkQueue* work_queue) override;
171 170
172 // base::MessageLoop::NestingObserver implementation: 171 // base::MessageLoop::NestingObserver implementation:
173 void OnBeginNestedMessageLoop() override; 172 void OnBeginNestedMessageLoop() override;
174 173
175 // Called by the task queue to register a new pending task. 174 // Called by the task queue to register a new pending task.
176 void DidQueueTask(const internal::TaskQueueImpl::Task& pending_task); 175 void DidQueueTask(const internal::TaskQueueImpl::Task& pending_task);
177 176
178 // Use the selector to choose a pending task and run it. 177 // Use the selector to choose a pending task and run it.
179 void DoWork(base::TimeTicks run_time, bool from_main_thread); 178 void DoWork(bool delayed);
179
180 // Post a DoWork continuation if |next_delay| is not empty.
181 void PostDoWorkContinuationLocked(base::Optional<base::TimeDelta> next_delay,
182 LazyNow* lazy_now,
183 MoveableAutoLock&& lock);
180 184
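A minimal sketch (not part of this CL) of how the tail of DoWork might hand the lock over to PostDoWorkContinuationLocked. It assumes MoveableAutoLock acquires |any_thread_lock_| in its constructor and releases it on destruction; the helper name ExampleDoWorkTail is hypothetical.

  // Hypothetical illustration only: compute the next wake-up under the lock
  // and pass lock ownership into the continuation helper.
  void TaskQueueManager::ExampleDoWorkTail(LazyNow* lazy_now) {
    MoveableAutoLock lock(any_thread_lock_);
    base::Optional<base::TimeDelta> next_delay =
        ComputeDelayTillNextTaskLocked(lazy_now);
    PostDoWorkContinuationLocked(next_delay, lazy_now, std::move(lock));
  }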
 181 // Delayed tasks with run_times <= Now() are enqueued onto the work queue, 185 // Delayed tasks with run_times <= Now() are enqueued onto the work queue,
 182 // and any empty work queues are reloaded. 186 // and any empty work queues are reloaded.
183 void WakeupReadyDelayedQueues(LazyNow* lazy_now); 187 void WakeupReadyDelayedQueues(LazyNow* lazy_now);
184 188
 185 // Chooses the next work queue to service. Returns true if |out_work_queue| 189 // Chooses the next work queue to service. Returns true if |out_work_queue|
186 // indicates the queue from which the next task should be run, false to 190 // indicates the queue from which the next task should be run, false to
187 // avoid running any tasks. 191 // avoid running any tasks.
188 bool SelectWorkQueueToService(internal::WorkQueue** out_work_queue); 192 bool SelectWorkQueueToService(internal::WorkQueue** out_work_queue);
189 193
(...skipping 15 matching lines...)
205 209
206 bool RunsTasksOnCurrentThread() const; 210 bool RunsTasksOnCurrentThread() const;
207 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here, 211 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
208 const base::Closure& task, 212 const base::Closure& task,
209 base::TimeDelta delay); 213 base::TimeDelta delay);
210 214
211 internal::EnqueueOrder GetNextSequenceNumber(); 215 internal::EnqueueOrder GetNextSequenceNumber();
212 216
213 // Calls DelayTillNextTask on all time domains and returns the smallest delay 217 // Calls DelayTillNextTask on all time domains and returns the smallest delay
214 // requested if any. 218 // requested if any.
215 base::Optional<base::TimeDelta> ComputeDelayTillNextTask(LazyNow* lazy_now); 219 base::Optional<base::TimeDelta> ComputeDelayTillNextTaskLocked(
220 LazyNow* lazy_now);
216 221
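Given the comment above, the body is essentially a smallest-delay scan over the time domains. A minimal sketch follows; the real implementation lives in task_queue_manager.cc and the exact DelayTillNextTask signature is assumed here.

  base::Optional<base::TimeDelta>
  TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) {
    base::Optional<base::TimeDelta> delay_till_next_task;
    for (TimeDomain* time_domain : time_domains_) {
      base::Optional<base::TimeDelta> delay =
          time_domain->DelayTillNextTask(lazy_now);  // Signature assumed.
      if (!delay)
        continue;
      if (!delay_till_next_task || *delay < *delay_till_next_task)
        delay_till_next_task = delay;
    }
    return delay_till_next_task;
  }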
217 void MaybeRecordTaskDelayHistograms( 222 void MaybeRecordTaskDelayHistograms(
218 const internal::TaskQueueImpl::Task& pending_task, 223 const internal::TaskQueueImpl::Task& pending_task,
219 const internal::TaskQueueImpl* queue); 224 const internal::TaskQueueImpl* queue);
220 225
221 std::unique_ptr<base::trace_event::ConvertableToTraceFormat> 226 std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
222 AsValueWithSelectorResult(bool should_run, 227 AsValueWithSelectorResult(bool should_run,
223 internal::WorkQueue* selected_work_queue) const; 228 internal::WorkQueue* selected_work_queue) const;
224 229
230 void MaybeScheduleImmediateWorkLocked(
231 const tracked_objects::Location& from_here,
232 MoveableAutoLock&& lock);
233
 225 // Adds |queue| to |any_thread().has_incoming_immediate_work| and, if 234 // Adds |queue| to |any_thread().has_incoming_immediate_work| and, if
 226 // |queue_is_blocked| is false, makes sure a DoWork is posted. 235 // |queue_is_blocked| is false, makes sure a DoWork is posted.
227 // Can be called from any thread. 236 // Can be called from any thread.
228 void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue, 237 void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue,
229 bool queue_is_blocked); 238 bool queue_is_blocked);
230 239
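A sketch of the cross-thread path the comment above describes, again assuming MoveableAutoLock takes |any_thread_lock_| in its constructor. The real body is in task_queue_manager.cc and may differ in detail.

  void TaskQueueManager::OnQueueHasIncomingImmediateWork(
      internal::TaskQueueImpl* queue,
      bool queue_is_blocked) {
    MoveableAutoLock lock(any_thread_lock_);
    any_thread().has_incoming_immediate_work.insert(queue);
    // A blocked queue cannot run yet, so posting a DoWork for it would be
    // redundant.
    if (!queue_is_blocked)
      MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock));
  }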
231 // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in 240 // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in
232 // |queues_to_reload|. 241 // |queues_to_reload|.
233 void ReloadEmptyWorkQueues(const std::unordered_set<internal::TaskQueueImpl*>& 242 void ReloadEmptyWorkQueues(const std::unordered_set<internal::TaskQueueImpl*>&
234 queues_to_reload) const; 243 queues_to_reload) const;
235 244
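Per the comment above, this is a straight delegation to each queue. A minimal sketch (actual body in the .cc):

  void TaskQueueManager::ReloadEmptyWorkQueues(
      const std::unordered_set<internal::TaskQueueImpl*>& queues_to_reload)
      const {
    for (internal::TaskQueueImpl* queue : queues_to_reload)
      queue->ReloadImmediateWorkQueueIfEmpty();
  }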
236 std::set<TimeDomain*> time_domains_; 245 std::set<TimeDomain*> time_domains_;
237 std::unique_ptr<RealTimeDomain> real_time_domain_; 246 std::unique_ptr<RealTimeDomain> real_time_domain_;
238 247
239 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_; 248 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_;
240 249
241 // We have to be careful when deleting a queue because some of the code uses 250 // We have to be careful when deleting a queue because some of the code uses
242 // raw pointers and doesn't expect the rug to be pulled out from underneath. 251 // raw pointers and doesn't expect the rug to be pulled out from underneath.
243 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_to_delete_; 252 std::set<scoped_refptr<internal::TaskQueueImpl>> queues_to_delete_;
244 253
245 internal::EnqueueOrderGenerator enqueue_order_generator_; 254 internal::EnqueueOrderGenerator enqueue_order_generator_;
246 base::debug::TaskAnnotator task_annotator_; 255 base::debug::TaskAnnotator task_annotator_;
247 256
248 base::ThreadChecker main_thread_checker_; 257 base::ThreadChecker main_thread_checker_;
249 scoped_refptr<TaskQueueManagerDelegate> delegate_; 258 scoped_refptr<TaskQueueManagerDelegate> delegate_;
250 internal::TaskQueueSelector selector_; 259 internal::TaskQueueSelector selector_;
251 260
252 base::Closure from_main_thread_immediate_do_work_closure_; 261 base::Closure immediate_do_work_closure_;
253 base::Closure from_other_thread_immediate_do_work_closure_; 262 base::Closure delayed_do_work_closure_;
254 263
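For orientation, one plausible way the two closures get bound to DoWork(bool delayed). This is a sketch, not the CL's actual initialization code, and ExampleBindDoWorkClosures is a hypothetical helper.

  // Hypothetical helper: the real binding happens in task_queue_manager.cc.
  void TaskQueueManager::ExampleBindDoWorkClosures() {
    immediate_do_work_closure_ = base::Bind(
        &TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
        /*delayed=*/false);
    delayed_do_work_closure_ = base::Bind(
        &TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
        /*delayed=*/true);
  }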
255 bool task_was_run_on_quiescence_monitored_queue_; 264 bool task_was_run_on_quiescence_monitored_queue_;
256 265
257 // To reduce locking overhead we track pending calls to DoWork separately for
258 // the main thread and other threads.
259 std::set<base::TimeTicks> main_thread_pending_wakeups_;
260
261 struct AnyThread { 266 struct AnyThread {
262 AnyThread(); 267 AnyThread();
263 268
264 // Set of task queues with newly available work on the incoming queue. 269 // Set of task queues with newly available work on the incoming queue.
265 std::unordered_set<internal::TaskQueueImpl*> has_incoming_immediate_work; 270 std::unordered_set<internal::TaskQueueImpl*> has_incoming_immediate_work;
266 271
267 bool other_thread_pending_wakeup; 272 int do_work_running_count;
273 int immediate_do_work_posted_count;
274 bool is_nested; // Whether or not the message loop is currently nested.
268 }; 275 };
269 276
270 // TODO(alexclarke): Add a MainThreadOnly struct too. 277 // TODO(alexclarke): Add a MainThreadOnly struct too.
271 278
272 mutable base::Lock any_thread_lock_; 279 mutable base::Lock any_thread_lock_;
273 AnyThread any_thread_; 280 AnyThread any_thread_;
274 281
275 struct AnyThread& any_thread() { 282 struct AnyThread& any_thread() {
276 any_thread_lock_.AssertAcquired(); 283 any_thread_lock_.AssertAcquired();
277 return any_thread_; 284 return any_thread_;
278 } 285 }
279 const struct AnyThread& any_thread() const { 286 const struct AnyThread& any_thread() const {
280 any_thread_lock_.AssertAcquired(); 287 any_thread_lock_.AssertAcquired();
281 return any_thread_; 288 return any_thread_;
282 } 289 }
283 290
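The accessor pair above enforces that |any_thread_| is only touched while |any_thread_lock_| is held. A small usage sketch; ExampleCrossThreadUpdate is hypothetical.

  void TaskQueueManager::ExampleCrossThreadUpdate() {
    base::AutoLock lock(any_thread_lock_);
    // AssertAcquired() inside any_thread() turns an unlocked access into a
    // DCHECK failure instead of a silent data race.
    any_thread().immediate_do_work_posted_count++;
  }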
291 base::TimeTicks next_delayed_do_work_;
Sami 2017/01/26 12:29:17 nit: Should we call this next_delayed_do_work_time_?
alex clarke (OOO till 29th) 2017/01/26 15:22:38 Done.
292 base::CancelableClosure cancelable_delayed_do_work_closure_;
Sami 2017/01/26 12:29:17 Mind moving this next to |delayed_do_work_closure_|?
alex clarke (OOO till 29th) 2017/01/26 15:22:38 Done.
293
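A sketch of how these two members could be used together to (re)arm a delayed DoWork. It assumes |delegate_| is the thread's task runner; ExampleScheduleDelayedDoWork and its parameters are hypothetical.

  void TaskQueueManager::ExampleScheduleDelayedDoWork(base::TimeTicks now,
                                                      base::TimeTicks run_time) {
    if (next_delayed_do_work_ == run_time)
      return;  // A wake-up at |run_time| is already scheduled.
    next_delayed_do_work_ = run_time;
    // Reset() cancels any previously posted copy of the callback.
    cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
    delegate_->PostDelayedTask(FROM_HERE,
                               cancelable_delayed_do_work_closure_.callback(),
                               run_time - now);
  }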
284 bool record_task_delay_histograms_; 294 bool record_task_delay_histograms_;
285 295
286 int work_batch_size_; 296 int work_batch_size_;
287 size_t task_count_; 297 size_t task_count_;
288 298
289 base::ObserverList<base::MessageLoop::TaskObserver> task_observers_; 299 base::ObserverList<base::MessageLoop::TaskObserver> task_observers_;
290 300
291 base::ObserverList<TaskTimeObserver> task_time_observers_; 301 base::ObserverList<TaskTimeObserver> task_time_observers_;
292 302
293 const char* tracing_category_; 303 const char* tracing_category_;
294 const char* disabled_by_default_tracing_category_; 304 const char* disabled_by_default_tracing_category_;
295 const char* disabled_by_default_verbose_tracing_category_; 305 const char* disabled_by_default_verbose_tracing_category_;
296 306
297 internal::TaskQueueImpl* currently_executing_task_queue_; // NOT OWNED 307 internal::TaskQueueImpl* currently_executing_task_queue_; // NOT OWNED
298 308
299 Observer* observer_; // NOT OWNED 309 Observer* observer_; // NOT OWNED
300 scoped_refptr<DeletionSentinel> deletion_sentinel_; 310 scoped_refptr<DeletionSentinel> deletion_sentinel_;
301 base::WeakPtrFactory<TaskQueueManager> weak_factory_; 311 base::WeakPtrFactory<TaskQueueManager> weak_factory_;
302 312
303 DISALLOW_COPY_AND_ASSIGN(TaskQueueManager); 313 DISALLOW_COPY_AND_ASSIGN(TaskQueueManager);
304 }; 314 };
305 315
306 } // namespace scheduler 316 } // namespace scheduler
307 } // namespace blink 317 } // namespace blink
308 318
 309 #endif // THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_ 319 #endif // THIRD_PARTY_WEBKIT_SOURCE_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_