Chromium Code Reviews

Unified Diff: content/renderer/raster_worker_pool.cc

Issue 1538433002: Revert of TaskGraphRunner Group support (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@refactor
Patch Set: Created 5 years ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "content/renderer/raster_worker_pool.h"
 
-#include <utility>
-
 #include "base/strings/stringprintf.h"
 #include "base/threading/thread_restrictions.h"
 #include "base/trace_event/trace_event.h"
 
 namespace content {
 
 // A sequenced task runner which posts tasks to a RasterWorkerPool.
 class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner
     : public base::SequencedTaskRunner {
  public:
(...skipping 23 matching lines...)
 
     tasks_.erase(tasks_.begin(), tasks_.begin() + completed_tasks_.size());
 
     tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
     graph_.Reset();
     for (const auto& graph_task : tasks_) {
       int dependencies = 0;
       if (!graph_.nodes.empty())
         dependencies = 1;
 
-      cc::TaskGraph::Node node(graph_task.get(), 0u /* category */,
-                               0u /* priority */, dependencies);
+      cc::TaskGraph::Node node(graph_task.get(), 0, dependencies);
       if (dependencies) {
         graph_.edges.push_back(
             cc::TaskGraph::Edge(graph_.nodes.back().task, node.task));
       }
       graph_.nodes.push_back(node);
     }
     task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
     completed_tasks_.clear();
     return true;
   }
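
Illustrative sketch (not part of this patch): for three queued closures a, b and c (the scoped_refptr<cc::Task> entries already held in tasks_), the loop above produces a linear chain, written here in the post-revert three-argument form shown in the added lines, where the second argument is assumed to be the priority. Every node after the first carries one dependency and one edge from its predecessor, which is what preserves FIFO order once the graph is handed to ScheduleTasks().

  cc::TaskGraph graph;
  graph.nodes.push_back(cc::TaskGraph::Node(a.get(), 0, 0));  // no dependencies
  graph.nodes.push_back(cc::TaskGraph::Node(b.get(), 0, 1));  // waits on a
  graph.edges.push_back(cc::TaskGraph::Edge(a.get(), b.get()));
  graph.nodes.push_back(cc::TaskGraph::Node(c.get(), 0, 1));  // waits on b
  graph.edges.push_back(cc::TaskGraph::Edge(b.get(), c.get()));
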
(...skipping 80 matching lines...)
       tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) {
         return std::find(this->completed_tasks_.begin(),
                          this->completed_tasks_.end(),
                          e) != this->completed_tasks_.end();
       });
   tasks_.erase(end, tasks_.end());
 
   tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
   graph_.Reset();
   for (const auto& graph_task : tasks_)
-    graph_.nodes.push_back(
-        cc::TaskGraph::Node(graph_task.get(), 0u /* category */,
-                            0u /* priority */, 0u /* dependencies */));
+    graph_.nodes.push_back(cc::TaskGraph::Node(graph_task.get(), 0, 0));
 
   ScheduleTasksWithLockAcquired(namespace_token_, &graph_);
   completed_tasks_.clear();
   return true;
 }
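
For contrast, a minimal sketch of what the pool-level PostDelayedTask above builds (illustrative only, same assumptions as the previous sketch): each closure becomes an independent node with zero dependencies and no edges, so nothing constrains the order in which worker threads pick them up.

  cc::TaskGraph graph;
  graph.nodes.push_back(cc::TaskGraph::Node(a.get(), 0, 0));
  graph.nodes.push_back(cc::TaskGraph::Node(b.get(), 0, 0));
  graph.nodes.push_back(cc::TaskGraph::Node(c.get(), 0, 0));
  // No edges: unlike RasterWorkerPoolSequencedTaskRunner, a, b and c are unordered.
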
 
 bool RasterWorkerPool::RunsTasksOnCurrentThread() const {
   return true;
 }
 
 // Overridden from base::DelegateSimpleThread::Delegate:
 void RasterWorkerPool::Run() {
   base::AutoLock lock(lock_);
 
   while (true) {
-    if (!RunTaskWithLockAcquired()) {
+    if (!work_queue_.HasReadyToRunTasks()) {
       // Exit when shutdown is set and no more tasks are pending.
       if (shutdown_)
         break;
 
       // Wait for more tasks.
       has_ready_to_run_tasks_cv_.Wait();
       continue;
     }
+
+    RunTaskWithLockAcquired();
   }
 }
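
The change above moves the "is there any work" check out of RunTaskWithLockAcquired() and back into the worker loop. A sketch of the reverted loop shape with the locking behaviour spelled out (this mirrors the added lines; it is not new code):

  while (true) {
    if (!work_queue_.HasReadyToRunTasks()) {
      if (shutdown_)                      // exit only once the queue is drained
        break;
      has_ready_to_run_tasks_cv_.Wait();  // releases lock_ while blocked, reacquires on wakeup
      continue;                           // re-check the predicate after every wakeup
    }
    RunTaskWithLockAcquired();            // runs exactly one ready task, then loops
  }
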
 
 void RasterWorkerPool::FlushForTesting() {
   base::AutoLock lock(lock_);
 
   while (!work_queue_.HasFinishedRunningTasksInAllNamespaces()) {
     has_namespaces_with_finished_running_tasks_cv_.Wait();
   }
 }
(...skipping 63 matching lines...)
   }
 }
 
 void RasterWorkerPool::CollectCompletedTasksWithLockAcquired(
     cc::NamespaceToken token,
     cc::Task::Vector* completed_tasks) {
   DCHECK(token.IsValid());
   work_queue_.CollectCompletedTasks(token, completed_tasks);
 }
 
-bool RasterWorkerPool::RunTaskWithLockAcquired() {
+void RasterWorkerPool::RunTaskWithLockAcquired() {
   TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask");
 
   lock_.AssertAcquired();
 
-  // Find the first category with any tasks to run. This task graph runner
-  // treats categories as an additional priority.
-  // TODO(ericrk): Add more category/thread logic.
-  const auto& ready_to_run_namespaces = work_queue_.ready_to_run_namespaces();
-  auto found = std::find_if(
-      ready_to_run_namespaces.cbegin(), ready_to_run_namespaces.cend(),
-      [](const std::pair<uint16_t,
-                         cc::TaskGraphWorkQueue::TaskNamespace::Vector>& pair) {
-        return !pair.second.empty();
-      });
-
-  if (found == ready_to_run_namespaces.cend()) {
-    return false;
-  }
-
-  const uint16_t category = found->first;
-  auto prioritized_task = work_queue_.GetNextTaskToRun(category);
+  auto prioritized_task = work_queue_.GetNextTaskToRun();
   cc::Task* task = prioritized_task.task;
 
   // There may be more work available, so wake up another worker thread.
   if (work_queue_.HasReadyToRunTasks())
     has_ready_to_run_tasks_cv_.Signal();
 
   // Call WillRun() before releasing |lock_| and running task.
   task->WillRun();
 
   {
     base::AutoUnlock unlock(lock_);
 
     task->RunOnWorkerThread();
   }
 
   // This will mark task as finished running.
   task->DidRun();
 
   work_queue_.CompleteTask(prioritized_task);
 
   // If namespace has finished running all tasks, wake up origin threads.
   if (work_queue_.HasFinishedRunningTasksInNamespace(
           prioritized_task.task_namespace))
     has_namespaces_with_finished_running_tasks_cv_.Broadcast();
-
-  return true;
 }
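
RunTaskWithLockAcquired() keeps lock_ held for queue bookkeeping but drops it around the task body itself. A minimal standalone sketch of that idiom, assuming only base::Lock, base::AutoLock and base::AutoUnlock; RunSlowWork() is a hypothetical stand-in for task->RunOnWorkerThread():

  base::Lock lock;
  {
    base::AutoLock hold(lock);         // take the lock for bookkeeping
    // ... pick and mark a task while the lock is held (WillRun) ...
    {
      base::AutoUnlock release(lock);  // drop the lock around the slow work
      RunSlowWork();                   // other workers can schedule concurrently
    }
    // lock is reacquired here; record completion (DidRun, CompleteTask) ...
  }
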
 
 RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure)
     : closure_(closure) {}
 
 // Overridden from cc::Task:
 void RasterWorkerPool::ClosureTask::RunOnWorkerThread() {
   closure_.Run();
   closure_.Reset();
 }
 
 RasterWorkerPool::ClosureTask::~ClosureTask() {}
 
 }  // namespace content