OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/resources/gpu_raster_worker_pool.h" | 5 #include "cc/resources/gpu_raster_worker_pool.h" |
6 | 6 |
7 #include "base/debug/trace_event.h" | 7 #include "base/debug/trace_event.h" |
8 #include "cc/output/context_provider.h" | 8 #include "cc/output/context_provider.h" |
9 #include "cc/resources/resource.h" | 9 #include "cc/resources/resource.h" |
10 #include "cc/resources/resource_provider.h" | 10 #include "cc/resources/resource_provider.h" |
(...skipping 14 matching lines...) | |
25 | 25 |
26 GpuRasterWorkerPool::GpuRasterWorkerPool(base::SequencedTaskRunner* task_runner, | 26 GpuRasterWorkerPool::GpuRasterWorkerPool(base::SequencedTaskRunner* task_runner, |
27 ContextProvider* context_provider, | 27 ContextProvider* context_provider, |
28 ResourceProvider* resource_provider) | 28 ResourceProvider* resource_provider) |
29 : task_runner_(task_runner), | 29 : task_runner_(task_runner), |
30 task_graph_runner_(new TaskGraphRunner), | 30 task_graph_runner_(new TaskGraphRunner), |
31 namespace_token_(task_graph_runner_->GetNamespaceToken()), | 31 namespace_token_(task_graph_runner_->GetNamespaceToken()), |
32 context_provider_(context_provider), | 32 context_provider_(context_provider), |
33 resource_provider_(resource_provider), | 33 resource_provider_(resource_provider), |
34 run_tasks_on_origin_thread_pending_(false), | 34 run_tasks_on_origin_thread_pending_(false), |
35 raster_tasks_pending_(false), | |
36 raster_tasks_required_for_activation_pending_(false), | |
37 raster_finished_weak_ptr_factory_(this), | 35 raster_finished_weak_ptr_factory_(this), |
38 weak_ptr_factory_(this) { | 36 weak_ptr_factory_(this) { |
39 DCHECK(context_provider_); | 37 DCHECK(context_provider_); |
40 } | 38 } |
41 | 39 |
42 GpuRasterWorkerPool::~GpuRasterWorkerPool() { | 40 GpuRasterWorkerPool::~GpuRasterWorkerPool() { |
43 DCHECK_EQ(0u, completed_tasks_.size()); | 41 DCHECK_EQ(0u, completed_tasks_.size()); |
44 } | 42 } |
45 | 43 |
46 Rasterizer* GpuRasterWorkerPool::AsRasterizer() { | 44 Rasterizer* GpuRasterWorkerPool::AsRasterizer() { |
47 return this; | 45 return this; |
48 } | 46 } |
49 | 47 |
50 void GpuRasterWorkerPool::SetClient(RasterizerClient* client) { | 48 void GpuRasterWorkerPool::SetClient(RasterizerClient* client) { |
51 client_ = client; | 49 client_ = client; |
52 } | 50 } |
53 | 51 |
54 void GpuRasterWorkerPool::Shutdown() { | 52 void GpuRasterWorkerPool::Shutdown() { |
55 TRACE_EVENT0("cc", "GpuRasterWorkerPool::Shutdown"); | 53 TRACE_EVENT0("cc", "GpuRasterWorkerPool::Shutdown"); |
56 | 54 |
57 TaskGraph empty; | 55 TaskGraph empty; |
58 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 56 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
59 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 57 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
60 } | 58 } |
61 | 59 |
62 void GpuRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) { | 60 void GpuRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) { |
63 TRACE_EVENT0("cc", "GpuRasterWorkerPool::ScheduleTasks"); | 61 TRACE_EVENT0("cc", "GpuRasterWorkerPool::ScheduleTasks"); |
64 | 62 |
65 DCHECK_EQ(queue->required_for_activation_count, | 63 raster_task_sets_pending_.set(); |
reveman 2014/09/10 16:41:49: is it worth adding a comment here that makes it clear…
ernstm 2014/09/10 18:26:18: Done.
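The comment above concerns the new `raster_task_sets_pending_.set()` call. As a rough standalone sketch, assuming the member is a std::bitset-like collection indexed by TaskSet (the typedef and the kNumberOfTaskSets value below are illustrative, not taken from this patch), `set()` with no argument marks every task set pending at once, and each bit is cleared individually later when that set's finished task runs:

```cpp
// Minimal sketch of the "all task sets pending" pattern; types and counts
// here are illustrative stand-ins, not the real cc declarations.
#include <bitset>
#include <cstddef>

typedef std::size_t TaskSet;
const std::size_t kNumberOfTaskSets = 2;  // e.g. ALL + REQUIRED_FOR_ACTIVATION.
typedef std::bitset<kNumberOfTaskSets> TaskSetCollection;

int main() {
  TaskSetCollection pending;
  pending.set();              // Every task set is now marked pending.
  TaskSet finished = 1;
  pending[finished] = false;  // One set finished; the others stay pending.
  return pending.any() ? 0 : 1;
}
```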
66 static_cast<size_t>( | |
67 std::count_if(queue->items.begin(), | |
68 queue->items.end(), | |
69 RasterTaskQueue::Item::IsRequiredForActivation))); | |
70 | |
71 raster_tasks_pending_ = true; | |
72 raster_tasks_required_for_activation_pending_ = true; | |
73 | 64 |
74 unsigned priority = kRasterTaskPriorityBase; | 65 unsigned priority = kRasterTaskPriorityBase; |
75 | 66 |
76 graph_.Reset(); | 67 graph_.Reset(); |
77 | 68 |
78 // Cancel existing OnRasterFinished callbacks. | 69 // Cancel existing OnRasterFinished callbacks. |
79 raster_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 70 raster_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
80 | 71 |
81 scoped_refptr<RasterizerTask> | 72 scoped_refptr<RasterizerTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
82 new_raster_required_for_activation_finished_task( | 73 |
83 CreateRasterRequiredForActivationFinishedTask( | 74 TaskSetSizes task_set_sizes(queue); |
reveman 2014/09/10 16:41:49: We wouldn't need this if not for the synthetic delay…
ernstm 2014/09/10 18:26:18: I'd prefer to do this as a follow-up.
reveman 2014/09/10 19:41:27: I have to insist on doing this prior to this patch…
ernstm 2014/09/10 20:48:06: Could we simply put the delay into the REQUIRED_FOR_ACTIVATION…
reveman 2014/09/10 21:00:52: I don't think so. That will cause us to block on t…
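For context on the delay under discussion, here is a hedged sketch of the usual trace-controlled synthetic delay pattern, assuming the Lookup()/Begin()/End() interface of base::debug::TraceEventSyntheticDelay; the header path and delay name below are assumptions, not taken from this patch. End() only stalls when the named delay has been activated from the tracing side, so where the Begin()/End() bracket sits determines what the pipeline ends up waiting on:

```cpp
// Sketch only (not part of this patch): typical shape of a trace-controlled
// synthetic delay, assuming the Lookup()/Begin()/End() interface.
#include "base/debug/trace_event_synthetic_delay.h"

void RunTaskSetWorkWithOptionalDelay() {
  base::debug::TraceEventSyntheticDelay* delay =
      base::debug::TraceEventSyntheticDelay::Lookup(
          "cc.RasterRequiredForActivation");  // Illustrative delay name.
  delay->Begin();
  // ... the delayed work for the task set would run here ...
  delay->End();  // Stalls here only if the delay was activated via tracing.
}
```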
84 queue->required_for_activation_count, | 75 |
85 task_runner_.get(), | 76 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
86 base::Bind( | 77 base::debug::TraceEventSyntheticDelay* synthetic_delay = NULL; |
87 &GpuRasterWorkerPool::OnRasterRequiredForActivationFinished, | 78 if (task_set_sizes[task_set] > 0) |
88 raster_finished_weak_ptr_factory_.GetWeakPtr()))); | 79 synthetic_delay = client_->SyntheticDelayForTaskSet(task_set); |
89 scoped_refptr<RasterizerTask> new_raster_finished_task( | 80 |
90 CreateRasterFinishedTask( | 81 new_task_set_finished_tasks[task_set] = CreateRasterFinishedTask( |
91 task_runner_.get(), | 82 task_runner_.get(), |
92 base::Bind(&GpuRasterWorkerPool::OnRasterFinished, | 83 base::Bind(&GpuRasterWorkerPool::OnRasterTaskSetFinished, |
93 raster_finished_weak_ptr_factory_.GetWeakPtr()))); | 84 raster_finished_weak_ptr_factory_.GetWeakPtr(), |
85 task_set), | |
86 synthetic_delay); | |
87 } | |
94 | 88 |
95 for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 89 for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
96 it != queue->items.end(); | 90 it != queue->items.end(); |
97 ++it) { | 91 ++it) { |
98 const RasterTaskQueue::Item& item = *it; | 92 const RasterTaskQueue::Item& item = *it; |
99 RasterTask* task = item.task; | 93 RasterTask* task = item.task; |
100 DCHECK(!task->HasCompleted()); | 94 DCHECK(!task->HasCompleted()); |
101 | 95 |
102 if (item.required_for_activation) { | 96 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; task_set++) { |
103 graph_.edges.push_back(TaskGraph::Edge( | 97 if (!it->task_sets[task_set]) |
reveman 2014/09/10 16:41:49: use item.task_sets instead of it->task_sets
ernstm 2014/09/10 18:26:18: Done.
104 task, new_raster_required_for_activation_finished_task.get())); | 98 continue; |
99 | |
100 graph_.edges.push_back( | |
101 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | |
105 } | 102 } |
106 | 103 |
107 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 104 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
108 | |
109 graph_.edges.push_back( | |
110 TaskGraph::Edge(task, new_raster_finished_task.get())); | |
111 } | 105 } |
112 | 106 |
113 InsertNodeForTask(&graph_, | 107 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; task_set++) { |
114 new_raster_required_for_activation_finished_task.get(), | 108 InsertNodeForTask(&graph_, |
115 kRasterRequiredForActivationFinishedTaskPriority, | 109 new_task_set_finished_tasks[task_set].get(), |
116 queue->required_for_activation_count); | 110 kRasterTaskSetFinishedTaskPriority, |
117 InsertNodeForTask(&graph_, | 111 task_set_sizes[task_set]); |
118 new_raster_finished_task.get(), | 112 } |
119 kRasterFinishedTaskPriority, | |
120 queue->items.size()); | |
121 | 113 |
122 ScheduleTasksOnOriginThread(this, &graph_); | 114 ScheduleTasksOnOriginThread(this, &graph_); |
123 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 115 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
124 | 116 |
125 ScheduleRunTasksOnOriginThread(); | 117 ScheduleRunTasksOnOriginThread(); |
126 | 118 |
127 raster_finished_task_ = new_raster_finished_task; | 119 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; task_set++) |
128 raster_required_for_activation_finished_task_ = | 120 task_set_finished_tasks_[task_set] = new_task_set_finished_tasks[task_set]; |
reveman 2014/09/10 16:41:49: Maybe have |task_set_finished_tasks_| be a std::vector…
ernstm 2014/09/10 18:26:18: We can use std::copy on two arrays as well. I like…
reveman 2014/09/10 19:41:27: Great. I didn't know std::copy worked with plain o…
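A small sketch of the std::copy-on-plain-arrays alternative discussed above, with int as a stand-in for the scoped_refptr element type used in the patch:

```cpp
// Sketch of copying between two fixed-size C arrays with std::copy,
// equivalent to the element-by-element assignment loop in the patch.
#include <algorithm>
#include <cstddef>

const std::size_t kNumberOfTaskSets = 2;  // Illustrative count.

int main() {
  int new_task_set_finished_tasks[kNumberOfTaskSets] = {1, 2};
  int task_set_finished_tasks[kNumberOfTaskSets] = {0, 0};
  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks);
  return task_set_finished_tasks[1] == 2 ? 0 : 1;
}
```

std::copy only requires input and output iterators, and raw pointers into C arrays qualify, so the fixed-size array member works without switching to std::vector.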
129 new_raster_required_for_activation_finished_task; | |
130 } | 121 } |
131 | 122 |
132 void GpuRasterWorkerPool::CheckForCompletedTasks() { | 123 void GpuRasterWorkerPool::CheckForCompletedTasks() { |
133 TRACE_EVENT0("cc", "GpuRasterWorkerPool::CheckForCompletedTasks"); | 124 TRACE_EVENT0("cc", "GpuRasterWorkerPool::CheckForCompletedTasks"); |
134 | 125 |
135 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 126 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
136 &completed_tasks_); | 127 &completed_tasks_); |
137 for (Task::Vector::const_iterator it = completed_tasks_.begin(); | 128 for (Task::Vector::const_iterator it = completed_tasks_.begin(); |
138 it != completed_tasks_.end(); | 129 it != completed_tasks_.end(); |
139 ++it) { | 130 ++it) { |
140 RasterizerTask* task = static_cast<RasterizerTask*>(it->get()); | 131 RasterizerTask* task = static_cast<RasterizerTask*>(it->get()); |
141 | 132 |
142 task->WillComplete(); | 133 task->WillComplete(); |
143 task->CompleteOnOriginThread(this); | 134 task->CompleteOnOriginThread(this); |
144 task->DidComplete(); | 135 task->DidComplete(); |
145 | 136 |
146 task->RunReplyOnOriginThread(); | 137 task->RunReplyOnOriginThread(); |
147 } | 138 } |
148 completed_tasks_.clear(); | 139 completed_tasks_.clear(); |
149 } | 140 } |
150 | 141 |
151 RasterBuffer* GpuRasterWorkerPool::AcquireBufferForRaster(RasterTask* task) { | 142 RasterBuffer* GpuRasterWorkerPool::AcquireBufferForRaster(RasterTask* task) { |
152 return resource_provider_->AcquireGpuRasterBuffer(task->resource()->id()); | 143 return resource_provider_->AcquireGpuRasterBuffer(task->resource()->id()); |
153 } | 144 } |
154 | 145 |
155 void GpuRasterWorkerPool::ReleaseBufferForRaster(RasterTask* task) { | 146 void GpuRasterWorkerPool::ReleaseBufferForRaster(RasterTask* task) { |
156 resource_provider_->ReleaseGpuRasterBuffer(task->resource()->id()); | 147 resource_provider_->ReleaseGpuRasterBuffer(task->resource()->id()); |
157 } | 148 } |
158 | 149 |
159 void GpuRasterWorkerPool::OnRasterFinished() { | 150 void GpuRasterWorkerPool::OnRasterTaskSetFinished(TaskSet task_set) { |
160 TRACE_EVENT0("cc", "GpuRasterWorkerPool::OnRasterFinished"); | 151 TRACE_EVENT1("cc", |
152 "GpuRasterWorkerPool::OnRasterTaskSetFinished", | |
153 "task_set", | |
154 task_set); | |
161 | 155 |
162 DCHECK(raster_tasks_pending_); | 156 DCHECK(raster_task_sets_pending_[task_set]); |
163 raster_tasks_pending_ = false; | 157 raster_task_sets_pending_[task_set] = false; |
164 client_->DidFinishRunningTasks(); | 158 client_->DidFinishRunningTaskSet(task_set); |
165 } | |
166 | |
167 void GpuRasterWorkerPool::OnRasterRequiredForActivationFinished() { | |
168 TRACE_EVENT0("cc", | |
169 "GpuRasterWorkerPool::OnRasterRequiredForActivationFinished"); | |
170 | |
171 DCHECK(raster_tasks_required_for_activation_pending_); | |
172 raster_tasks_required_for_activation_pending_ = false; | |
173 client_->DidFinishRunningTasksRequiredForActivation(); | |
174 } | 159 } |
175 | 160 |
176 void GpuRasterWorkerPool::ScheduleRunTasksOnOriginThread() { | 161 void GpuRasterWorkerPool::ScheduleRunTasksOnOriginThread() { |
177 if (run_tasks_on_origin_thread_pending_) | 162 if (run_tasks_on_origin_thread_pending_) |
178 return; | 163 return; |
179 | 164 |
180 task_runner_->PostTask( | 165 task_runner_->PostTask( |
181 FROM_HERE, | 166 FROM_HERE, |
182 base::Bind(&GpuRasterWorkerPool::RunTasksOnOriginThread, | 167 base::Bind(&GpuRasterWorkerPool::RunTasksOnOriginThread, |
183 weak_ptr_factory_.GetWeakPtr())); | 168 weak_ptr_factory_.GetWeakPtr())); |
184 run_tasks_on_origin_thread_pending_ = true; | 169 run_tasks_on_origin_thread_pending_ = true; |
185 } | 170 } |
186 | 171 |
187 void GpuRasterWorkerPool::RunTasksOnOriginThread() { | 172 void GpuRasterWorkerPool::RunTasksOnOriginThread() { |
188 TRACE_EVENT0("cc", "GpuRasterWorkerPool::RunTasksOnOriginThread"); | 173 TRACE_EVENT0("cc", "GpuRasterWorkerPool::RunTasksOnOriginThread"); |
189 | 174 |
190 DCHECK(run_tasks_on_origin_thread_pending_); | 175 DCHECK(run_tasks_on_origin_thread_pending_); |
191 run_tasks_on_origin_thread_pending_ = false; | 176 run_tasks_on_origin_thread_pending_ = false; |
192 | 177 |
193 ScopedGpuRaster gpu_raster(context_provider_); | 178 ScopedGpuRaster gpu_raster(context_provider_); |
194 task_graph_runner_->RunUntilIdle(); | 179 task_graph_runner_->RunUntilIdle(); |
195 } | 180 } |
196 | 181 |
197 } // namespace cc | 182 } // namespace cc |