// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/gpu_tile_task_worker_pool.h"

#include <algorithm>

#include "base/trace_event/trace_event.h"
#include "cc/resources/gpu_rasterizer.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/raster_source.h"
#include "cc/resources/resource.h"
#include "cc/resources/scoped_gpu_raster.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "third_party/skia/include/core/SkMultiPictureDraw.h"
#include "third_party/skia/include/core/SkPictureRecorder.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"

namespace cc {
namespace {

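// RasterBuffer implementation that rasterizes a RasterSource directly into a
// GPU resource on a worker thread, using the shared GpuRasterizer and the
// worker context.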
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(GpuRasterizer* rasterizer, const Resource* resource)
      : rasterizer_(rasterizer),
        lock_(rasterizer->resource_provider(), resource->id()),
        resource_(resource) {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    TRACE_EVENT0("cc", "RasterBufferImpl::Playback");
    ContextProvider* context_provider = rasterizer_->resource_provider()
                                            ->output_surface()
                                            ->worker_context_provider();

    // The context lock must be held while accessing the context on a
    // worker thread.
    base::AutoLock context_lock(*context_provider->GetLock());

    // Allow this worker thread to bind to context_provider.
    context_provider->DetachFromThread();

    // Rasterize source into resource.
    rasterizer_->RasterizeSource(&lock_, raster_source, rect, scale);

    // Barrier to sync worker context output to cc context.
    context_provider->ContextGL()->OrderingBarrierCHROMIUM();

    // Allow compositor thread to bind to context_provider.
    context_provider->DetachFromThread();
  }

 private:
  GpuRasterizer* rasterizer_;
  ResourceProvider::ScopedWriteLockGr lock_;
  const Resource* resource_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace

// static
scoped_ptr<TileTaskWorkerPool> GpuTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    bool use_distance_field_text,
    int gpu_rasterization_msaa_sample_count) {
  return make_scoped_ptr<TileTaskWorkerPool>(new GpuTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      use_distance_field_text, gpu_rasterization_msaa_sample_count));
}

GpuTileTaskWorkerPool::GpuTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    bool use_distance_field_text,
    int gpu_rasterization_msaa_sample_count)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner_->GetNamespaceToken()),
      rasterizer_(new GpuRasterizer(context_provider,
                                    resource_provider,
                                    use_distance_field_text,
                                    gpu_rasterization_msaa_sample_count)),
      task_set_finished_weak_ptr_factory_(this),
      weak_ptr_factory_(this) {
}

GpuTileTaskWorkerPool::~GpuTileTaskWorkerPool() {
  DCHECK_EQ(0u, completed_tasks_.size());
}

TileTaskRunner* GpuTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void GpuTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

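// Cancels any tasks still waiting to run by scheduling an empty graph, then
// blocks until tasks already running in this namespace have finished.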
void GpuTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

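// Builds a new TaskGraph from |queue|: each raster task is linked to a
// "task set finished" task for every task set it belongs to, and the graph is
// handed to the TaskGraphRunner after a barrier that syncs newly allocated
// resources to the worker context.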
void GpuTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::ScheduleTasks");

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&GpuTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);

  // Barrier to sync any new resources to the worker context.
  rasterizer_->resource_provider()
      ->output_surface()
      ->context_provider()
      ->ContextGL()
      ->OrderingBarrierCHROMIUM();

  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);
}

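// Collects completed tasks from the TaskGraphRunner and runs their completion
// and reply callbacks on the origin thread.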
void GpuTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  CompleteTasks(completed_tasks_);
  completed_tasks_.clear();
}

ResourceFormat GpuTileTaskWorkerPool::GetResourceFormat() {
  return rasterizer_->resource_provider()->best_texture_format();
}

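// Notifies each finished raster task of completion and runs its reply
// callback on the origin thread.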
void GpuTileTaskWorkerPool::CompleteTasks(const Task::Vector& tasks) {
  for (auto& task : tasks) {
    RasterTask* raster_task = static_cast<RasterTask*>(task.get());

    raster_task->WillComplete();
    raster_task->CompleteOnOriginThread(this);
    raster_task->DidComplete();

    raster_task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

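// Raster buffers write directly into the resource through the GPU rasterizer,
// so acquiring one simply wraps the resource and releasing it needs no extra
// work.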
scoped_ptr<RasterBuffer> GpuTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(rasterizer_.get(), resource));
}

void GpuTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

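// Runs on the origin thread once every task in |task_set| has finished;
// clears the pending bit and notifies the client.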
void GpuTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "GpuTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  client_->DidFinishRunningTileTasks(task_set);
}

}  // namespace cc