OLD | NEW |
| (Empty) |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | |
6 | |
7 #include <stdint.h> | |
8 | |
9 #include <algorithm> | |
10 #include <limits> | |
11 #include <utility> | |
12 | |
13 #include "base/macros.h" | |
14 #include "cc/base/math_util.h" | |
15 #include "cc/raster/staging_buffer_pool.h" | |
16 #include "cc/resources/platform_color.h" | |
17 #include "cc/resources/resource_format.h" | |
18 #include "cc/resources/resource_util.h" | |
19 #include "cc/resources/scoped_resource.h" | |
20 #include "gpu/GLES2/gl2extchromium.h" | |
21 #include "gpu/command_buffer/client/gles2_interface.h" | |
22 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | |
23 #include "ui/gfx/buffer_format_util.h" | |
24 | |
25 namespace cc { | |
26 namespace { | |
27 | |
// One-copy raster buffer. Playback() is forwarded to the owning
// OneCopyTileTaskWorkerPool, which rasters into a staging buffer and then
// copies the result into |resource_| on the GPU (see
// PlaybackAndCopyOnWorkerThread).
class RasterBufferImpl : public RasterBuffer {
 public:
  // |worker_pool| and |resource| must outlive this buffer; the write lock on
  // |resource| is held for the buffer's lifetime.
  // NOTE(review): |resource_format| is accepted but never stored or used —
  // confirm whether it can be dropped at the call site.
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourceFormat resource_format,
                   const Resource* resource,
                   uint64_t previous_content_id)
      : worker_pool_(worker_pool),
        resource_(resource),
        lock_(resource_provider, resource->id()),
        previous_content_id_(previous_content_id) {}

  ~RasterBufferImpl() override {}

  // Overridden from RasterBuffer:
  void Playback(
      const RasterSource* raster_source,
      const gfx::Rect& raster_full_rect,
      const gfx::Rect& raster_dirty_rect,
      uint64_t new_content_id,
      float scale,
      const RasterSource::PlaybackSettings& playback_settings) override {
    worker_pool_->PlaybackAndCopyOnWorkerThread(
        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
        scale, playback_settings, previous_content_id_, new_content_id);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;  // Not owned.
  const Resource* resource_;                // Not owned.
  // Write lock on |resource_|, released when this buffer is destroyed.
  ResourceProvider::ScopedWriteLockGL lock_;
  // Content id of the previous raster into this resource; enables partial
  // raster of only the dirty region.
  uint64_t previous_content_id_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// 4MiB is the size of 4 512x512 tiles, which has proven to be a good
// default batch size for copy operations.
const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
67 | |
68 } // namespace | |
69 | |
70 // static | |
71 std::unique_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | |
72 base::SequencedTaskRunner* task_runner, | |
73 TaskGraphRunner* task_graph_runner, | |
74 ContextProvider* context_provider, | |
75 ResourceProvider* resource_provider, | |
76 int max_copy_texture_chromium_size, | |
77 bool use_partial_raster, | |
78 int max_staging_buffer_usage_in_bytes, | |
79 ResourceFormat preferred_tile_format) { | |
80 return base::WrapUnique<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | |
81 task_runner, task_graph_runner, resource_provider, | |
82 max_copy_texture_chromium_size, use_partial_raster, | |
83 max_staging_buffer_usage_in_bytes, preferred_tile_format)); | |
84 } | |
85 | |
// Constructor. Acquires a namespace token from |task_graph_runner| for this
// pool's tasks and creates the staging buffer pool used for one-copy uploads.
OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider,
    int max_copy_texture_chromium_size,
    bool use_partial_raster,
    int max_staging_buffer_usage_in_bytes,
    ResourceFormat preferred_tile_format)
    : task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      // Clamp the per-operation copy budget to the platform's copy-texture
      // limit when one is given; 0 means "no platform limit".
      max_bytes_per_copy_operation_(
          max_copy_texture_chromium_size
              ? std::min(kMaxBytesPerCopyOperation,
                         max_copy_texture_chromium_size)
              : kMaxBytesPerCopyOperation),
      use_partial_raster_(use_partial_raster),
      bytes_scheduled_since_last_flush_(0),
      preferred_tile_format_(preferred_tile_format) {
  staging_pool_ = StagingBufferPool::Create(task_runner, resource_provider,
                                            use_partial_raster,
                                            max_staging_buffer_usage_in_bytes);
}
109 | |
110 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | |
111 } | |
112 | |
113 void OneCopyTileTaskWorkerPool::Shutdown() { | |
114 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | |
115 | |
116 TaskGraph empty; | |
117 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | |
118 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | |
119 | |
120 staging_pool_->Shutdown(); | |
121 } | |
122 | |
// Schedules |graph| on the task graph runner. An ordering barrier is issued
// on the origin-thread GL context before handing the graph off, so work on
// the worker context observes resources created here.
void OneCopyTileTaskWorkerPool::ScheduleTasks(TaskGraph* graph) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

  ScheduleTasksOnOriginThread(this, graph);

  // Barrier to sync any new resources to the worker context.
  resource_provider_->output_surface()
      ->context_provider()
      ->ContextGL()
      ->OrderingBarrierCHROMIUM();

  task_graph_runner_->ScheduleTasks(namespace_token_, graph);
}
136 | |
137 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | |
138 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | |
139 | |
140 task_graph_runner_->CollectCompletedTasks(namespace_token_, | |
141 &completed_tasks_); | |
142 | |
143 for (Task::Vector::const_iterator it = completed_tasks_.begin(); | |
144 it != completed_tasks_.end(); ++it) { | |
145 TileTask* task = static_cast<TileTask*>(it->get()); | |
146 | |
147 task->WillComplete(); | |
148 task->CompleteOnOriginThread(this); | |
149 task->DidComplete(); | |
150 } | |
151 completed_tasks_.clear(); | |
152 } | |
153 | |
154 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat( | |
155 bool must_support_alpha) const { | |
156 if (resource_provider_->IsResourceFormatSupported(preferred_tile_format_) && | |
157 (DoesResourceFormatSupportAlpha(preferred_tile_format_) || | |
158 !must_support_alpha)) { | |
159 return preferred_tile_format_; | |
160 } | |
161 | |
162 return resource_provider_->best_texture_format(); | |
163 } | |
164 | |
165 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle( | |
166 bool must_support_alpha) const { | |
167 return ResourceFormatRequiresSwizzle(GetResourceFormat(must_support_alpha)); | |
168 } | |
169 | |
// This pool implements RasterBufferProvider itself, so just return |this|.
RasterBufferProvider* OneCopyTileTaskWorkerPool::AsRasterBufferProvider() {
  return this;
}
173 | |
// Creates a RasterBuffer for |resource| whose Playback() rasters into a
// staging buffer and copies into the resource. |previous_content_id| enables
// partial raster; |resource_content_id| is currently unused (see TODO).
std::unique_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
  // the dirty rect.
  return base::WrapUnique<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource->format(),
                           resource, previous_content_id));
}
184 | |
// Releases |buffer|; destruction of RasterBufferImpl (and its scoped write
// lock member) does all necessary cleanup.
void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    std::unique_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
189 | |
// Worker-thread entry point for one-copy raster: software-rasters
// |raster_source| into a staging buffer, then copies the staging contents
// into |resource| on the GPU.
void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
    const Resource* resource,
    ResourceProvider::ScopedWriteLockGL* resource_lock,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale,
    const RasterSource::PlaybackSettings& playback_settings,
    uint64_t previous_content_id,
    uint64_t new_content_id) {
  // |previous_content_id| is passed so the pool can hand back a buffer whose
  // contents are reusable for partial raster (see StagingBufferPool).
  std::unique_ptr<StagingBuffer> staging_buffer =
      staging_pool_->AcquireStagingBuffer(resource, previous_content_id);

  // Step 1: raster into the staging buffer's CPU-mappable memory.
  PlaybackToStagingBuffer(staging_buffer.get(), resource, raster_source,
                          raster_full_rect, raster_dirty_rect, scale,
                          playback_settings, previous_content_id,
                          new_content_id);

  // Step 2: GPU copy from the staging texture into the tile resource.
  CopyOnWorkerThread(staging_buffer.get(), resource, resource_lock,
                     raster_source, previous_content_id, new_content_id);

  // Return the buffer to the pool for reuse by later tiles.
  staging_pool_->ReleaseStagingBuffer(std::move(staging_buffer));
}
213 | |
// Rasters |raster_source| into |staging_buffer|'s GpuMemoryBuffer memory,
// allocating the buffer lazily on first use. With partial raster enabled and
// a matching previous content id, only the dirty region is re-rastered.
void OneCopyTileTaskWorkerPool::PlaybackToStagingBuffer(
    StagingBuffer* staging_buffer,
    const Resource* resource,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale,
    const RasterSource::PlaybackSettings& playback_settings,
    uint64_t previous_content_id,
    uint64_t new_content_id) {
  // Allocate GpuMemoryBuffer if necessary. If using partial raster, we
  // must allocate a buffer with BufferUsage CPU_READ_WRITE_PERSISTENT.
  if (!staging_buffer->gpu_memory_buffer) {
    staging_buffer->gpu_memory_buffer =
        resource_provider_->gpu_memory_buffer_manager()
            ->AllocateGpuMemoryBuffer(
                staging_buffer->size, BufferFormat(resource->format()),
                use_partial_raster_
                    ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
                    : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
                gpu::kNullSurfaceHandle);
  }

  gfx::Rect playback_rect = raster_full_rect;
  if (use_partial_raster_ && previous_content_id) {
    // Reduce playback rect to dirty region if the content id of the staging
    // buffer matches the prevous content id.
    if (previous_content_id == staging_buffer->content_id)
      playback_rect.Intersect(raster_dirty_rect);
  }

  // Allocation above can fail; raster only proceeds with a valid buffer.
  if (staging_buffer->gpu_memory_buffer) {
    gfx::GpuMemoryBuffer* buffer = staging_buffer->gpu_memory_buffer.get();
    // One-copy raster only supports single-plane buffer formats here.
    DCHECK_EQ(1u, gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat()));
    bool rv = buffer->Map();
    DCHECK(rv);
    DCHECK(buffer->memory(0));
    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
    DCHECK_GE(buffer->stride(0), 0);

    DCHECK(!playback_rect.IsEmpty())
        << "Why are we rastering a tile that's not dirty?";
    TileTaskWorkerPool::PlaybackToMemory(
        buffer->memory(0), resource->format(), staging_buffer->size,
        buffer->stride(0), raster_source, raster_full_rect, playback_rect,
        scale, playback_settings);
    buffer->Unmap();
    // Record what the buffer now holds so a later raster with the same
    // previous content id can do a partial update.
    staging_buffer->content_id = new_content_id;
  }
}
264 | |
// Copies the rastered staging buffer into |resource|'s texture on the worker
// GL context, in chunks bounded by |max_bytes_per_copy_operation_|, and
// publishes a sync token so the cc context can consume the result.
void OneCopyTileTaskWorkerPool::CopyOnWorkerThread(
    StagingBuffer* staging_buffer,
    const Resource* resource,
    ResourceProvider::ScopedWriteLockGL* resource_lock,
    const RasterSource* raster_source,
    uint64_t previous_content_id,
    uint64_t new_content_id) {
  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    // The worker context is shared; hold its lock for the whole copy.
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    unsigned image_target =
        resource_provider_->GetImageTextureTarget(resource->format());

    // Create and bind staging texture.
    if (!staging_buffer->texture_id) {
      gl->GenTextures(1, &staging_buffer->texture_id);
      gl->BindTexture(image_target, staging_buffer->texture_id);
      gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    } else {
      gl->BindTexture(image_target, staging_buffer->texture_id);
    }

    // Create and bind image. On reuse, release and rebind so the GPU picks
    // up the CPU writes made in PlaybackToStagingBuffer.
    if (!staging_buffer->image_id) {
      if (staging_buffer->gpu_memory_buffer) {
        staging_buffer->image_id = gl->CreateImageCHROMIUM(
            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
            staging_buffer->size.width(), staging_buffer->size.height(),
            GLInternalFormat(resource->format()));
        gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
      }
    } else {
      gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
      gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
    }

    // Unbind staging texture.
    gl->BindTexture(image_target, 0);

    // Bracket the copy in a query — presumably so the staging pool can tell
    // when the GPU is done with the buffer (confirm in StagingBufferPool).
    if (resource_provider_->use_sync_query()) {
      if (!staging_buffer->query_id)
        gl->GenQueriesEXT(1, &staging_buffer->query_id);

#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
      // TODO(reveman): This avoids a performance problem on ARM ChromeOS
      // devices. crbug.com/580166
      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
#else
      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
                        staging_buffer->query_id);
#endif
    }

    // Since compressed texture's cannot be pre-allocated we might have an
    // unallocated resource in which case we need to perform a full size copy.
    if (IsResourceFormatCompressed(resource->format())) {
      gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id,
                                        resource_lock->texture_id());
    } else {
      // Copy in row chunks so a single operation never exceeds
      // |max_bytes_per_copy_operation_|, flushing between batches.
      int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>(
          resource->size().width(), resource->format());
      int chunk_size_in_rows =
          std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
      // Align chunk size to 4. Required to support compressed texture formats.
      chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
      int y = 0;
      int height = resource->size().height();
      while (y < height) {
        // Copy at most |chunk_size_in_rows|.
        int rows_to_copy = std::min(chunk_size_in_rows, height - y);
        DCHECK_GT(rows_to_copy, 0);

        gl->CopySubTextureCHROMIUM(
            staging_buffer->texture_id, resource_lock->texture_id(), 0, y, 0, y,
            resource->size().width(), rows_to_copy, false, false, false);
        y += rows_to_copy;

        // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
        // used for this copy operation.
        bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;

        if (bytes_scheduled_since_last_flush_ >=
            max_bytes_per_copy_operation_) {
          gl->ShallowFlushCHROMIUM();
          bytes_scheduled_since_last_flush_ = 0;
        }
      }
    }

    if (resource_provider_->use_sync_query()) {
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
#else
      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
#endif
    }

    const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();

    // Barrier to sync worker context output to cc context.
    gl->OrderingBarrierCHROMIUM();

    // Generate sync token after the barrier for cross context synchronization.
    gpu::SyncToken sync_token;
    gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, sync_token.GetData());
    resource_lock->UpdateResourceSyncToken(sync_token);
  }
}
383 | |
384 } // namespace cc | |
OLD | NEW |