Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(344)

Side by Side Diff: cc/raster/one_copy_raster_buffer_provider.cc

Issue 2446523002: cc: Use CHROMIUM_copy_image for one-copy tile updates.
Patch Set: rebase Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | cc/raster/staging_buffer_pool.h » ('j') | cc/raster/staging_buffer_pool.cc » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_raster_buffer_provider.h" 5 #include "cc/raster/one_copy_raster_buffer_provider.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <limits> 10 #include <limits>
11 #include <utility> 11 #include <utility>
12 12
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/metrics/histogram_macros.h" 14 #include "base/metrics/histogram_macros.h"
15 #include "base/trace_event/trace_event.h" 15 #include "base/trace_event/trace_event.h"
16 #include "cc/base/histograms.h" 16 #include "cc/base/histograms.h"
17 #include "cc/base/math_util.h" 17 #include "cc/base/math_util.h"
18 #include "cc/resources/platform_color.h" 18 #include "cc/resources/platform_color.h"
19 #include "cc/resources/resource_format.h" 19 #include "cc/resources/resource_format.h"
20 #include "cc/resources/resource_util.h" 20 #include "cc/resources/resource_util.h"
21 #include "cc/resources/scoped_resource.h" 21 #include "cc/resources/scoped_resource.h"
22 #include "gpu/GLES2/gl2extchromium.h" 22 #include "gpu/GLES2/gl2extchromium.h"
23 #include "gpu/command_buffer/client/gles2_interface.h" 23 #include "gpu/command_buffer/client/gles2_interface.h"
24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
25 #include "ui/gfx/buffer_format_util.h" 25 #include "ui/gfx/buffer_format_util.h"
26 #include "ui/gfx/gpu_fence.h"
26 27
27 namespace cc { 28 namespace cc {
28 namespace { 29 namespace {
29 30
30 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 31 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
31 // default batch size for copy operations. 32 // default batch size for copy operations.
32 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 33 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
33 34
34 } // namespace 35 } // namespace
35 36
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after
217 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we 218 // Allocate GpuMemoryBuffer if necessary. If using partial raster, we
218 // must allocate a buffer with BufferUsage CPU_READ_WRITE_PERSISTENT. 219 // must allocate a buffer with BufferUsage CPU_READ_WRITE_PERSISTENT.
219 if (!staging_buffer->gpu_memory_buffer) { 220 if (!staging_buffer->gpu_memory_buffer) {
220 staging_buffer->gpu_memory_buffer = 221 staging_buffer->gpu_memory_buffer =
221 resource_provider_->gpu_memory_buffer_manager() 222 resource_provider_->gpu_memory_buffer_manager()
222 ->AllocateGpuMemoryBuffer( 223 ->AllocateGpuMemoryBuffer(
223 staging_buffer->size, BufferFormat(resource->format()), 224 staging_buffer->size, BufferFormat(resource->format()),
224 StagingBufferUsage(), gpu::kNullSurfaceHandle); 225 StagingBufferUsage(), gpu::kNullSurfaceHandle);
225 } 226 }
226 227
228 // Allocate GpuFence if necessary.
229 if (!staging_buffer->gpu_fence) {
230 staging_buffer->gpu_fence =
231 resource_provider_->gpu_memory_buffer_manager()->CreateGpuFence();
232 }
233
227 gfx::Rect playback_rect = raster_full_rect; 234 gfx::Rect playback_rect = raster_full_rect;
228 if (use_partial_raster_ && previous_content_id) { 235 if (use_partial_raster_ && previous_content_id) {
229 // Reduce playback rect to dirty region if the content id of the staging 236 // Reduce playback rect to dirty region if the content id of the staging
230 // buffer matches the previous content id. 237 // buffer matches the previous content id.
231 if (previous_content_id == staging_buffer->content_id) 238 if (previous_content_id == staging_buffer->content_id)
232 playback_rect.Intersect(raster_dirty_rect); 239 playback_rect.Intersect(raster_dirty_rect);
233 } 240 }
234 241
235 // Log a histogram of the percentage of pixels that were saved due to 242 // Log a histogram of the percentage of pixels that were saved due to
236 // partial raster. 243 // partial raster.
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
275 uint64_t new_content_id) { 282 uint64_t new_content_id) {
276 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); 283 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_);
277 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); 284 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
278 DCHECK(gl); 285 DCHECK(gl);
279 286
280 // Create texture after synchronizing with compositor. 287 // Create texture after synchronizing with compositor.
281 ResourceProvider::ScopedTextureProvider scoped_texture( 288 ResourceProvider::ScopedTextureProvider scoped_texture(
282 gl, resource_lock, async_worker_context_enabled_); 289 gl, resource_lock, async_worker_context_enabled_);
283 290
284 unsigned resource_texture_id = scoped_texture.texture_id(); 291 unsigned resource_texture_id = scoped_texture.texture_id();
285 unsigned image_target = resource_provider_->GetImageTextureTarget(
286 StagingBufferUsage(), staging_buffer->format);
287 292
288 // Create and bind staging texture. 293 // Create image.
289 if (!staging_buffer->texture_id) {
290 gl->GenTextures(1, &staging_buffer->texture_id);
291 gl->BindTexture(image_target, staging_buffer->texture_id);
292 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
293 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
294 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
295 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
296 } else {
297 gl->BindTexture(image_target, staging_buffer->texture_id);
298 }
299
300 // Create and bind image.
301 if (!staging_buffer->image_id) { 294 if (!staging_buffer->image_id) {
302 if (staging_buffer->gpu_memory_buffer) { 295 if (staging_buffer->gpu_memory_buffer) {
303 staging_buffer->image_id = gl->CreateImageCHROMIUM( 296 staging_buffer->image_id = gl->CreateImageCHROMIUM(
304 staging_buffer->gpu_memory_buffer->AsClientBuffer(), 297 staging_buffer->gpu_memory_buffer->AsClientBuffer(),
305 staging_buffer->size.width(), staging_buffer->size.height(), 298 staging_buffer->size.width(), staging_buffer->size.height(),
306 GLInternalFormat(resource_lock->format())); 299 GLInternalFormat(resource_lock->format()));
307 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id, 0);
308 } 300 }
309 } else {
310 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
311 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id, 0);
312 } 301 }
313 302
314 // Unbind staging texture. 303 // GpuFence must be reset.
315 gl->BindTexture(image_target, 0); 304 DCHECK(!staging_buffer->gpu_fence ||
305 !staging_buffer->gpu_fence->IsSignaled());
316 306
317 if (resource_provider_->use_sync_query()) { 307 // Create fence.
318 if (!staging_buffer->query_id) 308 if (!staging_buffer->fence_id) {
319 gl->GenQueriesEXT(1, &staging_buffer->query_id); 309 if (staging_buffer->gpu_fence) {
320 310 staging_buffer->fence_id =
321 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) 311 gl->CreateFenceCHROMIUM(staging_buffer->gpu_fence->AsClientFence());
322 // TODO(reveman): This avoids a performance problem on ARM ChromeOS 312 }
323 // devices. crbug.com/580166
324 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
325 #else
326 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, staging_buffer->query_id);
327 #endif
328 } 313 }
329 314
330 // Since compressed textures cannot be pre-allocated we might have an 315 // Since compressed textures cannot be pre-allocated we might have an
331 // unallocated resource in which case we need to perform a full size copy. 316 // unallocated resource in which case we need to perform a full size copy.
332 if (IsResourceFormatCompressed(resource_lock->format())) { 317 if (IsResourceFormatCompressed(resource_lock->format())) {
333 gl->CompressedCopyTextureCHROMIUM(staging_buffer->texture_id, 318 gl->CopyImageSubDataCHROMIUM(staging_buffer->image_id, resource_texture_id,
ericrk 2017/01/10 03:50:31 Internally (if I traced through the code correctly
reveman 2017/01/10 19:37:55 We might need to add some special code for this on
334 resource_texture_id); 319 0, 0, 0, 0, resource_lock->size().width(),
320 resource_lock->size().height(), 0,
321 staging_buffer->fence_id);
335 } else { 322 } else {
336 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>( 323 int bytes_per_row = ResourceUtil::UncheckedWidthInBytes<int>(
337 resource_lock->size().width(), resource_lock->format()); 324 resource_lock->size().width(), resource_lock->format());
338 int chunk_size_in_rows = 325 int chunk_size_in_rows =
339 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); 326 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
340 // Align chunk size to 4. Required to support compressed texture formats. 327 // Align chunk size to 4. Required to support compressed texture formats.
341 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); 328 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
342 int y = 0; 329 int y = 0;
343 int height = resource_lock->size().height(); 330 int height = resource_lock->size().height();
344 while (y < height) { 331 while (y < height) {
345 // Copy at most |chunk_size_in_rows|. 332 // Copy at most |chunk_size_in_rows|.
346 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 333 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
347 DCHECK_GT(rows_to_copy, 0); 334 DCHECK_GT(rows_to_copy, 0);
348 335
349 gl->CopySubTextureCHROMIUM( 336 gl->CopyImageSubDataCHROMIUM(staging_buffer->image_id,
350 staging_buffer->texture_id, resource_texture_id, 0, y, 0, y, 337 resource_texture_id, 0, y, 0, y,
351 resource_lock->size().width(), rows_to_copy, false, false, false); 338 resource_lock->size().width(), rows_to_copy,
339 0, staging_buffer->fence_id);
340
352 y += rows_to_copy; 341 y += rows_to_copy;
353 342
354 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory 343 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
355 // used for this copy operation. 344 // used for this copy operation.
356 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; 345 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
357 346
358 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { 347 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
359 gl->ShallowFlushCHROMIUM(); 348 gl->ShallowFlushCHROMIUM();
360 bytes_scheduled_since_last_flush_ = 0; 349 bytes_scheduled_since_last_flush_ = 0;
361 } 350 }
362 } 351 }
363 } 352 }
364 353
365 if (resource_provider_->use_sync_query()) {
366 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
367 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
368 #else
369 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
370 #endif
371 }
372
373 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM(); 354 const uint64_t fence_sync = gl->InsertFenceSyncCHROMIUM();
374 355
375 // Barrier to sync worker context output to cc context. 356 // Barrier to sync worker context output to cc context.
376 gl->OrderingBarrierCHROMIUM(); 357 gl->OrderingBarrierCHROMIUM();
377 358
378 // Generate sync token after the barrier for cross context synchronization. 359 // Generate sync token after the barrier for cross context synchronization.
379 gpu::SyncToken resource_sync_token; 360 gpu::SyncToken resource_sync_token;
380 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, resource_sync_token.GetData()); 361 gl->GenUnverifiedSyncTokenCHROMIUM(fence_sync, resource_sync_token.GetData());
381 resource_lock->set_sync_token(resource_sync_token); 362 resource_lock->set_sync_token(resource_sync_token);
382 resource_lock->set_synchronized(!async_worker_context_enabled_); 363 resource_lock->set_synchronized(!async_worker_context_enabled_);
383 } 364 }
384 365
385 gfx::BufferUsage OneCopyRasterBufferProvider::StagingBufferUsage() const { 366 gfx::BufferUsage OneCopyRasterBufferProvider::StagingBufferUsage() const {
386 return use_partial_raster_ 367 return use_partial_raster_
387 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT 368 ? gfx::BufferUsage::GPU_READ_CPU_READ_WRITE_PERSISTENT
388 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE; 369 : gfx::BufferUsage::GPU_READ_CPU_READ_WRITE;
389 } 370 }
390 371
391 } // namespace cc 372 } // namespace cc
OLDNEW
« no previous file with comments | « no previous file | cc/raster/staging_buffer_pool.h » ('j') | cc/raster/staging_buffer_pool.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698