 Chromium Code Reviews
 Chromium Code Reviews Issue 1157943004:
  cc: [WIP] Use worker context and OrderingBarrierCHROMIUM for one-copy.  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master
    
  
    Issue 1157943004:
  cc: [WIP] Use worker context and OrderingBarrierCHROMIUM for one-copy.  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master| Index: cc/raster/one_copy_tile_task_worker_pool.cc | 
| diff --git a/cc/raster/one_copy_tile_task_worker_pool.cc b/cc/raster/one_copy_tile_task_worker_pool.cc | 
| index 78e6b106ce94308185987f79f3d47d8bd282c630..bcfa2e5eeff6016c00c0588ea812ff808321bbf6 100644 | 
| --- a/cc/raster/one_copy_tile_task_worker_pool.cc | 
| +++ b/cc/raster/one_copy_tile_task_worker_pool.cc | 
| @@ -19,6 +19,8 @@ | 
| #include "gpu/command_buffer/client/gles2_interface.h" | 
| #include "ui/gfx/gpu_memory_buffer.h" | 
| +using gpu::gles2::GLES2Interface; | 
| + | 
| namespace cc { | 
| namespace { | 
| @@ -34,8 +36,7 @@ class RasterBufferImpl : public RasterBuffer { | 
| resource_provider_(resource_provider), | 
| resource_pool_(resource_pool), | 
| output_resource_(output_resource), | 
| - raster_content_id_(0), | 
| - sequence_(0) { | 
| + raster_content_id_(0) { | 
| if (worker_pool->have_persistent_gpu_memory_buffers() && | 
| previous_content_id) { | 
| raster_resource_ = | 
| @@ -51,7 +52,10 @@ class RasterBufferImpl : public RasterBuffer { | 
| resource_format); | 
| } | 
| - lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 
| + gl_lock_.reset(new ResourceProvider::ScopedWriteLockGL( | 
| + resource_provider_, output_resource_->id())); | 
| + | 
| + lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread( | 
| resource_provider_, raster_resource_->id())); | 
| } | 
| @@ -59,11 +63,7 @@ class RasterBufferImpl : public RasterBuffer { | 
| // Release write lock in case a copy was never scheduled. | 
| lock_.reset(); | 
| - // Make sure any scheduled copy operations are issued before we release the | 
| - // raster resource. | 
| - if (sequence_) | 
| - worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | 
| - | 
| + gl_lock_.reset(); | 
| // Return resources to pool so they can be used by another RasterBuffer | 
| // instance. | 
| resource_pool_->ReleaseResource(raster_resource_.Pass(), | 
| @@ -79,10 +79,10 @@ class RasterBufferImpl : public RasterBuffer { | 
| // If there's a raster_content_id_, we are reusing a resource with that | 
| // content id. | 
| bool reusing_raster_resource = raster_content_id_ != 0; | 
| - sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | 
| - reusing_raster_resource, lock_.Pass(), raster_resource_.get(), | 
| - output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | 
| - scale); | 
| + worker_pool_->PlaybackAndCopyOnWorkerThread( | 
| + reusing_raster_resource, lock_.Pass(), gl_lock_.Pass(), | 
| 
piman
2015/07/10 02:25:09
Oh, actually, something I missed in previous review.
 
sohanjg
2015/07/10 13:34:31
Done.
 | 
| + raster_resource_.get(), output_resource_, raster_source, | 
| + raster_full_rect, raster_dirty_rect, scale); | 
| // Store the content id of the resource to return to the pool. | 
| raster_content_id_ = new_content_id; | 
| } | 
| @@ -94,39 +94,18 @@ class RasterBufferImpl : public RasterBuffer { | 
| const Resource* output_resource_; | 
| uint64_t raster_content_id_; | 
| scoped_ptr<ScopedResource> raster_resource_; | 
| - scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | 
| - CopySequenceNumber sequence_; | 
| + scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread> lock_; | 
| + scoped_ptr<ResourceProvider::ScopedWriteLockGL> gl_lock_; | 
| DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 
| }; | 
| -// Number of in-flight copy operations to allow. | 
| -const int kMaxCopyOperations = 32; | 
| - | 
| -// Delay been checking for copy operations to complete. | 
| -const int kCheckForCompletedCopyOperationsTickRateMs = 1; | 
| - | 
| -// Number of failed attempts to allow before we perform a check that will | 
| -// wait for copy operations to complete if needed. | 
| -const int kFailedAttemptsBeforeWaitIfNeeded = 256; | 
| - | 
| // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 
| // default batch size for copy operations. | 
| const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 
| } // namespace | 
| -OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | 
| - scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | 
| - const Resource* src, | 
| - const Resource* dst, | 
| - const gfx::Rect& rect) | 
| - : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | 
| -} | 
| - | 
| -OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | 
| -} | 
| - | 
| // static | 
| scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 
| base::SequencedTaskRunner* task_runner, | 
| @@ -162,14 +141,7 @@ OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 
| max_copy_texture_chromium_size) | 
| : kMaxBytesPerCopyOperation), | 
| have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 
| - last_issued_copy_operation_(0), | 
| - last_flushed_copy_operation_(0), | 
| lock_(), | 
| - copy_operation_count_cv_(&lock_), | 
| - bytes_scheduled_since_last_flush_(0), | 
| - issued_copy_operation_count_(0), | 
| - next_copy_operation_sequence_(1), | 
| - check_for_completed_copy_operations_pending_(false), | 
| shutdown_(false), | 
| weak_ptr_factory_(this), | 
| task_set_finished_weak_ptr_factory_(this) { | 
| @@ -177,7 +149,6 @@ OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 
| } | 
| OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 
| - DCHECK_EQ(pending_copy_operations_.size(), 0u); | 
| } | 
| TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 
| @@ -195,7 +166,6 @@ void OneCopyTileTaskWorkerPool::Shutdown() { | 
| base::AutoLock lock(lock_); | 
| shutdown_ = true; | 
| - copy_operation_count_cv_.Signal(); | 
| } | 
| TaskGraph empty; | 
| @@ -265,6 +235,13 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 
| } | 
| ScheduleTasksOnOriginThread(this, &graph_); | 
| + | 
| + // Barrier to sync any new resources to the worker context. | 
| + resource_provider_->output_surface() | 
| + ->context_provider() | 
| + ->ContextGL() | 
| + ->OrderingBarrierCHROMIUM(); | 
| + | 
| task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 
| std::copy(new_task_set_finished_tasks, | 
| @@ -322,19 +299,28 @@ void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 
| // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 
| } | 
| -CopySequenceNumber | 
| -OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 
| +void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( | 
| bool reusing_raster_resource, | 
| - scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | 
| + scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread> | 
| raster_resource_write_lock, | 
| + scoped_ptr<ResourceProvider::ScopedWriteLockGL> output_resource_write_lock, | 
| const Resource* raster_resource, | 
| const Resource* output_resource, | 
| const RasterSource* raster_source, | 
| const gfx::Rect& raster_full_rect, | 
| const gfx::Rect& raster_dirty_rect, | 
| float scale) { | 
| + TRACE_EVENT0("cc", | 
| + "OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread"); | 
| + ContextProvider* context_provider = | 
| + raster_resource_write_lock->worker_context(); | 
| + if (!context_provider) { | 
| + return; | 
| + } | 
| + GLES2Interface* gl = context_provider->ContextGL(); | 
| gfx::GpuMemoryBuffer* gpu_memory_buffer = | 
| raster_resource_write_lock->GetGpuMemoryBuffer(); | 
| + | 
| if (gpu_memory_buffer) { | 
| void* data = NULL; | 
| bool rv = gpu_memory_buffer->Map(&data); | 
| @@ -351,105 +337,41 @@ OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 
| DCHECK(!playback_rect.IsEmpty()) | 
| << "Why are we rastering a tile that's not dirty?"; | 
| TileTaskWorkerPool::PlaybackToMemory( | 
| - data, raster_resource->format(), raster_resource->size(), | 
| - static_cast<size_t>(stride), raster_source, raster_full_rect, | 
| - playback_rect, scale); | 
| + data, raster_resource_write_lock->format(), | 
| + raster_resource_write_lock->size(), static_cast<size_t>(stride), | 
| + raster_source, raster_full_rect, playback_rect, scale); | 
| gpu_memory_buffer->Unmap(); | 
| } | 
| - base::AutoLock lock(lock_); | 
| + raster_resource_write_lock->CreateAndBindImage(); | 
| - CopySequenceNumber sequence = 0; | 
| - int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 
| - raster_resource->size().width()) / | 
| + int bytes_per_row = (BitsPerPixel(raster_resource_write_lock->format()) * | 
| + raster_resource_write_lock->size().width()) / | 
| 8; | 
| int chunk_size_in_rows = | 
| std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 
| // Align chunk size to 4. Required to support compressed texture formats. | 
| chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | 
| int y = 0; | 
| - int height = raster_resource->size().height(); | 
| + int height = raster_resource_write_lock->size().height(); | 
| while (y < height) { | 
| - int failed_attempts = 0; | 
| - while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 
| - kMaxCopyOperations) { | 
| - // Ignore limit when shutdown is set. | 
| - if (shutdown_) | 
| - break; | 
| - | 
| - ++failed_attempts; | 
| - | 
| - // Schedule a check that will also wait for operations to complete | 
| - // after too many failed attempts. | 
| - bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | 
| - | 
| - // Schedule a check for completed copy operations if too many operations | 
| - // are currently in-flight. | 
| - ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | 
| - | 
| - { | 
| - TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | 
| - | 
| - // Wait for in-flight copy operations to drop below limit. | 
| - copy_operation_count_cv_.Wait(); | 
| - } | 
| - } | 
| - | 
| - // There may be more work available, so wake up another worker thread. | 
| - copy_operation_count_cv_.Signal(); | 
| - | 
| + base::AutoLock context_lock(*context_provider->GetLock()); | 
| + context_provider->DetachFromThread(); | 
| // Copy at most |chunk_size_in_rows|. | 
| int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 
| DCHECK_GT(rows_to_copy, 0); | 
| - | 
| - // |raster_resource_write_lock| is passed to the first copy operation as it | 
| - // needs to be released before we can issue a copy. | 
| - pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 
| - raster_resource_write_lock.Pass(), raster_resource, output_resource, | 
| - gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | 
| + raster_resource_write_lock->BeginCopyTexture(); | 
| + gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, | 
| + raster_resource_write_lock->source_gl_id(), | 
| + output_resource_write_lock->texture_id(), 0, y, | 
| + 0, y, raster_resource_write_lock->size().width(), | 
| + rows_to_copy, false, false, false); | 
| + raster_resource_write_lock->EndCopyTexture(); | 
| y += rows_to_copy; | 
| - | 
| - // Acquire a sequence number for this copy operation. | 
| - sequence = next_copy_operation_sequence_++; | 
| - | 
| - // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 
| - // used for this copy operation. | 
| - bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 
| - | 
| - // Post task that will advance last flushed copy operation to |sequence| | 
| - // when |bytes_scheduled_since_last_flush_| has reached | 
| - // |max_bytes_per_copy_operation_|. | 
| - if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 
| - task_runner_->PostTask( | 
| - FROM_HERE, | 
| - base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | 
| - weak_ptr_factory_.GetWeakPtr(), sequence)); | 
| - bytes_scheduled_since_last_flush_ = 0; | 
| - } | 
| + // Sync/Deferred flush worker context to cc context. | 
| + gl->OrderingBarrierCHROMIUM(); | 
| + context_provider->DetachFromThread(); | 
| } | 
| - | 
| - return sequence; | 
| -} | 
| - | 
| -void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | 
| - CopySequenceNumber sequence) { | 
| - if (last_issued_copy_operation_ >= sequence) | 
| - return; | 
| - | 
| - IssueCopyOperations(sequence - last_issued_copy_operation_); | 
| - last_issued_copy_operation_ = sequence; | 
| -} | 
| - | 
| -void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | 
| - CopySequenceNumber sequence) { | 
| - if (last_flushed_copy_operation_ >= sequence) | 
| - return; | 
| - | 
| - AdvanceLastIssuedCopyTo(sequence); | 
| - | 
| - // Flush all issued copy operations. | 
| - context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | 
| - last_flushed_copy_operation_ = last_issued_copy_operation_; | 
| } | 
| void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 
| @@ -467,92 +389,6 @@ void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 
| client_->DidFinishRunningTileTasks(task_set); | 
| } | 
| -void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | 
| - TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | 
| - count); | 
| - | 
| - CopyOperation::Deque copy_operations; | 
| - | 
| - { | 
| - base::AutoLock lock(lock_); | 
| - | 
| - for (int64 i = 0; i < count; ++i) { | 
| - DCHECK(!pending_copy_operations_.empty()); | 
| - copy_operations.push_back(pending_copy_operations_.take_front()); | 
| - } | 
| - | 
| - // Increment |issued_copy_operation_count_| to reflect the transition of | 
| - // copy operations from "pending" to "issued" state. | 
| - issued_copy_operation_count_ += copy_operations.size(); | 
| - } | 
| - | 
| - while (!copy_operations.empty()) { | 
| - scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | 
| - | 
| - // Remove the write lock. | 
| - copy_operation->src_write_lock.reset(); | 
| - | 
| - // Copy contents of source resource to destination resource. | 
| - resource_provider_->CopyResource(copy_operation->src->id(), | 
| - copy_operation->dst->id(), | 
| - copy_operation->rect); | 
| - } | 
| -} | 
| - | 
| -void OneCopyTileTaskWorkerPool:: | 
| - ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | 
| - bool wait_if_needed) { | 
| - lock_.AssertAcquired(); | 
| - | 
| - if (check_for_completed_copy_operations_pending_) | 
| - return; | 
| - | 
| - base::TimeTicks now = base::TimeTicks::Now(); | 
| - | 
| - // Schedule a check for completed copy operations as soon as possible but | 
| - // don't allow two consecutive checks to be scheduled to run less than the | 
| - // tick rate apart. | 
| - base::TimeTicks next_check_for_completed_copy_operations_time = | 
| - std::max(last_check_for_completed_copy_operations_time_ + | 
| - base::TimeDelta::FromMilliseconds( | 
| - kCheckForCompletedCopyOperationsTickRateMs), | 
| - now); | 
| - | 
| - task_runner_->PostDelayedTask( | 
| - FROM_HERE, | 
| - base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | 
| - weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | 
| - next_check_for_completed_copy_operations_time - now); | 
| - | 
| - last_check_for_completed_copy_operations_time_ = | 
| - next_check_for_completed_copy_operations_time; | 
| - check_for_completed_copy_operations_pending_ = true; | 
| -} | 
| - | 
| -void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | 
| - bool wait_if_needed) { | 
| - TRACE_EVENT1("cc", | 
| - "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | 
| - "wait_if_needed", wait_if_needed); | 
| - | 
| - resource_pool_->CheckBusyResources(wait_if_needed); | 
| - | 
| - { | 
| - base::AutoLock lock(lock_); | 
| - | 
| - DCHECK(check_for_completed_copy_operations_pending_); | 
| - check_for_completed_copy_operations_pending_ = false; | 
| - | 
| - // The number of busy resources in the pool reflects the number of issued | 
| - // copy operations that have not yet completed. | 
| - issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | 
| - | 
| - // There may be work blocked on too many in-flight copy operations, so wake | 
| - // up a worker thread. | 
| - copy_operation_count_cv_.Signal(); | 
| - } | 
| -} | 
| - | 
| scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 
| OneCopyTileTaskWorkerPool::StateAsValue() const { | 
| scoped_refptr<base::trace_event::TracedValue> state = |