Chromium Code Reviews

Unified Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1230203007: Re-land: cc: Use worker context for one-copy tile initialization. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: add ContextProvider::ScopedContextGL class Created 5 years, 5 months ago
Index: cc/raster/one_copy_tile_task_worker_pool.cc
diff --git a/cc/raster/one_copy_tile_task_worker_pool.cc b/cc/raster/one_copy_tile_task_worker_pool.cc
index 78e6b106ce94308185987f79f3d47d8bd282c630..035e002e089d49c12495eefd67e3c29815990525 100644
--- a/cc/raster/one_copy_tile_task_worker_pool.cc
+++ b/cc/raster/one_copy_tile_task_worker_pool.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <limits>
+#include "base/metrics/histogram.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
@@ -14,10 +15,10 @@
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
-#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
+#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface.h"
-#include "ui/gfx/gpu_memory_buffer.h"
+#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
namespace cc {
namespace {
@@ -26,49 +27,15 @@ class RasterBufferImpl : public RasterBuffer {
public:
RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
ResourceProvider* resource_provider,
- ResourcePool* resource_pool,
ResourceFormat resource_format,
- const Resource* output_resource,
+ const Resource* resource,
uint64_t previous_content_id)
: worker_pool_(worker_pool),
- resource_provider_(resource_provider),
- resource_pool_(resource_pool),
- output_resource_(output_resource),
- raster_content_id_(0),
- sequence_(0) {
- if (worker_pool->have_persistent_gpu_memory_buffers() &&
- previous_content_id) {
- raster_resource_ =
- resource_pool->TryAcquireResourceWithContentId(previous_content_id);
- }
- if (raster_resource_) {
- raster_content_id_ = previous_content_id;
- DCHECK_EQ(resource_format, raster_resource_->format());
- DCHECK_EQ(output_resource->size().ToString(),
- raster_resource_->size().ToString());
- } else {
- raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
- resource_format);
- }
-
- lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
- resource_provider_, raster_resource_->id()));
- }
-
- ~RasterBufferImpl() override {
- // Release write lock in case a copy was never scheduled.
- lock_.reset();
+ resource_(resource),
+ lock_(resource_provider, resource->id()),
+ previous_content_id_(previous_content_id) {}
- // Make sure any scheduled copy operations are issued before we release the
- // raster resource.
- if (sequence_)
- worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
-
- // Return resources to pool so they can be used by another RasterBuffer
- // instance.
- resource_pool_->ReleaseResource(raster_resource_.Pass(),
- raster_content_id_);
- }
+ ~RasterBufferImpl() override {}
// Overridden from RasterBuffer:
void Playback(const RasterSource* raster_source,
@@ -76,55 +43,88 @@ class RasterBufferImpl : public RasterBuffer {
const gfx::Rect& raster_dirty_rect,
uint64_t new_content_id,
float scale) override {
- // If there's a raster_content_id_, we are reusing a resource with that
- // content id.
- bool reusing_raster_resource = raster_content_id_ != 0;
- sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
- reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
- output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
- scale);
- // Store the content id of the resource to return to the pool.
- raster_content_id_ = new_content_id;
+ worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+ resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
+ scale, previous_content_id_, new_content_id);
}
private:
OneCopyTileTaskWorkerPool* worker_pool_;
- ResourceProvider* resource_provider_;
- ResourcePool* resource_pool_;
- const Resource* output_resource_;
- uint64_t raster_content_id_;
- scoped_ptr<ScopedResource> raster_resource_;
- scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
- CopySequenceNumber sequence_;
+ const Resource* resource_;
+ ResourceProvider::ScopedWriteLockGL lock_;
+ uint64_t previous_content_id_;
DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};
-// Number of in-flight copy operations to allow.
-const int kMaxCopyOperations = 32;
+// Number of staging buffers to use.
+const size_t kMaxStagingBuffers = 32;
-// Delay been checking for copy operations to complete.
-const int kCheckForCompletedCopyOperationsTickRateMs = 1;
+// Delay between checking for query result to be available.
+const int kCheckForQueryResultAvailableTickRateMs = 1;
-// Number of failed attempts to allow before we perform a check that will
-// wait for copy operations to complete if needed.
-const int kFailedAttemptsBeforeWaitIfNeeded = 256;
+// Number of attempts to allow before we perform a check that will wait for
+// query to complete.
+const int kMaxCheckForQueryResultAvailableAttempts = 256;
// 4MiB is the size of 4 512x512 tiles, which has proven to be a good
// default batch size for copy operations.
const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
+void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
+ TRACE_EVENT0("cc", "WaitForQueryResult");
+
+ int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
+ while (attempts_left--) {
+ GLuint complete = 1;
+ gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT,
+ &complete);
+ if (complete)
+ break;
+
+ usleep(kCheckForQueryResultAvailableTickRateMs * 1000);
vmpstr 2015/07/16 22:25:26 PlatformThread::Sleep?
reveman 2015/07/22 22:39:15 Done.
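For reference, a minimal sketch of the suggested change (the reviewer's suggestion, not the code in this patch set; assumes base/threading/platform_thread.h is included):

  // Portable sleep from base, replacing the raw usleep() call above.
  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
      kCheckForQueryResultAvailableTickRateMs));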
+ }
+
+ unsigned time_elapsed_us = 0;
+ gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &time_elapsed_us);
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Renderer4.CopyTextureLatency", time_elapsed_us,
vmpstr 2015/07/16 22:25:26 CopyTextureLatencyUS (we usually include the time units, I think)
reveman 2015/07/22 22:39:15 This is the same as before but I removed it from latest patch.
+ 0, 256000, 50);
+}
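For context, the macro arguments are (name, sample, min, max, bucket_count); a sketch of the same call with the unit-suffixed name suggested above (illustrative, not what this patch set records):

  // Sample is in microseconds; 50 buckets spanning 0..256000.
  UMA_HISTOGRAM_CUSTOM_COUNTS("Renderer4.CopyTextureLatencyUS",
                              time_elapsed_us, 0, 256000, 50);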
+
} // namespace
-OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
- scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
- const Resource* src,
- const Resource* dst,
- const gfx::Rect& rect)
- : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
+OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(
+ scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer,
+ const gfx::Size& size)
+ : gpu_memory_buffer(gpu_memory_buffer.Pass()),
+ size(size),
+ texture_id(0),
+ image_id(0),
+ query_id(0),
+ content_id(0),
+ sequence_id(0) {
+}
+
+OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
+ DCHECK_EQ(texture_id, 0u);
+ DCHECK_EQ(image_id, 0u);
+ DCHECK_EQ(query_id, 0u);
}
-OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
+void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
+ gpu::gles2::GLES2Interface* gl) {
+ if (query_id) {
+ gl->DeleteQueriesEXT(1, &query_id);
+ query_id = 0;
+ }
+ if (image_id) {
+ gl->DestroyImageCHROMIUM(image_id);
+ image_id = 0;
+ }
+ if (texture_id) {
+ gl->DeleteTextures(1, &texture_id);
+ texture_id = 0;
+ }
}
// static
@@ -133,51 +133,65 @@ scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
TaskGraphRunner* task_graph_runner,
ContextProvider* context_provider,
ResourceProvider* resource_provider,
- ResourcePool* resource_pool,
int max_copy_texture_chromium_size,
- bool have_persistent_gpu_memory_buffers) {
+ bool use_persistent_gpu_memory_buffers,
+ unsigned image_target) {
return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
- task_runner, task_graph_runner, context_provider, resource_provider,
- resource_pool, max_copy_texture_chromium_size,
- have_persistent_gpu_memory_buffers));
+ task_runner, task_graph_runner, resource_provider,
+ max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
+ image_target));
}
OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
base::SequencedTaskRunner* task_runner,
TaskGraphRunner* task_graph_runner,
- ContextProvider* context_provider,
ResourceProvider* resource_provider,
- ResourcePool* resource_pool,
int max_copy_texture_chromium_size,
- bool have_persistent_gpu_memory_buffers)
+ bool use_persistent_gpu_memory_buffers,
+ unsigned image_target)
: task_runner_(task_runner),
task_graph_runner_(task_graph_runner),
namespace_token_(task_graph_runner->GetNamespaceToken()),
- context_provider_(context_provider),
resource_provider_(resource_provider),
- resource_pool_(resource_pool),
max_bytes_per_copy_operation_(
max_copy_texture_chromium_size
? std::min(kMaxBytesPerCopyOperation,
max_copy_texture_chromium_size)
: kMaxBytesPerCopyOperation),
- have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
- last_issued_copy_operation_(0),
- last_flushed_copy_operation_(0),
- lock_(),
- copy_operation_count_cv_(&lock_),
+ use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
+ image_target_(image_target),
+ next_sequence_id_(1),
bytes_scheduled_since_last_flush_(0),
- issued_copy_operation_count_(0),
- next_copy_operation_sequence_(1),
- check_for_completed_copy_operations_pending_(false),
- shutdown_(false),
weak_ptr_factory_(this),
task_set_finished_weak_ptr_factory_(this) {
- DCHECK(context_provider_);
}
OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
- DCHECK_EQ(pending_copy_operations_.size(), 0u);
+}
+
+void OneCopyTileTaskWorkerPool::ReleaseFreeMemory() {
+ base::AutoLock lock(lock_);
+
+ if (free_buffers_.empty() && busy_buffers_.empty())
+ return;
+
+ ContextProvider* context_provider =
+ resource_provider_->output_surface()->worker_context_provider();
+ DCHECK(context_provider);
+
+ {
+ ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+ gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+ DCHECK(gl);
+
+ std::for_each(
+ free_buffers_.begin(), free_buffers_.end(),
+ [gl](StagingBuffer* buffer) { buffer->DestroyGLResources(gl); });
+ std::for_each(
+ busy_buffers_.begin(), busy_buffers_.end(),
+ [gl](StagingBuffer* buffer) { buffer->DestroyGLResources(gl); });
+ }
}
TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
@@ -191,28 +205,16 @@ void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
void OneCopyTileTaskWorkerPool::Shutdown() {
TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
- {
- base::AutoLock lock(lock_);
-
- shutdown_ = true;
- copy_operation_count_cv_.Signal();
- }
-
TaskGraph empty;
task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
+
+ ReleaseFreeMemory();
}
void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
-#if DCHECK_IS_ON()
- {
- base::AutoLock lock(lock_);
- DCHECK(!shutdown_);
- }
-#endif
-
if (tasks_pending_.none())
TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
@@ -237,8 +239,6 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
}
- resource_pool_->CheckBusyResources(false);
-
for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
it != queue->items.end(); ++it) {
const TileTaskQueue::Item& item = *it;
@@ -265,14 +265,19 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
}
ScheduleTasksOnOriginThread(this, &graph_);
+
+ // Barrier to sync any new resources to the worker context.
+ resource_provider_->output_surface()
+ ->context_provider()
+ ->ContextGL()
+ ->OrderingBarrierCHROMIUM();
+
task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
std::copy(new_task_set_finished_tasks,
new_task_set_finished_tasks + kNumberOfTaskSets,
task_set_finished_tasks_);
- resource_pool_->ReduceResourceUsage();
-
TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
StateAsValue());
}
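This barrier pairs with the OrderingBarrierCHROMIUM() issued on the worker context at the end of PlaybackAndScheduleCopyOnWorkerThread below; a sketch of the intent (cc_gl and worker_gl are illustrative names for the two GLES2 interfaces):

  cc_gl->OrderingBarrierCHROMIUM();      // new resources visible to worker
  // ... worker context rasters into staging buffers and issues copies ...
  worker_gl->OrderingBarrierCHROMIUM();  // copy results visible to cc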
@@ -311,10 +316,9 @@ scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
// TODO(danakj): If resource_content_id != 0, we only need to copy/upload
// the dirty rect.
DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
- return make_scoped_ptr<RasterBuffer>(
- new RasterBufferImpl(this, resource_provider_, resource_pool_,
- resource_provider_->best_texture_format(), resource,
- previous_content_id));
+ return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
+ this, resource_provider_, resource_provider_->best_texture_format(),
+ resource, previous_content_id));
}
void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
@@ -322,134 +326,277 @@ void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
// Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
-CopySequenceNumber
-OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
- bool reusing_raster_resource,
- scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
- raster_resource_write_lock,
- const Resource* raster_resource,
- const Resource* output_resource,
+void OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
vmpstr 2015/07/16 22:25:26 Can you break this function up into smaller logical pieces?
reveman 2015/07/22 22:39:15 This has been completely refactored in latest patch.
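One possible decomposition along the lines requested (illustrative names and signatures only; a sketch, not what eventually landed):

  scoped_ptr<StagingBuffer> AcquireStagingBuffer(const Resource* resource,
                                                 uint64_t previous_content_id);
  void PlaybackToStagingBuffer(StagingBuffer* staging_buffer,
                               const RasterSource* raster_source,
                               const gfx::Rect& raster_full_rect,
                               const gfx::Rect& raster_dirty_rect,
                               float scale,
                               uint64_t previous_content_id,
                               uint64_t new_content_id);
  void CopyOnWorkerThread(StagingBuffer* staging_buffer,
                          const Resource* resource,
                          const ResourceProvider::ScopedWriteLockGL* lock);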
+ const Resource* resource,
+ const ResourceProvider::ScopedWriteLockGL* resource_lock,
const RasterSource* raster_source,
const gfx::Rect& raster_full_rect,
const gfx::Rect& raster_dirty_rect,
- float scale) {
- gfx::GpuMemoryBuffer* gpu_memory_buffer =
- raster_resource_write_lock->GetGpuMemoryBuffer();
- if (gpu_memory_buffer) {
- void* data = NULL;
- bool rv = gpu_memory_buffer->Map(&data);
- DCHECK(rv);
- int stride;
- gpu_memory_buffer->GetStride(&stride);
- // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
- DCHECK_GE(stride, 0);
-
- gfx::Rect playback_rect = raster_full_rect;
- if (reusing_raster_resource) {
- playback_rect.Intersect(raster_dirty_rect);
- }
- DCHECK(!playback_rect.IsEmpty())
- << "Why are we rastering a tile that's not dirty?";
- TileTaskWorkerPool::PlaybackToMemory(
- data, raster_resource->format(), raster_resource->size(),
- static_cast<size_t>(stride), raster_source, raster_full_rect,
- playback_rect, scale);
- gpu_memory_buffer->Unmap();
- }
-
+ float scale,
+ uint64_t previous_content_id,
+ uint64_t new_content_id) {
base::AutoLock lock(lock_);
- CopySequenceNumber sequence = 0;
- int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
- raster_resource->size().width()) /
- 8;
- int chunk_size_in_rows =
- std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
- // Align chunk size to 4. Required to support compressed texture formats.
- chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
- int y = 0;
- int height = raster_resource->size().height();
- while (y < height) {
- int failed_attempts = 0;
- while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
- kMaxCopyOperations) {
- // Ignore limit when shutdown is set.
- if (shutdown_)
+ gfx::Size size = resource->size();
+ gfx::Rect playback_rect = raster_full_rect;
+
+ scoped_ptr<StagingBuffer> staging_buffer;
+
+ // Try to find a staging buffer that allows us to perform partial raster when
+ // using persistent GpuMemoryBuffers.
+ if (use_persistent_gpu_memory_buffers_ && previous_content_id &&
+ raster_dirty_rect != raster_full_rect) {
+ for (;;) {
+ StagingBufferVector::iterator it =
+ std::find_if(free_buffers_.begin(), free_buffers_.end(),
+ [previous_content_id](const StagingBuffer* buffer) {
+ return buffer->content_id == previous_content_id;
+ });
+ if (it != free_buffers_.end()) {
+ std::swap(*it, free_buffers_.back());
+ staging_buffer = make_scoped_ptr(free_buffers_.back());
+ free_buffers_.weak_erase(free_buffers_.end() - 1);
+ DCHECK_EQ(staging_buffer->size.ToString(), size.ToString());
break;
+ }
- ++failed_attempts;
+ // Fall back to full raster if sync queries are not available.
+ if (!resource_provider_->use_sync_query())
+ break;
- // Schedule a check that will also wait for operations to complete
- // after too many failed attempts.
- bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
+ // Fall back to full raster if a staging buffer with |previous_content_id|
+ // doesn't exist.
+ if (std::find_if(busy_buffers_.begin(), busy_buffers_.end(),
+ [previous_content_id](const StagingBuffer* buffer) {
+ return buffer->content_id == previous_content_id;
+ }) == busy_buffers_.end()) {
+ break;
+ }
- // Schedule a check for completed copy operations if too many operations
- // are currently in-flight.
- ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
+ ContextProvider* context_provider =
+ resource_provider_->output_surface()->worker_context_provider();
+ DCHECK(context_provider);
{
- TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
+ ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+ gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+ DCHECK(gl);
+
+ DCHECK(!busy_buffers_.empty());
+ WaitForQueryResult(gl, busy_buffers_.front()->query_id);
vmpstr 2015/07/16 22:25:26 Are we waiting for the first one, because they are
reveman 2015/07/22 22:39:15 The idea is that it would provide more predictable
+ free_buffers_.push_back(busy_buffers_.take_front());
+ }
+ }
+ }
+
+ if (!staging_buffer) {
+ for (;;) {
+ // Sort free buffers based on usage. MRU buffer first.
+ std::sort(free_buffers_.begin(), free_buffers_.end(),
vmpstr 2015/07/16 22:25:26 Should we have some sort of a dirty flag to ensure
reveman 2015/07/22 22:39:15 Removed this code from latest patch.
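One way to address this without a dirty flag (a sketch, not what this patch set does) is to drop the sort and select the MRU buffer of matching size in a single pass:

  // O(n) scan: highest sequence_id among free buffers of the right size.
  StagingBuffer* mru_buffer = nullptr;
  for (StagingBuffer* buffer : free_buffers_) {
    if (buffer->size != size)
      continue;
    if (!mru_buffer || buffer->sequence_id > mru_buffer->sequence_id)
      mru_buffer = buffer;
  }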
+ [](const StagingBuffer* a, const StagingBuffer* b) {
+ return a->sequence_id > b->sequence_id;
+ });
+
+ // Find MRU buffer of correct size.
+ StagingBufferVector::iterator it = std::find_if(
+ free_buffers_.begin(), free_buffers_.end(),
+ [size](const StagingBuffer* buffer) { return buffer->size == size; });
+ if (it != free_buffers_.end()) {
+ std::swap(*it, free_buffers_.back());
+ staging_buffer = make_scoped_ptr(free_buffers_.back());
+ free_buffers_.weak_erase(free_buffers_.end() - 1);
+ break;
+ }
+
+ ContextProvider* context_provider =
+ resource_provider_->output_surface()->worker_context_provider();
+ DCHECK(context_provider);
- // Wait for in-flight copy operations to drop below limit.
- copy_operation_count_cv_.Wait();
+ {
+ ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+ gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+ DCHECK(gl);
+
+ // First check if the query result for the next busy buffer is already
+ // available.
+ if (!busy_buffers_.empty() && resource_provider_->use_sync_query()) {
+ GLuint complete = 1;
+ gl->GetQueryObjectuivEXT(busy_buffers_.front()->query_id,
+ GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
+ if (complete) {
+ free_buffers_.push_back(busy_buffers_.take_front());
+ continue;
+ }
+ }
+
+ // If we haven't reached the staging buffer limit then prefer to
+ // allocate a new buffer instead of releasing a free buffer or waiting
+ // for a busy buffer to become available.
+ if ((free_buffers_.size() + busy_buffers_.size()) < kMaxStagingBuffers)
+ break;
+
+ // Release LRU buffer instead of waiting for a busy buffer to become
+ // available.
+ if (!free_buffers_.empty()) {
+ free_buffers_.back()->DestroyGLResources(gl);
+ free_buffers_.pop_back();
+ // Note: we 'continue' here in case the number of staging buffers is
+ // above the limit.
+ continue;
+ }
+
+ DCHECK(!busy_buffers_.empty());
+
+ // Use CHROMIUM_sync_query if available, otherwise fall back to glFinish.
+ if (resource_provider_->use_sync_query()) {
+ WaitForQueryResult(gl, busy_buffers_.front()->query_id);
+ free_buffers_.push_back(busy_buffers_.take_front());
+ } else {
+ gl->Finish();
+ while (!busy_buffers_.empty())
+ free_buffers_.push_back(busy_buffers_.take_front());
+ }
}
}
+ }
+
+ {
+ base::AutoUnlock unlock(lock_);
+
+ // Allocate new staging buffer if necessary.
+ if (!staging_buffer) {
+ staging_buffer = make_scoped_ptr(new StagingBuffer(
+ resource_provider_->gpu_memory_buffer_manager()
+ ->AllocateGpuMemoryBuffer(
+ size, ToGpuMemoryBufferFormat(
+ resource_provider_->best_texture_format()),
+ use_persistent_gpu_memory_buffers_
+ ? gfx::GpuMemoryBuffer::PERSISTENT_MAP
+ : gfx::GpuMemoryBuffer::MAP),
+ size));
+ }
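The buffer usage chosen here determines whether partial raster is possible; a summary of the semantics this patch relies on:

  // gfx::GpuMemoryBuffer::MAP: contents need not survive Unmap(), so every
  // Playback() must raster the full rect into the buffer.
  // gfx::GpuMemoryBuffer::PERSISTENT_MAP: contents persist across Map/Unmap,
  // so a buffer whose content_id matches previous_content_id only needs the
  // dirty region re-rastered (see the playback_rect.Intersect() below).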
- // There may be more work available, so wake up another worker thread.
- copy_operation_count_cv_.Signal();
-
- // Copy at most |chunk_size_in_rows|.
- int rows_to_copy = std::min(chunk_size_in_rows, height - y);
- DCHECK_GT(rows_to_copy, 0);
-
- // |raster_resource_write_lock| is passed to the first copy operation as it
- // needs to be released before we can issue a copy.
- pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
- raster_resource_write_lock.Pass(), raster_resource, output_resource,
- gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
- y += rows_to_copy;
-
- // Acquire a sequence number for this copy operation.
- sequence = next_copy_operation_sequence_++;
-
- // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
- // used for this copy operation.
- bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
- // Post task that will advance last flushed copy operation to |sequence|
- // when |bytes_scheduled_since_last_flush_| has reached
- // |max_bytes_per_copy_operation_|.
- if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
- weak_ptr_factory_.GetWeakPtr(), sequence));
- bytes_scheduled_since_last_flush_ = 0;
+ if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
+ // Reduce playback rect to dirty region if the content id of the staging
+ // buffer matches the prevous content id.
+ if (previous_content_id == staging_buffer->content_id)
+ playback_rect.Intersect(raster_dirty_rect);
+ }
+
+ if (staging_buffer->gpu_memory_buffer) {
+ void* data = NULL;
+ bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
+ DCHECK(rv);
+ int stride;
+ staging_buffer->gpu_memory_buffer->GetStride(&stride);
+ // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
+ DCHECK_GE(stride, 0);
+
+ DCHECK(!playback_rect.IsEmpty())
+ << "Why are we rastering a tile that's not dirty?";
+ TileTaskWorkerPool::PlaybackToMemory(
+ data, resource_provider_->best_texture_format(), staging_buffer->size,
+ static_cast<size_t>(stride), raster_source, raster_full_rect,
+ playback_rect, scale);
+ staging_buffer->gpu_memory_buffer->Unmap();
}
}
- return sequence;
-}
+ ContextProvider* context_provider =
+ resource_provider_->output_surface()->worker_context_provider();
-void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
- CopySequenceNumber sequence) {
- if (last_issued_copy_operation_ >= sequence)
- return;
+ {
+ ContextProvider::ScopedContextGL scoped_context(context_provider);
+
+ gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
+ DCHECK(gl);
+
+ if (!staging_buffer->texture_id) {
+ gl->GenTextures(1, &staging_buffer->texture_id);
+ gl->BindTexture(image_target_, staging_buffer->texture_id);
+ gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (staging_buffer->gpu_memory_buffer) {
+ staging_buffer->image_id = gl->CreateImageCHROMIUM(
+ staging_buffer->gpu_memory_buffer->AsClientBuffer(),
+ staging_buffer->size.width(), staging_buffer->size.height(),
+ GLInternalFormat(resource_provider_->best_texture_format()));
+ gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+ }
+ } else {
+ gl->BindTexture(image_target_, staging_buffer->texture_id);
+ if (staging_buffer->image_id) {
+ gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+ gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
+ }
+ }
- IssueCopyOperations(sequence - last_issued_copy_operation_);
- last_issued_copy_operation_ = sequence;
-}
+ if (resource_provider_->use_sync_query()) {
+ if (!staging_buffer->query_id)
+ gl->GenQueriesEXT(1, &staging_buffer->query_id);
+
+#if defined(OS_CHROMEOS)
+ // TODO(reveman): This avoids a performance problem on some ChromeOS
+ // devices. This needs to be removed to support native GpuMemoryBuffer
+ // implementations. crbug.com/436314
+ gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
+#else
+ gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
+ staging_buffer->query_id);
+#endif
+ }
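The two query targets differ in what "complete" means; roughly:

  // GL_COMMANDS_ISSUED_CHROMIUM: result becomes available once the commands
  // have been issued to the GPU process (cheap to poll, but no guarantee the
  // GPU has executed them).
  // GL_COMMANDS_COMPLETED_CHROMIUM: result becomes available once the GPU has
  // finished executing the commands, which is what reuse of a staging buffer
  // strictly requires.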
-void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
- CopySequenceNumber sequence) {
- if (last_flushed_copy_operation_ >= sequence)
- return;
+ int bytes_per_row =
+ (BitsPerPixel(resource_provider_->best_texture_format()) *
+ size.width()) /
+ 8;
+ int chunk_size_in_rows =
+ std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
+ // Align chunk size to 4. Required to support compressed texture formats.
+ chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
+ int y = 0;
+ int height = size.height();
+ while (y < height) {
+ // Copy at most |chunk_size_in_rows|.
+ int rows_to_copy = std::min(chunk_size_in_rows, height - y);
+ DCHECK_GT(rows_to_copy, 0);
+
+ gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
+ resource_lock->texture_id(), 0, y, 0, y,
+ size.width(), rows_to_copy, false, false,
+ false);
+ y += rows_to_copy;
+
+ // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
+ // used for this copy operation.
+ bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
+
+ if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
+ gl->ShallowFlushCHROMIUM();
+ bytes_scheduled_since_last_flush_ = 0;
+ }
+ }
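A worked example of the chunking arithmetic, assuming a 512x512 tile in RGBA_8888 (32 bits per pixel) and the 4 MiB default:

  // bytes_per_row      = (32 * 512) / 8         = 2048 bytes
  // chunk_size_in_rows = max(1, 4194304 / 2048) = 2048 rows (already % 4 == 0)
  // height = 512 <= 2048, so the tile is copied in a single
  // CopySubTextureCHROMIUM call and no intermediate flush is triggered.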
- AdvanceLastIssuedCopyTo(sequence);
+ if (resource_provider_->use_sync_query()) {
+#if defined(OS_CHROMEOS)
+ gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+#else
+ gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
+#endif
+ }
+
+ // Barrier to sync worker context output to cc context.
+ gl->OrderingBarrierCHROMIUM();
+ }
+
+ staging_buffer->content_id = new_content_id;
+ staging_buffer->sequence_id = next_sequence_id_++;
- // Flush all issued copy operations.
- context_provider_->ContextGL()->ShallowFlushCHROMIUM();
- last_flushed_copy_operation_ = last_issued_copy_operation_;
+ busy_buffers_.push_back(staging_buffer.Pass());
}
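To summarize the staging buffer lifecycle implemented above:

  // free_buffers_  --acquired by the loops above--> local staging_buffer
  // staging_buffer --copy scheduled, query begun--> busy_buffers_
  // busy_buffers_  --query result available-------> free_buffers_
  // either list    --DestroyGLResources()---------> freed (limit/shutdown)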
void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
@@ -467,92 +614,6 @@ void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
client_->DidFinishRunningTileTasks(task_set);
}
-void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
- TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
- count);
-
- CopyOperation::Deque copy_operations;
-
- {
- base::AutoLock lock(lock_);
-
- for (int64 i = 0; i < count; ++i) {
- DCHECK(!pending_copy_operations_.empty());
- copy_operations.push_back(pending_copy_operations_.take_front());
- }
-
- // Increment |issued_copy_operation_count_| to reflect the transition of
- // copy operations from "pending" to "issued" state.
- issued_copy_operation_count_ += copy_operations.size();
- }
-
- while (!copy_operations.empty()) {
- scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
-
- // Remove the write lock.
- copy_operation->src_write_lock.reset();
-
- // Copy contents of source resource to destination resource.
- resource_provider_->CopyResource(copy_operation->src->id(),
- copy_operation->dst->id(),
- copy_operation->rect);
- }
-}
-
-void OneCopyTileTaskWorkerPool::
- ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
- bool wait_if_needed) {
- lock_.AssertAcquired();
-
- if (check_for_completed_copy_operations_pending_)
- return;
-
- base::TimeTicks now = base::TimeTicks::Now();
-
- // Schedule a check for completed copy operations as soon as possible but
- // don't allow two consecutive checks to be scheduled to run less than the
- // tick rate apart.
- base::TimeTicks next_check_for_completed_copy_operations_time =
- std::max(last_check_for_completed_copy_operations_time_ +
- base::TimeDelta::FromMilliseconds(
- kCheckForCompletedCopyOperationsTickRateMs),
- now);
-
- task_runner_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
- weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
- next_check_for_completed_copy_operations_time - now);
-
- last_check_for_completed_copy_operations_time_ =
- next_check_for_completed_copy_operations_time;
- check_for_completed_copy_operations_pending_ = true;
-}
-
-void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
- bool wait_if_needed) {
- TRACE_EVENT1("cc",
- "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
- "wait_if_needed", wait_if_needed);
-
- resource_pool_->CheckBusyResources(wait_if_needed);
-
- {
- base::AutoLock lock(lock_);
-
- DCHECK(check_for_completed_copy_operations_pending_);
- check_for_completed_copy_operations_pending_ = false;
-
- // The number of busy resources in the pool reflects the number of issued
- // copy operations that have not yet completed.
- issued_copy_operation_count_ = resource_pool_->busy_resource_count();
-
- // There may be work blocked on too many in-flight copy operations, so wake
- // up a worker thread.
- copy_operation_count_cv_.Signal();
- }
-}
-
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
scoped_refptr<base::trace_event::TracedValue> state =
@@ -571,20 +632,13 @@ OneCopyTileTaskWorkerPool::StateAsValue() const {
void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
base::trace_event::TracedValue* staging_state) const {
+ base::AutoLock lock(lock_);
+
staging_state->SetInteger(
"staging_resource_count",
- static_cast<int>(resource_pool_->total_resource_count()));
- staging_state->SetInteger(
- "bytes_used_for_staging_resources",
- static_cast<int>(resource_pool_->total_memory_usage_bytes()));
- staging_state->SetInteger(
- "pending_copy_count",
- static_cast<int>(resource_pool_->total_resource_count() -
- resource_pool_->acquired_resource_count()));
- staging_state->SetInteger(
- "bytes_pending_copy",
- static_cast<int>(resource_pool_->total_memory_usage_bytes() -
- resource_pool_->acquired_memory_usage_bytes()));
+ static_cast<int>(free_buffers_.size() + busy_buffers_.size()));
+ staging_state->SetInteger("pending_copy_count",
+ static_cast<int>(busy_buffers_.size()));
}
} // namespace cc
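For completeness, a minimal construction sketch against the new Create() signature (argument values are illustrative):

  scoped_ptr<TileTaskWorkerPool> pool = OneCopyTileTaskWorkerPool::Create(
      task_runner, task_graph_runner, context_provider, resource_provider,
      0 /* max_copy_texture_chromium_size: 0 means use the default cap */,
      false /* use_persistent_gpu_memory_buffers */,
      GL_TEXTURE_2D /* image_target */);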