| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 | 9 |
| 10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| 11 #include "base/thread_task_runner_handle.h" | |
| 12 #include "base/trace_event/memory_dump_manager.h" | |
| 13 #include "base/trace_event/trace_event.h" | 11 #include "base/trace_event/trace_event.h" |
| 14 #include "base/trace_event/trace_event_argument.h" | 12 #include "base/trace_event/trace_event_argument.h" |
| 15 #include "cc/base/math_util.h" | 13 #include "cc/base/math_util.h" |
| 16 #include "cc/debug/traced_value.h" | 14 #include "cc/debug/traced_value.h" |
| 17 #include "cc/raster/raster_buffer.h" | 15 #include "cc/raster/raster_buffer.h" |
| 18 #include "cc/resources/platform_color.h" | 16 #include "cc/resources/platform_color.h" |
| 19 #include "cc/resources/resource_format.h" | 17 #include "cc/resources/resource_pool.h" |
| 20 #include "cc/resources/resource_util.h" | |
| 21 #include "cc/resources/scoped_resource.h" | 18 #include "cc/resources/scoped_resource.h" |
| 22 #include "gpu/GLES2/gl2extchromium.h" | |
| 23 #include "gpu/command_buffer/client/gles2_interface.h" | 19 #include "gpu/command_buffer/client/gles2_interface.h" |
| 24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | |
| 25 #include "ui/gfx/buffer_format_util.h" | 20 #include "ui/gfx/buffer_format_util.h" |
| 21 #include "ui/gfx/gpu_memory_buffer.h" |
| 26 | 22 |
| 27 namespace cc { | 23 namespace cc { |
| 28 namespace { | 24 namespace { |
| 29 | 25 |
| 30 class RasterBufferImpl : public RasterBuffer { | 26 class RasterBufferImpl : public RasterBuffer { |
| 31 public: | 27 public: |
| 32 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 28 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
| 33 ResourceProvider* resource_provider, | 29 ResourceProvider* resource_provider, |
| 30 ResourcePool* resource_pool, |
| 34 ResourceFormat resource_format, | 31 ResourceFormat resource_format, |
| 35 const Resource* resource, | 32 const Resource* output_resource, |
| 36 uint64_t previous_content_id) | 33 uint64_t previous_content_id) |
| 37 : worker_pool_(worker_pool), | 34 : worker_pool_(worker_pool), |
| 38 resource_(resource), | 35 resource_provider_(resource_provider), |
| 39 lock_(resource_provider, resource->id()), | 36 resource_pool_(resource_pool), |
| 40 previous_content_id_(previous_content_id) {} | 37 output_resource_(output_resource), |
| 38 raster_content_id_(0), |
| 39 raster_resource_(nullptr), |
| 40 sequence_(0) { |
| 41 if (worker_pool->have_persistent_gpu_memory_buffers() && |
| 42 previous_content_id) { |
| 43 raster_resource_ = |
| 44 resource_pool->TryAcquireResourceWithContentId(previous_content_id); |
| 45 } |
| 46 if (raster_resource_) { |
| 47 raster_content_id_ = previous_content_id; |
| 48 DCHECK_EQ(resource_format, raster_resource_->format()); |
| 49 DCHECK_EQ(output_resource->size().ToString(), |
| 50 raster_resource_->size().ToString()); |
| 51 } else { |
| 52 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), |
| 53 resource_format); |
| 54 } |
| 41 | 55 |
| 42 ~RasterBufferImpl() override {} | 56 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( |
| 57 resource_provider_, raster_resource_->id())); |
| 58 } |
| 59 |
| 60 ~RasterBufferImpl() override { |
| 61 // Release write lock in case a copy was never scheduled. |
| 62 lock_.reset(); |
| 63 |
| 64 // Make sure any scheduled copy operations are issued before we release the |
| 65 // raster resource. |
| 66 if (sequence_) |
| 67 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); |
| 68 |
| 69 // Return resources to pool so they can be used by another RasterBuffer |
| 70 // instance. |
| 71 resource_pool_->ReleaseResource(raster_resource_, raster_content_id_); |
| 72 raster_resource_ = nullptr; |
| 73 } |
| 43 | 74 |
| 44 // Overridden from RasterBuffer: | 75 // Overridden from RasterBuffer: |
| 45 void Playback(const RasterSource* raster_source, | 76 void Playback(const RasterSource* raster_source, |
| 46 const gfx::Rect& raster_full_rect, | 77 const gfx::Rect& raster_full_rect, |
| 47 const gfx::Rect& raster_dirty_rect, | 78 const gfx::Rect& raster_dirty_rect, |
| 48 uint64_t new_content_id, | 79 uint64_t new_content_id, |
| 49 float scale, | 80 float scale, |
| 50 bool include_images) override { | 81 bool include_images) override { |
| 51 worker_pool_->PlaybackAndCopyOnWorkerThread( | 82 // If there's a raster_content_id_, we are reusing a resource with that |
| 52 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, | 83 // content id. |
| 53 scale, include_images, previous_content_id_, new_content_id); | 84 bool reusing_raster_resource = raster_content_id_ != 0; |
| 85 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( |
| 86 reusing_raster_resource, lock_.Pass(), raster_resource_, |
| 87 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, |
| 88 scale, include_images); |
| 89 // Store the content id of the resource to return to the pool. |
| 90 raster_content_id_ = new_content_id; |
| 54 } | 91 } |
| 55 | 92 |
| 56 private: | 93 private: |
| 57 OneCopyTileTaskWorkerPool* worker_pool_; | 94 OneCopyTileTaskWorkerPool* worker_pool_; |
| 58 const Resource* resource_; | 95 ResourceProvider* resource_provider_; |
| 59 ResourceProvider::ScopedWriteLockGL lock_; | 96 ResourcePool* resource_pool_; |
| 60 uint64_t previous_content_id_; | 97 const Resource* output_resource_; |
| 98 uint64_t raster_content_id_; |
| 99 Resource* raster_resource_; |
| 100 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; |
| 101 CopySequenceNumber sequence_; |
| 61 | 102 |
| 62 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 103 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
| 63 }; | 104 }; |
| 64 | 105 |
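For orientation, here is the caller-side lifecycle of this buffer under the NEW column's API. Only the method names (AcquireBufferForRaster, Playback, ReleaseBufferForRaster) come from this file; |pool|, |resource| and the raster parameters are assumed for the sketch:

    // Caller-side lifecycle sketch (assumed setup, real method names).
    scoped_ptr<RasterBuffer> buffer = pool->AcquireBufferForRaster(
        resource, resource_content_id, previous_content_id);
    buffer->Playback(raster_source, raster_full_rect, raster_dirty_rect,
                     new_content_id, scale, include_images);
    // The destructor releases the write lock, issues any scheduled copy, and
    // returns the raster resource to the ResourcePool.
    pool->ReleaseBufferForRaster(buffer.Pass());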
| 65 // Delay between checking for query result to be available. | 106 // Number of in-flight copy operations to allow. |
| 66 const int kCheckForQueryResultAvailableTickRateMs = 1; | 107 const int kMaxCopyOperations = 32; |
| 67 | 108 |
| 68 // Number of attempts to allow before we perform a check that will wait for | 109 // Delay between checking for copy operations to complete. |
| 69 // query to complete. | 110 const int kCheckForCompletedCopyOperationsTickRateMs = 1; |
| 70 const int kMaxCheckForQueryResultAvailableAttempts = 256; | 111 |
| 112 // Number of failed attempts to allow before we perform a check that will |
| 113 // wait for copy operations to complete if needed. |
| 114 const int kFailedAttemptsBeforeWaitIfNeeded = 256; |
| 71 | 115 |
| 72 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 116 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
| 73 // default batch size for copy operations. | 117 // default batch size for copy operations. |
| 74 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 118 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
| 75 | 119 |
| 76 // Delay before a staging buffer might be released. | 120 } // namespace |
| 77 const int kStagingBufferExpirationDelayMs = 1000; | |
| 78 | 121 |
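As a sanity check on these constants, here is the batching arithmetic (the same computation appears later in the playback functions), worked for an assumed 512x512 RGBA_8888 tile:

    // Worked example, assuming a 512x512 tile in RGBA_8888 (32 bits/pixel):
    //   bytes_per_row      = (32 * 512) / 8       = 2048 bytes
    //   chunk_size_in_rows = max(1, 4MiB / 2048)  = 2048 rows
    //   (already a multiple of 4, so the round-up changes nothing)
    // One 512-row tile copies as a single 1MiB chunk, so four such tiles
    // reach the 4MiB flush threshold -- matching the comment above.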
| 79 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { | 122 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( |
| 80 unsigned complete = 1; | 123 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, |
| 81 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); | 124 const Resource* src, |
| 82 return !!complete; | 125 const Resource* dst, |
| 126 const gfx::Rect& rect) |
| 127 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { |
| 83 } | 128 } |
| 84 | 129 |
| 85 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { | 130 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { |
| 86 TRACE_EVENT0("cc", "WaitForQueryResult"); | |
| 87 | |
| 88 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; | |
| 89 while (attempts_left--) { | |
| 90 if (CheckForQueryResult(gl, query_id)) | |
| 91 break; | |
| 92 | |
| 93 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( | |
| 94 kCheckForQueryResultAvailableTickRateMs)); | |
| 95 } | |
| 96 | |
| 97 unsigned result = 0; | |
| 98 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); | |
| 99 } | |
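WaitForQueryResult above bounds its polling before falling back to a blocking GL_QUERY_RESULT_EXT read. The same poll-then-block shape, reduced to a generic, self-contained sketch with std:: primitives (names here are illustrative, not from the file):

    #include <chrono>
    #include <functional>
    #include <thread>

    // Returns true if |done| reported completion while polling; on false the
    // caller falls back to a blocking wait, as the GL code above does.
    bool PollBounded(const std::function<bool()>& done, int max_attempts,
                     std::chrono::milliseconds tick) {
      while (max_attempts--) {
        if (done())
          return true;
        std::this_thread::sleep_for(tick);  // back off between checks
      }
      return false;
    }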
| 100 | |
| 101 } // namespace | |
| 102 | |
| 103 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size) | |
| 104 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {} | |
| 105 | |
| 106 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { | |
| 107 DCHECK_EQ(texture_id, 0u); | |
| 108 DCHECK_EQ(image_id, 0u); | |
| 109 DCHECK_EQ(query_id, 0u); | |
| 110 } | |
| 111 | |
| 112 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( | |
| 113 gpu::gles2::GLES2Interface* gl) { | |
| 114 if (query_id) { | |
| 115 gl->DeleteQueriesEXT(1, &query_id); | |
| 116 query_id = 0; | |
| 117 } | |
| 118 if (image_id) { | |
| 119 gl->DestroyImageCHROMIUM(image_id); | |
| 120 image_id = 0; | |
| 121 } | |
| 122 if (texture_id) { | |
| 123 gl->DeleteTextures(1, &texture_id); | |
| 124 texture_id = 0; | |
| 125 } | |
| 126 } | |
| 127 | |
| 128 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump( | |
| 129 base::trace_event::ProcessMemoryDump* pmd, | |
| 130 ResourceFormat format, | |
| 131 bool in_free_list) const { | |
| 132 if (!gpu_memory_buffer) | |
| 133 return; | |
| 134 | |
| 135 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); | |
| 136 std::string buffer_dump_name = | |
| 137 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id); | |
| 138 base::trace_event::MemoryAllocatorDump* buffer_dump = | |
| 139 pmd->CreateAllocatorDump(buffer_dump_name); | |
| 140 | |
| 141 uint64_t buffer_size_in_bytes = | |
| 142 ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format); | |
| 143 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | |
| 144 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | |
| 145 buffer_size_in_bytes); | |
| 146 buffer_dump->AddScalar("free_size", | |
| 147 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | |
| 148 in_free_list ? buffer_size_in_bytes : 0); | |
| 149 | |
| 150 // Emit an ownership edge towards a global allocator dump node. | |
| 151 const uint64 tracing_process_id = | |
| 152 base::trace_event::MemoryDumpManager::GetInstance() | |
| 153 ->GetTracingProcessId(); | |
| 154 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid = | |
| 155 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id); | |
| 156 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); | |
| 157 | |
| 158 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps) | |
| 159 // the tracing UI will account the effective size of the buffer to the child. | |
| 160 const int kImportance = 2; | |
| 161 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance); | |
| 162 } | 131 } |
| 163 | 132 |
| 164 // static | 133 // static |
| 165 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 134 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
| 166 base::SequencedTaskRunner* task_runner, | 135 base::SequencedTaskRunner* task_runner, |
| 167 TaskGraphRunner* task_graph_runner, | 136 TaskGraphRunner* task_graph_runner, |
| 168 ContextProvider* context_provider, | 137 ContextProvider* context_provider, |
| 169 ResourceProvider* resource_provider, | 138 ResourceProvider* resource_provider, |
| 139 ResourcePool* resource_pool, |
| 170 int max_copy_texture_chromium_size, | 140 int max_copy_texture_chromium_size, |
| 171 bool use_persistent_gpu_memory_buffers, | 141 bool have_persistent_gpu_memory_buffers) { |
| 172 int max_staging_buffers) { | |
| 173 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 142 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
| 174 task_runner, task_graph_runner, resource_provider, | 143 task_runner, task_graph_runner, context_provider, resource_provider, |
| 175 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, | 144 resource_pool, max_copy_texture_chromium_size, |
| 176 max_staging_buffers)); | 145 have_persistent_gpu_memory_buffers)); |
| 177 } | 146 } |
| 178 | 147 |
| 179 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 148 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( |
| 180 base::SequencedTaskRunner* task_runner, | 149 base::SequencedTaskRunner* task_runner, |
| 181 TaskGraphRunner* task_graph_runner, | 150 TaskGraphRunner* task_graph_runner, |
| 151 ContextProvider* context_provider, |
| 182 ResourceProvider* resource_provider, | 152 ResourceProvider* resource_provider, |
| 153 ResourcePool* resource_pool, |
| 183 int max_copy_texture_chromium_size, | 154 int max_copy_texture_chromium_size, |
| 184 bool use_persistent_gpu_memory_buffers, | 155 bool have_persistent_gpu_memory_buffers) |
| 185 int max_staging_buffers) | |
| 186 : task_runner_(task_runner), | 156 : task_runner_(task_runner), |
| 187 task_graph_runner_(task_graph_runner), | 157 task_graph_runner_(task_graph_runner), |
| 188 namespace_token_(task_graph_runner->GetNamespaceToken()), | 158 namespace_token_(task_graph_runner->GetNamespaceToken()), |
| 159 context_provider_(context_provider), |
| 189 resource_provider_(resource_provider), | 160 resource_provider_(resource_provider), |
| 161 resource_pool_(resource_pool), |
| 190 max_bytes_per_copy_operation_( | 162 max_bytes_per_copy_operation_( |
| 191 max_copy_texture_chromium_size | 163 max_copy_texture_chromium_size |
| 192 ? std::min(kMaxBytesPerCopyOperation, | 164 ? std::min(kMaxBytesPerCopyOperation, |
| 193 max_copy_texture_chromium_size) | 165 max_copy_texture_chromium_size) |
| 194 : kMaxBytesPerCopyOperation), | 166 : kMaxBytesPerCopyOperation), |
| 195 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), | 167 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), |
| 168 last_issued_copy_operation_(0), |
| 169 last_flushed_copy_operation_(0), |
| 170 lock_(), |
| 171 copy_operation_count_cv_(&lock_), |
| 196 bytes_scheduled_since_last_flush_(0), | 172 bytes_scheduled_since_last_flush_(0), |
| 197 max_staging_buffers_(max_staging_buffers), | 173 issued_copy_operation_count_(0), |
| 198 staging_buffer_expiration_delay_( | 174 next_copy_operation_sequence_(1), |
| 199 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), | 175 check_for_completed_copy_operations_pending_(false), |
| 200 reduce_memory_usage_pending_(false), | 176 shutdown_(false), |
| 201 weak_ptr_factory_(this), | 177 weak_ptr_factory_(this), |
| 202 task_set_finished_weak_ptr_factory_(this) { | 178 task_set_finished_weak_ptr_factory_(this) { |
| 203 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( | 179 DCHECK(context_provider_); |
| 204 this, base::ThreadTaskRunnerHandle::Get()); | |
| 205 reduce_memory_usage_callback_ = | |
| 206 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage, | |
| 207 weak_ptr_factory_.GetWeakPtr()); | |
| 208 } | 180 } |
| 209 | 181 |
| 210 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 182 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
| 211 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( | 183 DCHECK_EQ(pending_copy_operations_.size(), 0u); |
| 212 this); | |
| 213 } | 184 } |
| 214 | 185 |
| 215 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 186 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
| 216 return this; | 187 return this; |
| 217 } | 188 } |
| 218 | 189 |
| 219 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 190 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
| 220 client_ = client; | 191 client_ = client; |
| 221 } | 192 } |
| 222 | 193 |
| 223 void OneCopyTileTaskWorkerPool::Shutdown() { | 194 void OneCopyTileTaskWorkerPool::Shutdown() { |
| 224 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 195 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
| 225 | 196 |
| 197 { |
| 198 base::AutoLock lock(lock_); |
| 199 |
| 200 shutdown_ = true; |
| 201 copy_operation_count_cv_.Signal(); |
| 202 } |
| 203 |
| 226 TaskGraph empty; | 204 TaskGraph empty; |
| 227 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 205 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
| 228 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 206 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
| 229 | |
| 230 base::AutoLock lock(lock_); | |
| 231 | |
| 232 if (buffers_.empty()) | |
| 233 return; | |
| 234 | |
| 235 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); | |
| 236 } | 207 } |
| 237 | 208 |
| 238 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 209 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
| 239 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 210 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
| 240 | 211 |
| 212 #if DCHECK_IS_ON() |
| 213 { |
| 214 base::AutoLock lock(lock_); |
| 215 DCHECK(!shutdown_); |
| 216 } |
| 217 #endif |
| 218 |
| 241 if (tasks_pending_.none()) | 219 if (tasks_pending_.none()) |
| 242 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 220 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
| 243 | 221 |
| 244 // Mark all task sets as pending. | 222 // Mark all task sets as pending. |
| 245 tasks_pending_.set(); | 223 tasks_pending_.set(); |
| 246 | 224 |
| 247 size_t priority = kTileTaskPriorityBase; | 225 size_t priority = kTileTaskPriorityBase; |
| 248 | 226 |
| 249 graph_.Reset(); | 227 graph_.Reset(); |
| 250 | 228 |
| 251 // Cancel existing OnTaskSetFinished callbacks. | 229 // Cancel existing OnTaskSetFinished callbacks. |
| 252 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 230 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
| 253 | 231 |
| 254 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; | 232 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
| 255 | 233 |
| 256 size_t task_count[kNumberOfTaskSets] = {0}; | 234 size_t task_count[kNumberOfTaskSets] = {0}; |
| 257 | 235 |
| 258 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 236 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
| 259 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( | 237 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( |
| 260 task_runner_.get(), | 238 task_runner_.get(), |
| 261 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, | 239 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, |
| 262 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); | 240 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); |
| 263 } | 241 } |
| 264 | 242 |
| 243 resource_pool_->CheckBusyResources(false); |
| 244 |
| 265 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 245 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
| 266 it != queue->items.end(); ++it) { | 246 it != queue->items.end(); ++it) { |
| 267 const TileTaskQueue::Item& item = *it; | 247 const TileTaskQueue::Item& item = *it; |
| 268 RasterTask* task = item.task; | 248 RasterTask* task = item.task; |
| 269 DCHECK(!task->HasCompleted()); | 249 DCHECK(!task->HasCompleted()); |
| 270 | 250 |
| 271 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 251 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
| 272 if (!item.task_sets[task_set]) | 252 if (!item.task_sets[task_set]) |
| 273 continue; | 253 continue; |
| 274 | 254 |
| 275 ++task_count[task_set]; | 255 ++task_count[task_set]; |
| 276 | 256 |
| 277 graph_.edges.push_back( | 257 graph_.edges.push_back( |
| 278 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | 258 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); |
| 279 } | 259 } |
| 280 | 260 |
| 281 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 261 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
| 282 } | 262 } |
| 283 | 263 |
| 284 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 264 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
| 285 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), | 265 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), |
| 286 kTaskSetFinishedTaskPriorityBase + task_set, | 266 kTaskSetFinishedTaskPriorityBase + task_set, |
| 287 task_count[task_set]); | 267 task_count[task_set]); |
| 288 } | 268 } |
| 289 | 269 |
| 290 ScheduleTasksOnOriginThread(this, &graph_); | 270 ScheduleTasksOnOriginThread(this, &graph_); |
| 291 | |
| 292 // Barrier to sync any new resources to the worker context. | |
| 293 resource_provider_->output_surface() | |
| 294 ->context_provider() | |
| 295 ->ContextGL() | |
| 296 ->OrderingBarrierCHROMIUM(); | |
| 297 | |
| 298 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 271 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
| 299 | 272 |
| 300 std::copy(new_task_set_finished_tasks, | 273 std::copy(new_task_set_finished_tasks, |
| 301 new_task_set_finished_tasks + kNumberOfTaskSets, | 274 new_task_set_finished_tasks + kNumberOfTaskSets, |
| 302 task_set_finished_tasks_); | 275 task_set_finished_tasks_); |
| 303 | 276 |
| 277 resource_pool_->ReduceResourceUsage(); |
| 278 |
| 304 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", | 279 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", |
| 305 StateAsValue()); | 280 StateAsValue()); |
| 306 } | 281 } |
| 307 | 282 |
| 308 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | 283 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { |
| 309 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | 284 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); |
| 310 | 285 |
| 311 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 286 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
| 312 &completed_tasks_); | 287 &completed_tasks_); |
| 313 | 288 |
| (...skipping 18 matching lines...) |
| 332 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 307 return !PlatformColor::SameComponentOrder(GetResourceFormat()); |
| 333 } | 308 } |
| 334 | 309 |
| 335 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 310 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
| 336 const Resource* resource, | 311 const Resource* resource, |
| 337 uint64_t resource_content_id, | 312 uint64_t resource_content_id, |
| 338 uint64_t previous_content_id) { | 313 uint64_t previous_content_id) { |
| 339 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 314 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
| 340 // the dirty rect. | 315 // the dirty rect. |
| 341 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | 316 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); |
| 342 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( | 317 return make_scoped_ptr<RasterBuffer>( |
| 343 this, resource_provider_, resource_provider_->best_texture_format(), | 318 new RasterBufferImpl(this, resource_provider_, resource_pool_, |
| 344 resource, previous_content_id)); | 319 resource_provider_->best_texture_format(), resource, |
| 320 previous_content_id)); |
| 345 } | 321 } |
| 346 | 322 |
| 347 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 323 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
| 348 scoped_ptr<RasterBuffer> buffer) { | 324 scoped_ptr<RasterBuffer> buffer) { |
| 349 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 325 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
| 350 } | 326 } |
| 351 | 327 |
| 352 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( | 328 CopySequenceNumber |
| 353 const Resource* resource, | 329 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( |
| 354 const ResourceProvider::ScopedWriteLockGL* resource_lock, | 330 bool reusing_raster_resource, |
| 331 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> |
| 332 raster_resource_write_lock, |
| 333 const Resource* raster_resource, |
| 334 const Resource* output_resource, |
| 355 const RasterSource* raster_source, | 335 const RasterSource* raster_source, |
| 356 const gfx::Rect& raster_full_rect, | 336 const gfx::Rect& raster_full_rect, |
| 357 const gfx::Rect& raster_dirty_rect, | 337 const gfx::Rect& raster_dirty_rect, |
| 358 float scale, | 338 float scale, |
| 359 bool include_images, | 339 bool include_images) { |
| 360 uint64_t previous_content_id, | 340 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
| 361 uint64_t new_content_id) { | 341 raster_resource_write_lock->GetGpuMemoryBuffer(); |
| 342 if (gpu_memory_buffer) { |
| 343 DCHECK_EQ( |
| 344 1u, gfx::NumberOfPlanesForBufferFormat(gpu_memory_buffer->GetFormat())); |
| 345 void* data = NULL; |
| 346 bool rv = gpu_memory_buffer->Map(&data); |
| 347 DCHECK(rv); |
| 348 int stride; |
| 349 gpu_memory_buffer->GetStride(&stride); |
| 350 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. |
| 351 DCHECK_GE(stride, 0); |
| 352 |
| 353 gfx::Rect playback_rect = raster_full_rect; |
| 354 if (reusing_raster_resource) { |
| 355 playback_rect.Intersect(raster_dirty_rect); |
| 356 } |
| 357 DCHECK(!playback_rect.IsEmpty()) |
| 358 << "Why are we rastering a tile that's not dirty?"; |
| 359 TileTaskWorkerPool::PlaybackToMemory( |
| 360 data, raster_resource->format(), raster_resource->size(), |
| 361 static_cast<size_t>(stride), raster_source, raster_full_rect, |
| 362 playback_rect, scale, include_images); |
| 363 gpu_memory_buffer->Unmap(); |
| 364 } |
| 365 |
| 362 base::AutoLock lock(lock_); | 366 base::AutoLock lock(lock_); |
| 363 | 367 |
| 364 scoped_ptr<StagingBuffer> staging_buffer = | 368 CopySequenceNumber sequence = 0; |
| 365 AcquireStagingBuffer(resource, previous_content_id); | 369 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * |
| 366 DCHECK(staging_buffer); | 370 raster_resource->size().width()) / |
| 371 8; |
| 372 int chunk_size_in_rows = |
| 373 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
| 374 // Align chunk size to 4. Required to support compressed texture formats. |
| 375 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); |
| 376 int y = 0; |
| 377 int height = raster_resource->size().height(); |
| 378 while (y < height) { |
| 379 int failed_attempts = 0; |
| 380 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= |
| 381 kMaxCopyOperations) { |
| 382 // Ignore limit when shutdown is set. |
| 383 if (shutdown_) |
| 384 break; |
| 367 | 385 |
| 368 { | 386 ++failed_attempts; |
| 369 base::AutoUnlock unlock(lock_); | |
| 370 | 387 |
| 371 // Allocate GpuMemoryBuffer if necessary. | 388 // Schedule a check that will also wait for operations to complete |
| 372 if (!staging_buffer->gpu_memory_buffer) { | 389 // after too many failed attempts. |
| 373 staging_buffer->gpu_memory_buffer = | 390 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; |
| 374 resource_provider_->gpu_memory_buffer_manager() | 391 |
| 375 ->AllocateGpuMemoryBuffer( | 392 // Schedule a check for completed copy operations if too many operations |
| 376 staging_buffer->size, | 393 // are currently in-flight. |
| 377 BufferFormat(resource_provider_->best_texture_format()), | 394 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); |
| 378 use_persistent_gpu_memory_buffers_ | 395 |
| 379 ? gfx::BufferUsage::PERSISTENT_MAP | 396 { |
| 380 : gfx::BufferUsage::MAP); | 397 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); |
| 381 DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat( | 398 |
| 382 staging_buffer->gpu_memory_buffer->GetFormat()), | 399 // Wait for in-flight copy operations to drop below limit. |
| 383 1u); | 400 copy_operation_count_cv_.Wait(); |
| 401 } |
| 384 } | 402 } |
| 385 | 403 |
| 386 gfx::Rect playback_rect = raster_full_rect; | 404 // There may be more work available, so wake up another worker thread. |
| 387 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { | 405 copy_operation_count_cv_.Signal(); |
| 388 // Reduce playback rect to dirty region if the content id of the staging | |
| 389 // buffer matches the previous content id. | |
| 390 if (previous_content_id == staging_buffer->content_id) | |
| 391 playback_rect.Intersect(raster_dirty_rect); | |
| 392 } | |
| 393 | 406 |
| 394 if (staging_buffer->gpu_memory_buffer) { | 407 // Copy at most |chunk_size_in_rows|. |
| 395 void* data = nullptr; | 408 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
| 396 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); | 409 DCHECK_GT(rows_to_copy, 0); |
| 397 DCHECK(rv); | |
| 398 int stride; | |
| 399 staging_buffer->gpu_memory_buffer->GetStride(&stride); | |
| 400 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | |
| 401 DCHECK_GE(stride, 0); | |
| 402 | 410 |
| 403 DCHECK(!playback_rect.IsEmpty()) | 411 // |raster_resource_write_lock| is passed to the first copy operation as it |
| 404 << "Why are we rastering a tile that's not dirty?"; | 412 // needs to be released before we can issue a copy. |
| 405 TileTaskWorkerPool::PlaybackToMemory( | 413 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( |
| 406 data, resource_provider_->best_texture_format(), staging_buffer->size, | 414 raster_resource_write_lock.Pass(), raster_resource, output_resource, |
| 407 static_cast<size_t>(stride), raster_source, raster_full_rect, | 415 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); |
| 408 playback_rect, scale, include_images); | 416 y += rows_to_copy; |
| 409 staging_buffer->gpu_memory_buffer->Unmap(); | 417 |
| 410 staging_buffer->content_id = new_content_id; | 418 // Acquire a sequence number for this copy operation. |
| 419 sequence = next_copy_operation_sequence_++; |
| 420 |
| 421 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
| 422 // used for this copy operation. |
| 423 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
| 424 |
| 425 // Post task that will advance last flushed copy operation to |sequence| |
| 426 // when |bytes_scheduled_since_last_flush_| has reached |
| 427 // |max_bytes_per_copy_operation_|. |
| 428 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { |
| 429 task_runner_->PostTask( |
| 430 FROM_HERE, |
| 431 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, |
| 432 weak_ptr_factory_.GetWeakPtr(), sequence)); |
| 433 bytes_scheduled_since_last_flush_ = 0; |
| 411 } | 434 } |
| 412 } | 435 } |
| 413 | 436 |
| 414 ContextProvider* context_provider = | 437 return sequence; |
| 415 resource_provider_->output_surface()->worker_context_provider(); | |
| 416 DCHECK(context_provider); | |
| 417 | |
| 418 { | |
| 419 ContextProvider::ScopedContextLock scoped_context(context_provider); | |
| 420 | |
| 421 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 422 DCHECK(gl); | |
| 423 | |
| 424 unsigned image_target = resource_provider_->GetImageTextureTarget( | |
| 425 resource_provider_->best_texture_format()); | |
| 426 | |
| 427 // Create and bind staging texture. | |
| 428 if (!staging_buffer->texture_id) { | |
| 429 gl->GenTextures(1, &staging_buffer->texture_id); | |
| 430 gl->BindTexture(image_target, staging_buffer->texture_id); | |
| 431 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
| 432 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | |
| 433 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | |
| 434 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | |
| 435 } else { | |
| 436 gl->BindTexture(image_target, staging_buffer->texture_id); | |
| 437 } | |
| 438 | |
| 439 // Create and bind image. | |
| 440 if (!staging_buffer->image_id) { | |
| 441 if (staging_buffer->gpu_memory_buffer) { | |
| 442 staging_buffer->image_id = gl->CreateImageCHROMIUM( | |
| 443 staging_buffer->gpu_memory_buffer->AsClientBuffer(), | |
| 444 staging_buffer->size.width(), staging_buffer->size.height(), | |
| 445 GLInternalFormat(resource_provider_->best_texture_format())); | |
| 446 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | |
| 447 } | |
| 448 } else { | |
| 449 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | |
| 450 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | |
| 451 } | |
| 452 | |
| 453 // Unbind staging texture. | |
| 454 gl->BindTexture(image_target, 0); | |
| 455 | |
| 456 if (resource_provider_->use_sync_query()) { | |
| 457 if (!staging_buffer->query_id) | |
| 458 gl->GenQueriesEXT(1, &staging_buffer->query_id); | |
| 459 | |
| 460 #if defined(OS_CHROMEOS) | |
| 461 // TODO(reveman): This avoids a performance problem on some ChromeOS | |
| 462 // devices. This needs to be removed to support native GpuMemoryBuffer | |
| 463 // implementations. crbug.com/436314 | |
| 464 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); | |
| 465 #else | |
| 466 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, | |
| 467 staging_buffer->query_id); | |
| 468 #endif | |
| 469 } | |
| 470 | |
| 471 int bytes_per_row = | |
| 472 (BitsPerPixel(resource_provider_->best_texture_format()) * | |
| 473 resource->size().width()) / | |
| 474 8; | |
| 475 int chunk_size_in_rows = | |
| 476 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | |
| 477 // Align chunk size to 4. Required to support compressed texture formats. | |
| 478 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); | |
| 479 int y = 0; | |
| 480 int height = resource->size().height(); | |
| 481 while (y < height) { | |
| 482 // Copy at most |chunk_size_in_rows|. | |
| 483 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | |
| 484 DCHECK_GT(rows_to_copy, 0); | |
| 485 | |
| 486 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, | |
| 487 resource_lock->texture_id(), 0, y, 0, y, | |
| 488 resource->size().width(), rows_to_copy, false, | |
| 489 false, false); | |
| 490 y += rows_to_copy; | |
| 491 | |
| 492 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | |
| 493 // used for this copy operation. | |
| 494 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | |
| 495 | |
| 496 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | |
| 497 gl->ShallowFlushCHROMIUM(); | |
| 498 bytes_scheduled_since_last_flush_ = 0; | |
| 499 } | |
| 500 } | |
| 501 | |
| 502 if (resource_provider_->use_sync_query()) { | |
| 503 #if defined(OS_CHROMEOS) | |
| 504 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); | |
| 505 #else | |
| 506 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); | |
| 507 #endif | |
| 508 } | |
| 509 | |
| 510 // Barrier to sync worker context output to cc context. | |
| 511 gl->OrderingBarrierCHROMIUM(); | |
| 512 } | |
| 513 | |
| 514 staging_buffer->last_usage = base::TimeTicks::Now(); | |
| 515 busy_buffers_.push_back(staging_buffer.Pass()); | |
| 516 | |
| 517 ScheduleReduceMemoryUsage(); | |
| 518 } | 438 } |
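The NEW column throttles raster workers with a lock and condition variable: a worker blocks while (pending + issued) copy operations are at kMaxCopyOperations, and every completion check signals the CV. A minimal self-contained sketch of that back-pressure pattern with std:: primitives (a simplification, not the class's actual API):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    struct CopyThrottle {
      std::mutex mutex;
      std::condition_variable cv;
      size_t in_flight = 0;
      static constexpr size_t kMaxInFlight = 32;  // mirrors kMaxCopyOperations

      // Raster-worker side: block while too many copies are in flight.
      void WaitForSlot() {
        std::unique_lock<std::mutex> lock(mutex);
        cv.wait(lock, [this] { return in_flight < kMaxInFlight; });
        ++in_flight;
        cv.notify_one();  // more work may be schedulable; wake another waiter
      }

      // Completion-check side: a copy finished, free a slot.
      void MarkCompleted() {
        std::lock_guard<std::mutex> lock(mutex);
        --in_flight;
        cv.notify_one();
      }
    };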
| 519 | 439 |
| 520 bool OneCopyTileTaskWorkerPool::OnMemoryDump( | 440 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( |
| 521 const base::trace_event::MemoryDumpArgs& args, | 441 CopySequenceNumber sequence) { |
| 522 base::trace_event::ProcessMemoryDump* pmd) { | 442 if (last_issued_copy_operation_ >= sequence) |
| 523 base::AutoLock lock(lock_); | 443 return; |
| 524 | 444 |
| 525 for (const auto& buffer : buffers_) { | 445 IssueCopyOperations(sequence - last_issued_copy_operation_); |
| 526 buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(), | 446 last_issued_copy_operation_ = sequence; |
| 527 std::find(free_buffers_.begin(), free_buffers_.end(), | |
| 528 buffer) != free_buffers_.end()); | |
| 529 } | |
| 530 | |
| 531 return true; | |
| 532 } | 447 } |
| 533 | 448 |
| 534 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> | 449 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( |
| 535 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, | 450 CopySequenceNumber sequence) { |
| 536 uint64_t previous_content_id) { | 451 if (last_flushed_copy_operation_ >= sequence) |
| 537 lock_.AssertAcquired(); | |
| 538 | |
| 539 scoped_ptr<StagingBuffer> staging_buffer; | |
| 540 | |
| 541 ContextProvider* context_provider = | |
| 542 resource_provider_->output_surface()->worker_context_provider(); | |
| 543 DCHECK(context_provider); | |
| 544 | |
| 545 ContextProvider::ScopedContextLock scoped_context(context_provider); | |
| 546 | |
| 547 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 548 DCHECK(gl); | |
| 549 | |
| 550 // Check if any busy buffers have become available. | |
| 551 if (resource_provider_->use_sync_query()) { | |
| 552 while (!busy_buffers_.empty()) { | |
| 553 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) | |
| 554 break; | |
| 555 | |
| 556 free_buffers_.push_back(busy_buffers_.take_front()); | |
| 557 } | |
| 558 } | |
| 559 | |
| 560 // Wait for number of non-free buffers to become less than the limit. | |
| 561 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) { | |
| 562 // Stop when there are no more busy buffers to wait for. | |
| 563 if (busy_buffers_.empty()) | |
| 564 break; | |
| 565 | |
| 566 if (resource_provider_->use_sync_query()) { | |
| 567 WaitForQueryResult(gl, busy_buffers_.front()->query_id); | |
| 568 free_buffers_.push_back(busy_buffers_.take_front()); | |
| 569 } else { | |
| 570 // Fall-back to glFinish if CHROMIUM_sync_query is not available. | |
| 571 gl->Finish(); | |
| 572 while (!busy_buffers_.empty()) | |
| 573 free_buffers_.push_back(busy_buffers_.take_front()); | |
| 574 } | |
| 575 } | |
| 576 | |
| 577 // Find a staging buffer that allows us to perform partial raster when | |
| 578 // using persistent GpuMemoryBuffers. | |
| 579 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { | |
| 580 StagingBufferDeque::iterator it = | |
| 581 std::find_if(free_buffers_.begin(), free_buffers_.end(), | |
| 582 [previous_content_id](const StagingBuffer* buffer) { | |
| 583 return buffer->content_id == previous_content_id; | |
| 584 }); | |
| 585 if (it != free_buffers_.end()) | |
| 586 staging_buffer = free_buffers_.take(it); | |
| 587 } | |
| 588 | |
| 589 // Find staging buffer of correct size. | |
| 590 if (!staging_buffer) { | |
| 591 StagingBufferDeque::iterator it = | |
| 592 std::find_if(free_buffers_.begin(), free_buffers_.end(), | |
| 593 [resource](const StagingBuffer* buffer) { | |
| 594 return buffer->size == resource->size(); | |
| 595 }); | |
| 596 if (it != free_buffers_.end()) | |
| 597 staging_buffer = free_buffers_.take(it); | |
| 598 } | |
| 599 | |
| 600 // Create new staging buffer if necessary. | |
| 601 if (!staging_buffer) { | |
| 602 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); | |
| 603 buffers_.insert(staging_buffer.get()); | |
| 604 } | |
| 605 | |
| 606 // Release enough free buffers to stay within the limit. | |
| 607 while (buffers_.size() > max_staging_buffers_) { | |
| 608 if (free_buffers_.empty()) | |
| 609 break; | |
| 610 | |
| 611 free_buffers_.front()->DestroyGLResources(gl); | |
| 612 buffers_.erase(free_buffers_.front()); | |
| 613 free_buffers_.take_front(); | |
| 614 } | |
| 615 | |
| 616 return staging_buffer.Pass(); | |
| 617 } | |
| 618 | |
| 619 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() { | |
| 620 lock_.AssertAcquired(); | |
| 621 | |
| 622 if (!free_buffers_.empty()) | |
| 623 return free_buffers_.front()->last_usage; | |
| 624 | |
| 625 if (!busy_buffers_.empty()) | |
| 626 return busy_buffers_.front()->last_usage; | |
| 627 | |
| 628 return base::TimeTicks(); | |
| 629 } | |
| 630 | |
| 631 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() { | |
| 632 lock_.AssertAcquired(); | |
| 633 | |
| 634 if (reduce_memory_usage_pending_) | |
| 635 return; | 452 return; |
| 636 | 453 |
| 637 reduce_memory_usage_pending_ = true; | 454 AdvanceLastIssuedCopyTo(sequence); |
| 638 | 455 |
| 639 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer | 456 // Flush all issued copy operations. |
| 640 // should be released. | 457 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); |
| 641 base::TimeTicks reduce_memory_usage_time = | 458 last_flushed_copy_operation_ = last_issued_copy_operation_; |
| 642 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; | |
| 643 task_runner_->PostDelayedTask( | |
| 644 FROM_HERE, reduce_memory_usage_callback_, | |
| 645 reduce_memory_usage_time - base::TimeTicks::Now()); | |
| 646 } | |
| 647 | |
| 648 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() { | |
| 649 base::AutoLock lock(lock_); | |
| 650 | |
| 651 reduce_memory_usage_pending_ = false; | |
| 652 | |
| 653 if (free_buffers_.empty() && busy_buffers_.empty()) | |
| 654 return; | |
| 655 | |
| 656 base::TimeTicks current_time = base::TimeTicks::Now(); | |
| 657 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_); | |
| 658 | |
| 659 if (free_buffers_.empty() && busy_buffers_.empty()) | |
| 660 return; | |
| 661 | |
| 662 reduce_memory_usage_pending_ = true; | |
| 663 | |
| 664 // Schedule another call to ReduceMemoryUsage at the time when the next | |
| 665 // buffer should be released. | |
| 666 base::TimeTicks reduce_memory_usage_time = | |
| 667 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; | |
| 668 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_, | |
| 669 reduce_memory_usage_time - current_time); | |
| 670 } | |
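To make the rescheduling above concrete, a hypothetical timeline with the 1000ms expiration delay (times illustrative):

    // t=0.0s  buffer A enters free list -> ReduceMemoryUsage posted for t=1.0s
    // t=0.4s  buffer B enters free list
    // t=1.0s  task runs: A expired and is destroyed; B is now the LRU buffer,
    //         so the task reposts itself for t=1.4s
    // t=1.4s  task runs: B destroyed; both lists empty, no reschedule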
| 671 | |
| 672 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince( | |
| 673 base::TimeTicks time) { | |
| 674 lock_.AssertAcquired(); | |
| 675 | |
| 676 ContextProvider* context_provider = | |
| 677 resource_provider_->output_surface()->worker_context_provider(); | |
| 678 DCHECK(context_provider); | |
| 679 | |
| 680 { | |
| 681 ContextProvider::ScopedContextLock scoped_context(context_provider); | |
| 682 | |
| 683 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 684 DCHECK(gl); | |
| 685 | |
| 686 // Note: Front buffer is guaranteed to be LRU so we can stop releasing | |
| 687 // buffers as soon as we find a buffer that has been used since |time|. | |
| 688 while (!free_buffers_.empty()) { | |
| 689 if (free_buffers_.front()->last_usage > time) | |
| 690 return; | |
| 691 | |
| 692 free_buffers_.front()->DestroyGLResources(gl); | |
| 693 buffers_.erase(free_buffers_.front()); | |
| 694 free_buffers_.take_front(); | |
| 695 } | |
| 696 | |
| 697 while (!busy_buffers_.empty()) { | |
| 698 if (busy_buffers_.front()->last_usage > time) | |
| 699 return; | |
| 700 | |
| 701 busy_buffers_.front()->DestroyGLResources(gl); | |
| 702 buffers_.erase(busy_buffers_.front()); | |
| 703 busy_buffers_.take_front(); | |
| 704 } | |
| 705 } | |
| 706 } | 459 } |
| 707 | 460 |
| 708 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 461 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
| 709 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 462 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
| 710 task_set); | 463 task_set); |
| 711 | 464 |
| 712 DCHECK(tasks_pending_[task_set]); | 465 DCHECK(tasks_pending_[task_set]); |
| 713 tasks_pending_[task_set] = false; | 466 tasks_pending_[task_set] = false; |
| 714 if (tasks_pending_.any()) { | 467 if (tasks_pending_.any()) { |
| 715 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 468 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
| 716 "state", StateAsValue()); | 469 "state", StateAsValue()); |
| 717 } else { | 470 } else { |
| 718 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 471 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
| 719 } | 472 } |
| 720 client_->DidFinishRunningTileTasks(task_set); | 473 client_->DidFinishRunningTileTasks(task_set); |
| 721 } | 474 } |
| 722 | 475 |
| 476 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { |
| 477 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", |
| 478 count); |
| 479 |
| 480 CopyOperation::Deque copy_operations; |
| 481 |
| 482 { |
| 483 base::AutoLock lock(lock_); |
| 484 |
| 485 for (int64 i = 0; i < count; ++i) { |
| 486 DCHECK(!pending_copy_operations_.empty()); |
| 487 copy_operations.push_back(pending_copy_operations_.take_front()); |
| 488 } |
| 489 |
| 490 // Increment |issued_copy_operation_count_| to reflect the transition of |
| 491 // copy operations from "pending" to "issued" state. |
| 492 issued_copy_operation_count_ += copy_operations.size(); |
| 493 } |
| 494 |
| 495 while (!copy_operations.empty()) { |
| 496 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); |
| 497 |
| 498 // Remove the write lock. |
| 499 copy_operation->src_write_lock.reset(); |
| 500 |
| 501 // Copy contents of source resource to destination resource. |
| 502 resource_provider_->CopyResource(copy_operation->src->id(), |
| 503 copy_operation->dst->id(), |
| 504 copy_operation->rect); |
| 505 } |
| 506 } |
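AdvanceLastIssuedCopyTo relies on monotonically increasing sequence numbers so a single call can issue every copy scheduled up to a given point. Stripped to its core (a sketch; the real method also runs on the origin thread, and the flush variant additionally calls ShallowFlushCHROMIUM):

    #include <cstdint>

    uint64_t next_sequence = 1;  // handed out when a copy is scheduled
    uint64_t last_issued = 0;    // highest sequence issued so far

    // Stand-in for the real IssueCopyOperations(count).
    void IssueCopies(uint64_t count) { /* pop |count| ops and issue them */ }

    void AdvanceLastIssuedTo(uint64_t sequence) {
      if (last_issued >= sequence)
        return;                             // that range was already issued
      IssueCopies(sequence - last_issued);  // issue exactly the missing ops
      last_issued = sequence;
    }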
| 507 |
| 508 void OneCopyTileTaskWorkerPool:: |
| 509 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( |
| 510 bool wait_if_needed) { |
| 511 lock_.AssertAcquired(); |
| 512 |
| 513 if (check_for_completed_copy_operations_pending_) |
| 514 return; |
| 515 |
| 516 base::TimeTicks now = base::TimeTicks::Now(); |
| 517 |
| 518 // Schedule a check for completed copy operations as soon as possible but |
| 519 // don't allow two consecutive checks to be scheduled to run less than the |
| 520 // tick rate apart. |
| 521 base::TimeTicks next_check_for_completed_copy_operations_time = |
| 522 std::max(last_check_for_completed_copy_operations_time_ + |
| 523 base::TimeDelta::FromMilliseconds( |
| 524 kCheckForCompletedCopyOperationsTickRateMs), |
| 525 now); |
| 526 |
| 527 task_runner_->PostDelayedTask( |
| 528 FROM_HERE, |
| 529 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, |
| 530 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), |
| 531 next_check_for_completed_copy_operations_time - now); |
| 532 |
| 533 last_check_for_completed_copy_operations_time_ = |
| 534 next_check_for_completed_copy_operations_time; |
| 535 check_for_completed_copy_operations_pending_ = true; |
| 536 } |
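A quick worked example of the scheduling clamp above (tick rate 1ms, times hypothetical):

    // last check scheduled for t=10.0ms, now=10.3ms -> next = max(11.0, 10.3) = 11.0ms
    // last check scheduled for t=10.0ms, now=12.5ms -> next = max(11.0, 12.5) = 12.5ms
    // i.e. checks run as soon as possible, but never closer than 1ms apart.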
| 537 |
| 538 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( |
| 539 bool wait_if_needed) { |
| 540 TRACE_EVENT1("cc", |
| 541 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", |
| 542 "wait_if_needed", wait_if_needed); |
| 543 |
| 544 resource_pool_->CheckBusyResources(wait_if_needed); |
| 545 |
| 546 { |
| 547 base::AutoLock lock(lock_); |
| 548 |
| 549 DCHECK(check_for_completed_copy_operations_pending_); |
| 550 check_for_completed_copy_operations_pending_ = false; |
| 551 |
| 552 // The number of busy resources in the pool reflects the number of issued |
| 553 // copy operations that have not yet completed. |
| 554 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); |
| 555 |
| 556 // There may be work blocked on too many in-flight copy operations, so wake |
| 557 // up a worker thread. |
| 558 copy_operation_count_cv_.Signal(); |
| 559 } |
| 560 } |
| 561 |
| 723 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 562 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
| 724 OneCopyTileTaskWorkerPool::StateAsValue() const { | 563 OneCopyTileTaskWorkerPool::StateAsValue() const { |
| 725 scoped_refptr<base::trace_event::TracedValue> state = | 564 scoped_refptr<base::trace_event::TracedValue> state = |
| 726 new base::trace_event::TracedValue(); | 565 new base::trace_event::TracedValue(); |
| 727 | 566 |
| 728 state->BeginArray("tasks_pending"); | 567 state->BeginArray("tasks_pending"); |
| 729 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 568 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
| 730 state->AppendBoolean(tasks_pending_[task_set]); | 569 state->AppendBoolean(tasks_pending_[task_set]); |
| 731 state->EndArray(); | 570 state->EndArray(); |
| 732 state->BeginDictionary("staging_state"); | 571 state->BeginDictionary("staging_state"); |
| 733 StagingStateAsValueInto(state.get()); | 572 StagingStateAsValueInto(state.get()); |
| 734 state->EndDictionary(); | 573 state->EndDictionary(); |
| 735 | 574 |
| 736 return state; | 575 return state; |
| 737 } | 576 } |
| 738 | 577 |
| 739 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | 578 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( |
| 740 base::trace_event::TracedValue* staging_state) const { | 579 base::trace_event::TracedValue* staging_state) const { |
| 741 base::AutoLock lock(lock_); | 580 staging_state->SetInteger( |
| 742 | 581 "staging_resource_count", |
| 743 staging_state->SetInteger("staging_buffer_count", | 582 static_cast<int>(resource_pool_->total_resource_count())); |
| 744 static_cast<int>(buffers_.size())); | 583 staging_state->SetInteger( |
| 745 staging_state->SetInteger("busy_count", | 584 "bytes_used_for_staging_resources", |
| 746 static_cast<int>(busy_buffers_.size())); | 585 static_cast<int>(resource_pool_->total_memory_usage_bytes())); |
| 747 staging_state->SetInteger("free_count", | 586 staging_state->SetInteger( |
| 748 static_cast<int>(free_buffers_.size())); | 587 "pending_copy_count", |
| 588 static_cast<int>(resource_pool_->total_resource_count() - |
| 589 resource_pool_->acquired_resource_count())); |
| 590 staging_state->SetInteger( |
| 591 "bytes_pending_copy", |
| 592 static_cast<int>(resource_pool_->total_memory_usage_bytes() - |
| 593 resource_pool_->acquired_memory_usage_bytes())); |
| 749 } | 594 } |
| 750 | 595 |
| 751 } // namespace cc | 596 } // namespace cc |