Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1230203007: Re-land: cc: Use worker context for one-copy tile initialization. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: address review feedback (created 5 years, 5 months ago)
OLD | NEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 9
10 #include "base/strings/stringprintf.h" 10 #include "base/strings/stringprintf.h"
11 #include "base/thread_task_runner_handle.h"
12 #include "base/trace_event/memory_dump_manager.h"
11 #include "base/trace_event/trace_event.h" 13 #include "base/trace_event/trace_event.h"
12 #include "base/trace_event/trace_event_argument.h" 14 #include "base/trace_event/trace_event_argument.h"
13 #include "cc/base/math_util.h" 15 #include "cc/base/math_util.h"
14 #include "cc/debug/traced_value.h" 16 #include "cc/debug/traced_value.h"
15 #include "cc/raster/raster_buffer.h" 17 #include "cc/raster/raster_buffer.h"
16 #include "cc/resources/platform_color.h" 18 #include "cc/resources/platform_color.h"
17 #include "cc/resources/resource_pool.h"
18 #include "cc/resources/scoped_resource.h" 19 #include "cc/resources/scoped_resource.h"
20 #include "gpu/GLES2/gl2extchromium.h"
19 #include "gpu/command_buffer/client/gles2_interface.h" 21 #include "gpu/command_buffer/client/gles2_interface.h"
20 #include "ui/gfx/gpu_memory_buffer.h" 22 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
21 23
22 namespace cc { 24 namespace cc {
23 namespace { 25 namespace {
24 26
25 class RasterBufferImpl : public RasterBuffer { 27 class RasterBufferImpl : public RasterBuffer {
26 public: 28 public:
27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, 29 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
28 ResourceProvider* resource_provider, 30 ResourceProvider* resource_provider,
29 ResourcePool* resource_pool,
30 ResourceFormat resource_format, 31 ResourceFormat resource_format,
31 const Resource* output_resource, 32 const Resource* resource,
32 uint64_t previous_content_id) 33 uint64_t previous_content_id)
33 : worker_pool_(worker_pool), 34 : worker_pool_(worker_pool),
34 resource_provider_(resource_provider), 35 resource_(resource),
35 resource_pool_(resource_pool), 36 lock_(resource_provider, resource->id()),
36 output_resource_(output_resource), 37 previous_content_id_(previous_content_id) {}
37 raster_content_id_(0),
38 sequence_(0) {
39 if (worker_pool->have_persistent_gpu_memory_buffers() &&
40 previous_content_id) {
41 raster_resource_ =
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id);
43 }
44 if (raster_resource_) {
45 raster_content_id_ = previous_content_id;
46 DCHECK_EQ(resource_format, raster_resource_->format());
47 DCHECK_EQ(output_resource->size().ToString(),
48 raster_resource_->size().ToString());
49 } else {
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
51 resource_format);
52 }
53 38
54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( 39 ~RasterBufferImpl() override {}
55 resource_provider_, raster_resource_->id()));
56 }
57
58 ~RasterBufferImpl() override {
59 // Release write lock in case a copy was never scheduled.
60 lock_.reset();
61
62 // Make sure any scheduled copy operations are issued before we release the
63 // raster resource.
64 if (sequence_)
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
66
67 // Return resources to pool so they can be used by another RasterBuffer
68 // instance.
69 resource_pool_->ReleaseResource(raster_resource_.Pass(),
70 raster_content_id_);
71 }
72 40
73 // Overridden from RasterBuffer: 41 // Overridden from RasterBuffer:
74 void Playback(const RasterSource* raster_source, 42 void Playback(const RasterSource* raster_source,
75 const gfx::Rect& raster_full_rect, 43 const gfx::Rect& raster_full_rect,
76 const gfx::Rect& raster_dirty_rect, 44 const gfx::Rect& raster_dirty_rect,
77 uint64_t new_content_id, 45 uint64_t new_content_id,
78 float scale) override { 46 float scale) override {
79 // If there's a raster_content_id_, we are reusing a resource with that 47 worker_pool_->PlaybackAndCopyOnWorkerThread(
80 // content id. 48 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
81 bool reusing_raster_resource = raster_content_id_ != 0; 49 scale, previous_content_id_, new_content_id);
82 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
83 reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
84 output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
85 scale);
86 // Store the content id of the resource to return to the pool.
87 raster_content_id_ = new_content_id;
88 } 50 }
89 51
90 private: 52 private:
91 OneCopyTileTaskWorkerPool* worker_pool_; 53 OneCopyTileTaskWorkerPool* worker_pool_;
92 ResourceProvider* resource_provider_; 54 const Resource* resource_;
93 ResourcePool* resource_pool_; 55 ResourceProvider::ScopedWriteLockGL lock_;
94 const Resource* output_resource_; 56 uint64_t previous_content_id_;
95 uint64_t raster_content_id_;
96 scoped_ptr<ScopedResource> raster_resource_;
97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
98 CopySequenceNumber sequence_;
99 57
100 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 58 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
101 }; 59 };
102 60
103 // Number of in-flight copy operations to allow. 61 // Delay between checks for the query result to become available.
104 const int kMaxCopyOperations = 32; 62 const int kCheckForQueryResultAvailableTickRateMs = 1;
105 63
106 // Delay between checking for copy operations to complete. 64 // Number of attempts to allow before we perform a check that will wait for
107 const int kCheckForCompletedCopyOperationsTickRateMs = 1; 65 // query to complete.
108 66 const int kMaxCheckForQueryResultAvailableAttempts = 256;
109 // Number of failed attempts to allow before we perform a check that will
110 // wait for copy operations to complete if needed.
111 const int kFailedAttemptsBeforeWaitIfNeeded = 256;
112 67
113 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 68 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
114 // default batch size for copy operations. 69 // default batch size for copy operations.
115 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 70 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
116 71
72 // Delay before a staging buffer might be released.
73 const int kStagingBufferExpirationDelayMs = 1000;
74
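The 4 MiB batch size above is easy to sanity-check: at 4 bytes per pixel (assuming a 32-bit format such as RGBA_8888; the actual format is chosen at runtime), one 512x512 tile is exactly 1 MiB, so four tiles is 4 MiB. A minimal compile-time check of that arithmetic:

#include <cstddef>

constexpr std::size_t kTileDim = 512;
constexpr std::size_t kBytesPerPixel = 4;  // RGBA_8888 assumption.
constexpr std::size_t kTileBytes = kTileDim * kTileDim * kBytesPerPixel;  // 1 MiB.
static_assert(4 * kTileBytes == 1024 * 1024 * 4,
              "four 512x512 32bpp tiles == kMaxBytesPerCopyOperation");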
75 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
76 GLuint complete = 1;
77 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
78 return complete;
79 }
80
81 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
82 TRACE_EVENT0("cc", "WaitForQueryResult");
83
84 int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
85 while (attempts_left--) {
86 if (CheckForQueryResult(gl, query_id))
87 break;
88
89 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
90 kCheckForQueryResultAvailableTickRateMs));
91 }
92
93 unsigned result = 0;
94 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
95 }
96
117 } // namespace 97 } // namespace
118 98
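WaitForQueryResult above bounds the cheap polling phase (up to kMaxCheckForQueryResultAvailableAttempts probes of GL_QUERY_RESULT_AVAILABLE_EXT, sleeping 1 ms between probes) before falling back to the blocking GL_QUERY_RESULT_EXT read. A self-contained sketch of the same poll-then-block pattern, with is_ready() and blocking_wait() as hypothetical stand-ins for the two GL calls:

#include <chrono>
#include <functional>
#include <thread>

void PollThenBlock(const std::function<bool()>& is_ready,
                   const std::function<void()>& blocking_wait,
                   int max_attempts = 256,
                   std::chrono::milliseconds tick = std::chrono::milliseconds(1)) {
  while (max_attempts-- > 0) {
    if (is_ready())
      break;  // Result arrived; the blocking read below returns immediately.
    std::this_thread::sleep_for(tick);  // Cheap poll instead of busy-waiting.
  }
  // Give up polling and accept a blocking wait, mirroring the final
  // GetQueryObjectuivEXT(GL_QUERY_RESULT_EXT) call, which blocks until the
  // query completes.
  blocking_wait();
}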
119 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( 99 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
120 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, 100 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}
121 const Resource* src, 101
122 const Resource* dst, 102 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
123 const gfx::Rect& rect) 103 DCHECK_EQ(texture_id, 0u);
124 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { 104 DCHECK_EQ(image_id, 0u);
105 DCHECK_EQ(query_id, 0u);
125 } 106 }
126 107
127 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { 108 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
109 gpu::gles2::GLES2Interface* gl) {
110 if (query_id) {
111 gl->DeleteQueriesEXT(1, &query_id);
112 query_id = 0;
113 }
114 if (image_id) {
115 gl->DestroyImageCHROMIUM(image_id);
116 image_id = 0;
117 }
118 if (texture_id) {
119 gl->DeleteTextures(1, &texture_id);
120 texture_id = 0;
121 }
122 }
123
124 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
125 base::trace_event::ProcessMemoryDump* pmd,
126 ResourceFormat format) const {
127 if (!gpu_memory_buffer)
128 return;
129
130 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
131 std::string buffer_dump_name =
132 base::StringPrintf("gpumemorybuffer/buffer_%d", buffer_id);
133 base::trace_event::MemoryAllocatorDump* buffer_dump =
134 pmd->CreateAllocatorDump(buffer_dump_name);
135
136 size_t buffer_size_in_bytes =
137 Resource::UncheckedMemorySizeBytes(size, format);
138 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
139 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
140 static_cast<uint64_t>(buffer_size_in_bytes));
141
142 // Emit an ownership edge towards a global allocator dump node.
143 const uint64 tracing_process_id =
144 base::trace_event::MemoryDumpManager::GetInstance()->tracing_process_id();
145 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
146 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
147 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
148
149 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
150 // the tracing UI will account the effective size of the buffer to the child.
151 const int kImportance = 2;
152 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
128 } 153 }
129 154
130 // static 155 // static
131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( 156 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
132 base::SequencedTaskRunner* task_runner, 157 base::SequencedTaskRunner* task_runner,
133 TaskGraphRunner* task_graph_runner, 158 TaskGraphRunner* task_graph_runner,
134 ContextProvider* context_provider, 159 ContextProvider* context_provider,
135 ResourceProvider* resource_provider, 160 ResourceProvider* resource_provider,
136 ResourcePool* resource_pool,
137 int max_copy_texture_chromium_size, 161 int max_copy_texture_chromium_size,
138 bool have_persistent_gpu_memory_buffers) { 162 bool use_persistent_gpu_memory_buffers,
163 unsigned image_target,
164 int max_staging_buffers) {
139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( 165 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
140 task_runner, task_graph_runner, context_provider, resource_provider, 166 task_runner, task_graph_runner, resource_provider,
141 resource_pool, max_copy_texture_chromium_size, 167 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
142 have_persistent_gpu_memory_buffers)); 168 image_target, max_staging_buffers));
143 } 169 }
144 170
145 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( 171 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
146 base::SequencedTaskRunner* task_runner, 172 base::SequencedTaskRunner* task_runner,
147 TaskGraphRunner* task_graph_runner, 173 TaskGraphRunner* task_graph_runner,
148 ContextProvider* context_provider,
149 ResourceProvider* resource_provider, 174 ResourceProvider* resource_provider,
150 ResourcePool* resource_pool,
151 int max_copy_texture_chromium_size, 175 int max_copy_texture_chromium_size,
152 bool have_persistent_gpu_memory_buffers) 176 bool use_persistent_gpu_memory_buffers,
177 unsigned image_target,
178 int max_staging_buffers)
153 : task_runner_(task_runner), 179 : task_runner_(task_runner),
154 task_graph_runner_(task_graph_runner), 180 task_graph_runner_(task_graph_runner),
155 namespace_token_(task_graph_runner->GetNamespaceToken()), 181 namespace_token_(task_graph_runner->GetNamespaceToken()),
156 context_provider_(context_provider),
157 resource_provider_(resource_provider), 182 resource_provider_(resource_provider),
158 resource_pool_(resource_pool),
159 max_bytes_per_copy_operation_( 183 max_bytes_per_copy_operation_(
160 max_copy_texture_chromium_size 184 max_copy_texture_chromium_size
161 ? std::min(kMaxBytesPerCopyOperation, 185 ? std::min(kMaxBytesPerCopyOperation,
162 max_copy_texture_chromium_size) 186 max_copy_texture_chromium_size)
163 : kMaxBytesPerCopyOperation), 187 : kMaxBytesPerCopyOperation),
164 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), 188 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
165 last_issued_copy_operation_(0), 189 image_target_(image_target),
166 last_flushed_copy_operation_(0),
167 lock_(),
168 copy_operation_count_cv_(&lock_),
169 bytes_scheduled_since_last_flush_(0), 190 bytes_scheduled_since_last_flush_(0),
170 issued_copy_operation_count_(0), 191 max_staging_buffers_(max_staging_buffers),
171 next_copy_operation_sequence_(1), 192 staging_buffer_expiration_delay_(
172 check_for_completed_copy_operations_pending_(false), 193 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
173 shutdown_(false), 194 reduce_memory_usage_pending_(false),
174 weak_ptr_factory_(this), 195 weak_ptr_factory_(this),
175 task_set_finished_weak_ptr_factory_(this) { 196 task_set_finished_weak_ptr_factory_(this) {
176 DCHECK(context_provider_); 197 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
198 this, base::ThreadTaskRunnerHandle::Get());
199 reduce_memory_usage_callback_ =
200 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
201 weak_ptr_factory_.GetWeakPtr());
177 } 202 }
178 203
179 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 204 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
180 DCHECK_EQ(pending_copy_operations_.size(), 0u); 205 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
206 this);
181 } 207 }
182 208
183 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 209 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
184 return this; 210 return this;
185 } 211 }
186 212
187 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { 213 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
188 client_ = client; 214 client_ = client;
189 } 215 }
190 216
191 void OneCopyTileTaskWorkerPool::Shutdown() { 217 void OneCopyTileTaskWorkerPool::Shutdown() {
192 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 218 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
193 219
194 {
195 base::AutoLock lock(lock_);
196
197 shutdown_ = true;
198 copy_operation_count_cv_.Signal();
199 }
200
201 TaskGraph empty; 220 TaskGraph empty;
202 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 221 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
203 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 222 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
223
224 base::AutoLock lock(lock_);
225
226 if (buffers_.empty())
227 return;
228
229 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
204 } 230 }
205 231
206 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { 232 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
207 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 233 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
208 234
209 #if DCHECK_IS_ON()
210 {
211 base::AutoLock lock(lock_);
212 DCHECK(!shutdown_);
213 }
214 #endif
215
216 if (tasks_pending_.none()) 235 if (tasks_pending_.none())
217 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); 236 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
218 237
219 // Mark all task sets as pending. 238 // Mark all task sets as pending.
220 tasks_pending_.set(); 239 tasks_pending_.set();
221 240
222 size_t priority = kTileTaskPriorityBase; 241 size_t priority = kTileTaskPriorityBase;
223 242
224 graph_.Reset(); 243 graph_.Reset();
225 244
226 // Cancel existing OnTaskSetFinished callbacks. 245 // Cancel existing OnTaskSetFinished callbacks.
227 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); 246 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
228 247
229 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; 248 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];
230 249
231 size_t task_count[kNumberOfTaskSets] = {0}; 250 size_t task_count[kNumberOfTaskSets] = {0};
232 251
233 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 252 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
234 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( 253 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
235 task_runner_.get(), 254 task_runner_.get(),
236 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, 255 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
237 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); 256 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
238 } 257 }
239 258
240 resource_pool_->CheckBusyResources(false);
241
242 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); 259 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
243 it != queue->items.end(); ++it) { 260 it != queue->items.end(); ++it) {
244 const TileTaskQueue::Item& item = *it; 261 const TileTaskQueue::Item& item = *it;
245 RasterTask* task = item.task; 262 RasterTask* task = item.task;
246 DCHECK(!task->HasCompleted()); 263 DCHECK(!task->HasCompleted());
247 264
248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 265 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
249 if (!item.task_sets[task_set]) 266 if (!item.task_sets[task_set])
250 continue; 267 continue;
251 268
252 ++task_count[task_set]; 269 ++task_count[task_set];
253 270
254 graph_.edges.push_back( 271 graph_.edges.push_back(
255 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); 272 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
256 } 273 }
257 274
258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); 275 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
259 } 276 }
260 277
261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 278 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), 279 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
263 kTaskSetFinishedTaskPriorityBase + task_set, 280 kTaskSetFinishedTaskPriorityBase + task_set,
264 task_count[task_set]); 281 task_count[task_set]);
265 } 282 }
266 283
267 ScheduleTasksOnOriginThread(this, &graph_); 284 ScheduleTasksOnOriginThread(this, &graph_);
285
286 // Barrier to sync any new resources to the worker context.
287 resource_provider_->output_surface()
288 ->context_provider()
289 ->ContextGL()
290 ->OrderingBarrierCHROMIUM();
291
268 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); 292 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
269 293
270 std::copy(new_task_set_finished_tasks, 294 std::copy(new_task_set_finished_tasks,
271 new_task_set_finished_tasks + kNumberOfTaskSets, 295 new_task_set_finished_tasks + kNumberOfTaskSets,
272 task_set_finished_tasks_); 296 task_set_finished_tasks_);
273 297
274 resource_pool_->ReduceResourceUsage();
275
276 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", 298 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
277 StateAsValue()); 299 StateAsValue());
278 } 300 }
279 301
280 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { 302 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
281 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); 303 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");
282 304
283 task_graph_runner_->CollectCompletedTasks(namespace_token_, 305 task_graph_runner_->CollectCompletedTasks(namespace_token_,
284 &completed_tasks_); 306 &completed_tasks_);
285 307
286 for (Task::Vector::const_iterator it = completed_tasks_.begin(); 308 for (Task::Vector::const_iterator it = completed_tasks_.begin();
287 it != completed_tasks_.end(); ++it) { 309 it != completed_tasks_.end(); ++it) {
288 TileTask* task = static_cast<TileTask*>(it->get()); 310 TileTask* task = static_cast<TileTask*>(it->get());
289 311
290 task->WillComplete(); 312 task->WillComplete();
291 task->CompleteOnOriginThread(this); 313 task->CompleteOnOriginThread(this);
292 task->DidComplete(); 314 task->DidComplete();
293 315
294 task->RunReplyOnOriginThread(); 316 task->RunReplyOnOriginThread();
295 } 317 }
296 completed_tasks_.clear(); 318 completed_tasks_.clear();
297 } 319 }
298 320
299 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const { 321 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const {
300 return resource_provider_->best_texture_format(); 322 return resource_provider_->memory_efficient_texture_format();
301 } 323 }
302 324
303 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const { 325 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
304 return !PlatformColor::SameComponentOrder(GetResourceFormat()); 326 return !PlatformColor::SameComponentOrder(GetResourceFormat());
305 } 327 }
306 328
307 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( 329 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
308 const Resource* resource, 330 const Resource* resource,
309 uint64_t resource_content_id, 331 uint64_t resource_content_id,
310 uint64_t previous_content_id) { 332 uint64_t previous_content_id) {
311 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload 333 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
312 // the dirty rect. 334 // the dirty rect.
313 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); 335 DCHECK_EQ(resource->format(),
314 return make_scoped_ptr<RasterBuffer>( 336 resource_provider_->memory_efficient_texture_format());
315 new RasterBufferImpl(this, resource_provider_, resource_pool_, 337 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
316 resource_provider_->best_texture_format(), resource, 338 this, resource_provider_,
317 previous_content_id)); 339 resource_provider_->memory_efficient_texture_format(), resource,
340 previous_content_id));
318 } 341 }
319 342
320 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( 343 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
321 scoped_ptr<RasterBuffer> buffer) { 344 scoped_ptr<RasterBuffer> buffer) {
322 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. 345 // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
323 } 346 }
324 347
325 CopySequenceNumber 348 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
326 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( 349 const Resource* resource,
327 bool reusing_raster_resource, 350 const ResourceProvider::ScopedWriteLockGL* resource_lock,
328 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
329 raster_resource_write_lock,
330 const Resource* raster_resource,
331 const Resource* output_resource,
332 const RasterSource* raster_source, 351 const RasterSource* raster_source,
333 const gfx::Rect& raster_full_rect, 352 const gfx::Rect& raster_full_rect,
334 const gfx::Rect& raster_dirty_rect, 353 const gfx::Rect& raster_dirty_rect,
335 float scale) { 354 float scale,
336 gfx::GpuMemoryBuffer* gpu_memory_buffer = 355 uint64_t previous_content_id,
337 raster_resource_write_lock->GetGpuMemoryBuffer(); 356 uint64_t new_content_id) {
338 if (gpu_memory_buffer) { 357 base::AutoLock lock(lock_);
339 void* data = NULL; 358
340 bool rv = gpu_memory_buffer->Map(&data); 359 scoped_ptr<StagingBuffer> staging_buffer =
341 DCHECK(rv); 360 AcquireStagingBuffer(resource, previous_content_id);
342 int stride; 361 DCHECK(staging_buffer);
343 gpu_memory_buffer->GetStride(&stride); 362
344 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. 363 {
345 DCHECK_GE(stride, 0); 364 base::AutoUnlock unlock(lock_);
365
366 // Allocate GpuMemoryBuffer if necessary.
367 if (!staging_buffer->gpu_memory_buffer) {
368 staging_buffer->gpu_memory_buffer =
369 resource_provider_->gpu_memory_buffer_manager()
370 ->AllocateGpuMemoryBuffer(
371 staging_buffer->size,
372 ToGpuMemoryBufferFormat(
373 resource_provider_->memory_efficient_texture_format()),
374 use_persistent_gpu_memory_buffers_
375 ? gfx::GpuMemoryBuffer::PERSISTENT_MAP
376 : gfx::GpuMemoryBuffer::MAP);
377 }
346 378
347 gfx::Rect playback_rect = raster_full_rect; 379 gfx::Rect playback_rect = raster_full_rect;
348 if (reusing_raster_resource) { 380 if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
349 playback_rect.Intersect(raster_dirty_rect); 381 // Reduce playback rect to dirty region if the content id of the staging
350 } 382 // buffer matches the previous content id.
351 DCHECK(!playback_rect.IsEmpty()) 383 if (previous_content_id == staging_buffer->content_id)
352 << "Why are we rastering a tile that's not dirty?"; 384 playback_rect.Intersect(raster_dirty_rect);
353 TileTaskWorkerPool::PlaybackToMemory( 385 }
354 data, raster_resource->format(), raster_resource->size(), 386
355 static_cast<size_t>(stride), raster_source, raster_full_rect, 387 if (staging_buffer->gpu_memory_buffer) {
356 playback_rect, scale); 388 void* data = nullptr;
357 gpu_memory_buffer->Unmap(); 389 bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
358 } 390 DCHECK(rv);
359 391 int stride;
392 staging_buffer->gpu_memory_buffer->GetStride(&stride);
393 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
394 DCHECK_GE(stride, 0);
395
396 DCHECK(!playback_rect.IsEmpty())
397 << "Why are we rastering a tile that's not dirty?";
398 TileTaskWorkerPool::PlaybackToMemory(
399 data, resource_provider_->memory_efficient_texture_format(),
400 staging_buffer->size, static_cast<size_t>(stride), raster_source,
401 raster_full_rect, playback_rect, scale);
402 staging_buffer->gpu_memory_buffer->Unmap();
403 staging_buffer->content_id = new_content_id;
404 }
405 }
406
407 ContextProvider* context_provider =
408 resource_provider_->output_surface()->worker_context_provider();
409 DCHECK(context_provider);
410
411 {
412 ContextProvider::ScopedContextGL scoped_context(context_provider);
413
414 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
415 DCHECK(gl);
416
417 if (!staging_buffer->texture_id) {
418 gl->GenTextures(1, &staging_buffer->texture_id);
419 gl->BindTexture(image_target_, staging_buffer->texture_id);
420 gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
421 gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
422 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
423 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
424 if (staging_buffer->gpu_memory_buffer) {
425 DCHECK(!staging_buffer->image_id);
426 staging_buffer->image_id = gl->CreateImageCHROMIUM(
427 staging_buffer->gpu_memory_buffer->AsClientBuffer(),
428 staging_buffer->size.width(), staging_buffer->size.height(),
429 GLInternalFormat(
430 resource_provider_->memory_efficient_texture_format()));
431 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
432 } else {
433 gl->BindTexture(image_target_, staging_buffer->texture_id);
434 gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
435 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
436 }
437 }
438
439 if (resource_provider_->use_sync_query()) {
440 if (!staging_buffer->query_id)
441 gl->GenQueriesEXT(1, &staging_buffer->query_id);
442
443 #if defined(OS_CHROMEOS)
444 // TODO(reveman): This avoids a performance problem on some ChromeOS
445 // devices. This needs to be removed to support native GpuMemoryBuffer
446 // implementations. crbug.com/436314
447 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
448 #else
449 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
450 staging_buffer->query_id);
451 #endif
452 }
453
454 int bytes_per_row =
455 (BitsPerPixel(resource_provider_->memory_efficient_texture_format()) *
456 resource->size().width()) /
457 8;
458 int chunk_size_in_rows =
459 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
460 // Align chunk size to 4. Required to support compressed texture formats.
461 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
462 int y = 0;
463 int height = resource->size().height();
464 while (y < height) {
465 // Copy at most |chunk_size_in_rows|.
466 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
467 DCHECK_GT(rows_to_copy, 0);
468
469 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
470 resource_lock->texture_id(), 0, y, 0, y,
471 resource->size().width(), rows_to_copy, false,
472 false, false);
473 y += rows_to_copy;
474
475 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
476 // used for this copy operation.
477 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
478
479 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
480 gl->ShallowFlushCHROMIUM();
481 bytes_scheduled_since_last_flush_ = 0;
482 }
483 }
484
485 if (resource_provider_->use_sync_query()) {
486 #if defined(OS_CHROMEOS)
487 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
488 #else
489 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
490 #endif
491 }
492
493 // Barrier to sync worker context output to cc context.
494 gl->OrderingBarrierCHROMIUM();
495 }
496
497 staging_buffer->last_usage = base::TimeTicks::Now();
498 busy_buffers_.push_back(staging_buffer.Pass());
499
500 ScheduleReduceMemoryUsage();
501 }
502
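The copy loop above splits each staging-to-output copy into row chunks so that no single CopySubTextureCHROMIUM call exceeds max_bytes_per_copy_operation_, rounding the chunk height up to 4 rows for compressed-format compatibility and flushing once enough bytes have been scheduled. A standalone sketch of just the chunking arithmetic (RoundUp stands in for cc::MathUtil::RoundUp; the sizes are made-up examples):

#include <algorithm>
#include <cstdio>

int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

int main() {
  const int max_bytes_per_copy = 4 * 1024 * 1024;  // kMaxBytesPerCopyOperation.
  const int bits_per_pixel = 32;                   // RGBA_8888 assumption.
  const int width = 4096, height = 1024;           // Made-up resource size.

  const int bytes_per_row = (bits_per_pixel * width) / 8;            // 16384.
  int chunk_rows = std::max(1, max_bytes_per_copy / bytes_per_row);  // 256.
  chunk_rows = RoundUp(chunk_rows, 4);  // 4-row alignment for compressed formats.

  for (int y = 0; y < height;) {
    const int rows_to_copy = std::min(chunk_rows, height - y);
    std::printf("copy rows [%d, %d)\n", y, y + rows_to_copy);  // 4 chunks here.
    y += rows_to_copy;
  }
  return 0;
}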
503 bool OneCopyTileTaskWorkerPool::OnMemoryDump(
504 base::trace_event::ProcessMemoryDump* pmd) {
360 base::AutoLock lock(lock_); 505 base::AutoLock lock(lock_);
361 506
362 CopySequenceNumber sequence = 0; 507 ResourceFormat format = resource_provider_->memory_efficient_texture_format();
363 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * 508 std::for_each(buffers_.begin(), buffers_.end(),
364 raster_resource->size().width()) / 509 [pmd, format](const StagingBuffer* buffer) {
365 8; 510 buffer->OnMemoryDump(pmd, format);
366 int chunk_size_in_rows = 511 });
367 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); 512
368 // Align chunk size to 4. Required to support compressed texture formats. 513 return true;
369 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); 514 }
370 int y = 0; 515
371 int height = raster_resource->size().height(); 516 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
372 while (y < height) { 517 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
373 int failed_attempts = 0; 518 uint64_t previous_content_id) {
374 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= 519 lock_.AssertAcquired();
375 kMaxCopyOperations) { 520
376 // Ignore limit when shutdown is set. 521 scoped_ptr<StagingBuffer> staging_buffer;
377 if (shutdown_) 522
523 ContextProvider* context_provider =
524 resource_provider_->output_surface()->worker_context_provider();
525 DCHECK(context_provider);
526
527 {
vmpstr 2015/07/23 17:51:07 nit: You don't really need to scope it, since the
reveman 2015/07/23 18:40:40 Done.
528 ContextProvider::ScopedContextGL scoped_context(context_provider);
529
530 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
531 DCHECK(gl);
532
533 // Check if any busy buffers have become available.
534 if (resource_provider_->use_sync_query()) {
535 while (!busy_buffers_.empty()) {
536 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
537 break;
538
539 free_buffers_.push_back(busy_buffers_.take_front());
540 }
541 }
542
543 // Wait for number of non-free buffers to become less than the limit.
544 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
545 // Stop when there are no more busy buffers to wait for.
546 if (busy_buffers_.empty())
378 break; 547 break;
379 548
380 ++failed_attempts; 549 if (resource_provider_->use_sync_query()) {
381 550 WaitForQueryResult(gl, busy_buffers_.front()->query_id);
382 // Schedule a check that will also wait for operations to complete 551 free_buffers_.push_back(busy_buffers_.take_front());
383 // after too many failed attempts. 552 } else {
384 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; 553 // Fall-back to glFinish if CHROMIUM_sync_query is not available.
385 554 gl->Finish();
386 // Schedule a check for completed copy operations if too many operations 555 while (!busy_buffers_.empty())
387 // are currently in-flight. 556 free_buffers_.push_back(busy_buffers_.take_front());
388 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
389
390 {
391 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
392
393 // Wait for in-flight copy operations to drop below limit.
394 copy_operation_count_cv_.Wait();
395 } 557 }
396 } 558 }
397 559
398 // There may be more work available, so wake up another worker thread. 560 // Find a staging buffer that allows us to perform partial raster when
399 copy_operation_count_cv_.Signal(); 561 // using persistent GpuMemoryBuffers.
400 562 if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
401 // Copy at most |chunk_size_in_rows|. 563 StagingBufferDeque::iterator it =
402 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 564 std::find_if(free_buffers_.begin(), free_buffers_.end(),
403 DCHECK_GT(rows_to_copy, 0); 565 [previous_content_id](const StagingBuffer* buffer) {
404 566 return buffer->content_id == previous_content_id;
405 // |raster_resource_write_lock| is passed to the first copy operation as it 567 });
406 // needs to be released before we can issue a copy. 568 if (it != free_buffers_.end())
407 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( 569 staging_buffer = free_buffers_.take(it);
408 raster_resource_write_lock.Pass(), raster_resource, output_resource, 570 }
409 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); 571
410 y += rows_to_copy; 572 // Find staging buffer of correct size.
411 573 if (!staging_buffer) {
412 // Acquire a sequence number for this copy operation. 574 StagingBufferDeque::iterator it =
413 sequence = next_copy_operation_sequence_++; 575 std::find_if(free_buffers_.begin(), free_buffers_.end(),
414 576 [resource](const StagingBuffer* buffer) {
415 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory 577 return buffer->size == resource->size();
416 // used for this copy operation. 578 });
417 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; 579 if (it != free_buffers_.end())
418 580 staging_buffer = free_buffers_.take(it);
419 // Post task that will advance last flushed copy operation to |sequence| 581 }
420 // when |bytes_scheduled_since_last_flush_| has reached 582
421 // |max_bytes_per_copy_operation_|. 583 // Create new staging buffer if necessary.
422 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { 584 if (!staging_buffer) {
423 task_runner_->PostTask( 585 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
424 FROM_HERE, 586 buffers_.insert(staging_buffer.get());
425 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, 587 }
426 weak_ptr_factory_.GetWeakPtr(), sequence)); 588
427 bytes_scheduled_since_last_flush_ = 0; 589 // Release enough free buffers to stay within the limit.
428 } 590 while (buffers_.size() > max_staging_buffers_) {
429 } 591 if (free_buffers_.empty())
430 592 break;
431 return sequence; 593
432 } 594 free_buffers_.front()->DestroyGLResources(gl);
433 595 buffers_.erase(free_buffers_.front());
434 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( 596 free_buffers_.take_front();
435 CopySequenceNumber sequence) { 597 }
436 if (last_issued_copy_operation_ >= sequence) 598 }
599
600 return staging_buffer.Pass();
601 }
602
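AcquireStagingBuffer above tries three sources in order: a free buffer whose content_id matches previous_content_id (so only the dirty rect needs re-rastering), then any free buffer of the right size, then a fresh allocation. A minimal sketch of that lookup order using a plain std::deque; Buffer and Size are simplified stand-ins for StagingBuffer and gfx::Size:

#include <algorithm>
#include <cstdint>
#include <deque>
#include <memory>
#include <utility>

struct Size {
  int width = 0;
  int height = 0;
};
bool operator==(const Size& a, const Size& b) {
  return a.width == b.width && a.height == b.height;
}

struct Buffer {
  Size size;
  uint64_t content_id = 0;  // 0 means "holds no reusable content".
};

using BufferDeque = std::deque<std::unique_ptr<Buffer>>;

std::unique_ptr<Buffer> TakeAt(BufferDeque& buffers, BufferDeque::iterator it) {
  std::unique_ptr<Buffer> buffer = std::move(*it);
  buffers.erase(it);
  return buffer;
}

std::unique_ptr<Buffer> Acquire(BufferDeque& free_buffers,
                                const Size& wanted_size,
                                uint64_t previous_content_id) {
  // 1) Prefer a free buffer that still holds the previous content, which
  //    lets the caller raster only the dirty rect (partial raster).
  if (previous_content_id) {
    auto it = std::find_if(free_buffers.begin(), free_buffers.end(),
                           [&](const std::unique_ptr<Buffer>& b) {
                             return b->content_id == previous_content_id;
                           });
    if (it != free_buffers.end())
      return TakeAt(free_buffers, it);
  }
  // 2) Otherwise reuse any free buffer of the right size.
  auto it = std::find_if(free_buffers.begin(), free_buffers.end(),
                         [&](const std::unique_ptr<Buffer>& b) {
                           return b->size == wanted_size;
                         });
  if (it != free_buffers.end())
    return TakeAt(free_buffers, it);
  // 3) Fall back to a fresh allocation.
  auto buffer = std::make_unique<Buffer>();
  buffer->size = wanted_size;
  return buffer;
}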
603 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
604 lock_.AssertAcquired();
605
606 if (!free_buffers_.empty())
607 return free_buffers_.front()->last_usage;
608
609 if (!busy_buffers_.empty())
610 return busy_buffers_.front()->last_usage;
611
612 return base::TimeTicks();
613 }
614
615 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
616 lock_.AssertAcquired();
617
618 if (reduce_memory_usage_pending_)
437 return; 619 return;
438 620
439 IssueCopyOperations(sequence - last_issued_copy_operation_); 621 reduce_memory_usage_pending_ = true;
440 last_issued_copy_operation_ = sequence; 622
441 } 623 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
442 624 // should be released.
443 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( 625 base::TimeTicks reduce_memory_usage_time =
444 CopySequenceNumber sequence) { 626 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
445 if (last_flushed_copy_operation_ >= sequence) 627 task_runner_->PostDelayedTask(
628 FROM_HERE, reduce_memory_usage_callback_,
629 reduce_memory_usage_time - base::TimeTicks::Now());
630 }
631
632 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
633 base::AutoLock lock(lock_);
634
635 reduce_memory_usage_pending_ = false;
636
637 if (free_buffers_.empty() && busy_buffers_.empty())
446 return; 638 return;
447 639
448 AdvanceLastIssuedCopyTo(sequence); 640 base::TimeTicks current_time = base::TimeTicks::Now();
449 641 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
450 // Flush all issued copy operations. 642
451 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); 643 if (free_buffers_.empty() && busy_buffers_.empty())
452 last_flushed_copy_operation_ = last_issued_copy_operation_; 644 return;
645
646 reduce_memory_usage_pending_ = true;
647
648 // Schedule another call to ReduceMemoryUsage at the time when the next
649 // buffer should be released.
650 base::TimeTicks reduce_memory_usage_time =
651 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
652 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
653 reduce_memory_usage_time - current_time);
654 }
655
656 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
657 base::TimeTicks time) {
658 lock_.AssertAcquired();
659
660 ContextProvider* context_provider =
661 resource_provider_->output_surface()->worker_context_provider();
662 DCHECK(context_provider);
663
664 {
665 ContextProvider::ScopedContextGL scoped_context(context_provider);
666
667 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
668 DCHECK(gl);
669
670 // Note: Front buffer is guaranteed to be LRU so we can stop releasing
671 // buffers as soon as we find a buffer that has been used since |time|.
672 while (!free_buffers_.empty()) {
673 if (free_buffers_.front()->last_usage > time)
674 return;
675
676 free_buffers_.front()->DestroyGLResources(gl);
677 buffers_.erase(free_buffers_.front());
678 free_buffers_.take_front();
679 }
680
681 while (!busy_buffers_.empty()) {
682 if (busy_buffers_.front()->last_usage > time)
683 return;
684
685 busy_buffers_.front()->DestroyGLResources(gl);
686 buffers_.erase(busy_buffers_.front());
687 busy_buffers_.take_front();
688 }
689 }
453 } 690 }
454 691
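ReleaseBuffersNotUsedSince and ReduceMemoryUsage above rely on the deques being in least-recently-used order: the front entry is always the oldest, so eviction can stop at the first buffer used after the cutoff, and the next cleanup can be scheduled for exactly when the current LRU buffer expires. A sketch of both ideas, with std::chrono standing in for base::TimeTicks:

#include <chrono>
#include <deque>

using Clock = std::chrono::steady_clock;

struct Buffer {
  Clock::time_point last_usage;
};

void ReleaseNotUsedSince(std::deque<Buffer>& lru_buffers,
                         Clock::time_point cutoff) {
  while (!lru_buffers.empty()) {
    if (lru_buffers.front().last_usage > cutoff)
      return;  // Front is LRU; every entry behind it is newer still.
    // The real code destroys the buffer's GL resources here before
    // dropping the entry.
    lru_buffers.pop_front();
  }
}

// The next cleanup pass is scheduled for when the current LRU buffer
// would expire: last_usage + expiration_delay.
Clock::time_point NextCleanupTime(const std::deque<Buffer>& lru_buffers,
                                  Clock::duration expiration_delay) {
  return lru_buffers.empty()
             ? Clock::time_point()
             : lru_buffers.front().last_usage + expiration_delay;
}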
455 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { 692 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
456 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", 693 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
457 task_set); 694 task_set);
458 695
459 DCHECK(tasks_pending_[task_set]); 696 DCHECK(tasks_pending_[task_set]);
460 tasks_pending_[task_set] = false; 697 tasks_pending_[task_set] = false;
461 if (tasks_pending_.any()) { 698 if (tasks_pending_.any()) {
462 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", 699 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
463 "state", StateAsValue()); 700 "state", StateAsValue());
464 } else { 701 } else {
465 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); 702 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
466 } 703 }
467 client_->DidFinishRunningTileTasks(task_set); 704 client_->DidFinishRunningTileTasks(task_set);
468 } 705 }
469 706
470 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
471 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
472 count);
473
474 CopyOperation::Deque copy_operations;
475
476 {
477 base::AutoLock lock(lock_);
478
479 for (int64 i = 0; i < count; ++i) {
480 DCHECK(!pending_copy_operations_.empty());
481 copy_operations.push_back(pending_copy_operations_.take_front());
482 }
483
484 // Increment |issued_copy_operation_count_| to reflect the transition of
485 // copy operations from "pending" to "issued" state.
486 issued_copy_operation_count_ += copy_operations.size();
487 }
488
489 while (!copy_operations.empty()) {
490 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
491
492 // Remove the write lock.
493 copy_operation->src_write_lock.reset();
494
495 // Copy contents of source resource to destination resource.
496 resource_provider_->CopyResource(copy_operation->src->id(),
497 copy_operation->dst->id(),
498 copy_operation->rect);
499 }
500 }
501
502 void OneCopyTileTaskWorkerPool::
503 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
504 bool wait_if_needed) {
505 lock_.AssertAcquired();
506
507 if (check_for_completed_copy_operations_pending_)
508 return;
509
510 base::TimeTicks now = base::TimeTicks::Now();
511
512 // Schedule a check for completed copy operations as soon as possible but
513 // don't allow two consecutive checks to be scheduled to run less than the
514 // tick rate apart.
515 base::TimeTicks next_check_for_completed_copy_operations_time =
516 std::max(last_check_for_completed_copy_operations_time_ +
517 base::TimeDelta::FromMilliseconds(
518 kCheckForCompletedCopyOperationsTickRateMs),
519 now);
520
521 task_runner_->PostDelayedTask(
522 FROM_HERE,
523 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
524 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
525 next_check_for_completed_copy_operations_time - now);
526
527 last_check_for_completed_copy_operations_time_ =
528 next_check_for_completed_copy_operations_time;
529 check_for_completed_copy_operations_pending_ = true;
530 }
531
532 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
533 bool wait_if_needed) {
534 TRACE_EVENT1("cc",
535 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
536 "wait_if_needed", wait_if_needed);
537
538 resource_pool_->CheckBusyResources(wait_if_needed);
539
540 {
541 base::AutoLock lock(lock_);
542
543 DCHECK(check_for_completed_copy_operations_pending_);
544 check_for_completed_copy_operations_pending_ = false;
545
546 // The number of busy resources in the pool reflects the number of issued
547 // copy operations that have not yet completed.
548 issued_copy_operation_count_ = resource_pool_->busy_resource_count();
549
550 // There may be work blocked on too many in-flight copy operations, so wake
551 // up a worker thread.
552 copy_operation_count_cv_.Signal();
553 }
554 }
555
556 scoped_refptr<base::trace_event::ConvertableToTraceFormat> 707 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
557 OneCopyTileTaskWorkerPool::StateAsValue() const { 708 OneCopyTileTaskWorkerPool::StateAsValue() const {
558 scoped_refptr<base::trace_event::TracedValue> state = 709 scoped_refptr<base::trace_event::TracedValue> state =
559 new base::trace_event::TracedValue(); 710 new base::trace_event::TracedValue();
560 711
561 state->BeginArray("tasks_pending"); 712 state->BeginArray("tasks_pending");
562 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) 713 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
563 state->AppendBoolean(tasks_pending_[task_set]); 714 state->AppendBoolean(tasks_pending_[task_set]);
564 state->EndArray(); 715 state->EndArray();
565 state->BeginDictionary("staging_state"); 716 state->BeginDictionary("staging_state");
566 StagingStateAsValueInto(state.get()); 717 StagingStateAsValueInto(state.get());
567 state->EndDictionary(); 718 state->EndDictionary();
568 719
569 return state; 720 return state;
570 } 721 }
571 722
572 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( 723 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
573 base::trace_event::TracedValue* staging_state) const { 724 base::trace_event::TracedValue* staging_state) const {
574 staging_state->SetInteger( 725 base::AutoLock lock(lock_);
575 "staging_resource_count", 726
576 static_cast<int>(resource_pool_->total_resource_count())); 727 staging_state->SetInteger("staging_buffer_count",
577 staging_state->SetInteger( 728 static_cast<int>(buffers_.size()));
578 "bytes_used_for_staging_resources", 729 staging_state->SetInteger("busy_count",
579 static_cast<int>(resource_pool_->total_memory_usage_bytes())); 730 static_cast<int>(busy_buffers_.size()));
580 staging_state->SetInteger( 731 staging_state->SetInteger("free_count",
581 "pending_copy_count", 732 static_cast<int>(free_buffers_.size()));
582 static_cast<int>(resource_pool_->total_resource_count() -
583 resource_pool_->acquired_resource_count()));
584 staging_state->SetInteger(
585 "bytes_pending_copy",
586 static_cast<int>(resource_pool_->total_memory_usage_bytes() -
587 resource_pool_->acquired_memory_usage_bytes()));
588 } 733 }
589 734
590 } // namespace cc 735 } // namespace cc