Chromium Code Reviews

Side by Side Diff: cc/resources/one_copy_tile_task_worker_pool.cc

Issue 1144693002: cc: Move files out of cc/resources/. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: resources: android (created 5 years, 7 months ago)
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/one_copy_tile_task_worker_pool.h"

#include <algorithm>
#include <limits>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {

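// One-copy scheme: raster work is played back into a staging resource whose
// backing is a GpuMemoryBuffer that worker threads can map and write directly;
// the result is then copied into the destination tile texture on the
// compositor context. The single extra copy is the price for rastering on
// worker threads without mapping the final texture.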
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(
            resource_pool->AcquireResource(resource->size(), resource_format)),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another RasterBuffer
    // instance.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.Pass(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checking for copy operations to complete.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

}  // namespace

OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst)
    : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      copy_operation_count_cv_(&lock_),
      scheduled_copy_operation_count_(0),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    copy_operation_count_cv_.Signal();
  }

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);

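  // Wire each raster task into the graph below: its dependencies run first,
  // and every task set the item belongs to gets an edge to that set's
  // "finished" task, so OnTaskSetFinished() fires once all members complete.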
  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() {
  return resource_provider_->best_texture_format();
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_,
                           resource_provider_->best_texture_format(),
                           resource));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  base::AutoLock lock(lock_);

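  // Throttle this worker until the number of in-flight copy operations drops
  // below kMaxCopyOperations. The condition variable is signaled whenever a
  // completed-copy check runs or another worker makes progress.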
  int failed_attempts = 0;
  while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
         kMaxCopyOperations) {
    // Ignore limit when shutdown is set.
    if (shutdown_)
      break;

    ++failed_attempts;

    // Schedule a check that will also wait for operations to complete
    // after too many failed attempts.
    bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

    // Schedule a check for completed copy operations if too many operations
    // are currently in-flight.
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

    {
      TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

      // Wait for in-flight copy operations to drop below limit.
      copy_operation_count_cv_.Wait();
    }
  }

  // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
  ++scheduled_copy_operation_count_;

  // There may be more work available, so wake up another worker thread.
  copy_operation_count_cv_.Signal();

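  // Raster playback runs with |lock_| temporarily released so other worker
  // threads can schedule their own copy operations in parallel; only the
  // counter bookkeeping needs the lock.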
  {
    base::AutoUnlock unlock(lock_);

    gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
    if (gpu_memory_buffer) {
      void* data = NULL;
      bool rv = gpu_memory_buffer->Map(&data);
      DCHECK(rv);
      int stride;
      gpu_memory_buffer->GetStride(&stride);
      TileTaskWorkerPool::PlaybackToMemory(data, src->format(), src->size(),
                                           stride, raster_source, rect, scale);
      gpu_memory_buffer->Unmap();
    }
  }

  pending_copy_operations_.push_back(
      make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));

  // Acquire a sequence number for this copy operation.
  CopySequenceNumber sequence = next_copy_operation_sequence_++;

  // Post task that will advance last flushed copy operation to |sequence|
  // if we have reached the flush period.
  if ((sequence % kCopyFlushPeriod) == 0) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                   weak_ptr_factory_.GetWeakPtr(), sequence));
  }

  return sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}

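// Runs on the origin thread, posted once every kCopyFlushPeriod sequence
// numbers, so that ShallowFlushCHROMIUM() is batched rather than issued after
// every individual copy.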
void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

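// Moves |count| pending operations to the issued state and performs the
// actual GL copies. Each write lock is released before CopyResource(), making
// the rastered staging contents available to the copy.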
void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Decrement |scheduled_copy_operation_count_| and increment
    // |issued_copy_operation_count_| to reflect the transition of copy
    // operations from "pending" to "issued" state.
    DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
    scheduled_copy_operation_count_ -= copy_operations.size();
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id());

    // Return source resource to pool where it can be reused once copy
    // operation has completed and resource is no longer busy.
    resource_pool_->ReleaseResource(copy_operation->src.Pass());
  }
}

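// Posts a delayed CheckForCompletedCopyOperations() task, rate-limited so
// that two consecutive checks run at least
// kCheckForCompletedCopyOperationsTickRateMs apart.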
void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}

void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc
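
The core of PlaybackAndScheduleCopyOnWorkerThread() is a classic bounded in-flight producer pattern. Below is a minimal, self-contained sketch of that pattern using std::mutex and std::condition_variable in place of Chromium's base::Lock and base::ConditionVariable; CopyThrottle and its method names are illustrative, not part of cc.

// Sketch only: bounded in-flight throttling, as in the worker pool above.
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class CopyThrottle {
 public:
  explicit CopyThrottle(size_t max_in_flight)
      : max_in_flight_(max_in_flight) {}

  // Blocks until an in-flight slot is free, then claims it
  // (cf. the copy_operation_count_cv_.Wait() loop above).
  void ScheduleOne() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return in_flight_ < max_in_flight_; });
    ++in_flight_;
  }

  // Releases a slot and wakes one blocked producer
  // (cf. copy_operation_count_cv_.Signal() after a completed-copy check).
  void CompleteOne() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      --in_flight_;
    }
    cv_.notify_one();
  }

 private:
  const size_t max_in_flight_;
  std::mutex mutex_;
  std::condition_variable cv_;
  size_t in_flight_ = 0;
};

int main() {
  CopyThrottle throttle(4);  // cf. kMaxCopyOperations = 32 above.
  std::vector<std::thread> workers;
  for (int i = 0; i < 8; ++i) {
    workers.emplace_back([&throttle, i] {
      throttle.ScheduleOne();  // blocks while 4 "copies" are in flight
      std::printf("worker %d: copy in flight\n", i);
      throttle.CompleteOne();  // frees a slot, wakes a blocked worker
    });
  }
  for (auto& t : workers)
    t.join();
  return 0;
}

One real-world wrinkle the sketch omits: the pool cannot observe copy completion directly, so CheckForCompletedCopyOperations() polls the resource pool's busy-resource count on a timer and signals the condition variable from there.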