Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(404)

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1157943004: cc: [WIP] Use worker context and OrderingBarrierCHROMIUM for one-copy. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: do copy in worker thread. Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 9
10 #include "base/strings/stringprintf.h" 10 #include "base/strings/stringprintf.h"
(...skipping 15 matching lines...) Expand all
26 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, 26 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
27 ResourceProvider* resource_provider, 27 ResourceProvider* resource_provider,
28 ResourcePool* resource_pool, 28 ResourcePool* resource_pool,
29 ResourceFormat resource_format, 29 ResourceFormat resource_format,
30 const Resource* output_resource, 30 const Resource* output_resource,
31 uint64_t previous_content_id) 31 uint64_t previous_content_id)
32 : worker_pool_(worker_pool), 32 : worker_pool_(worker_pool),
33 resource_provider_(resource_provider), 33 resource_provider_(resource_provider),
34 resource_pool_(resource_pool), 34 resource_pool_(resource_pool),
35 output_resource_(output_resource), 35 output_resource_(output_resource),
36 raster_content_id_(0), 36 raster_content_id_(0) {
37 sequence_(0) {
38 if (worker_pool->have_persistent_gpu_memory_buffers() && 37 if (worker_pool->have_persistent_gpu_memory_buffers() &&
39 previous_content_id) { 38 previous_content_id) {
40 raster_resource_ = 39 raster_resource_ =
41 resource_pool->TryAcquireResourceWithContentId(previous_content_id); 40 resource_pool->TryAcquireResourceWithContentId(previous_content_id);
42 } 41 }
43 if (raster_resource_) { 42 if (raster_resource_) {
44 raster_content_id_ = previous_content_id; 43 raster_content_id_ = previous_content_id;
45 DCHECK_EQ(resource_format, raster_resource_->format()); 44 DCHECK_EQ(resource_format, raster_resource_->format());
46 DCHECK_EQ(output_resource->size().ToString(), 45 DCHECK_EQ(output_resource->size().ToString(),
47 raster_resource_->size().ToString()); 46 raster_resource_->size().ToString());
48 } else { 47 } else {
49 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), 48 raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
50 resource_format); 49 resource_format);
51 } 50 }
52 51
53 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( 52 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
54 resource_provider_, raster_resource_->id())); 53 resource_provider_, raster_resource_->id()));
55 } 54 }
56 55
57 ~RasterBufferImpl() override { 56 ~RasterBufferImpl() override {
58 // Release write lock in case a copy was never scheduled. 57 // Release write lock in case a copy was never scheduled.
59 lock_.reset(); 58 lock_.reset();
60 59
61 // Make sure any scheduled copy operations are issued before we release the
62 // raster resource.
63 if (sequence_)
64 worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
65
66 // Return resources to pool so they can be used by another RasterBuffer 60 // Return resources to pool so they can be used by another RasterBuffer
67 // instance. 61 // instance.
68 resource_pool_->ReleaseResource(raster_resource_.Pass(), 62 resource_pool_->ReleaseResource(raster_resource_.Pass(),
69 raster_content_id_); 63 raster_content_id_);
70 } 64 }
71 65
72 // Overridden from RasterBuffer: 66 // Overridden from RasterBuffer:
73 void Playback(const RasterSource* raster_source, 67 void Playback(const RasterSource* raster_source,
74 const gfx::Rect& raster_full_rect, 68 const gfx::Rect& raster_full_rect,
75 const gfx::Rect& raster_dirty_rect, 69 const gfx::Rect& raster_dirty_rect,
76 uint64_t new_content_id, 70 uint64_t new_content_id,
77 float scale) override { 71 float scale) override {
78 // If there's a raster_content_id_, we are reusing a resource with that 72 // If there's a raster_content_id_, we are reusing a resource with that
79 // content id. 73 // content id.
80 bool reusing_raster_resource = raster_content_id_ != 0; 74 bool reusing_raster_resource = raster_content_id_ != 0;
81 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( 75 worker_pool_->PlaybackAndCopyOnWorkerThread(
82 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), 76 reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
83 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, 77 output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
84 scale); 78 scale);
85 // Store the content id of the resource to return to the pool. 79 // Store the content id of the resource to return to the pool.
86 raster_content_id_ = new_content_id; 80 raster_content_id_ = new_content_id;
87 } 81 }
88 82
89 private: 83 private:
90 OneCopyTileTaskWorkerPool* worker_pool_; 84 OneCopyTileTaskWorkerPool* worker_pool_;
91 ResourceProvider* resource_provider_; 85 ResourceProvider* resource_provider_;
92 ResourcePool* resource_pool_; 86 ResourcePool* resource_pool_;
93 const Resource* output_resource_; 87 const Resource* output_resource_;
94 uint64_t raster_content_id_; 88 uint64_t raster_content_id_;
95 scoped_ptr<ScopedResource> raster_resource_; 89 scoped_ptr<ScopedResource> raster_resource_;
96 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; 90 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
97 CopySequenceNumber sequence_;
98 91
99 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 92 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
100 }; 93 };
101 94
102 // Number of in-flight copy operations to allow.
103 const int kMaxCopyOperations = 32;
reveman 2015/06/09 13:30:55 we still need this
sohanjg 2015/06/09 15:59:23 i cant find it being used in this file, now that w
reveman 2015/06/09 18:00:30 We still want to limit the number of copy operatio
sohanjg 2015/06/11 13:32:25 Acknowledged.
104
 105 // Delay between checks for copy operations to complete. 
106 const int kCheckForCompletedCopyOperationsTickRateMs = 1;
107
108 // Number of failed attempts to allow before we perform a check that will
109 // wait for copy operations to complete if needed.
110 const int kFailedAttemptsBeforeWaitIfNeeded = 256;
111
112 } // namespace 95 } // namespace
113 96
114 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( 97 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
115 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, 98 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
116 const Resource* src, 99 const Resource* src,
117 const Resource* dst, 100 const Resource* dst,
118 const gfx::Rect& rect) 101 const gfx::Rect& rect)
119 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { 102 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
120 } 103 }
121 104
(...skipping 24 matching lines...) Expand all
146 int max_bytes_per_copy_operation, 129 int max_bytes_per_copy_operation,
147 bool have_persistent_gpu_memory_buffers) 130 bool have_persistent_gpu_memory_buffers)
148 : task_runner_(task_runner), 131 : task_runner_(task_runner),
149 task_graph_runner_(task_graph_runner), 132 task_graph_runner_(task_graph_runner),
150 namespace_token_(task_graph_runner->GetNamespaceToken()), 133 namespace_token_(task_graph_runner->GetNamespaceToken()),
151 context_provider_(context_provider), 134 context_provider_(context_provider),
152 resource_provider_(resource_provider), 135 resource_provider_(resource_provider),
153 resource_pool_(resource_pool), 136 resource_pool_(resource_pool),
154 max_bytes_per_copy_operation_(max_bytes_per_copy_operation), 137 max_bytes_per_copy_operation_(max_bytes_per_copy_operation),
155 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), 138 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
156 last_issued_copy_operation_(0),
157 last_flushed_copy_operation_(0),
158 lock_(), 139 lock_(),
159 copy_operation_count_cv_(&lock_),
160 bytes_scheduled_since_last_flush_(0),
161 issued_copy_operation_count_(0),
162 next_copy_operation_sequence_(1),
163 check_for_completed_copy_operations_pending_(false),
164 shutdown_(false), 140 shutdown_(false),
165 weak_ptr_factory_(this), 141 weak_ptr_factory_(this),
166 task_set_finished_weak_ptr_factory_(this) { 142 task_set_finished_weak_ptr_factory_(this) {
167 DCHECK(context_provider_); 143 DCHECK(context_provider_);
168 } 144 }
169 145
170 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 146 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
171 DCHECK_EQ(pending_copy_operations_.size(), 0u); 147 DCHECK_EQ(pending_copy_operations_.size(), 0u);
172 } 148 }
173 149
174 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 150 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
175 return this; 151 return this;
176 } 152 }
177 153
178 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { 154 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
179 client_ = client; 155 client_ = client;
180 } 156 }
181 157
182 void OneCopyTileTaskWorkerPool::Shutdown() { 158 void OneCopyTileTaskWorkerPool::Shutdown() {
183 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 159 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
184 160
185 { 161 {
186 base::AutoLock lock(lock_); 162 base::AutoLock lock(lock_);
187 163
188 shutdown_ = true; 164 shutdown_ = true;
189 copy_operation_count_cv_.Signal();
190 } 165 }
191 166
192 TaskGraph empty; 167 TaskGraph empty;
193 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 168 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
194 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 169 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
195 } 170 }
196 171
197 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { 172 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
198 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 173 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
199 174
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
302 new RasterBufferImpl(this, resource_provider_, resource_pool_, 277 new RasterBufferImpl(this, resource_provider_, resource_pool_,
303 resource_provider_->best_texture_format(), resource, 278 resource_provider_->best_texture_format(), resource,
304 previous_content_id)); 279 previous_content_id));
305 } 280 }
306 281
307 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( 282 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
308 scoped_ptr<RasterBuffer> buffer) { 283 scoped_ptr<RasterBuffer> buffer) {
309 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. 284 // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
310 } 285 }
311 286
312 CopySequenceNumber 287 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
313 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
314 bool reusing_raster_resource, 288 bool reusing_raster_resource,
315 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> 289 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
316 raster_resource_write_lock, 290 raster_resource_write_lock,
317 const Resource* raster_resource, 291 const Resource* raster_resource,
318 const Resource* output_resource, 292 const Resource* output_resource,
319 const RasterSource* raster_source, 293 const RasterSource* raster_source,
320 const gfx::Rect& raster_full_rect, 294 const gfx::Rect& raster_full_rect,
321 const gfx::Rect& raster_dirty_rect, 295 const gfx::Rect& raster_dirty_rect,
322 float scale) { 296 float scale) {
323 gfx::GpuMemoryBuffer* gpu_memory_buffer = 297 gfx::GpuMemoryBuffer* gpu_memory_buffer =
(...skipping 13 matching lines...) Expand all
337 } 311 }
338 DCHECK(!playback_rect.IsEmpty()) 312 DCHECK(!playback_rect.IsEmpty())
339 << "Why are we rastering a tile that's not dirty?"; 313 << "Why are we rastering a tile that's not dirty?";
340 TileTaskWorkerPool::PlaybackToMemory( 314 TileTaskWorkerPool::PlaybackToMemory(
341 data, raster_resource->format(), raster_resource->size(), 315 data, raster_resource->format(), raster_resource->size(),
342 static_cast<size_t>(stride), raster_source, raster_full_rect, 316 static_cast<size_t>(stride), raster_source, raster_full_rect,
343 playback_rect, scale); 317 playback_rect, scale);
344 gpu_memory_buffer->Unmap(); 318 gpu_memory_buffer->Unmap();
345 } 319 }
346 320
347 base::AutoLock lock(lock_);
348
349 CopySequenceNumber sequence = 0;
350 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * 321 int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
351 raster_resource->size().width()) / 322 raster_resource->size().width()) /
352 8; 323 8;
353 int chunk_size_in_rows = 324 int chunk_size_in_rows =
354 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); 325 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
355 // Align chunk size to 4. Required to support compressed texture formats. 326 // Align chunk size to 4. Required to support compressed texture formats.
356 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); 327 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
357 int y = 0; 328 int y = 0;
358 int height = raster_resource->size().height(); 329 int height = raster_resource->size().height();
330 ContextProvider* context_provider =
331 resource_provider_->output_surface()->worker_context_provider();
359 while (y < height) { 332 while (y < height) {
360 int failed_attempts = 0; 333 base::AutoLock context_lock(*context_provider->GetLock());
361 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= 334 context_provider->DetachFromThread();
362 kMaxCopyOperations) {
363 // Ignore limit when shutdown is set.
364 if (shutdown_)
365 break;
366
367 ++failed_attempts;
368
369 // Schedule a check that will also wait for operations to complete
370 // after too many failed attempts.
371 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
372
373 // Schedule a check for completed copy operations if too many operations
374 // are currently in-flight.
375 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
376
377 {
378 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
379
380 // Wait for in-flight copy operations to drop below limit.
381 copy_operation_count_cv_.Wait();
382 }
383 }
384
385 // There may be more work available, so wake up another worker thread.
386 copy_operation_count_cv_.Signal();
387
388 // Copy at most |chunk_size_in_rows|. 335 // Copy at most |chunk_size_in_rows|.
389 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 336 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
390 DCHECK_GT(rows_to_copy, 0); 337 DCHECK_GT(rows_to_copy, 0);
391 338 IssueCopyOperations(new CopyOperation(
reveman 2015/06/09 13:30:55 This is leaking a CopyOperations instance. IssueCo
sohanjg 2015/06/09 15:59:23 Done.
392 // |raster_resource_write_lock| is passed to the first copy operation as it
393 // needs to be released before we can issue a copy.
394 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
395 raster_resource_write_lock.Pass(), raster_resource, output_resource, 339 raster_resource_write_lock.Pass(), raster_resource, output_resource,
396 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); 340 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)));
397 y += rows_to_copy; 341 y += rows_to_copy;
398 342 context_provider->ContextGL()->OrderingBarrierCHROMIUM();
reveman 2015/06/09 13:30:55 please add comment explaining this ordering barrie
sohanjg 2015/06/09 15:59:23 Done.
399 // Acquire a sequence number for this copy operation. 343 context_provider->DetachFromThread();
400 sequence = next_copy_operation_sequence_++;
401
402 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
403 // used for this copy operation.
404 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
405
406 // Post task that will advance last flushed copy operation to |sequence|
407 // when |bytes_scheduled_since_last_flush_| has reached
408 // |max_bytes_per_copy_operation_|.
409 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
410 task_runner_->PostTask(
411 FROM_HERE,
412 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
413 weak_ptr_factory_.GetWeakPtr(), sequence));
414 bytes_scheduled_since_last_flush_ = 0;
415 }
416 } 344 }
417
418 return sequence;
419 }
420
421 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
422 CopySequenceNumber sequence) {
423 if (last_issued_copy_operation_ >= sequence)
424 return;
425
426 IssueCopyOperations(sequence - last_issued_copy_operation_);
427 last_issued_copy_operation_ = sequence;
428 }
429
430 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
431 CopySequenceNumber sequence) {
432 if (last_flushed_copy_operation_ >= sequence)
433 return;
434
435 AdvanceLastIssuedCopyTo(sequence);
436
437 // Flush all issued copy operations.
438 context_provider_->ContextGL()->ShallowFlushCHROMIUM();
439 last_flushed_copy_operation_ = last_issued_copy_operation_;
440 } 345 }
441 346
442 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { 347 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
443 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", 348 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
444 task_set); 349 task_set);
445 350
446 DCHECK(tasks_pending_[task_set]); 351 DCHECK(tasks_pending_[task_set]);
447 tasks_pending_[task_set] = false; 352 tasks_pending_[task_set] = false;
448 if (tasks_pending_.any()) { 353 if (tasks_pending_.any()) {
449 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", 354 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
450 "state", StateAsValue()); 355 "state", StateAsValue());
451 } else { 356 } else {
452 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); 357 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
453 } 358 }
454 client_->DidFinishRunningTileTasks(task_set); 359 client_->DidFinishRunningTileTasks(task_set);
455 } 360 }
456 361
457 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { 362 void OneCopyTileTaskWorkerPool::IssueCopyOperations(
458 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", 363 CopyOperation* copy_operation) {
459 count); 364 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations");
460 365
461 CopyOperation::Deque copy_operations; 366 copy_operation->src_write_lock.reset();
462 367 // Copy contents of source resource to destination resource.
463 { 368 resource_provider_->CopyResource(copy_operation->src->id(),
reveman 2015/06/09 13:30:55 We can't use ResourceProvider on this thread.
sohanjg 2015/06/09 15:59:23 Done.
464 base::AutoLock lock(lock_); 369 copy_operation->dst->id(),
465 370 copy_operation->rect);
466 for (int64 i = 0; i < count; ++i) {
467 DCHECK(!pending_copy_operations_.empty());
468 copy_operations.push_back(pending_copy_operations_.take_front());
469 }
470
471 // Increment |issued_copy_operation_count_| to reflect the transition of
472 // copy operations from "pending" to "issued" state.
473 issued_copy_operation_count_ += copy_operations.size();
474 }
475
476 while (!copy_operations.empty()) {
477 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
478
479 // Remove the write lock.
480 copy_operation->src_write_lock.reset();
481
482 // Copy contents of source resource to destination resource.
483 resource_provider_->CopyResource(copy_operation->src->id(),
484 copy_operation->dst->id(),
485 copy_operation->rect);
486 }
487 }
488
489 void OneCopyTileTaskWorkerPool::
490 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
491 bool wait_if_needed) {
492 lock_.AssertAcquired();
493
494 if (check_for_completed_copy_operations_pending_)
495 return;
496
497 base::TimeTicks now = base::TimeTicks::Now();
498
499 // Schedule a check for completed copy operations as soon as possible but
500 // don't allow two consecutive checks to be scheduled to run less than the
501 // tick rate apart.
502 base::TimeTicks next_check_for_completed_copy_operations_time =
503 std::max(last_check_for_completed_copy_operations_time_ +
504 base::TimeDelta::FromMilliseconds(
505 kCheckForCompletedCopyOperationsTickRateMs),
506 now);
507
508 task_runner_->PostDelayedTask(
509 FROM_HERE,
510 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
511 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
512 next_check_for_completed_copy_operations_time - now);
513
514 last_check_for_completed_copy_operations_time_ =
515 next_check_for_completed_copy_operations_time;
516 check_for_completed_copy_operations_pending_ = true;
517 }
518
519 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
520 bool wait_if_needed) {
521 TRACE_EVENT1("cc",
522 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
523 "wait_if_needed", wait_if_needed);
524
525 resource_pool_->CheckBusyResources(wait_if_needed);
526
527 {
528 base::AutoLock lock(lock_);
529
530 DCHECK(check_for_completed_copy_operations_pending_);
531 check_for_completed_copy_operations_pending_ = false;
532
533 // The number of busy resources in the pool reflects the number of issued
534 // copy operations that have not yet completed.
535 issued_copy_operation_count_ = resource_pool_->busy_resource_count();
536
537 // There may be work blocked on too many in-flight copy operations, so wake
538 // up a worker thread.
539 copy_operation_count_cv_.Signal();
540 }
541 } 371 }
542 372
543 scoped_refptr<base::trace_event::ConvertableToTraceFormat> 373 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
544 OneCopyTileTaskWorkerPool::StateAsValue() const { 374 OneCopyTileTaskWorkerPool::StateAsValue() const {
545 scoped_refptr<base::trace_event::TracedValue> state = 375 scoped_refptr<base::trace_event::TracedValue> state =
546 new base::trace_event::TracedValue(); 376 new base::trace_event::TracedValue();
547 377
548 state->BeginArray("tasks_pending"); 378 state->BeginArray("tasks_pending");
549 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) 379 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
550 state->AppendBoolean(tasks_pending_[task_set]); 380 state->AppendBoolean(tasks_pending_[task_set]);
(...skipping 17 matching lines...) Expand all
568 "pending_copy_count", 398 "pending_copy_count",
569 static_cast<int>(resource_pool_->total_resource_count() - 399 static_cast<int>(resource_pool_->total_resource_count() -
570 resource_pool_->acquired_resource_count())); 400 resource_pool_->acquired_resource_count()));
571 staging_state->SetInteger( 401 staging_state->SetInteger(
572 "bytes_pending_copy", 402 "bytes_pending_copy",
573 static_cast<int>(resource_pool_->total_memory_usage_bytes() - 403 static_cast<int>(resource_pool_->total_memory_usage_bytes() -
574 resource_pool_->acquired_memory_usage_bytes())); 404 resource_pool_->acquired_memory_usage_bytes()));
575 } 405 }
576 406
577 } // namespace cc 407 } // namespace cc
OLDNEW
« cc/raster/one_copy_tile_task_worker_pool.h ('K') | « cc/raster/one_copy_tile_task_worker_pool.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698