OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
(...skipping 15 matching lines...) Expand all Loading... | |
26 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 26 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
27 ResourceProvider* resource_provider, | 27 ResourceProvider* resource_provider, |
28 ResourcePool* resource_pool, | 28 ResourcePool* resource_pool, |
29 ResourceFormat resource_format, | 29 ResourceFormat resource_format, |
30 const Resource* output_resource, | 30 const Resource* output_resource, |
31 uint64_t previous_content_id) | 31 uint64_t previous_content_id) |
32 : worker_pool_(worker_pool), | 32 : worker_pool_(worker_pool), |
33 resource_provider_(resource_provider), | 33 resource_provider_(resource_provider), |
34 resource_pool_(resource_pool), | 34 resource_pool_(resource_pool), |
35 output_resource_(output_resource), | 35 output_resource_(output_resource), |
36 raster_content_id_(0), | 36 raster_content_id_(0) { |
37 sequence_(0) { | |
38 if (worker_pool->have_persistent_gpu_memory_buffers() && | 37 if (worker_pool->have_persistent_gpu_memory_buffers() && |
39 previous_content_id) { | 38 previous_content_id) { |
40 raster_resource_ = | 39 raster_resource_ = |
41 resource_pool->TryAcquireResourceWithContentId(previous_content_id); | 40 resource_pool->TryAcquireResourceWithContentId(previous_content_id); |
42 } | 41 } |
43 if (raster_resource_) { | 42 if (raster_resource_) { |
44 raster_content_id_ = previous_content_id; | 43 raster_content_id_ = previous_content_id; |
45 DCHECK_EQ(resource_format, raster_resource_->format()); | 44 DCHECK_EQ(resource_format, raster_resource_->format()); |
46 DCHECK_EQ(output_resource->size().ToString(), | 45 DCHECK_EQ(output_resource->size().ToString(), |
47 raster_resource_->size().ToString()); | 46 raster_resource_->size().ToString()); |
48 } else { | 47 } else { |
49 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), | 48 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), |
50 resource_format); | 49 resource_format); |
51 } | 50 } |
52 | 51 |
53 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 52 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( |
54 resource_provider_, raster_resource_->id())); | 53 resource_provider_, raster_resource_->id())); |
55 } | 54 } |
56 | 55 |
57 ~RasterBufferImpl() override { | 56 ~RasterBufferImpl() override { |
58 // Release write lock in case a copy was never scheduled. | 57 // Release write lock in case a copy was never scheduled. |
59 lock_.reset(); | 58 lock_.reset(); |
60 | 59 |
61 // Make sure any scheduled copy operations are issued before we release the | |
62 // raster resource. | |
63 if (sequence_) | |
64 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | |
65 | |
66 // Return resources to pool so they can be used by another RasterBuffer | 60 // Return resources to pool so they can be used by another RasterBuffer |
67 // instance. | 61 // instance. |
68 resource_pool_->ReleaseResource(raster_resource_.Pass(), | 62 resource_pool_->ReleaseResource(raster_resource_.Pass(), |
69 raster_content_id_); | 63 raster_content_id_); |
70 } | 64 } |
71 | 65 |
72 // Overridden from RasterBuffer: | 66 // Overridden from RasterBuffer: |
73 void Playback(const RasterSource* raster_source, | 67 void Playback(const RasterSource* raster_source, |
74 const gfx::Rect& raster_full_rect, | 68 const gfx::Rect& raster_full_rect, |
75 const gfx::Rect& raster_dirty_rect, | 69 const gfx::Rect& raster_dirty_rect, |
76 uint64_t new_content_id, | 70 uint64_t new_content_id, |
77 float scale) override { | 71 float scale) override { |
78 // If there's a raster_content_id_, we are reusing a resource with that | 72 // If there's a raster_content_id_, we are reusing a resource with that |
79 // content id. | 73 // content id. |
80 bool reusing_raster_resource = raster_content_id_ != 0; | 74 bool reusing_raster_resource = raster_content_id_ != 0; |
81 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | 75 worker_pool_->PlaybackAndCopyOnWorkerThread( |
82 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), | 76 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), |
83 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | 77 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, |
84 scale); | 78 scale); |
85 // Store the content id of the resource to return to the pool. | 79 // Store the content id of the resource to return to the pool. |
86 raster_content_id_ = new_content_id; | 80 raster_content_id_ = new_content_id; |
87 } | 81 } |
88 | 82 |
89 private: | 83 private: |
90 OneCopyTileTaskWorkerPool* worker_pool_; | 84 OneCopyTileTaskWorkerPool* worker_pool_; |
91 ResourceProvider* resource_provider_; | 85 ResourceProvider* resource_provider_; |
92 ResourcePool* resource_pool_; | 86 ResourcePool* resource_pool_; |
93 const Resource* output_resource_; | 87 const Resource* output_resource_; |
94 uint64_t raster_content_id_; | 88 uint64_t raster_content_id_; |
95 scoped_ptr<ScopedResource> raster_resource_; | 89 scoped_ptr<ScopedResource> raster_resource_; |
96 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | 90 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; |
97 CopySequenceNumber sequence_; | |
98 | 91 |
99 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 92 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
100 }; | 93 }; |
101 | 94 |
102 // Number of in-flight copy operations to allow. | |
103 const int kMaxCopyOperations = 32; | |
104 | |
105 // Delay between checks for copy operations to complete. | |
106 const int kCheckForCompletedCopyOperationsTickRateMs = 1; | |
107 | |
108 // Number of failed attempts to allow before we perform a check that will | |
109 // wait for copy operations to complete if needed. | |
110 const int kFailedAttemptsBeforeWaitIfNeeded = 256; | |
111 | |
112 } // namespace | 95 } // namespace |
113 | 96 |
114 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | |
115 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | |
116 const Resource* src, | |
117 const Resource* dst, | |
118 const gfx::Rect& rect) | |
119 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | |
120 } | |
121 | |
122 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | |
123 } | |
124 | |
125 // static | 97 // static |
126 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 98 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
127 base::SequencedTaskRunner* task_runner, | 99 base::SequencedTaskRunner* task_runner, |
128 TaskGraphRunner* task_graph_runner, | 100 TaskGraphRunner* task_graph_runner, |
129 ContextProvider* context_provider, | 101 ContextProvider* context_provider, |
130 ResourceProvider* resource_provider, | 102 ResourceProvider* resource_provider, |
131 ResourcePool* resource_pool, | 103 ResourcePool* resource_pool, |
132 int max_bytes_per_copy_operation, | 104 int max_bytes_per_copy_operation, |
133 bool have_persistent_gpu_memory_buffers) { | 105 bool have_persistent_gpu_memory_buffers) { |
134 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 106 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
(...skipping 11 matching lines...) Expand all Loading... | |
146 int max_bytes_per_copy_operation, | 118 int max_bytes_per_copy_operation, |
147 bool have_persistent_gpu_memory_buffers) | 119 bool have_persistent_gpu_memory_buffers) |
148 : task_runner_(task_runner), | 120 : task_runner_(task_runner), |
149 task_graph_runner_(task_graph_runner), | 121 task_graph_runner_(task_graph_runner), |
150 namespace_token_(task_graph_runner->GetNamespaceToken()), | 122 namespace_token_(task_graph_runner->GetNamespaceToken()), |
151 context_provider_(context_provider), | 123 context_provider_(context_provider), |
152 resource_provider_(resource_provider), | 124 resource_provider_(resource_provider), |
153 resource_pool_(resource_pool), | 125 resource_pool_(resource_pool), |
154 max_bytes_per_copy_operation_(max_bytes_per_copy_operation), | 126 max_bytes_per_copy_operation_(max_bytes_per_copy_operation), |
155 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 127 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), |
156 last_issued_copy_operation_(0), | |
157 last_flushed_copy_operation_(0), | |
158 lock_(), | 128 lock_(), |
159 copy_operation_count_cv_(&lock_), | |
160 bytes_scheduled_since_last_flush_(0), | |
161 issued_copy_operation_count_(0), | |
162 next_copy_operation_sequence_(1), | |
163 check_for_completed_copy_operations_pending_(false), | |
164 shutdown_(false), | 129 shutdown_(false), |
165 weak_ptr_factory_(this), | 130 weak_ptr_factory_(this), |
166 task_set_finished_weak_ptr_factory_(this) { | 131 task_set_finished_weak_ptr_factory_(this) { |
167 DCHECK(context_provider_); | 132 DCHECK(context_provider_); |
168 } | 133 } |
169 | 134 |
170 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 135 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
171 DCHECK_EQ(pending_copy_operations_.size(), 0u); | |
172 } | 136 } |
173 | 137 |
174 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 138 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
175 return this; | 139 return this; |
176 } | 140 } |
177 | 141 |
178 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 142 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
179 client_ = client; | 143 client_ = client; |
180 } | 144 } |
181 | 145 |
182 void OneCopyTileTaskWorkerPool::Shutdown() { | 146 void OneCopyTileTaskWorkerPool::Shutdown() { |
183 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 147 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
184 | 148 |
185 { | 149 { |
186 base::AutoLock lock(lock_); | 150 base::AutoLock lock(lock_); |
187 | 151 |
188 shutdown_ = true; | 152 shutdown_ = true; |
189 copy_operation_count_cv_.Signal(); | |
190 } | 153 } |
191 | 154 |
192 TaskGraph empty; | 155 TaskGraph empty; |
193 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 156 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
194 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 157 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
195 } | 158 } |
196 | 159 |
197 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 160 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
198 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 161 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
199 | 162 |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
302 new RasterBufferImpl(this, resource_provider_, resource_pool_, | 265 new RasterBufferImpl(this, resource_provider_, resource_pool_, |
303 resource_provider_->best_texture_format(), resource, | 266 resource_provider_->best_texture_format(), resource, |
304 previous_content_id)); | 267 previous_content_id)); |
305 } | 268 } |
306 | 269 |
307 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 270 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
308 scoped_ptr<RasterBuffer> buffer) { | 271 scoped_ptr<RasterBuffer> buffer) { |
309 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 272 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
310 } | 273 } |
311 | 274 |
312 CopySequenceNumber | 275 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( |
313 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | |
314 bool reusing_raster_resource, | 276 bool reusing_raster_resource, |
315 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | 277 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> |
316 raster_resource_write_lock, | 278 raster_resource_write_lock, |
317 const Resource* raster_resource, | 279 const Resource* raster_resource, |
318 const Resource* output_resource, | 280 const Resource* output_resource, |
319 const RasterSource* raster_source, | 281 const RasterSource* raster_source, |
320 const gfx::Rect& raster_full_rect, | 282 const gfx::Rect& raster_full_rect, |
321 const gfx::Rect& raster_dirty_rect, | 283 const gfx::Rect& raster_dirty_rect, |
322 float scale) { | 284 float scale) { |
323 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 285 gfx::GpuMemoryBuffer* gpu_memory_buffer = |
(...skipping 13 matching lines...) Expand all Loading... | |
337 } | 299 } |
338 DCHECK(!playback_rect.IsEmpty()) | 300 DCHECK(!playback_rect.IsEmpty()) |
339 << "Why are we rastering a tile that's not dirty?"; | 301 << "Why are we rastering a tile that's not dirty?"; |
340 TileTaskWorkerPool::PlaybackToMemory( | 302 TileTaskWorkerPool::PlaybackToMemory( |
341 data, raster_resource->format(), raster_resource->size(), | 303 data, raster_resource->format(), raster_resource->size(), |
342 static_cast<size_t>(stride), raster_source, raster_full_rect, | 304 static_cast<size_t>(stride), raster_source, raster_full_rect, |
343 playback_rect, scale); | 305 playback_rect, scale); |
344 gpu_memory_buffer->Unmap(); | 306 gpu_memory_buffer->Unmap(); |
345 } | 307 } |
346 | 308 |
347 base::AutoLock lock(lock_); | |
348 | |
349 CopySequenceNumber sequence = 0; | |
350 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 309 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * |
351 raster_resource->size().width()) / | 310 raster_resource->size().width()) / |
352 8; | 311 8; |
353 int chunk_size_in_rows = | 312 int chunk_size_in_rows = |
354 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 313 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
355 // Align chunk size to 4. Required to support compressed texture formats. | 314 // Align chunk size to 4. Required to support compressed texture formats. |
356 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | 315 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); |
357 int y = 0; | 316 int y = 0; |
358 int height = raster_resource->size().height(); | 317 int height = raster_resource->size().height(); |
318 ContextProvider* context_provider = | |
319 resource_provider_->output_surface()->worker_context_provider(); | |
359 while (y < height) { | 320 while (y < height) { |
360 int failed_attempts = 0; | 321 base::AutoLock context_lock(*context_provider->GetLock()); |
361 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 322 context_provider->DetachFromThread(); |
362 kMaxCopyOperations) { | |
363 // Ignore limit when shutdown is set. | |
364 if (shutdown_) | |
365 break; | |
366 | |
367 ++failed_attempts; | |
368 | |
369 // Schedule a check that will also wait for operations to complete | |
370 // after too many failed attempts. | |
371 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | |
372 | |
373 // Schedule a check for completed copy operations if too many operations | |
374 // are currently in-flight. | |
375 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | |
376 | |
377 { | |
378 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | |
379 | |
380 // Wait for in-flight copy operations to drop below limit. | |
381 copy_operation_count_cv_.Wait(); | |
382 } | |
383 } | |
384 | |
385 // There may be more work available, so wake up another worker thread. | |
386 copy_operation_count_cv_.Signal(); | |
387 | |
388 // Copy at most |chunk_size_in_rows|. | 323 // Copy at most |chunk_size_in_rows|. |
389 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 324 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
390 DCHECK_GT(rows_to_copy, 0); | 325 DCHECK_GT(rows_to_copy, 0); |
391 | 326 |
392 // |raster_resource_write_lock| is passed to the first copy operation as it | 327 context_provider->ContextGL()->CopySubTextureCHROMIUM( |
393 // needs to be released before we can issue a copy. | 328 output_resource->format(), raster_resource->id(), output_resource->id(), |
394 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 329 0, y, 0, y, raster_resource->size().width(), rows_to_copy); |
395 raster_resource_write_lock.Pass(), raster_resource, output_resource, | 330 |
sohanjg
2015/06/09 15:59:24
do we need all the validation code, that we do in
reveman
2015/06/09 18:00:30
You'll need the commands completed queries as a st
sohanjg
2015/06/11 13:32:26
Done.
| |
396 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | |
397 y += rows_to_copy; | 331 y += rows_to_copy; |
398 | 332 |
399 // Acquire a sequence number for this copy operation. | 333 // Sync/Deferred flush worker context to cc context. |
400 sequence = next_copy_operation_sequence_++; | 334 context_provider->ContextGL()->OrderingBarrierCHROMIUM(); |
401 | 335 context_provider->DetachFromThread(); |
402 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | |
403 // used for this copy operation. | |
404 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | |
405 | |
406 // Post task that will advance last flushed copy operation to |sequence| | |
407 // when |bytes_scheduled_since_last_flush_| has reached | |
408 // |max_bytes_per_copy_operation_|. | |
409 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | |
410 task_runner_->PostTask( | |
411 FROM_HERE, | |
412 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | |
413 weak_ptr_factory_.GetWeakPtr(), sequence)); | |
414 bytes_scheduled_since_last_flush_ = 0; | |
415 } | |
416 } | 336 } |
417 | |
418 return sequence; | |
419 } | |
420 | |
421 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | |
422 CopySequenceNumber sequence) { | |
423 if (last_issued_copy_operation_ >= sequence) | |
424 return; | |
425 | |
426 IssueCopyOperations(sequence - last_issued_copy_operation_); | |
427 last_issued_copy_operation_ = sequence; | |
428 } | |
429 | |
430 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | |
431 CopySequenceNumber sequence) { | |
432 if (last_flushed_copy_operation_ >= sequence) | |
433 return; | |
434 | |
435 AdvanceLastIssuedCopyTo(sequence); | |
436 | |
437 // Flush all issued copy operations. | |
438 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | |
439 last_flushed_copy_operation_ = last_issued_copy_operation_; | |
440 } | 337 } |
441 | 338 |
442 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 339 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
443 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 340 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
444 task_set); | 341 task_set); |
445 | 342 |
446 DCHECK(tasks_pending_[task_set]); | 343 DCHECK(tasks_pending_[task_set]); |
447 tasks_pending_[task_set] = false; | 344 tasks_pending_[task_set] = false; |
448 if (tasks_pending_.any()) { | 345 if (tasks_pending_.any()) { |
449 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 346 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
450 "state", StateAsValue()); | 347 "state", StateAsValue()); |
451 } else { | 348 } else { |
452 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 349 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
453 } | 350 } |
454 client_->DidFinishRunningTileTasks(task_set); | 351 client_->DidFinishRunningTileTasks(task_set); |
455 } | 352 } |
456 | 353 |
457 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | |
458 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | |
459 count); | |
460 | |
461 CopyOperation::Deque copy_operations; | |
462 | |
463 { | |
464 base::AutoLock lock(lock_); | |
465 | |
466 for (int64 i = 0; i < count; ++i) { | |
467 DCHECK(!pending_copy_operations_.empty()); | |
468 copy_operations.push_back(pending_copy_operations_.take_front()); | |
469 } | |
470 | |
471 // Increment |issued_copy_operation_count_| to reflect the transition of | |
472 // copy operations from "pending" to "issued" state. | |
473 issued_copy_operation_count_ += copy_operations.size(); | |
474 } | |
475 | |
476 while (!copy_operations.empty()) { | |
477 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | |
478 | |
479 // Remove the write lock. | |
480 copy_operation->src_write_lock.reset(); | |
481 | |
482 // Copy contents of source resource to destination resource. | |
483 resource_provider_->CopyResource(copy_operation->src->id(), | |
484 copy_operation->dst->id(), | |
485 copy_operation->rect); | |
486 } | |
487 } | |
488 | |
489 void OneCopyTileTaskWorkerPool:: | |
490 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | |
491 bool wait_if_needed) { | |
492 lock_.AssertAcquired(); | |
493 | |
494 if (check_for_completed_copy_operations_pending_) | |
495 return; | |
496 | |
497 base::TimeTicks now = base::TimeTicks::Now(); | |
498 | |
499 // Schedule a check for completed copy operations as soon as possible but | |
500 // don't allow two consecutive checks to be scheduled to run less than the | |
501 // tick rate apart. | |
502 base::TimeTicks next_check_for_completed_copy_operations_time = | |
503 std::max(last_check_for_completed_copy_operations_time_ + | |
504 base::TimeDelta::FromMilliseconds( | |
505 kCheckForCompletedCopyOperationsTickRateMs), | |
506 now); | |
507 | |
508 task_runner_->PostDelayedTask( | |
509 FROM_HERE, | |
510 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | |
511 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | |
512 next_check_for_completed_copy_operations_time - now); | |
513 | |
514 last_check_for_completed_copy_operations_time_ = | |
515 next_check_for_completed_copy_operations_time; | |
516 check_for_completed_copy_operations_pending_ = true; | |
517 } | |
518 | |
519 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | |
520 bool wait_if_needed) { | |
521 TRACE_EVENT1("cc", | |
522 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | |
523 "wait_if_needed", wait_if_needed); | |
524 | |
525 resource_pool_->CheckBusyResources(wait_if_needed); | |
526 | |
527 { | |
528 base::AutoLock lock(lock_); | |
529 | |
530 DCHECK(check_for_completed_copy_operations_pending_); | |
531 check_for_completed_copy_operations_pending_ = false; | |
532 | |
533 // The number of busy resources in the pool reflects the number of issued | |
534 // copy operations that have not yet completed. | |
535 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | |
536 | |
537 // There may be work blocked on too many in-flight copy operations, so wake | |
538 // up a worker thread. | |
539 copy_operation_count_cv_.Signal(); | |
540 } | |
541 } | |
542 | |
543 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 354 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
544 OneCopyTileTaskWorkerPool::StateAsValue() const { | 355 OneCopyTileTaskWorkerPool::StateAsValue() const { |
545 scoped_refptr<base::trace_event::TracedValue> state = | 356 scoped_refptr<base::trace_event::TracedValue> state = |
546 new base::trace_event::TracedValue(); | 357 new base::trace_event::TracedValue(); |
547 | 358 |
548 state->BeginArray("tasks_pending"); | 359 state->BeginArray("tasks_pending"); |
549 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 360 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
550 state->AppendBoolean(tasks_pending_[task_set]); | 361 state->AppendBoolean(tasks_pending_[task_set]); |
551 state->EndArray(); | 362 state->EndArray(); |
552 state->BeginDictionary("staging_state"); | 363 state->BeginDictionary("staging_state"); |
(...skipping 15 matching lines...) Expand all Loading... | |
568 "pending_copy_count", | 379 "pending_copy_count", |
569 static_cast<int>(resource_pool_->total_resource_count() - | 380 static_cast<int>(resource_pool_->total_resource_count() - |
570 resource_pool_->acquired_resource_count())); | 381 resource_pool_->acquired_resource_count())); |
571 staging_state->SetInteger( | 382 staging_state->SetInteger( |
572 "bytes_pending_copy", | 383 "bytes_pending_copy", |
573 static_cast<int>(resource_pool_->total_memory_usage_bytes() - | 384 static_cast<int>(resource_pool_->total_memory_usage_bytes() - |
574 resource_pool_->acquired_memory_usage_bytes())); | 385 resource_pool_->acquired_memory_usage_bytes())); |
575 } | 386 } |
576 | 387 |
577 } // namespace cc | 388 } // namespace cc |
OLD | NEW |