
Unified Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1157943004: cc: [WIP] Use worker context and OrderingBarrierCHROMIUM for one-copy. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Handle sync while copying texture. Created 5 years, 6 months ago
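The gist of the change, before the diff: each raster chunk is now copied on the shared worker context and fenced individually, instead of being queued as a CopyOperation and flushed from the compositor thread. The sketch below is a minimal, self-contained illustration of that pattern, not the patch itself; GLStub, CopyInChunks, RoundUp, and every constant in it are hypothetical stand-ins invented for illustration. The real code runs under the worker ContextProvider's lock, allocates query ids with GenQueriesEXT, wraps them in a ResourceProvider::CopyTextureFence, and falls back to a shared SynchronousFence when the CHROMIUM_sync_query capability is missing.

// Sketch of the chunked copy + query fence + ordering-barrier pattern.
// All names here are illustrative stand-ins, not Chromium APIs.
#include <algorithm>
#include <cstdio>

struct GLStub {  // stands in for the worker context's GLES2 interface
  void BeginQueryEXT(const char* target, unsigned id) {
    std::printf("BeginQueryEXT(%s, %u)\n", target, id);
  }
  void EndQueryEXT(const char* target) {
    std::printf("EndQueryEXT(%s)\n", target);
  }
  void CopySubTextureCHROMIUM(int y, int rows) {
    std::printf("CopySubTextureCHROMIUM(y=%d, rows=%d)\n", y, rows);
  }
  void OrderingBarrierCHROMIUM() {
    std::printf("OrderingBarrierCHROMIUM()\n");
  }
};

// Mirrors cc::MathUtil::RoundUp: round |n| up to a multiple of |mul|.
int RoundUp(int n, int mul) {
  return ((n + mul - 1) / mul) * mul;
}

void CopyInChunks(GLStub* gl, int width, int height, int bits_per_pixel,
                  int max_bytes_per_copy) {
  int bytes_per_row = (bits_per_pixel * width) / 8;
  // Cap each copy at |max_bytes_per_copy|, aligned to 4 rows because
  // compressed texture formats require it.
  int chunk_rows = RoundUp(std::max(1, max_bytes_per_copy / bytes_per_row), 4);
  unsigned next_query_id = 1;
  for (int y = 0; y < height;) {
    int rows = std::min(chunk_rows, height - y);
    // Fence the chunk: the query completes only when the copy has executed,
    // so the staging resource stays locked for reading until then.
    unsigned id = next_query_id++;
    gl->BeginQueryEXT("GL_COMMANDS_COMPLETED_CHROMIUM", id);
    gl->CopySubTextureCHROMIUM(y, rows);
    gl->EndQueryEXT("GL_COMMANDS_COMPLETED_CHROMIUM");
    y += rows;
    // Deferred flush: orders these commands against the compositor (cc)
    // context without a full glFlush per chunk.
    gl->OrderingBarrierCHROMIUM();
  }
}

int main() {
  // 256x256 RGBA tile, 64 KiB per copy -> 64-row chunks.
  GLStub gl;
  CopyInChunks(&gl, 256, 256, 32, 64 * 1024);
  return 0;
}

The design consequence shows up as deletions in the diff: the CopySequenceNumber plumbing, the pending-copy deque, the condition variable, and the ShallowFlushCHROMIUM path all go away, because a per-chunk query fence carries the completion signal and OrderingBarrierCHROMIUM provides cross-context ordering with a deferred flush.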
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "cc/raster/one_copy_tile_task_worker_pool.h"
 
 #include <algorithm>
 #include <limits>
 
 #include "base/strings/stringprintf.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "cc/base/math_util.h"
 #include "cc/debug/traced_value.h"
 #include "cc/raster/raster_buffer.h"
 #include "cc/resources/resource_pool.h"
 #include "cc/resources/scoped_resource.h"
+#include "gpu/GLES2/gl2extchromium.h"
 #include "gpu/command_buffer/client/gles2_interface.h"
 #include "ui/gfx/gpu_memory_buffer.h"
 
 namespace cc {
 namespace {
 
 class RasterBufferImpl : public RasterBuffer {
  public:
   RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                    ResourceProvider* resource_provider,
                    ResourcePool* resource_pool,
                    ResourceFormat resource_format,
                    const Resource* output_resource,
                    uint64_t previous_content_id)
       : worker_pool_(worker_pool),
         resource_provider_(resource_provider),
         resource_pool_(resource_pool),
         output_resource_(output_resource),
-        raster_content_id_(0),
-        sequence_(0) {
+        raster_content_id_(0) {
     if (worker_pool->have_persistent_gpu_memory_buffers() &&
         previous_content_id) {
       raster_resource_ =
           resource_pool->TryAcquireResourceWithContentId(previous_content_id);
     }
     if (raster_resource_) {
       raster_content_id_ = previous_content_id;
       DCHECK_EQ(resource_format, raster_resource_->format());
       DCHECK_EQ(output_resource->size().ToString(),
                 raster_resource_->size().ToString());
     } else {
       raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
                                                         resource_format);
     }
 
     lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
         resource_provider_, raster_resource_->id()));
   }
 
   ~RasterBufferImpl() override {
     // Release write lock in case a copy was never scheduled.
     lock_.reset();
 
-    // Make sure any scheduled copy operations are issued before we release the
-    // raster resource.
-    if (sequence_)
-      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
-
     // Return resources to pool so they can be used by another RasterBuffer
     // instance.
     resource_pool_->ReleaseResource(raster_resource_.Pass(),
                                     raster_content_id_);
   }
 
   // Overridden from RasterBuffer:
   void Playback(const RasterSource* raster_source,
                 const gfx::Rect& raster_full_rect,
                 const gfx::Rect& raster_dirty_rect,
                 uint64_t new_content_id,
                 float scale) override {
     // If there's a raster_content_id_, we are reusing a resource with that
     // content id.
     bool reusing_raster_resource = raster_content_id_ != 0;
-    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+    worker_pool_->PlaybackAndCopyOnWorkerThread(
         reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
         output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
         scale);
     // Store the content id of the resource to return to the pool.
     raster_content_id_ = new_content_id;
   }
 
  private:
   OneCopyTileTaskWorkerPool* worker_pool_;
   ResourceProvider* resource_provider_;
   ResourcePool* resource_pool_;
   const Resource* output_resource_;
   uint64_t raster_content_id_;
   scoped_ptr<ScopedResource> raster_resource_;
   scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
-  CopySequenceNumber sequence_;
 
   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
 };
 
-// Number of in-flight copy operations to allow.
-const int kMaxCopyOperations = 32;
-
-// Delay been checking for copy operations to complete.
-const int kCheckForCompletedCopyOperationsTickRateMs = 1;
-
-// Number of failed attempts to allow before we perform a check that will
-// wait for copy operations to complete if needed.
-const int kFailedAttemptsBeforeWaitIfNeeded = 256;
-
 }  // namespace
 
-OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
-    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
-    const Resource* src,
-    const Resource* dst,
-    const gfx::Rect& rect)
-    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
-}
-
-OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
-}
-
 // static
 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
     ContextProvider* context_provider,
     ResourceProvider* resource_provider,
     ResourcePool* resource_pool,
     int max_bytes_per_copy_operation,
     bool have_persistent_gpu_memory_buffers) {
   return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
(...skipping 11 matching lines...)
     int max_bytes_per_copy_operation,
     bool have_persistent_gpu_memory_buffers)
     : task_runner_(task_runner),
       task_graph_runner_(task_graph_runner),
       namespace_token_(task_graph_runner->GetNamespaceToken()),
       context_provider_(context_provider),
       resource_provider_(resource_provider),
       resource_pool_(resource_pool),
       max_bytes_per_copy_operation_(max_bytes_per_copy_operation),
       have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
-      last_issued_copy_operation_(0),
-      last_flushed_copy_operation_(0),
       lock_(),
-      copy_operation_count_cv_(&lock_),
-      bytes_scheduled_since_last_flush_(0),
-      issued_copy_operation_count_(0),
-      next_copy_operation_sequence_(1),
-      check_for_completed_copy_operations_pending_(false),
       shutdown_(false),
       weak_ptr_factory_(this),
       task_set_finished_weak_ptr_factory_(this) {
   DCHECK(context_provider_);
 }
 
 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
-  DCHECK_EQ(pending_copy_operations_.size(), 0u);
 }
 
 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
   return this;
 }
 
 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
   client_ = client;
 }
 
 void OneCopyTileTaskWorkerPool::Shutdown() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
 
   {
     base::AutoLock lock(lock_);
 
     shutdown_ = true;
-    copy_operation_count_cv_.Signal();
   }
 
   TaskGraph empty;
   task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
   task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
 }
 
 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
 
(...skipping 102 matching lines...)
       new RasterBufferImpl(this, resource_provider_, resource_pool_,
                            resource_provider_->best_texture_format(), resource,
                            previous_content_id));
 }
 
 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
     scoped_ptr<RasterBuffer> buffer) {
   // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
 }
 
-CopySequenceNumber
-OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
+void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
     bool reusing_raster_resource,
     scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
         raster_resource_write_lock,
     const Resource* raster_resource,
     const Resource* output_resource,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
     const gfx::Rect& raster_dirty_rect,
     float scale) {
+  TRACE_EVENT0("cc",
+               "OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread");
   gfx::GpuMemoryBuffer* gpu_memory_buffer =
       raster_resource_write_lock->GetGpuMemoryBuffer();
   if (gpu_memory_buffer) {
     void* data = NULL;
     bool rv = gpu_memory_buffer->Map(&data);
     DCHECK(rv);
     int stride;
     gpu_memory_buffer->GetStride(&stride);
     // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
     DCHECK_GE(stride, 0);
 
     gfx::Rect playback_rect = raster_full_rect;
     if (reusing_raster_resource) {
       playback_rect.Intersect(raster_dirty_rect);
     }
     DCHECK(!playback_rect.IsEmpty())
         << "Why are we rastering a tile that's not dirty?";
     TileTaskWorkerPool::PlaybackToMemory(
         data, raster_resource->format(), raster_resource->size(),
         static_cast<size_t>(stride), raster_source, raster_full_rect,
         playback_rect, scale);
     gpu_memory_buffer->Unmap();
   }
 
-  base::AutoLock lock(lock_);
-
-  CopySequenceNumber sequence = 0;
   int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
                        raster_resource->size().width()) /
                       8;
   int chunk_size_in_rows =
       std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
   // Align chunk size to 4. Required to support compressed texture formats.
   chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
   int y = 0;
   int height = raster_resource->size().height();
+  ContextProvider* context_provider =
+      resource_provider_->output_surface()->worker_context_provider();
   while (y < height) {
-    int failed_attempts = 0;
-    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
-           kMaxCopyOperations) {
-      // Ignore limit when shutdown is set.
-      if (shutdown_)
-        break;
-
-      ++failed_attempts;
-
-      // Schedule a check that will also wait for operations to complete
-      // after too many failed attempts.
-      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
-
-      // Schedule a check for completed copy operations if too many operations
-      // are currently in-flight.
-      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
-
-      {
-        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
-
-        // Wait for in-flight copy operations to drop below limit.
-        copy_operation_count_cv_.Wait();
-      }
-    }
-
-    // There may be more work available, so wake up another worker thread.
-    copy_operation_count_cv_.Signal();
-
+    base::AutoLock context_lock(*context_provider->GetLock());
+    context_provider->DetachFromThread();
     // Copy at most |chunk_size_in_rows|.
     int rows_to_copy = std::min(chunk_size_in_rows, height - y);
     DCHECK_GT(rows_to_copy, 0);
 
-    // |raster_resource_write_lock| is passed to the first copy operation as it
-    // needs to be released before we can issue a copy.
-    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
-        raster_resource_write_lock.Pass(), raster_resource, output_resource,
-        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
+    unsigned id = 0;
+    if (context_provider->ContextCapabilities().gpu.sync_query) {
+      if (!id)
+        context_provider->ContextGL()->GenQueriesEXT(1, &id);
+#if defined(OS_CHROMEOS)
+      // TODO(reveman): This avoids a performance problem on some ChromeOS
+      // devices. This needs to be removed to support native GpuMemoryBuffer
+      // implementations. crbug.com/436314
+      context_provider->ContextGL()->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM,
+                                                   id);
+#else
+      context_provider->ContextGL()->BeginQueryEXT(
+          GL_COMMANDS_COMPLETED_CHROMIUM, id);
+#endif
+    }
+    context_provider->ContextGL()->CopySubTextureCHROMIUM(
+        output_resource->format(), raster_resource->id(), output_resource->id(),
+        0, y, 0, y, raster_resource->size().width(), rows_to_copy);
+
     y += rows_to_copy;
+    if (id) {
+      // End the query and create a read lock fence that prevents access to
+      // the source resource until the CopySubTextureCHROMIUM command has
+      // completed.
+#if defined(OS_CHROMEOS)
+      context_provider->ContextGL()->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
+#else
+      context_provider->ContextGL()->EndQueryEXT(
+          GL_COMMANDS_COMPLETED_CHROMIUM);
+#endif
+      read_lock_fence_ =
+          make_scoped_refptr(new ResourceProvider::CopyTextureFence(
+              context_provider->ContextGL(), id));
+    } else {
+      // Create a SynchronousFence when the CHROMIUM_sync_query extension is
+      // missing. Try to use one synchronous fence for as many CopyResource
+      // operations as possible, as that reduces the number of times we have
+      // to synchronize with the GL.
+      if (!synchronous_fence_.get() || synchronous_fence_->has_synchronized())
+        synchronous_fence_ =
+            make_scoped_refptr(new ResourceProvider::SynchronousFence(
+                context_provider->ContextGL()));
+      read_lock_fence_ = synchronous_fence_;
+      read_lock_fence_->Set();
+    }
 
-    // Acquire a sequence number for this copy operation.
-    sequence = next_copy_operation_sequence_++;
-
-    // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
-    // used for this copy operation.
-    bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
-    // Post task that will advance last flushed copy operation to |sequence|
-    // when |bytes_scheduled_since_last_flush_| has reached
-    // |max_bytes_per_copy_operation_|.
-    if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
-      task_runner_->PostTask(
-          FROM_HERE,
-          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
-                     weak_ptr_factory_.GetWeakPtr(), sequence));
-      bytes_scheduled_since_last_flush_ = 0;
-    }
+    // Sync the worker context to the cc context with a deferred flush.
+    context_provider->ContextGL()->OrderingBarrierCHROMIUM();
+    context_provider->DetachFromThread();
   }
-
-  return sequence;
-}
-
-void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
-    CopySequenceNumber sequence) {
-  if (last_issued_copy_operation_ >= sequence)
-    return;
-
-  IssueCopyOperations(sequence - last_issued_copy_operation_);
-  last_issued_copy_operation_ = sequence;
-}
-
-void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
-    CopySequenceNumber sequence) {
-  if (last_flushed_copy_operation_ >= sequence)
-    return;
-
-  AdvanceLastIssuedCopyTo(sequence);
-
-  // Flush all issued copy operations.
-  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
-  last_flushed_copy_operation_ = last_issued_copy_operation_;
 }
 
 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
   TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
                task_set);
 
   DCHECK(tasks_pending_[task_set]);
   tasks_pending_[task_set] = false;
   if (tasks_pending_.any()) {
     TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                  "state", StateAsValue());
   } else {
     TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
   }
   client_->DidFinishRunningTileTasks(task_set);
 }
 
-void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
-  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
-               count);
-
-  CopyOperation::Deque copy_operations;
-
-  {
-    base::AutoLock lock(lock_);
-
-    for (int64 i = 0; i < count; ++i) {
-      DCHECK(!pending_copy_operations_.empty());
-      copy_operations.push_back(pending_copy_operations_.take_front());
-    }
-
-    // Increment |issued_copy_operation_count_| to reflect the transition of
-    // copy operations from "pending" to "issued" state.
-    issued_copy_operation_count_ += copy_operations.size();
-  }
-
-  while (!copy_operations.empty()) {
-    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
-
-    // Remove the write lock.
-    copy_operation->src_write_lock.reset();
-
-    // Copy contents of source resource to destination resource.
-    resource_provider_->CopyResource(copy_operation->src->id(),
-                                     copy_operation->dst->id(),
-                                     copy_operation->rect);
-  }
-}
-
-void OneCopyTileTaskWorkerPool::
-    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
-        bool wait_if_needed) {
-  lock_.AssertAcquired();
-
-  if (check_for_completed_copy_operations_pending_)
-    return;
-
-  base::TimeTicks now = base::TimeTicks::Now();
-
-  // Schedule a check for completed copy operations as soon as possible but
-  // don't allow two consecutive checks to be scheduled to run less than the
-  // tick rate apart.
-  base::TimeTicks next_check_for_completed_copy_operations_time =
-      std::max(last_check_for_completed_copy_operations_time_ +
-                   base::TimeDelta::FromMilliseconds(
-                       kCheckForCompletedCopyOperationsTickRateMs),
-               now);
-
-  task_runner_->PostDelayedTask(
-      FROM_HERE,
-      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
-                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
-      next_check_for_completed_copy_operations_time - now);
-
-  last_check_for_completed_copy_operations_time_ =
-      next_check_for_completed_copy_operations_time;
-  check_for_completed_copy_operations_pending_ = true;
-}
-
-void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
-    bool wait_if_needed) {
-  TRACE_EVENT1("cc",
-               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
-               "wait_if_needed", wait_if_needed);
-
-  resource_pool_->CheckBusyResources(wait_if_needed);
-
-  {
-    base::AutoLock lock(lock_);
-
-    DCHECK(check_for_completed_copy_operations_pending_);
-    check_for_completed_copy_operations_pending_ = false;
-
-    // The number of busy resources in the pool reflects the number of issued
-    // copy operations that have not yet completed.
-    issued_copy_operation_count_ = resource_pool_->busy_resource_count();
-
-    // There may be work blocked on too many in-flight copy operations, so wake
-    // up a worker thread.
-    copy_operation_count_cv_.Signal();
-  }
-}
-
 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
 OneCopyTileTaskWorkerPool::StateAsValue() const {
   scoped_refptr<base::trace_event::TracedValue> state =
       new base::trace_event::TracedValue();
 
   state->BeginArray("tasks_pending");
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
     state->AppendBoolean(tasks_pending_[task_set]);
   state->EndArray();
   state->BeginDictionary("staging_state");
(...skipping 15 matching lines...)
568 "pending_copy_count", 422 "pending_copy_count",
569 static_cast<int>(resource_pool_->total_resource_count() - 423 static_cast<int>(resource_pool_->total_resource_count() -
570 resource_pool_->acquired_resource_count())); 424 resource_pool_->acquired_resource_count()));
571 staging_state->SetInteger( 425 staging_state->SetInteger(
572 "bytes_pending_copy", 426 "bytes_pending_copy",
573 static_cast<int>(resource_pool_->total_memory_usage_bytes() - 427 static_cast<int>(resource_pool_->total_memory_usage_bytes() -
574 resource_pool_->acquired_memory_usage_bytes())); 428 resource_pool_->acquired_memory_usage_bytes()));
575 } 429 }
576 430
577 } // namespace cc 431 } // namespace cc
