Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1157943004: cc: [WIP] Use worker context and OrderingBarrierCHROMIUM for one-copy. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: piman's comments. Created 5 years, 5 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 9
10 #include "base/strings/stringprintf.h" 10 #include "base/strings/stringprintf.h"
11 #include "base/trace_event/trace_event.h" 11 #include "base/trace_event/trace_event.h"
12 #include "base/trace_event/trace_event_argument.h" 12 #include "base/trace_event/trace_event_argument.h"
13 #include "cc/base/math_util.h" 13 #include "cc/base/math_util.h"
14 #include "cc/debug/traced_value.h" 14 #include "cc/debug/traced_value.h"
15 #include "cc/raster/raster_buffer.h" 15 #include "cc/raster/raster_buffer.h"
16 #include "cc/resources/platform_color.h" 16 #include "cc/resources/platform_color.h"
17 #include "cc/resources/resource_pool.h" 17 #include "cc/resources/resource_pool.h"
18 #include "cc/resources/scoped_resource.h" 18 #include "cc/resources/scoped_resource.h"
19 #include "gpu/command_buffer/client/gles2_interface.h" 19 #include "gpu/command_buffer/client/gles2_interface.h"
20 #include "ui/gfx/gpu_memory_buffer.h" 20 #include "ui/gfx/gpu_memory_buffer.h"
21 21
22 using gpu::gles2::GLES2Interface;
23
22 namespace cc { 24 namespace cc {
23 namespace { 25 namespace {
24 26
25 class RasterBufferImpl : public RasterBuffer { 27 class RasterBufferImpl : public RasterBuffer {
26 public: 28 public:
27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, 29 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
28 ResourceProvider* resource_provider, 30 ResourceProvider* resource_provider,
29 ResourcePool* resource_pool, 31 ResourcePool* resource_pool,
30 ResourceFormat resource_format, 32 ResourceFormat resource_format,
31 const Resource* output_resource, 33 const Resource* output_resource,
32 uint64_t previous_content_id) 34 uint64_t previous_content_id)
33 : worker_pool_(worker_pool), 35 : worker_pool_(worker_pool),
34 resource_provider_(resource_provider), 36 resource_provider_(resource_provider),
35 resource_pool_(resource_pool), 37 resource_pool_(resource_pool),
36 output_resource_(output_resource), 38 output_resource_(output_resource),
37 raster_content_id_(0), 39 raster_content_id_(0) {
38 sequence_(0) {
39 if (worker_pool->have_persistent_gpu_memory_buffers() && 40 if (worker_pool->have_persistent_gpu_memory_buffers() &&
40 previous_content_id) { 41 previous_content_id) {
41 raster_resource_ = 42 raster_resource_ =
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id); 43 resource_pool->TryAcquireResourceWithContentId(previous_content_id);
43 } 44 }
44 if (raster_resource_) { 45 if (raster_resource_) {
45 raster_content_id_ = previous_content_id; 46 raster_content_id_ = previous_content_id;
46 DCHECK_EQ(resource_format, raster_resource_->format()); 47 DCHECK_EQ(resource_format, raster_resource_->format());
47 DCHECK_EQ(output_resource->size().ToString(), 48 DCHECK_EQ(output_resource->size().ToString(),
48 raster_resource_->size().ToString()); 49 raster_resource_->size().ToString());
49 } else { 50 } else {
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), 51 raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
51 resource_format); 52 resource_format);
52 } 53 }
53 54
54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( 55 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread(
55 resource_provider_, raster_resource_->id())); 56 resource_provider_, raster_resource_->id()));
57
58 GLES2Interface* gl =
59 resource_provider_->output_surface()->context_provider()->ContextGL();
60
61 gl->BindTexture(GL_TEXTURE_2D, raster_resource_->id());
62 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
63 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
64 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
65 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
66 gl->TexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES,
67 raster_resource_->size().width(),
68 raster_resource_->size().height());
piman 2015/07/01 22:11:05 I don't think you want to do all that for the sour...
sohanjg 2015/07/02 14:40:27 Done.
69
70 gl->BindTexture(GL_TEXTURE_2D, output_resource_->id());
piman 2015/07/01 22:11:05 That id is the id in the ResourceProvider, not the...
sohanjg 2015/07/02 14:40:28 Done. Ahh..i was assuming that TileManager::Create...
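
piman's point above is that Resource::id() is a ResourceProvider-level identifier, not a GL texture name, so it cannot be passed to BindTexture directly. A minimal sketch of the distinction, assuming the write lock exposes the underlying GL id (as the source_gl_id()/dest_gl_id() accessors used later in this patch do); illustrative only, not the patch's code:

  #include "gpu/command_buffer/client/gles2_interface.h"

  // Bind the GL texture backing a locked resource. |source_gl_id| must come
  // from the write lock (a GL texture name), not from Resource::id(), which
  // only identifies the resource inside the ResourceProvider.
  void BindLockedTexture(gpu::gles2::GLES2Interface* gl, GLuint source_gl_id) {
    gl->BindTexture(GL_TEXTURE_2D, source_gl_id);
  }
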
71 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
72 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
73 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
74 gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
75 gl->TexStorage2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8_OES,
76 output_resource_->size().width(),
77 output_resource_->size().height());
78 gl->OrderingBarrierCHROMIUM();
piman 2015/07/01 22:11:05 I don't think you need this one, because you're ad...
sohanjg 2015/07/02 14:40:28 Done.
56 } 79 }
57 80
58 ~RasterBufferImpl() override { 81 ~RasterBufferImpl() override {
59 // Release write lock in case a copy was never scheduled. 82 // Release write lock in case a copy was never scheduled.
60 lock_.reset(); 83 lock_.reset();
61 84
62 // Make sure any scheduled copy operations are issued before we release the
63 // raster resource.
64 if (sequence_)
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
66
67 // Return resources to pool so they can be used by another RasterBuffer 85 // Return resources to pool so they can be used by another RasterBuffer
68 // instance. 86 // instance.
69 resource_pool_->ReleaseResource(raster_resource_.Pass(), 87 resource_pool_->ReleaseResource(raster_resource_.Pass(),
70 raster_content_id_); 88 raster_content_id_);
71 } 89 }
72 90
73 // Overridden from RasterBuffer: 91 // Overridden from RasterBuffer:
74 void Playback(const RasterSource* raster_source, 92 void Playback(const RasterSource* raster_source,
75 const gfx::Rect& raster_full_rect, 93 const gfx::Rect& raster_full_rect,
76 const gfx::Rect& raster_dirty_rect, 94 const gfx::Rect& raster_dirty_rect,
77 uint64_t new_content_id, 95 uint64_t new_content_id,
78 float scale) override { 96 float scale) override {
79 // If there's a raster_content_id_, we are reusing a resource with that 97 // If there's a raster_content_id_, we are reusing a resource with that
80 // content id. 98 // content id.
81 bool reusing_raster_resource = raster_content_id_ != 0; 99 bool reusing_raster_resource = raster_content_id_ != 0;
82 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( 100 worker_pool_->PlaybackAndCopyOnWorkerThread(
83 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), 101 reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
84 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, 102 output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
85 scale); 103 scale);
86 // Store the content id of the resource to return to the pool. 104 // Store the content id of the resource to return to the pool.
87 raster_content_id_ = new_content_id; 105 raster_content_id_ = new_content_id;
88 } 106 }
89 107
90 private: 108 private:
91 OneCopyTileTaskWorkerPool* worker_pool_; 109 OneCopyTileTaskWorkerPool* worker_pool_;
92 ResourceProvider* resource_provider_; 110 ResourceProvider* resource_provider_;
93 ResourcePool* resource_pool_; 111 ResourcePool* resource_pool_;
94 const Resource* output_resource_; 112 const Resource* output_resource_;
95 uint64_t raster_content_id_; 113 uint64_t raster_content_id_;
96 scoped_ptr<ScopedResource> raster_resource_; 114 scoped_ptr<ScopedResource> raster_resource_;
97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; 115 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread> lock_;
98 CopySequenceNumber sequence_;
99 116
100 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 117 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
101 }; 118 };
102 119
103 // Number of in-flight copy operations to allow.
104 const int kMaxCopyOperations = 32;
105
106 // Delay been checking for copy operations to complete.
107 const int kCheckForCompletedCopyOperationsTickRateMs = 1;
108
109 // Number of failed attempts to allow before we perform a check that will
110 // wait for copy operations to complete if needed.
111 const int kFailedAttemptsBeforeWaitIfNeeded = 256;
112
113 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 120 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
114 // default batch size for copy operations. 121 // default batch size for copy operations.
115 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 122 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
116 123
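
As a quick check of the comment above: one 512x512 tile at 4 bytes per pixel (e.g. RGBA_8888) is 512 * 512 * 4 = 1 MiB, so four tiles are exactly 4 MiB. A tiny illustrative snippet (not part of the patch) encoding the same arithmetic:

  #include <cstddef>

  // One 512x512 tile at 4 bytes per pixel.
  const size_t kTileBytes = 512 * 512 * 4;  // 1 MiB
  const size_t kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
  static_assert(4 * kTileBytes == kMaxBytesPerCopyOperation,
                "4 MiB equals four 512x512 4-bpp tiles");
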
117 } // namespace 124 } // namespace
118 125
119 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
120 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
121 const Resource* src,
122 const Resource* dst,
123 const gfx::Rect& rect)
124 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
125 }
126
127 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
128 }
129
130 // static 126 // static
131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( 127 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
132 base::SequencedTaskRunner* task_runner, 128 base::SequencedTaskRunner* task_runner,
133 TaskGraphRunner* task_graph_runner, 129 TaskGraphRunner* task_graph_runner,
134 ContextProvider* context_provider, 130 ContextProvider* context_provider,
135 ResourceProvider* resource_provider, 131 ResourceProvider* resource_provider,
136 ResourcePool* resource_pool, 132 ResourcePool* resource_pool,
137 int max_copy_texture_chromium_size, 133 int max_copy_texture_chromium_size,
138 bool have_persistent_gpu_memory_buffers) { 134 bool have_persistent_gpu_memory_buffers) {
139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( 135 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
(...skipping 15 matching lines...)
155 namespace_token_(task_graph_runner->GetNamespaceToken()), 151 namespace_token_(task_graph_runner->GetNamespaceToken()),
156 context_provider_(context_provider), 152 context_provider_(context_provider),
157 resource_provider_(resource_provider), 153 resource_provider_(resource_provider),
158 resource_pool_(resource_pool), 154 resource_pool_(resource_pool),
159 max_bytes_per_copy_operation_( 155 max_bytes_per_copy_operation_(
160 max_copy_texture_chromium_size 156 max_copy_texture_chromium_size
161 ? std::min(kMaxBytesPerCopyOperation, 157 ? std::min(kMaxBytesPerCopyOperation,
162 max_copy_texture_chromium_size) 158 max_copy_texture_chromium_size)
163 : kMaxBytesPerCopyOperation), 159 : kMaxBytesPerCopyOperation),
164 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), 160 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
165 last_issued_copy_operation_(0),
166 last_flushed_copy_operation_(0),
167 lock_(), 161 lock_(),
168 copy_operation_count_cv_(&lock_),
169 bytes_scheduled_since_last_flush_(0),
170 issued_copy_operation_count_(0),
171 next_copy_operation_sequence_(1),
172 check_for_completed_copy_operations_pending_(false),
173 shutdown_(false), 162 shutdown_(false),
174 weak_ptr_factory_(this), 163 weak_ptr_factory_(this),
175 task_set_finished_weak_ptr_factory_(this) { 164 task_set_finished_weak_ptr_factory_(this) {
176 DCHECK(context_provider_); 165 DCHECK(context_provider_);
177 } 166 }
178 167
179 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 168 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
180 DCHECK_EQ(pending_copy_operations_.size(), 0u);
181 } 169 }
182 170
183 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 171 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
184 return this; 172 return this;
185 } 173 }
186 174
187 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { 175 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
188 client_ = client; 176 client_ = client;
189 } 177 }
190 178
191 void OneCopyTileTaskWorkerPool::Shutdown() { 179 void OneCopyTileTaskWorkerPool::Shutdown() {
192 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 180 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
193 181
194 { 182 {
195 base::AutoLock lock(lock_); 183 base::AutoLock lock(lock_);
196 184
197 shutdown_ = true; 185 shutdown_ = true;
198 copy_operation_count_cv_.Signal();
199 } 186 }
200 187
201 TaskGraph empty; 188 TaskGraph empty;
202 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 189 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
203 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 190 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
204 } 191 }
205 192
206 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { 193 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
207 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 194 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
208 195
(...skipping 49 matching lines...)
258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); 245 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
259 } 246 }
260 247
261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), 249 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
263 kTaskSetFinishedTaskPriorityBase + task_set, 250 kTaskSetFinishedTaskPriorityBase + task_set,
264 task_count[task_set]); 251 task_count[task_set]);
265 } 252 }
266 253
267 ScheduleTasksOnOriginThread(this, &graph_); 254 ScheduleTasksOnOriginThread(this, &graph_);
255
256 // Barrier to sync any new resources to the worker context.
257 resource_provider_->output_surface()
258 ->context_provider()
259 ->ContextGL()
260 ->OrderingBarrierCHROMIUM();
261
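
The barrier added above is the central mechanism of this patch: GL commands that allocate or update resources on the compositor's context must be ordered ahead of the raster/copy commands the worker context will issue against those same resources, without paying for a full flush. A rough, illustrative sketch of the pattern (the helper name is a placeholder, not the cc API):

  #include "gpu/command_buffer/client/gles2_interface.h"

  // Ensures commands issued so far on |compositor_gl| (e.g. texture
  // allocations) are processed before any command a worker context on the
  // same GPU channel submits after this point, without a full flush here.
  void SyncNewResourcesToWorkerContext(
      gpu::gles2::GLES2Interface* compositor_gl) {
    compositor_gl->OrderingBarrierCHROMIUM();
  }
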
268 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); 262 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
269 263
270 std::copy(new_task_set_finished_tasks, 264 std::copy(new_task_set_finished_tasks,
271 new_task_set_finished_tasks + kNumberOfTaskSets, 265 new_task_set_finished_tasks + kNumberOfTaskSets,
272 task_set_finished_tasks_); 266 task_set_finished_tasks_);
273 267
274 resource_pool_->ReduceResourceUsage(); 268 resource_pool_->ReduceResourceUsage();
275 269
276 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", 270 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
277 StateAsValue()); 271 StateAsValue());
(...skipping 37 matching lines...)
315 new RasterBufferImpl(this, resource_provider_, resource_pool_, 309 new RasterBufferImpl(this, resource_provider_, resource_pool_,
316 resource_provider_->best_texture_format(), resource, 310 resource_provider_->best_texture_format(), resource,
317 previous_content_id)); 311 previous_content_id));
318 } 312 }
319 313
320 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( 314 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
321 scoped_ptr<RasterBuffer> buffer) { 315 scoped_ptr<RasterBuffer> buffer) {
322 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. 316 // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
323 } 317 }
324 318
325 CopySequenceNumber 319 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
326 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
327 bool reusing_raster_resource, 320 bool reusing_raster_resource,
328 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> 321 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBufferForThread>
329 raster_resource_write_lock, 322 raster_resource_write_lock,
330 const Resource* raster_resource, 323 const Resource* raster_resource,
331 const Resource* output_resource, 324 const Resource* output_resource,
332 const RasterSource* raster_source, 325 const RasterSource* raster_source,
333 const gfx::Rect& raster_full_rect, 326 const gfx::Rect& raster_full_rect,
334 const gfx::Rect& raster_dirty_rect, 327 const gfx::Rect& raster_dirty_rect,
335 float scale) { 328 float scale) {
329 TRACE_EVENT0("cc",
330 "OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread");
331 ContextProvider* context_provider =
332 raster_resource_write_lock->worker_context();
333 if (!context_provider) {
334 return;
335 }
336 GLES2Interface* gl = context_provider->ContextGL();
336 gfx::GpuMemoryBuffer* gpu_memory_buffer = 337 gfx::GpuMemoryBuffer* gpu_memory_buffer =
337 raster_resource_write_lock->GetGpuMemoryBuffer(); 338 raster_resource_write_lock->GetGpuMemoryBuffer();
339
338 if (gpu_memory_buffer) { 340 if (gpu_memory_buffer) {
339 void* data = NULL; 341 void* data = NULL;
340 bool rv = gpu_memory_buffer->Map(&data); 342 bool rv = gpu_memory_buffer->Map(&data);
341 DCHECK(rv); 343 DCHECK(rv);
342 int stride; 344 int stride;
343 gpu_memory_buffer->GetStride(&stride); 345 gpu_memory_buffer->GetStride(&stride);
344 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. 346 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
345 DCHECK_GE(stride, 0); 347 DCHECK_GE(stride, 0);
346 348
347 gfx::Rect playback_rect = raster_full_rect; 349 gfx::Rect playback_rect = raster_full_rect;
348 if (reusing_raster_resource) { 350 if (reusing_raster_resource) {
349 playback_rect.Intersect(raster_dirty_rect); 351 playback_rect.Intersect(raster_dirty_rect);
350 } 352 }
351 DCHECK(!playback_rect.IsEmpty()) 353 DCHECK(!playback_rect.IsEmpty())
352 << "Why are we rastering a tile that's not dirty?"; 354 << "Why are we rastering a tile that's not dirty?";
353 TileTaskWorkerPool::PlaybackToMemory( 355 TileTaskWorkerPool::PlaybackToMemory(
354 data, raster_resource->format(), raster_resource->size(), 356 data, raster_resource_write_lock->format(),
355 static_cast<size_t>(stride), raster_source, raster_full_rect, 357 raster_resource_write_lock->size(), static_cast<size_t>(stride),
356 playback_rect, scale); 358 raster_source, raster_full_rect, playback_rect, scale);
357 gpu_memory_buffer->Unmap(); 359 gpu_memory_buffer->Unmap();
358 } 360 }
359 361
360 base::AutoLock lock(lock_); 362 raster_resource_write_lock->set_image_id(
363 gl->CreateImageCHROMIUM(gpu_memory_buffer->AsClientBuffer(),
piman 2015/07/01 22:11:05 You need to create the image lazily. Maybe the bes...
sohanjg 2015/07/02 14:40:27 Done. thanks! this makes it a lot cleaner.
364 raster_resource_write_lock->size().width(),
365 raster_resource_write_lock->size().height(),
366 raster_resource_write_lock->internal_format()));
361 367
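
piman's comment above asks for the CHROMIUM image to be created lazily (the "Done" reply indicates this later moved behind the write lock). A hedged sketch of the idea, creating the image only once per GpuMemoryBuffer and caching its id for later BindTexImage2DCHROMIUM; the helper and its parameters are illustrative, not the patch's API:

  #include "gpu/command_buffer/client/gles2_interface.h"
  #include "ui/gfx/gpu_memory_buffer.h"

  // Create the image backing |buffer| on first use and reuse it afterwards.
  GLuint GetOrCreateImageId(gpu::gles2::GLES2Interface* gl,
                            gfx::GpuMemoryBuffer* buffer,
                            int width,
                            int height,
                            GLenum internal_format,
                            GLuint* cached_image_id) {
    if (!*cached_image_id) {
      *cached_image_id = gl->CreateImageCHROMIUM(
          buffer->AsClientBuffer(), width, height, internal_format);
    }
    return *cached_image_id;
  }
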
362 CopySequenceNumber sequence = 0; 368 raster_resource_write_lock->set_allocated(true);
363 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * 369 raster_resource_write_lock->set_dirty_image(true);
364 raster_resource->size().width()) / 370 raster_resource_write_lock->set_read_lock_fences_enabled(true);
371
372 int bytes_per_row = (BitsPerPixel(raster_resource_write_lock->format()) *
373 raster_resource_write_lock->size().width()) /
365 8; 374 8;
366 int chunk_size_in_rows = 375 int chunk_size_in_rows =
367 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); 376 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
368 // Align chunk size to 4. Required to support compressed texture formats. 377 // Align chunk size to 4. Required to support compressed texture formats.
369 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); 378 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4);
370 int y = 0; 379 int y = 0;
371 int height = raster_resource->size().height(); 380 int height = raster_resource_write_lock->size().height();
372 while (y < height) { 381 while (y < height) {
373 int failed_attempts = 0; 382 base::AutoLock context_lock(*context_provider->GetLock());
374 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= 383 context_provider->DetachFromThread();
375 kMaxCopyOperations) {
376 // Ignore limit when shutdown is set.
377 if (shutdown_)
378 break;
379
380 ++failed_attempts;
381
382 // Schedule a check that will also wait for operations to complete
383 // after too many failed attempts.
384 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
385
386 // Schedule a check for completed copy operations if too many operations
387 // are currently in-flight.
388 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
piman 2015/07/01 22:11:05 I think we still want some form of throttling, tho...
sohanjg 2015/07/02 14:40:28 hmm..yes we need some mechanism to block too many...
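
The deleted block above throttled work with kMaxCopyOperations, a condition variable, and a polling task; piman's point is that some cap on in-flight copies is still needed even with the worker context. One possible shape for that cap, assuming each issued chunk is tracked by a CHROMIUM_sync_query query (see the query sketch after the copy call below); this is only a sketch, not the code that eventually landed:

  #include <deque>
  #include "gpu/GLES2/gl2extchromium.h"
  #include "gpu/command_buffer/client/gles2_interface.h"

  // Drain completed copy queries, then report whether another chunk may be
  // issued without exceeding |max_in_flight| outstanding copies.
  bool CanIssueCopy(gpu::gles2::GLES2Interface* gl,
                    std::deque<GLuint>* pending_queries,
                    size_t max_in_flight) {
    while (!pending_queries->empty()) {
      GLuint available = 0;
      gl->GetQueryObjectuivEXT(pending_queries->front(),
                               GL_QUERY_RESULT_AVAILABLE_EXT, &available);
      if (!available)
        break;
      GLuint id = pending_queries->front();
      gl->DeleteQueriesEXT(1, &id);
      pending_queries->pop_front();
    }
    return pending_queries->size() < max_in_flight;
  }
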
389
390 {
391 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
392
393 // Wait for in-flight copy operations to drop below limit.
394 copy_operation_count_cv_.Wait();
395 }
396 }
397
398 // There may be more work available, so wake up another worker thread.
399 copy_operation_count_cv_.Signal();
400
401 // Copy at most |chunk_size_in_rows|. 384 // Copy at most |chunk_size_in_rows|.
402 int rows_to_copy = std::min(chunk_size_in_rows, height - y); 385 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
403 DCHECK_GT(rows_to_copy, 0); 386 DCHECK_GT(rows_to_copy, 0);
404 387
405 // |raster_resource_write_lock| is passed to the first copy operation as it 388 if (raster_resource_write_lock->image_id() &&
406 // needs to be released before we can issue a copy. 389 raster_resource_write_lock->dirty_image()) {
407 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( 390 gl->BindTexture(GL_TEXTURE_2D,
408 raster_resource_write_lock.Pass(), raster_resource, output_resource, 391 raster_resource_write_lock->source_gl_id());
409 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); 392
393 if (raster_resource_write_lock->bound_image_id())
394 gl->ReleaseTexImage2DCHROMIUM(
395 GL_TEXTURE_2D, raster_resource_write_lock->bound_image_id());
396 gl->BindTexImage2DCHROMIUM(GL_TEXTURE_2D,
397 raster_resource_write_lock->image_id());
398 raster_resource_write_lock->set_bound_image_id(
399 raster_resource_write_lock->image_id());
400 raster_resource_write_lock->set_dirty_image(false);
401 }
402
403 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D,
404 raster_resource_write_lock->source_gl_id(),
405 raster_resource_write_lock->dest_gl_id(), 0, y,
406 0, y, raster_resource_write_lock->size().width(),
407 rows_to_copy, false, false, false);
piman 2015/07/01 22:11:05 We need to add the query logic, so that we know wh...
sohanjg 2015/07/02 14:40:28 Done.
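
A minimal sketch of the query logic requested above, assuming the CHROMIUM_sync_query extension: bracket each CopySubTextureCHROMIUM chunk with a GL_COMMANDS_COMPLETED_CHROMIUM query so the pool can later tell when the copy has actually executed and the staging resource is safe to return to the pool. Parameter names are placeholders:

  #include "gpu/GLES2/gl2extchromium.h"
  #include "gpu/command_buffer/client/gles2_interface.h"

  // Issues one copy chunk and returns a query id whose result becomes
  // available once the GPU has finished the copy.
  GLuint CopyChunkWithQuery(gpu::gles2::GLES2Interface* gl,
                            GLuint source_gl_id,
                            GLuint dest_gl_id,
                            int y,
                            int width,
                            int rows_to_copy) {
    GLuint query_id = 0;
    gl->GenQueriesEXT(1, &query_id);
    gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, query_id);
    gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, source_gl_id, dest_gl_id,
                               0, y, 0, y, width, rows_to_copy,
                               false, false, false);
    gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
    return query_id;
  }
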
408
410 y += rows_to_copy; 409 y += rows_to_copy;
411 410 // Sync/Deferred flush worker context to cc context.
412 // Acquire a sequence number for this copy operation. 411 gl->OrderingBarrierCHROMIUM();
413 sequence = next_copy_operation_sequence_++; 412 context_provider->DetachFromThread();
414
415 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
416 // used for this copy operation.
417 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
418
419 // Post task that will advance last flushed copy operation to |sequence|
420 // when |bytes_scheduled_since_last_flush_| has reached
421 // |max_bytes_per_copy_operation_|.
422 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
423 task_runner_->PostTask(
424 FROM_HERE,
425 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
426 weak_ptr_factory_.GetWeakPtr(), sequence));
427 bytes_scheduled_since_last_flush_ = 0;
428 }
429 } 413 }
430
431 return sequence;
432 }
433
434 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
435 CopySequenceNumber sequence) {
436 if (last_issued_copy_operation_ >= sequence)
437 return;
438
439 IssueCopyOperations(sequence - last_issued_copy_operation_);
440 last_issued_copy_operation_ = sequence;
441 }
442
443 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
444 CopySequenceNumber sequence) {
445 if (last_flushed_copy_operation_ >= sequence)
446 return;
447
448 AdvanceLastIssuedCopyTo(sequence);
449
450 // Flush all issued copy operations.
451 context_provider_->ContextGL()->ShallowFlushCHROMIUM();
452 last_flushed_copy_operation_ = last_issued_copy_operation_;
453 } 414 }
454 415
455 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { 416 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
456 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", 417 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
457 task_set); 418 task_set);
458 419
459 DCHECK(tasks_pending_[task_set]); 420 DCHECK(tasks_pending_[task_set]);
460 tasks_pending_[task_set] = false; 421 tasks_pending_[task_set] = false;
461 if (tasks_pending_.any()) { 422 if (tasks_pending_.any()) {
462 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", 423 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
463 "state", StateAsValue()); 424 "state", StateAsValue());
464 } else { 425 } else {
465 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); 426 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
466 } 427 }
467 client_->DidFinishRunningTileTasks(task_set); 428 client_->DidFinishRunningTileTasks(task_set);
468 } 429 }
469 430
470 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
471 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
472 count);
473
474 CopyOperation::Deque copy_operations;
475
476 {
477 base::AutoLock lock(lock_);
478
479 for (int64 i = 0; i < count; ++i) {
480 DCHECK(!pending_copy_operations_.empty());
481 copy_operations.push_back(pending_copy_operations_.take_front());
482 }
483
484 // Increment |issued_copy_operation_count_| to reflect the transition of
485 // copy operations from "pending" to "issued" state.
486 issued_copy_operation_count_ += copy_operations.size();
487 }
488
489 while (!copy_operations.empty()) {
490 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
491
492 // Remove the write lock.
493 copy_operation->src_write_lock.reset();
494
495 // Copy contents of source resource to destination resource.
496 resource_provider_->CopyResource(copy_operation->src->id(),
497 copy_operation->dst->id(),
498 copy_operation->rect);
499 }
500 }
501
502 void OneCopyTileTaskWorkerPool::
503 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
504 bool wait_if_needed) {
505 lock_.AssertAcquired();
506
507 if (check_for_completed_copy_operations_pending_)
508 return;
509
510 base::TimeTicks now = base::TimeTicks::Now();
511
512 // Schedule a check for completed copy operations as soon as possible but
513 // don't allow two consecutive checks to be scheduled to run less than the
514 // tick rate apart.
515 base::TimeTicks next_check_for_completed_copy_operations_time =
516 std::max(last_check_for_completed_copy_operations_time_ +
517 base::TimeDelta::FromMilliseconds(
518 kCheckForCompletedCopyOperationsTickRateMs),
519 now);
520
521 task_runner_->PostDelayedTask(
522 FROM_HERE,
523 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
524 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
525 next_check_for_completed_copy_operations_time - now);
526
527 last_check_for_completed_copy_operations_time_ =
528 next_check_for_completed_copy_operations_time;
529 check_for_completed_copy_operations_pending_ = true;
530 }
531
532 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
533 bool wait_if_needed) {
534 TRACE_EVENT1("cc",
535 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
536 "wait_if_needed", wait_if_needed);
537
538 resource_pool_->CheckBusyResources(wait_if_needed);
539
540 {
541 base::AutoLock lock(lock_);
542
543 DCHECK(check_for_completed_copy_operations_pending_);
544 check_for_completed_copy_operations_pending_ = false;
545
546 // The number of busy resources in the pool reflects the number of issued
547 // copy operations that have not yet completed.
548 issued_copy_operation_count_ = resource_pool_->busy_resource_count();
549
550 // There may be work blocked on too many in-flight copy operations, so wake
551 // up a worker thread.
552 copy_operation_count_cv_.Signal();
553 }
554 }
555
556 scoped_refptr<base::trace_event::ConvertableToTraceFormat> 431 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
557 OneCopyTileTaskWorkerPool::StateAsValue() const { 432 OneCopyTileTaskWorkerPool::StateAsValue() const {
558 scoped_refptr<base::trace_event::TracedValue> state = 433 scoped_refptr<base::trace_event::TracedValue> state =
559 new base::trace_event::TracedValue(); 434 new base::trace_event::TracedValue();
560 435
561 state->BeginArray("tasks_pending"); 436 state->BeginArray("tasks_pending");
562 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) 437 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
563 state->AppendBoolean(tasks_pending_[task_set]); 438 state->AppendBoolean(tasks_pending_[task_set]);
564 state->EndArray(); 439 state->EndArray();
565 state->BeginDictionary("staging_state"); 440 state->BeginDictionary("staging_state");
(...skipping 15 matching lines...)
581 "pending_copy_count", 456 "pending_copy_count",
582 static_cast<int>(resource_pool_->total_resource_count() - 457 static_cast<int>(resource_pool_->total_resource_count() -
583 resource_pool_->acquired_resource_count())); 458 resource_pool_->acquired_resource_count()));
584 staging_state->SetInteger( 459 staging_state->SetInteger(
585 "bytes_pending_copy", 460 "bytes_pending_copy",
586 static_cast<int>(resource_pool_->total_memory_usage_bytes() - 461 static_cast<int>(resource_pool_->total_memory_usage_bytes() -
587 resource_pool_->acquired_memory_usage_bytes())); 462 resource_pool_->acquired_memory_usage_bytes()));
588 } 463 }
589 464
590 } // namespace cc 465 } // namespace cc