Chromium Code Reviews

Side by Side Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1282183002: Revert of Re-land: cc: Use worker context for one-copy tile initialization. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 4 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" 5 #include "cc/raster/one_copy_tile_task_worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 9
10 #include "base/strings/stringprintf.h" 10 #include "base/strings/stringprintf.h"
11 #include "base/thread_task_runner_handle.h"
12 #include "base/trace_event/memory_dump_manager.h"
13 #include "base/trace_event/trace_event.h" 11 #include "base/trace_event/trace_event.h"
14 #include "base/trace_event/trace_event_argument.h" 12 #include "base/trace_event/trace_event_argument.h"
15 #include "cc/base/math_util.h" 13 #include "cc/base/math_util.h"
16 #include "cc/debug/traced_value.h" 14 #include "cc/debug/traced_value.h"
17 #include "cc/raster/raster_buffer.h" 15 #include "cc/raster/raster_buffer.h"
18 #include "cc/resources/platform_color.h" 16 #include "cc/resources/platform_color.h"
19 #include "cc/resources/resource_format.h" 17 #include "cc/resources/resource_pool.h"
20 #include "cc/resources/resource_util.h"
21 #include "cc/resources/scoped_resource.h" 18 #include "cc/resources/scoped_resource.h"
22 #include "gpu/GLES2/gl2extchromium.h"
23 #include "gpu/command_buffer/client/gles2_interface.h" 19 #include "gpu/command_buffer/client/gles2_interface.h"
24 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" 20 #include "ui/gfx/gpu_memory_buffer.h"
25 21
26 namespace cc { 22 namespace cc {
27 namespace { 23 namespace {
28 24
29 class RasterBufferImpl : public RasterBuffer { 25 class RasterBufferImpl : public RasterBuffer {
30 public: 26 public:
31 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, 27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
32 ResourceProvider* resource_provider, 28 ResourceProvider* resource_provider,
29 ResourcePool* resource_pool,
33 ResourceFormat resource_format, 30 ResourceFormat resource_format,
34 const Resource* resource, 31 const Resource* output_resource,
35 uint64_t previous_content_id) 32 uint64_t previous_content_id)
36 : worker_pool_(worker_pool), 33 : worker_pool_(worker_pool),
37 resource_(resource), 34 resource_provider_(resource_provider),
38 lock_(resource_provider, resource->id()), 35 resource_pool_(resource_pool),
39 previous_content_id_(previous_content_id) {} 36 output_resource_(output_resource),
37 raster_content_id_(0),
38 sequence_(0) {
39 if (worker_pool->have_persistent_gpu_memory_buffers() &&
40 previous_content_id) {
41 raster_resource_ =
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id);
43 }
44 if (raster_resource_) {
45 raster_content_id_ = previous_content_id;
46 DCHECK_EQ(resource_format, raster_resource_->format());
47 DCHECK_EQ(output_resource->size().ToString(),
48 raster_resource_->size().ToString());
49 } else {
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
51 resource_format);
52 }
40 53
41 ~RasterBufferImpl() override {} 54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
55 resource_provider_, raster_resource_->id()));
56 }
57
58 ~RasterBufferImpl() override {
59 // Release write lock in case a copy was never scheduled.
60 lock_.reset();
61
62 // Make sure any scheduled copy operations are issued before we release the
63 // raster resource.
64 if (sequence_)
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
66
67 // Return resources to pool so they can be used by another RasterBuffer
68 // instance.
69 resource_pool_->ReleaseResource(raster_resource_.Pass(),
70 raster_content_id_);
71 }
42 72
43 // Overridden from RasterBuffer: 73 // Overridden from RasterBuffer:
44 void Playback(const RasterSource* raster_source, 74 void Playback(const RasterSource* raster_source,
45 const gfx::Rect& raster_full_rect, 75 const gfx::Rect& raster_full_rect,
46 const gfx::Rect& raster_dirty_rect, 76 const gfx::Rect& raster_dirty_rect,
47 uint64_t new_content_id, 77 uint64_t new_content_id,
48 float scale, 78 float scale,
49 bool include_images) override { 79 bool include_images) override {
50 worker_pool_->PlaybackAndCopyOnWorkerThread( 80 // If there's a raster_content_id_, we are reusing a resource with that
51 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, 81 // content id.
52 scale, include_images, previous_content_id_, new_content_id); 82 bool reusing_raster_resource = raster_content_id_ != 0;
83 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
84 reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
85 output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
86 scale, include_images);
87 // Store the content id of the resource to return to the pool.
88 raster_content_id_ = new_content_id;
53 } 89 }
54 90
55 private: 91 private:
56 OneCopyTileTaskWorkerPool* worker_pool_; 92 OneCopyTileTaskWorkerPool* worker_pool_;
57 const Resource* resource_; 93 ResourceProvider* resource_provider_;
58 ResourceProvider::ScopedWriteLockGL lock_; 94 ResourcePool* resource_pool_;
59 uint64_t previous_content_id_; 95 const Resource* output_resource_;
96 uint64_t raster_content_id_;
97 scoped_ptr<ScopedResource> raster_resource_;
98 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
99 CopySequenceNumber sequence_;
60 100
61 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); 101 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
62 }; 102 };
63 103
64 // Delay between checking for query result to be available. 104 // Number of in-flight copy operations to allow.
65 const int kCheckForQueryResultAvailableTickRateMs = 1; 105 const int kMaxCopyOperations = 32;
66 106
67 // Number of attempts to allow before we perform a check that will wait for 107 // Delay between checking for copy operations to complete.
68 // query to complete. 108 const int kCheckForCompletedCopyOperationsTickRateMs = 1;
69 const int kMaxCheckForQueryResultAvailableAttempts = 256; 109
110 // Number of failed attempts to allow before we perform a check that will
111 // wait for copy operations to complete if needed.
112 const int kFailedAttemptsBeforeWaitIfNeeded = 256;
70 113
71 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good 114 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
72 // default batch size for copy operations. 115 // default batch size for copy operations.
73 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; 116 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
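For reference, the 4 MiB figure follows from the assumed tile geometry: a 512x512 tile at 4 bytes per pixel (RGBA_8888) is 1 MiB, so four such tiles are 4 MiB. A minimal compile-time check of that arithmetic (illustrative only, not part of this patch):

// Illustrative sanity check: four 512x512 RGBA tiles occupy 4 MiB.
constexpr int kTileDimension = 512;   // assumed tile size
constexpr int kBytesPerPixel = 4;     // assumed RGBA_8888
static_assert(4 * kTileDimension * kTileDimension * kBytesPerPixel ==
                  1024 * 1024 * 4,
              "four 512x512 RGBA tiles == 4 MiB");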
74 117
75 // Delay before a staging buffer might be released. 118 } // namespace
76 const int kStagingBufferExpirationDelayMs = 1000;
77 119
78 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { 120 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
79 unsigned complete = 1; 121 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
80 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); 122 const Resource* src,
81 return !!complete; 123 const Resource* dst,
124 const gfx::Rect& rect)
125 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
82 } 126 }
83 127
84 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { 128 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
85 TRACE_EVENT0("cc", "WaitForQueryResult");
86
87 int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
88 while (attempts_left--) {
89 if (CheckForQueryResult(gl, query_id))
90 break;
91
92 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
93 kCheckForQueryResultAvailableTickRateMs));
94 }
95
96 unsigned result = 0;
97 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
98 }
99
100 } // namespace
101
102 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
103 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}
104
105 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
106 DCHECK_EQ(texture_id, 0u);
107 DCHECK_EQ(image_id, 0u);
108 DCHECK_EQ(query_id, 0u);
109 }
110
111 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
112 gpu::gles2::GLES2Interface* gl) {
113 if (query_id) {
114 gl->DeleteQueriesEXT(1, &query_id);
115 query_id = 0;
116 }
117 if (image_id) {
118 gl->DestroyImageCHROMIUM(image_id);
119 image_id = 0;
120 }
121 if (texture_id) {
122 gl->DeleteTextures(1, &texture_id);
123 texture_id = 0;
124 }
125 }
126
127 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
128 base::trace_event::ProcessMemoryDump* pmd,
129 ResourceFormat format,
130 bool in_free_list) const {
131 if (!gpu_memory_buffer)
132 return;
133
134 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
135 std::string buffer_dump_name =
136 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id);
137 base::trace_event::MemoryAllocatorDump* buffer_dump =
138 pmd->CreateAllocatorDump(buffer_dump_name);
139
140 uint64_t buffer_size_in_bytes =
141 ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
142 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
143 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
144 buffer_size_in_bytes);
145 buffer_dump->AddScalar("free_size",
146 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
147 in_free_list ? buffer_size_in_bytes : 0);
148
149 // Emit an ownership edge towards a global allocator dump node.
150 const uint64 tracing_process_id =
151 base::trace_event::MemoryDumpManager::GetInstance()
152 ->GetTracingProcessId();
153 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
154 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
155 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
156
157 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
158 // the tracing UI will attribute the effective size of the buffer to the child.
159 const int kImportance = 2;
160 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
161 } 129 }
162 130
163 // static 131 // static
164 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( 132 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
165 base::SequencedTaskRunner* task_runner, 133 base::SequencedTaskRunner* task_runner,
166 TaskGraphRunner* task_graph_runner, 134 TaskGraphRunner* task_graph_runner,
167 ContextProvider* context_provider, 135 ContextProvider* context_provider,
168 ResourceProvider* resource_provider, 136 ResourceProvider* resource_provider,
137 ResourcePool* resource_pool,
169 int max_copy_texture_chromium_size, 138 int max_copy_texture_chromium_size,
170 bool use_persistent_gpu_memory_buffers, 139 bool have_persistent_gpu_memory_buffers) {
171 int max_staging_buffers) {
172 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( 140 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
173 task_runner, task_graph_runner, resource_provider, 141 task_runner, task_graph_runner, context_provider, resource_provider,
174 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, 142 resource_pool, max_copy_texture_chromium_size,
175 max_staging_buffers)); 143 have_persistent_gpu_memory_buffers));
176 } 144 }
177 145
178 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( 146 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
179 base::SequencedTaskRunner* task_runner, 147 base::SequencedTaskRunner* task_runner,
180 TaskGraphRunner* task_graph_runner, 148 TaskGraphRunner* task_graph_runner,
149 ContextProvider* context_provider,
181 ResourceProvider* resource_provider, 150 ResourceProvider* resource_provider,
151 ResourcePool* resource_pool,
182 int max_copy_texture_chromium_size, 152 int max_copy_texture_chromium_size,
183 bool use_persistent_gpu_memory_buffers, 153 bool have_persistent_gpu_memory_buffers)
184 int max_staging_buffers)
185 : task_runner_(task_runner), 154 : task_runner_(task_runner),
186 task_graph_runner_(task_graph_runner), 155 task_graph_runner_(task_graph_runner),
187 namespace_token_(task_graph_runner->GetNamespaceToken()), 156 namespace_token_(task_graph_runner->GetNamespaceToken()),
157 context_provider_(context_provider),
188 resource_provider_(resource_provider), 158 resource_provider_(resource_provider),
159 resource_pool_(resource_pool),
189 max_bytes_per_copy_operation_( 160 max_bytes_per_copy_operation_(
190 max_copy_texture_chromium_size 161 max_copy_texture_chromium_size
191 ? std::min(kMaxBytesPerCopyOperation, 162 ? std::min(kMaxBytesPerCopyOperation,
192 max_copy_texture_chromium_size) 163 max_copy_texture_chromium_size)
193 : kMaxBytesPerCopyOperation), 164 : kMaxBytesPerCopyOperation),
194 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), 165 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
166 last_issued_copy_operation_(0),
167 last_flushed_copy_operation_(0),
168 lock_(),
169 copy_operation_count_cv_(&lock_),
195 bytes_scheduled_since_last_flush_(0), 170 bytes_scheduled_since_last_flush_(0),
196 max_staging_buffers_(max_staging_buffers), 171 issued_copy_operation_count_(0),
197 staging_buffer_expiration_delay_( 172 next_copy_operation_sequence_(1),
198 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), 173 check_for_completed_copy_operations_pending_(false),
199 reduce_memory_usage_pending_(false), 174 shutdown_(false),
200 weak_ptr_factory_(this), 175 weak_ptr_factory_(this),
201 task_set_finished_weak_ptr_factory_(this) { 176 task_set_finished_weak_ptr_factory_(this) {
202 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( 177 DCHECK(context_provider_);
203 this, base::ThreadTaskRunnerHandle::Get());
204 reduce_memory_usage_callback_ =
205 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
206 weak_ptr_factory_.GetWeakPtr());
207 } 178 }
208 179
209 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { 180 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
210 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( 181 DCHECK_EQ(pending_copy_operations_.size(), 0u);
211 this);
212 } 182 }
213 183
214 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { 184 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
215 return this; 185 return this;
216 } 186 }
217 187
218 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { 188 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
219 client_ = client; 189 client_ = client;
220 } 190 }
221 191
222 void OneCopyTileTaskWorkerPool::Shutdown() { 192 void OneCopyTileTaskWorkerPool::Shutdown() {
223 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); 193 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
224 194
195 {
196 base::AutoLock lock(lock_);
197
198 shutdown_ = true;
199 copy_operation_count_cv_.Signal();
200 }
201
225 TaskGraph empty; 202 TaskGraph empty;
226 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); 203 task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
227 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); 204 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
228
229 base::AutoLock lock(lock_);
230
231 if (buffers_.empty())
232 return;
233
234 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
235 } 205 }
236 206
237 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { 207 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
238 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); 208 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
239 209
210 #if DCHECK_IS_ON()
211 {
212 base::AutoLock lock(lock_);
213 DCHECK(!shutdown_);
214 }
215 #endif
216
240 if (tasks_pending_.none()) 217 if (tasks_pending_.none())
241 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); 218 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
242 219
243 // Mark all task sets as pending. 220 // Mark all task sets as pending.
244 tasks_pending_.set(); 221 tasks_pending_.set();
245 222
246 size_t priority = kTileTaskPriorityBase; 223 size_t priority = kTileTaskPriorityBase;
247 224
248 graph_.Reset(); 225 graph_.Reset();
249 226
250 // Cancel existing OnTaskSetFinished callbacks. 227 // Cancel existing OnTaskSetFinished callbacks.
251 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); 228 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
252 229
253 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; 230 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];
254 231
255 size_t task_count[kNumberOfTaskSets] = {0}; 232 size_t task_count[kNumberOfTaskSets] = {0};
256 233
257 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 234 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
258 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( 235 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
259 task_runner_.get(), 236 task_runner_.get(),
260 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, 237 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
261 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); 238 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
262 } 239 }
263 240
241 resource_pool_->CheckBusyResources(false);
242
264 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); 243 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
265 it != queue->items.end(); ++it) { 244 it != queue->items.end(); ++it) {
266 const TileTaskQueue::Item& item = *it; 245 const TileTaskQueue::Item& item = *it;
267 RasterTask* task = item.task; 246 RasterTask* task = item.task;
268 DCHECK(!task->HasCompleted()); 247 DCHECK(!task->HasCompleted());
269 248
270 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 249 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
271 if (!item.task_sets[task_set]) 250 if (!item.task_sets[task_set])
272 continue; 251 continue;
273 252
274 ++task_count[task_set]; 253 ++task_count[task_set];
275 254
276 graph_.edges.push_back( 255 graph_.edges.push_back(
277 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); 256 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
278 } 257 }
279 258
280 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); 259 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
281 } 260 }
282 261
283 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { 262 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
284 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), 263 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
285 kTaskSetFinishedTaskPriorityBase + task_set, 264 kTaskSetFinishedTaskPriorityBase + task_set,
286 task_count[task_set]); 265 task_count[task_set]);
287 } 266 }
288 267
289 ScheduleTasksOnOriginThread(this, &graph_); 268 ScheduleTasksOnOriginThread(this, &graph_);
290
291 // Barrier to sync any new resources to the worker context.
292 resource_provider_->output_surface()
293 ->context_provider()
294 ->ContextGL()
295 ->OrderingBarrierCHROMIUM();
296
297 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); 269 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
298 270
299 std::copy(new_task_set_finished_tasks, 271 std::copy(new_task_set_finished_tasks,
300 new_task_set_finished_tasks + kNumberOfTaskSets, 272 new_task_set_finished_tasks + kNumberOfTaskSets,
301 task_set_finished_tasks_); 273 task_set_finished_tasks_);
302 274
275 resource_pool_->ReduceResourceUsage();
276
303 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", 277 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
304 StateAsValue()); 278 StateAsValue());
305 } 279 }
306 280
307 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { 281 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
308 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); 282 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");
309 283
310 task_graph_runner_->CollectCompletedTasks(namespace_token_, 284 task_graph_runner_->CollectCompletedTasks(namespace_token_,
311 &completed_tasks_); 285 &completed_tasks_);
312 286
(...skipping 18 matching lines...)
331 return !PlatformColor::SameComponentOrder(GetResourceFormat()); 305 return !PlatformColor::SameComponentOrder(GetResourceFormat());
332 } 306 }
333 307
334 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( 308 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
335 const Resource* resource, 309 const Resource* resource,
336 uint64_t resource_content_id, 310 uint64_t resource_content_id,
337 uint64_t previous_content_id) { 311 uint64_t previous_content_id) {
338 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload 312 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
339 // the dirty rect. 313 // the dirty rect.
340 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); 314 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
341 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( 315 return make_scoped_ptr<RasterBuffer>(
342 this, resource_provider_, resource_provider_->best_texture_format(), 316 new RasterBufferImpl(this, resource_provider_, resource_pool_,
343 resource, previous_content_id)); 317 resource_provider_->best_texture_format(), resource,
318 previous_content_id));
344 } 319 }
345 320
346 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( 321 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
347 scoped_ptr<RasterBuffer> buffer) { 322 scoped_ptr<RasterBuffer> buffer) {
348 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. 323 // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
349 } 324 }
350 325
351 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( 326 CopySequenceNumber
352 const Resource* resource, 327 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
353 const ResourceProvider::ScopedWriteLockGL* resource_lock, 328 bool reusing_raster_resource,
329 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
330 raster_resource_write_lock,
331 const Resource* raster_resource,
332 const Resource* output_resource,
354 const RasterSource* raster_source, 333 const RasterSource* raster_source,
355 const gfx::Rect& raster_full_rect, 334 const gfx::Rect& raster_full_rect,
356 const gfx::Rect& raster_dirty_rect, 335 const gfx::Rect& raster_dirty_rect,
357 float scale, 336 float scale,
358 bool include_images, 337 bool include_images) {
359 uint64_t previous_content_id, 338 gfx::GpuMemoryBuffer* gpu_memory_buffer =
360 uint64_t new_content_id) { 339 raster_resource_write_lock->GetGpuMemoryBuffer();
340 if (gpu_memory_buffer) {
341 void* data = NULL;
342 bool rv = gpu_memory_buffer->Map(&data);
343 DCHECK(rv);
344 int stride;
345 gpu_memory_buffer->GetStride(&stride);
346 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
347 DCHECK_GE(stride, 0);
348
349 gfx::Rect playback_rect = raster_full_rect;
350 if (reusing_raster_resource) {
351 playback_rect.Intersect(raster_dirty_rect);
352 }
353 DCHECK(!playback_rect.IsEmpty())
354 << "Why are we rastering a tile that's not dirty?";
355 TileTaskWorkerPool::PlaybackToMemory(
356 data, raster_resource->format(), raster_resource->size(),
357 static_cast<size_t>(stride), raster_source, raster_full_rect,
358 playback_rect, scale, include_images);
359 gpu_memory_buffer->Unmap();
360 }
361
361 base::AutoLock lock(lock_); 362 base::AutoLock lock(lock_);
362 363
363 scoped_ptr<StagingBuffer> staging_buffer = 364 CopySequenceNumber sequence = 0;
364 AcquireStagingBuffer(resource, previous_content_id); 365 int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
365 DCHECK(staging_buffer); 366 raster_resource->size().width()) /
367 8;
368 int chunk_size_in_rows =
369 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
370 // Align chunk size to 4. Required to support compressed texture formats.
371 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
372 int y = 0;
373 int height = raster_resource->size().height();
374 while (y < height) {
375 int failed_attempts = 0;
376 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
377 kMaxCopyOperations) {
378 // Ignore limit when shutdown is set.
379 if (shutdown_)
380 break;
366 381
367 { 382 ++failed_attempts;
368 base::AutoUnlock unlock(lock_);
369 383
370 // Allocate GpuMemoryBuffer if necessary. 384 // Schedule a check that will also wait for operations to complete
371 if (!staging_buffer->gpu_memory_buffer) { 385 // after too many failed attempts.
372 staging_buffer->gpu_memory_buffer = 386 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
373 resource_provider_->gpu_memory_buffer_manager() 387
374 ->AllocateGpuMemoryBuffer( 388 // Schedule a check for completed copy operations if too many operations
375 staging_buffer->size, 389 // are currently in-flight.
376 BufferFormat(resource_provider_->best_texture_format()), 390 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
377 use_persistent_gpu_memory_buffers_ 391
378 ? gfx::BufferUsage::PERSISTENT_MAP 392 {
379 : gfx::BufferUsage::MAP); 393 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
394
395 // Wait for in-flight copy operations to drop below limit.
396 copy_operation_count_cv_.Wait();
397 }
380 } 398 }
381 399
382 gfx::Rect playback_rect = raster_full_rect; 400 // There may be more work available, so wake up another worker thread.
383 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { 401 copy_operation_count_cv_.Signal();
384 // Reduce playback rect to dirty region if the content id of the staging
385 // buffer matches the prevous content id.
386 if (previous_content_id == staging_buffer->content_id)
387 playback_rect.Intersect(raster_dirty_rect);
388 }
389 402
390 if (staging_buffer->gpu_memory_buffer) { 403 // Copy at most |chunk_size_in_rows|.
391 void* data = nullptr; 404 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
392 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); 405 DCHECK_GT(rows_to_copy, 0);
393 DCHECK(rv);
394 int stride;
395 staging_buffer->gpu_memory_buffer->GetStride(&stride);
396 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
397 DCHECK_GE(stride, 0);
398 406
399 DCHECK(!playback_rect.IsEmpty()) 407 // |raster_resource_write_lock| is passed to the first copy operation as it
400 << "Why are we rastering a tile that's not dirty?"; 408 // needs to be released before we can issue a copy.
401 TileTaskWorkerPool::PlaybackToMemory( 409 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
402 data, resource_provider_->best_texture_format(), staging_buffer->size, 410 raster_resource_write_lock.Pass(), raster_resource, output_resource,
403 static_cast<size_t>(stride), raster_source, raster_full_rect, 411 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
404 playback_rect, scale, include_images); 412 y += rows_to_copy;
405 staging_buffer->gpu_memory_buffer->Unmap(); 413
406 staging_buffer->content_id = new_content_id; 414 // Acquire a sequence number for this copy operation.
415 sequence = next_copy_operation_sequence_++;
416
417 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
418 // used for this copy operation.
419 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
420
421 // Post task that will advance last flushed copy operation to |sequence|
422 // when |bytes_scheduled_since_last_flush_| has reached
423 // |max_bytes_per_copy_operation_|.
424 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
425 task_runner_->PostTask(
426 FROM_HERE,
427 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
428 weak_ptr_factory_.GetWeakPtr(), sequence));
429 bytes_scheduled_since_last_flush_ = 0;
407 } 430 }
408 } 431 }
409 432
410 ContextProvider* context_provider = 433 return sequence;
411 resource_provider_->output_surface()->worker_context_provider();
412 DCHECK(context_provider);
413
414 {
415 ContextProvider::ScopedContextLock scoped_context(context_provider);
416
417 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
418 DCHECK(gl);
419
420 unsigned image_target = resource_provider_->GetImageTextureTarget(
421 resource_provider_->best_texture_format());
422
423 // Create and bind staging texture.
424 if (!staging_buffer->texture_id) {
425 gl->GenTextures(1, &staging_buffer->texture_id);
426 gl->BindTexture(image_target, staging_buffer->texture_id);
427 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
428 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
429 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
430 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
431 } else {
432 gl->BindTexture(image_target, staging_buffer->texture_id);
433 }
434
435 // Create and bind image.
436 if (!staging_buffer->image_id) {
437 if (staging_buffer->gpu_memory_buffer) {
438 staging_buffer->image_id = gl->CreateImageCHROMIUM(
439 staging_buffer->gpu_memory_buffer->AsClientBuffer(),
440 staging_buffer->size.width(), staging_buffer->size.height(),
441 GLInternalFormat(resource_provider_->best_texture_format()));
442 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
443 }
444 } else {
445 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
446 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
447 }
448
449 // Unbind staging texture.
450 gl->BindTexture(image_target, 0);
451
452 if (resource_provider_->use_sync_query()) {
453 if (!staging_buffer->query_id)
454 gl->GenQueriesEXT(1, &staging_buffer->query_id);
455
456 #if defined(OS_CHROMEOS)
457 // TODO(reveman): This avoids a performance problem on some ChromeOS
458 // devices. This needs to be removed to support native GpuMemoryBuffer
459 // implementations. crbug.com/436314
460 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
461 #else
462 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
463 staging_buffer->query_id);
464 #endif
465 }
466
467 int bytes_per_row =
468 (BitsPerPixel(resource_provider_->best_texture_format()) *
469 resource->size().width()) /
470 8;
471 int chunk_size_in_rows =
472 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
473 // Align chunk size to 4. Required to support compressed texture formats.
474 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
475 int y = 0;
476 int height = resource->size().height();
477 while (y < height) {
478 // Copy at most |chunk_size_in_rows|.
479 int rows_to_copy = std::min(chunk_size_in_rows, height - y);
480 DCHECK_GT(rows_to_copy, 0);
481
482 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
483 resource_lock->texture_id(), 0, y, 0, y,
484 resource->size().width(), rows_to_copy, false,
485 false, false);
486 y += rows_to_copy;
487
488 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
489 // used for this copy operation.
490 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
491
492 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
493 gl->ShallowFlushCHROMIUM();
494 bytes_scheduled_since_last_flush_ = 0;
495 }
496 }
497
498 if (resource_provider_->use_sync_query()) {
499 #if defined(OS_CHROMEOS)
500 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
501 #else
502 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
503 #endif
504 }
505
506 // Barrier to sync worker context output to cc context.
507 gl->OrderingBarrierCHROMIUM();
508 }
509
510 staging_buffer->last_usage = base::TimeTicks::Now();
511 busy_buffers_.push_back(staging_buffer.Pass());
512
513 ScheduleReduceMemoryUsage();
514 } 434 }
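Both versions of the playback path above split the copy into row chunks so that a single operation never exceeds |max_bytes_per_copy_operation_|, rounding the chunk height up to a multiple of 4 for compressed texture formats. A self-contained sketch of that chunking arithmetic, using a local round-up helper in place of MathUtil::UncheckedRoundUp (illustrative only; the names below are not from this patch):

#include <algorithm>

// Stand-in for MathUtil::UncheckedRoundUp.
int RoundUp(int value, int multiple) {
  return ((value + multiple - 1) / multiple) * multiple;
}

// Rows to copy per batch for a resource of the given width and pixel depth.
int ChunkSizeInRows(int bits_per_pixel, int width_px, int max_bytes_per_copy) {
  int bytes_per_row = (bits_per_pixel * width_px) / 8;
  int rows = std::max(1, max_bytes_per_copy / bytes_per_row);
  return RoundUp(rows, 4);  // align to 4 rows for compressed formats
}

// Example: RGBA_8888 (32 bpp), 512 px wide, 4 MiB batches
//   bytes_per_row = 2048, rows = 2048 (already a multiple of 4).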
515 435
516 bool OneCopyTileTaskWorkerPool::OnMemoryDump( 436 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
517 const base::trace_event::MemoryDumpArgs& args, 437 CopySequenceNumber sequence) {
518 base::trace_event::ProcessMemoryDump* pmd) { 438 if (last_issued_copy_operation_ >= sequence)
519 base::AutoLock lock(lock_); 439 return;
520 440
521 for (const auto& buffer : buffers_) { 441 IssueCopyOperations(sequence - last_issued_copy_operation_);
522 buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(), 442 last_issued_copy_operation_ = sequence;
523 std::find(free_buffers_.begin(), free_buffers_.end(),
524 buffer) != free_buffers_.end());
525 }
526
527 return true;
528 } 443 }
529 444
530 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> 445 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
531 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, 446 CopySequenceNumber sequence) {
532 uint64_t previous_content_id) { 447 if (last_flushed_copy_operation_ >= sequence)
533 lock_.AssertAcquired();
534
535 scoped_ptr<StagingBuffer> staging_buffer;
536
537 ContextProvider* context_provider =
538 resource_provider_->output_surface()->worker_context_provider();
539 DCHECK(context_provider);
540
541 ContextProvider::ScopedContextLock scoped_context(context_provider);
542
543 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
544 DCHECK(gl);
545
546 // Check if any busy buffers have become available.
547 if (resource_provider_->use_sync_query()) {
548 while (!busy_buffers_.empty()) {
549 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
550 break;
551
552 free_buffers_.push_back(busy_buffers_.take_front());
553 }
554 }
555
556 // Wait for number of non-free buffers to become less than the limit.
557 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
558 // Stop when there are no more busy buffers to wait for.
559 if (busy_buffers_.empty())
560 break;
561
562 if (resource_provider_->use_sync_query()) {
563 WaitForQueryResult(gl, busy_buffers_.front()->query_id);
564 free_buffers_.push_back(busy_buffers_.take_front());
565 } else {
566 // Fall-back to glFinish if CHROMIUM_sync_query is not available.
567 gl->Finish();
568 while (!busy_buffers_.empty())
569 free_buffers_.push_back(busy_buffers_.take_front());
570 }
571 }
572
573 // Find a staging buffer that allows us to perform partial raster when
574 // using persistent GpuMemoryBuffers.
575 if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
576 StagingBufferDeque::iterator it =
577 std::find_if(free_buffers_.begin(), free_buffers_.end(),
578 [previous_content_id](const StagingBuffer* buffer) {
579 return buffer->content_id == previous_content_id;
580 });
581 if (it != free_buffers_.end())
582 staging_buffer = free_buffers_.take(it);
583 }
584
585 // Find staging buffer of correct size.
586 if (!staging_buffer) {
587 StagingBufferDeque::iterator it =
588 std::find_if(free_buffers_.begin(), free_buffers_.end(),
589 [resource](const StagingBuffer* buffer) {
590 return buffer->size == resource->size();
591 });
592 if (it != free_buffers_.end())
593 staging_buffer = free_buffers_.take(it);
594 }
595
596 // Create new staging buffer if necessary.
597 if (!staging_buffer) {
598 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
599 buffers_.insert(staging_buffer.get());
600 }
601
602 // Release enough free buffers to stay within the limit.
603 while (buffers_.size() > max_staging_buffers_) {
604 if (free_buffers_.empty())
605 break;
606
607 free_buffers_.front()->DestroyGLResources(gl);
608 buffers_.erase(free_buffers_.front());
609 free_buffers_.take_front();
610 }
611
612 return staging_buffer.Pass();
613 }
614
615 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
616 lock_.AssertAcquired();
617
618 if (!free_buffers_.empty())
619 return free_buffers_.front()->last_usage;
620
621 if (!busy_buffers_.empty())
622 return busy_buffers_.front()->last_usage;
623
624 return base::TimeTicks();
625 }
626
627 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
628 lock_.AssertAcquired();
629
630 if (reduce_memory_usage_pending_)
631 return; 448 return;
632 449
633 reduce_memory_usage_pending_ = true; 450 AdvanceLastIssuedCopyTo(sequence);
634 451
635 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer 452 // Flush all issued copy operations.
636 // should be released. 453 context_provider_->ContextGL()->ShallowFlushCHROMIUM();
637 base::TimeTicks reduce_memory_usage_time = 454 last_flushed_copy_operation_ = last_issued_copy_operation_;
638 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
639 task_runner_->PostDelayedTask(
640 FROM_HERE, reduce_memory_usage_callback_,
641 reduce_memory_usage_time - base::TimeTicks::Now());
642 }
643
644 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
645 base::AutoLock lock(lock_);
646
647 reduce_memory_usage_pending_ = false;
648
649 if (free_buffers_.empty() && busy_buffers_.empty())
650 return;
651
652 base::TimeTicks current_time = base::TimeTicks::Now();
653 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
654
655 if (free_buffers_.empty() && busy_buffers_.empty())
656 return;
657
658 reduce_memory_usage_pending_ = true;
659
660 // Schedule another call to ReduceMemoryUsage at the time when the next
661 // buffer should be released.
662 base::TimeTicks reduce_memory_usage_time =
663 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
664 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
665 reduce_memory_usage_time - current_time);
666 }
667
668 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
669 base::TimeTicks time) {
670 lock_.AssertAcquired();
671
672 ContextProvider* context_provider =
673 resource_provider_->output_surface()->worker_context_provider();
674 DCHECK(context_provider);
675
676 {
677 ContextProvider::ScopedContextLock scoped_context(context_provider);
678
679 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
680 DCHECK(gl);
681
682 // Note: Front buffer is guaranteed to be LRU so we can stop releasing
683 // buffers as soon as we find a buffer that has been used since |time|.
684 while (!free_buffers_.empty()) {
685 if (free_buffers_.front()->last_usage > time)
686 return;
687
688 free_buffers_.front()->DestroyGLResources(gl);
689 buffers_.erase(free_buffers_.front());
690 free_buffers_.take_front();
691 }
692
693 while (!busy_buffers_.empty()) {
694 if (busy_buffers_.front()->last_usage > time)
695 return;
696
697 busy_buffers_.front()->DestroyGLResources(gl);
698 buffers_.erase(busy_buffers_.front());
699 busy_buffers_.take_front();
700 }
701 }
702 } 455 }
703 456
704 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { 457 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
705 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", 458 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
706 task_set); 459 task_set);
707 460
708 DCHECK(tasks_pending_[task_set]); 461 DCHECK(tasks_pending_[task_set]);
709 tasks_pending_[task_set] = false; 462 tasks_pending_[task_set] = false;
710 if (tasks_pending_.any()) { 463 if (tasks_pending_.any()) {
711 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", 464 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
712 "state", StateAsValue()); 465 "state", StateAsValue());
713 } else { 466 } else {
714 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); 467 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
715 } 468 }
716 client_->DidFinishRunningTileTasks(task_set); 469 client_->DidFinishRunningTileTasks(task_set);
717 } 470 }
718 471
472 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
473 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
474 count);
475
476 CopyOperation::Deque copy_operations;
477
478 {
479 base::AutoLock lock(lock_);
480
481 for (int64 i = 0; i < count; ++i) {
482 DCHECK(!pending_copy_operations_.empty());
483 copy_operations.push_back(pending_copy_operations_.take_front());
484 }
485
486 // Increment |issued_copy_operation_count_| to reflect the transition of
487 // copy operations from "pending" to "issued" state.
488 issued_copy_operation_count_ += copy_operations.size();
489 }
490
491 while (!copy_operations.empty()) {
492 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
493
494 // Remove the write lock.
495 copy_operation->src_write_lock.reset();
496
497 // Copy contents of source resource to destination resource.
498 resource_provider_->CopyResource(copy_operation->src->id(),
499 copy_operation->dst->id(),
500 copy_operation->rect);
501 }
502 }
503
504 void OneCopyTileTaskWorkerPool::
505 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
506 bool wait_if_needed) {
507 lock_.AssertAcquired();
508
509 if (check_for_completed_copy_operations_pending_)
510 return;
511
512 base::TimeTicks now = base::TimeTicks::Now();
513
514 // Schedule a check for completed copy operations as soon as possible but
515 // don't allow two consecutive checks to be scheduled to run less than the
516 // tick rate apart.
517 base::TimeTicks next_check_for_completed_copy_operations_time =
518 std::max(last_check_for_completed_copy_operations_time_ +
519 base::TimeDelta::FromMilliseconds(
520 kCheckForCompletedCopyOperationsTickRateMs),
521 now);
522
523 task_runner_->PostDelayedTask(
524 FROM_HERE,
525 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
526 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
527 next_check_for_completed_copy_operations_time - now);
528
529 last_check_for_completed_copy_operations_time_ =
530 next_check_for_completed_copy_operations_time;
531 check_for_completed_copy_operations_pending_ = true;
532 }
533
534 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
535 bool wait_if_needed) {
536 TRACE_EVENT1("cc",
537 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
538 "wait_if_needed", wait_if_needed);
539
540 resource_pool_->CheckBusyResources(wait_if_needed);
541
542 {
543 base::AutoLock lock(lock_);
544
545 DCHECK(check_for_completed_copy_operations_pending_);
546 check_for_completed_copy_operations_pending_ = false;
547
548 // The number of busy resources in the pool reflects the number of issued
549 // copy operations that have not yet completed.
550 issued_copy_operation_count_ = resource_pool_->busy_resource_count();
551
552 // There may be work blocked on too many in-flight copy operations, so wake
553 // up a worker thread.
554 copy_operation_count_cv_.Signal();
555 }
556 }
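The check above is what wakes worker threads blocked in PlaybackAndScheduleCopyOnWorkerThread waiting for the number of in-flight copy operations to drop below kMaxCopyOperations. The patch itself uses base::Lock and base::ConditionVariable; a minimal standard-library analogue of the same bounded in-flight pattern (illustration only, not the CL's code):

#include <condition_variable>
#include <mutex>

// Caps the number of operations in flight; producers block in Acquire()
// until a completed operation calls Release().
class InFlightLimiter {
 public:
  explicit InFlightLimiter(int limit) : limit_(limit) {}

  void Acquire() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return in_flight_ < limit_; });
    ++in_flight_;
  }

  void Release() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      --in_flight_;
    }
    cv_.notify_one();  // wake one blocked producer
  }

 private:
  const int limit_;
  int in_flight_ = 0;
  std::mutex mutex_;
  std::condition_variable cv_;
};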
557
719 scoped_refptr<base::trace_event::ConvertableToTraceFormat> 558 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
720 OneCopyTileTaskWorkerPool::StateAsValue() const { 559 OneCopyTileTaskWorkerPool::StateAsValue() const {
721 scoped_refptr<base::trace_event::TracedValue> state = 560 scoped_refptr<base::trace_event::TracedValue> state =
722 new base::trace_event::TracedValue(); 561 new base::trace_event::TracedValue();
723 562
724 state->BeginArray("tasks_pending"); 563 state->BeginArray("tasks_pending");
725 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) 564 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
726 state->AppendBoolean(tasks_pending_[task_set]); 565 state->AppendBoolean(tasks_pending_[task_set]);
727 state->EndArray(); 566 state->EndArray();
728 state->BeginDictionary("staging_state"); 567 state->BeginDictionary("staging_state");
729 StagingStateAsValueInto(state.get()); 568 StagingStateAsValueInto(state.get());
730 state->EndDictionary(); 569 state->EndDictionary();
731 570
732 return state; 571 return state;
733 } 572 }
734 573
735 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( 574 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
736 base::trace_event::TracedValue* staging_state) const { 575 base::trace_event::TracedValue* staging_state) const {
737 base::AutoLock lock(lock_); 576 staging_state->SetInteger(
738 577 "staging_resource_count",
739 staging_state->SetInteger("staging_buffer_count", 578 static_cast<int>(resource_pool_->total_resource_count()));
740 static_cast<int>(buffers_.size())); 579 staging_state->SetInteger(
741 staging_state->SetInteger("busy_count", 580 "bytes_used_for_staging_resources",
742 static_cast<int>(busy_buffers_.size())); 581 static_cast<int>(resource_pool_->total_memory_usage_bytes()));
743 staging_state->SetInteger("free_count", 582 staging_state->SetInteger(
744 static_cast<int>(free_buffers_.size())); 583 "pending_copy_count",
584 static_cast<int>(resource_pool_->total_resource_count() -
585 resource_pool_->acquired_resource_count()));
586 staging_state->SetInteger(
587 "bytes_pending_copy",
588 static_cast<int>(resource_pool_->total_memory_usage_bytes() -
589 resource_pool_->acquired_memory_usage_bytes()));
745 } 590 }
746 591
747 } // namespace cc 592 } // namespace cc