Chromium Code Reviews

Unified diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1273163004: Revert of cc: Use worker context for one-copy tile initialization. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 4 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "cc/raster/one_copy_tile_task_worker_pool.h"
 
 #include <algorithm>
 #include <limits>
 
 #include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "cc/base/math_util.h"
 #include "cc/debug/traced_value.h"
 #include "cc/raster/raster_buffer.h"
 #include "cc/resources/platform_color.h"
-#include "cc/resources/resource_format.h"
-#include "cc/resources/resource_util.h"
+#include "cc/resources/resource_pool.h"
 #include "cc/resources/scoped_resource.h"
-#include "gpu/GLES2/gl2extchromium.h"
 #include "gpu/command_buffer/client/gles2_interface.h"
-#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
+#include "ui/gfx/gpu_memory_buffer.h"
 
 namespace cc {
 namespace {
 
 class RasterBufferImpl : public RasterBuffer {
  public:
   RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                    ResourceProvider* resource_provider,
+                   ResourcePool* resource_pool,
                    ResourceFormat resource_format,
-                   const Resource* resource,
+                   const Resource* output_resource,
                    uint64_t previous_content_id)
       : worker_pool_(worker_pool),
-        resource_(resource),
-        lock_(resource_provider, resource->id()),
-        previous_content_id_(previous_content_id) {}
+        resource_provider_(resource_provider),
+        resource_pool_(resource_pool),
+        output_resource_(output_resource),
+        raster_content_id_(0),
+        sequence_(0) {
+    if (worker_pool->have_persistent_gpu_memory_buffers() &&
+        previous_content_id) {
+      raster_resource_ =
+          resource_pool->TryAcquireResourceWithContentId(previous_content_id);
+    }
+    if (raster_resource_) {
+      raster_content_id_ = previous_content_id;
+      DCHECK_EQ(resource_format, raster_resource_->format());
+      DCHECK_EQ(output_resource->size().ToString(),
+                raster_resource_->size().ToString());
+    } else {
+      raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
+                                                        resource_format);
+    }
 
-  ~RasterBufferImpl() override {}
+    lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
+        resource_provider_, raster_resource_->id()));
+  }
+
+  ~RasterBufferImpl() override {
+    // Release write lock in case a copy was never scheduled.
+    lock_.reset();
+
+    // Make sure any scheduled copy operations are issued before we release the
+    // raster resource.
+    if (sequence_)
+      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
+
+    // Return resources to pool so they can be used by another RasterBuffer
+    // instance.
+    resource_pool_->ReleaseResource(raster_resource_.Pass(),
+                                    raster_content_id_);
+  }
 
   // Overridden from RasterBuffer:
   void Playback(const RasterSource* raster_source,
                 const gfx::Rect& raster_full_rect,
                 const gfx::Rect& raster_dirty_rect,
                 uint64_t new_content_id,
                 float scale,
                 bool include_images) override {
-    worker_pool_->PlaybackAndCopyOnWorkerThread(
-        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
-        scale, include_images, previous_content_id_, new_content_id);
+    // If there's a raster_content_id_, we are reusing a resource with that
+    // content id.
+    bool reusing_raster_resource = raster_content_id_ != 0;
+    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+        reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
+        output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
+        scale, include_images);
+    // Store the content id of the resource to return to the pool.
+    raster_content_id_ = new_content_id;
   }
 
  private:
   OneCopyTileTaskWorkerPool* worker_pool_;
-  const Resource* resource_;
-  ResourceProvider::ScopedWriteLockGL lock_;
-  uint64_t previous_content_id_;
+  ResourceProvider* resource_provider_;
+  ResourcePool* resource_pool_;
+  const Resource* output_resource_;
+  uint64_t raster_content_id_;
+  scoped_ptr<ScopedResource> raster_resource_;
+  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
+  CopySequenceNumber sequence_;
 
   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
 };
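The reverted-to RasterBufferImpl above leans on a content-id keyed pool: if a resource still tagged with previous_content_id can be reacquired, only the dirty rect needs re-rastering. A minimal sketch of that reuse pattern, using std containers and invented names (PooledResource, ContentIdPool) rather than the real cc::ResourcePool API:

    #include <cstdint>
    #include <map>
    #include <memory>

    struct PooledResource {
      int width = 0;
      int height = 0;
    };

    // Illustrative pool: free resources are keyed by the content id they
    // hold, so a later raster buffer can pick up where one left off.
    class ContentIdPool {
     public:
      // Returns a resource that already contains |content_id|, or null.
      std::unique_ptr<PooledResource> TryAcquireWithContentId(
          uint64_t content_id) {
        auto it = free_by_content_id_.find(content_id);
        if (it == free_by_content_id_.end())
          return nullptr;
        std::unique_ptr<PooledResource> resource = std::move(it->second);
        free_by_content_id_.erase(it);
        return resource;
      }

      // Called from the buffer destructor: the resource goes back tagged
      // with the content it now holds, ready for partial raster next frame.
      void Release(std::unique_ptr<PooledResource> resource,
                   uint64_t content_id) {
        free_by_content_id_[content_id] = std::move(resource);
      }

     private:
      std::map<uint64_t, std::unique_ptr<PooledResource>> free_by_content_id_;
    };

TryAcquireResourceWithContentId in the real pool plays the role of TryAcquireWithContentId here; the fallback AcquireResource-by-size path corresponds to the else branch in the constructor above.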
 
-// Delay between checking for query result to be available.
-const int kCheckForQueryResultAvailableTickRateMs = 1;
+// Number of in-flight copy operations to allow.
+const int kMaxCopyOperations = 32;
 
-// Number of attempts to allow before we perform a check that will wait for
-// query to complete.
-const int kMaxCheckForQueryResultAvailableAttempts = 256;
+// Delay between checking for copy operations to complete.
+const int kCheckForCompletedCopyOperationsTickRateMs = 1;
+
+// Number of failed attempts to allow before we perform a check that will
+// wait for copy operations to complete if needed.
+const int kFailedAttemptsBeforeWaitIfNeeded = 256;
 
 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
 // default batch size for copy operations.
 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
 
-// Delay before a staging buffer might be released.
-const int kStagingBufferExpirationDelayMs = 1000;
+}  // namespace
 
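As a sanity check on the comment above, the 4 MiB figure follows directly from tile geometry, assuming the common 4-bytes-per-pixel RGBA format:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kBytesPerPixel = 4;  // assumes RGBA_8888
      const size_t kTileDim = 512;      // 512x512 tiles
      const size_t kTiles = 4;
      const size_t batch = kTiles * kTileDim * kTileDim * kBytesPerPixel;
      assert(batch == 1024 * 1024 * 4);  // matches kMaxBytesPerCopyOperation
      return 0;
    }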
-bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
-  unsigned complete = 1;
-  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
-  return !!complete;
-}
-
-void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
-  TRACE_EVENT0("cc", "WaitForQueryResult");
-
-  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
-  while (attempts_left--) {
-    if (CheckForQueryResult(gl, query_id))
-      break;
-
-    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
-        kCheckForQueryResultAvailableTickRateMs));
-  }
-
-  unsigned result = 0;
-  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
-}
-
-}  // namespace
-
-OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
-    : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}
-
-OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
-  DCHECK_EQ(texture_id, 0u);
-  DCHECK_EQ(image_id, 0u);
-  DCHECK_EQ(query_id, 0u);
-}
-
-void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
-    gpu::gles2::GLES2Interface* gl) {
-  if (query_id) {
-    gl->DeleteQueriesEXT(1, &query_id);
-    query_id = 0;
-  }
-  if (image_id) {
-    gl->DestroyImageCHROMIUM(image_id);
-    image_id = 0;
-  }
-  if (texture_id) {
-    gl->DeleteTextures(1, &texture_id);
-    texture_id = 0;
-  }
-}
-
-void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
-    base::trace_event::ProcessMemoryDump* pmd,
-    ResourceFormat format,
-    bool in_free_list) const {
-  if (!gpu_memory_buffer)
-    return;
-
-  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
-  std::string buffer_dump_name =
-      base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id);
-  base::trace_event::MemoryAllocatorDump* buffer_dump =
-      pmd->CreateAllocatorDump(buffer_dump_name);
-
-  uint64_t buffer_size_in_bytes =
-      ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
-  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
-                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                         buffer_size_in_bytes);
-  buffer_dump->AddScalar("free_size",
-                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                         in_free_list ? buffer_size_in_bytes : 0);
-
-  // Emit an ownership edge towards a global allocator dump node.
-  const uint64 tracing_process_id =
-      base::trace_event::MemoryDumpManager::GetInstance()
-          ->GetTracingProcessId();
-  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
-      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
-  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
-
-  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
-  // the tracing UI will account the effective size of the buffer to the child.
-  const int kImportance = 2;
-  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
-}
-
+OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
+    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
+    const Resource* src,
+    const Resource* dst,
+    const gfx::Rect& rect)
+    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
+}
+
+OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
+}
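The deleted WaitForQueryResult is a poll-then-block idiom: check a completion flag at a fixed tick for a bounded number of attempts, then let the caller fall back to a blocking read. A self-contained sketch of the same control flow, with a std::atomic standing in for the GL query (the GL calls above are the real API; this only mirrors the shape):

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Polls |done| once per millisecond, giving up after |max_attempts| so
    // the caller can fall through to a blocking wait.
    bool PollForCompletion(const std::atomic<bool>& done, int max_attempts) {
      using namespace std::chrono_literals;
      for (int attempts_left = max_attempts; attempts_left > 0;
           --attempts_left) {
        if (done.load(std::memory_order_acquire))
          return true;
        std::this_thread::sleep_for(1ms);  // the 1 ms tick rate above
      }
      return false;  // caller now does the blocking equivalent of glFinish().
    }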
 
 // static
 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
     ContextProvider* context_provider,
     ResourceProvider* resource_provider,
+    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool use_persistent_gpu_memory_buffers,
-    unsigned image_target,
-    int max_staging_buffers) {
+    bool have_persistent_gpu_memory_buffers) {
   return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
-      task_runner, task_graph_runner, resource_provider,
-      max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
-      image_target, max_staging_buffers));
+      task_runner, task_graph_runner, context_provider, resource_provider,
+      resource_pool, max_copy_texture_chromium_size,
+      have_persistent_gpu_memory_buffers));
 }
 
 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
+    ContextProvider* context_provider,
     ResourceProvider* resource_provider,
+    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool use_persistent_gpu_memory_buffers,
-    unsigned image_target,
-    int max_staging_buffers)
+    bool have_persistent_gpu_memory_buffers)
     : task_runner_(task_runner),
       task_graph_runner_(task_graph_runner),
       namespace_token_(task_graph_runner->GetNamespaceToken()),
+      context_provider_(context_provider),
       resource_provider_(resource_provider),
+      resource_pool_(resource_pool),
       max_bytes_per_copy_operation_(
           max_copy_texture_chromium_size
               ? std::min(kMaxBytesPerCopyOperation,
                          max_copy_texture_chromium_size)
               : kMaxBytesPerCopyOperation),
-      use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
-      image_target_(image_target),
+      have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
+      last_issued_copy_operation_(0),
+      last_flushed_copy_operation_(0),
+      lock_(),
+      copy_operation_count_cv_(&lock_),
       bytes_scheduled_since_last_flush_(0),
-      max_staging_buffers_(max_staging_buffers),
-      staging_buffer_expiration_delay_(
-          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
-      reduce_memory_usage_pending_(false),
+      issued_copy_operation_count_(0),
+      next_copy_operation_sequence_(1),
+      check_for_completed_copy_operations_pending_(false),
+      shutdown_(false),
       weak_ptr_factory_(this),
       task_set_finished_weak_ptr_factory_(this) {
-  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
-      this, base::ThreadTaskRunnerHandle::Get());
-  reduce_memory_usage_callback_ =
-      base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
-                 weak_ptr_factory_.GetWeakPtr());
+  DCHECK(context_provider_);
 }
 
 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
-  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
-      this);
+  DCHECK_EQ(pending_copy_operations_.size(), 0u);
 }
 
 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
   return this;
 }
 
 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
   client_ = client;
 }
 
 void OneCopyTileTaskWorkerPool::Shutdown() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
 
+  {
+    base::AutoLock lock(lock_);
+
+    shutdown_ = true;
+    copy_operation_count_cv_.Signal();
+  }
+
   TaskGraph empty;
   task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
   task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
-
-  base::AutoLock lock(lock_);
-
-  if (buffers_.empty())
-    return;
-
-  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
 }
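Because workers can now block in copy_operation_count_cv_.Wait(), Shutdown must set the flag and signal under the same lock, or a waiter could miss the wakeup. A minimal sketch of that throttle-plus-shutdown shape using std primitives (the code above uses base::Lock and base::ConditionVariable, which behave analogously; kLimit mirrors kMaxCopyOperations):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    class CopyThrottle {
     public:
      void Shutdown() {
        std::lock_guard<std::mutex> hold(mutex_);
        shutdown_ = true;
        cv_.notify_all();  // wake every worker blocked in WaitForRoom().
      }

      // Worker side: block until an in-flight slot frees up or shutdown.
      void WaitForRoom() {
        std::unique_lock<std::mutex> hold(mutex_);
        cv_.wait(hold, [this] { return shutdown_ || in_flight_ < kLimit; });
        if (!shutdown_)
          ++in_flight_;
      }

      void OperationCompleted() {
        std::lock_guard<std::mutex> hold(mutex_);
        --in_flight_;
        cv_.notify_one();
      }

     private:
      static constexpr std::size_t kLimit = 32;  // mirrors kMaxCopyOperations
      std::mutex mutex_;
      std::condition_variable cv_;
      std::size_t in_flight_ = 0;
      bool shutdown_ = false;
    };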
 
 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
 
+#if DCHECK_IS_ON()
+  {
+    base::AutoLock lock(lock_);
+    DCHECK(!shutdown_);
+  }
+#endif
+
   if (tasks_pending_.none())
     TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
 
   // Mark all task sets as pending.
   tasks_pending_.set();
 
   size_t priority = kTileTaskPriorityBase;
 
   graph_.Reset();
 
   // Cancel existing OnTaskSetFinished callbacks.
   task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
 
   scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];
 
   size_t task_count[kNumberOfTaskSets] = {0};
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
         task_runner_.get(),
         base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                    task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
   }
 
+  resource_pool_->CheckBusyResources(false);
+
   for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
        it != queue->items.end(); ++it) {
     const TileTaskQueue::Item& item = *it;
     RasterTask* task = item.task;
     DCHECK(!task->HasCompleted());
 
     for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
       if (!item.task_sets[task_set])
         continue;
 
       ++task_count[task_set];
 
       graph_.edges.push_back(
           TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
     }
 
     InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
   }
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                       kTaskSetFinishedTaskPriorityBase + task_set,
                       task_count[task_set]);
   }
 
   ScheduleTasksOnOriginThread(this, &graph_);
-
-  // Barrier to sync any new resources to the worker context.
-  resource_provider_->output_surface()
-      ->context_provider()
-      ->ContextGL()
-      ->OrderingBarrierCHROMIUM();
-
   task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
 
   std::copy(new_task_set_finished_tasks,
             new_task_set_finished_tasks + kNumberOfTaskSets,
             task_set_finished_tasks_);
 
+  resource_pool_->ReduceResourceUsage();
+
   TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                                StateAsValue());
 }
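ScheduleTasks gives every task set a sentinel "finished" task that depends on all tasks in the set, so the sentinel running means the whole set completed. The counting idea behind that, reduced to a toy (names invented; the real TaskGraph tracks this through edges and worker-thread scheduling):

    #include <cstddef>
    #include <functional>
    #include <memory>

    // Returns a callback to invoke once per finished task; after
    // |task_count| invocations it fires |on_set_finished| exactly once.
    std::function<void()> MakeSetFinishedNotifier(
        std::size_t task_count, std::function<void()> on_set_finished) {
      auto remaining = std::make_shared<std::size_t>(task_count);
      return [remaining, on_set_finished] {
        if (--*remaining == 0)
          on_set_finished();
      };
    }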
 
 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");
 
   task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                             &completed_tasks_);
 
 (...skipping 18 matching lines...)
 
   return !PlatformColor::SameComponentOrder(GetResourceFormat());
 }
 
 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
     const Resource* resource,
     uint64_t resource_content_id,
     uint64_t previous_content_id) {
   // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
   // the dirty rect.
   DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
-  return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
-      this, resource_provider_, resource_provider_->best_texture_format(),
-      resource, previous_content_id));
+  return make_scoped_ptr<RasterBuffer>(
+      new RasterBufferImpl(this, resource_provider_, resource_pool_,
+                           resource_provider_->best_texture_format(), resource,
+                           previous_content_id));
 }
 
 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
     scoped_ptr<RasterBuffer> buffer) {
   // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
 }
 
-void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
-    const Resource* resource,
-    const ResourceProvider::ScopedWriteLockGL* resource_lock,
+CopySequenceNumber
+OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
+    bool reusing_raster_resource,
+    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
+        raster_resource_write_lock,
+    const Resource* raster_resource,
+    const Resource* output_resource,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
     const gfx::Rect& raster_dirty_rect,
     float scale,
-    bool include_images,
-    uint64_t previous_content_id,
-    uint64_t new_content_id) {
+    bool include_images) {
+  gfx::GpuMemoryBuffer* gpu_memory_buffer =
+      raster_resource_write_lock->GetGpuMemoryBuffer();
+  if (gpu_memory_buffer) {
+    void* data = NULL;
+    bool rv = gpu_memory_buffer->Map(&data);
+    DCHECK(rv);
+    int stride;
+    gpu_memory_buffer->GetStride(&stride);
+    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
+    DCHECK_GE(stride, 0);
+
+    gfx::Rect playback_rect = raster_full_rect;
+    if (reusing_raster_resource) {
+      playback_rect.Intersect(raster_dirty_rect);
+    }
+    DCHECK(!playback_rect.IsEmpty())
+        << "Why are we rastering a tile that's not dirty?";
+    TileTaskWorkerPool::PlaybackToMemory(
+        data, raster_resource->format(), raster_resource->size(),
+        static_cast<size_t>(stride), raster_source, raster_full_rect,
+        playback_rect, scale, include_images);
+    gpu_memory_buffer->Unmap();
+  }
+
   base::AutoLock lock(lock_);
 
-  scoped_ptr<StagingBuffer> staging_buffer =
-      AcquireStagingBuffer(resource, previous_content_id);
-  DCHECK(staging_buffer);
-
-  {
-    base::AutoUnlock unlock(lock_);
-
-    // Allocate GpuMemoryBuffer if necessary.
-    if (!staging_buffer->gpu_memory_buffer) {
-      staging_buffer->gpu_memory_buffer =
-          resource_provider_->gpu_memory_buffer_manager()
-              ->AllocateGpuMemoryBuffer(
-                  staging_buffer->size,
-                  BufferFormat(resource_provider_->best_texture_format()),
-                  use_persistent_gpu_memory_buffers_
-                      ? gfx::BufferUsage::PERSISTENT_MAP
-                      : gfx::BufferUsage::MAP);
-    }
-
-    gfx::Rect playback_rect = raster_full_rect;
-    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
-      // Reduce playback rect to dirty region if the content id of the staging
-      // buffer matches the previous content id.
-      if (previous_content_id == staging_buffer->content_id)
-        playback_rect.Intersect(raster_dirty_rect);
-    }
-
-    if (staging_buffer->gpu_memory_buffer) {
-      void* data = nullptr;
-      bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
-      DCHECK(rv);
-      int stride;
-      staging_buffer->gpu_memory_buffer->GetStride(&stride);
-      // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
-      DCHECK_GE(stride, 0);
-
-      DCHECK(!playback_rect.IsEmpty())
-          << "Why are we rastering a tile that's not dirty?";
-      TileTaskWorkerPool::PlaybackToMemory(
-          data, resource_provider_->best_texture_format(),
-          staging_buffer->size, static_cast<size_t>(stride), raster_source,
-          raster_full_rect, playback_rect, scale, include_images);
-      staging_buffer->gpu_memory_buffer->Unmap();
-      staging_buffer->content_id = new_content_id;
-    }
-  }
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  {
-    ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-    DCHECK(gl);
-
-    if (!staging_buffer->texture_id) {
-      gl->GenTextures(1, &staging_buffer->texture_id);
-      gl->BindTexture(image_target_, staging_buffer->texture_id);
-      gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-      gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
-      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
-      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-      if (staging_buffer->gpu_memory_buffer) {
-        DCHECK(!staging_buffer->image_id);
-        staging_buffer->image_id = gl->CreateImageCHROMIUM(
-            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
-            staging_buffer->size.width(), staging_buffer->size.height(),
-            GLInternalFormat(resource_provider_->best_texture_format()));
-        gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
-      } else {
-        gl->BindTexture(image_target_, staging_buffer->texture_id);
-        gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
-        gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
-      }
-    }
-
-    if (resource_provider_->use_sync_query()) {
-      if (!staging_buffer->query_id)
-        gl->GenQueriesEXT(1, &staging_buffer->query_id);
-
-#if defined(OS_CHROMEOS)
-      // TODO(reveman): This avoids a performance problem on some ChromeOS
-      // devices. This needs to be removed to support native GpuMemoryBuffer
-      // implementations. crbug.com/436314
-      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
-#else
-      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
-                        staging_buffer->query_id);
-#endif
-    }
-
-    int bytes_per_row =
-        (BitsPerPixel(resource_provider_->best_texture_format()) *
-         resource->size().width()) /
-        8;
-    int chunk_size_in_rows =
-        std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
-    // Align chunk size to 4. Required to support compressed texture formats.
-    chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
-    int y = 0;
-    int height = resource->size().height();
-    while (y < height) {
-      // Copy at most |chunk_size_in_rows|.
-      int rows_to_copy = std::min(chunk_size_in_rows, height - y);
-      DCHECK_GT(rows_to_copy, 0);
-
-      gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
-                                 resource_lock->texture_id(), 0, y, 0, y,
-                                 resource->size().width(), rows_to_copy, false,
-                                 false, false);
-      y += rows_to_copy;
-
-      // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
-      // used for this copy operation.
-      bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
-      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
-        gl->ShallowFlushCHROMIUM();
-        bytes_scheduled_since_last_flush_ = 0;
-      }
-    }
-
-    if (resource_provider_->use_sync_query()) {
-#if defined(OS_CHROMEOS)
-      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
-#else
-      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
-#endif
-    }
-
-    // Barrier to sync worker context output to cc context.
-    gl->OrderingBarrierCHROMIUM();
-  }
-
-  staging_buffer->last_usage = base::TimeTicks::Now();
-  busy_buffers_.push_back(staging_buffer.Pass());
-
-  ScheduleReduceMemoryUsage();
+  CopySequenceNumber sequence = 0;
+  int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
+                       raster_resource->size().width()) /
+                      8;
+  int chunk_size_in_rows =
+      std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
+  // Align chunk size to 4. Required to support compressed texture formats.
+  chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
+  int y = 0;
+  int height = raster_resource->size().height();
+  while (y < height) {
+    int failed_attempts = 0;
+    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
+           kMaxCopyOperations) {
+      // Ignore limit when shutdown is set.
+      if (shutdown_)
+        break;
+
+      ++failed_attempts;
+
+      // Schedule a check that will also wait for operations to complete
+      // after too many failed attempts.
+      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
+
+      // Schedule a check for completed copy operations if too many operations
+      // are currently in-flight.
+      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
+
+      {
+        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
+
+        // Wait for in-flight copy operations to drop below limit.
+        copy_operation_count_cv_.Wait();
+      }
+    }
+
+    // There may be more work available, so wake up another worker thread.
+    copy_operation_count_cv_.Signal();
+
+    // Copy at most |chunk_size_in_rows|.
+    int rows_to_copy = std::min(chunk_size_in_rows, height - y);
+    DCHECK_GT(rows_to_copy, 0);
+
+    // |raster_resource_write_lock| is passed to the first copy operation as it
+    // needs to be released before we can issue a copy.
+    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
+        raster_resource_write_lock.Pass(), raster_resource, output_resource,
+        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
+    y += rows_to_copy;
+
+    // Acquire a sequence number for this copy operation.
+    sequence = next_copy_operation_sequence_++;
+
+    // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
+    // used for this copy operation.
+    bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
+
+    // Post task that will advance last flushed copy operation to |sequence|
+    // when |bytes_scheduled_since_last_flush_| has reached
+    // |max_bytes_per_copy_operation_|.
+    if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
+      task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
+                     weak_ptr_factory_.GetWeakPtr(), sequence));
+      bytes_scheduled_since_last_flush_ = 0;
+    }
+  }
+
+  return sequence;
 }
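Both versions share the chunking arithmetic: split a tile copy into row bands so no single copy call exceeds the flush budget, and round the band height up to a multiple of 4 so chunks stay valid for compressed formats whose blocks are 4 texels tall. The same math in isolation (assuming 32 bits per pixel; RoundUp stands in for MathUtil::UncheckedRoundUp):

    #include <algorithm>
    #include <cassert>

    // Rounds |value| up to the nearest multiple of |alignment|.
    int RoundUp(int value, int alignment) {
      return ((value + alignment - 1) / alignment) * alignment;
    }

    int ChunkSizeInRows(int bits_per_pixel, int width, int max_bytes_per_copy) {
      int bytes_per_row = (bits_per_pixel * width) / 8;
      int rows = std::max(1, max_bytes_per_copy / bytes_per_row);
      return RoundUp(rows, 4);  // 4-row alignment for compressed formats
    }

    int main() {
      // A 512-wide RGBA tile with the default 4MiB budget yields 2048 rows
      // per chunk, so a 512-tall tile is copied in a single chunk.
      assert(ChunkSizeInRows(32, 512, 1024 * 1024 * 4) == 2048);
      return 0;
    }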
 
-bool OneCopyTileTaskWorkerPool::OnMemoryDump(
-    const base::trace_event::MemoryDumpArgs& args,
-    base::trace_event::ProcessMemoryDump* pmd) {
-  base::AutoLock lock(lock_);
-
-  for (const auto& buffer : buffers_) {
-    buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(),
-                         std::find(free_buffers_.begin(), free_buffers_.end(),
-                                   buffer) != free_buffers_.end());
-  }
-
-  return true;
+void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
+    CopySequenceNumber sequence) {
+  if (last_issued_copy_operation_ >= sequence)
+    return;
+
+  IssueCopyOperations(sequence - last_issued_copy_operation_);
+  last_issued_copy_operation_ = sequence;
 }
 
-scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
-OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
-                                                uint64_t previous_content_id) {
-  lock_.AssertAcquired();
-
-  scoped_ptr<StagingBuffer> staging_buffer;
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-  gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-  DCHECK(gl);
-
-  // Check if any busy buffers have become available.
-  if (resource_provider_->use_sync_query()) {
-    while (!busy_buffers_.empty()) {
-      if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
-        break;
-
-      free_buffers_.push_back(busy_buffers_.take_front());
-    }
-  }
-
-  // Wait for number of non-free buffers to become less than the limit.
-  while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
-    // Stop when there are no more busy buffers to wait for.
-    if (busy_buffers_.empty())
-      break;
-
-    if (resource_provider_->use_sync_query()) {
-      WaitForQueryResult(gl, busy_buffers_.front()->query_id);
-      free_buffers_.push_back(busy_buffers_.take_front());
-    } else {
-      // Fall-back to glFinish if CHROMIUM_sync_query is not available.
-      gl->Finish();
-      while (!busy_buffers_.empty())
-        free_buffers_.push_back(busy_buffers_.take_front());
-    }
-  }
-
-  // Find a staging buffer that allows us to perform partial raster when
-  // using persistent GpuMemoryBuffers.
-  if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
-    StagingBufferDeque::iterator it =
-        std::find_if(free_buffers_.begin(), free_buffers_.end(),
-                     [previous_content_id](const StagingBuffer* buffer) {
-                       return buffer->content_id == previous_content_id;
-                     });
-    if (it != free_buffers_.end())
-      staging_buffer = free_buffers_.take(it);
-  }
-
-  // Find staging buffer of correct size.
-  if (!staging_buffer) {
-    StagingBufferDeque::iterator it =
-        std::find_if(free_buffers_.begin(), free_buffers_.end(),
-                     [resource](const StagingBuffer* buffer) {
-                       return buffer->size == resource->size();
-                     });
-    if (it != free_buffers_.end())
-      staging_buffer = free_buffers_.take(it);
-  }
-
-  // Create new staging buffer if necessary.
-  if (!staging_buffer) {
-    staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
-    buffers_.insert(staging_buffer.get());
-  }
-
-  // Release enough free buffers to stay within the limit.
-  while (buffers_.size() > max_staging_buffers_) {
-    if (free_buffers_.empty())
-      break;
-
-    free_buffers_.front()->DestroyGLResources(gl);
-    buffers_.erase(free_buffers_.front());
-    free_buffers_.take_front();
-  }
-
-  return staging_buffer.Pass();
-}
-
-base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
-  lock_.AssertAcquired();
-
-  if (!free_buffers_.empty())
-    return free_buffers_.front()->last_usage;
-
-  if (!busy_buffers_.empty())
-    return busy_buffers_.front()->last_usage;
-
-  return base::TimeTicks();
-}
-
-void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
-  lock_.AssertAcquired();
-
-  if (reduce_memory_usage_pending_)
-    return;
-
-  reduce_memory_usage_pending_ = true;
-
-  // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
-  // should be released.
-  base::TimeTicks reduce_memory_usage_time =
-      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
-  task_runner_->PostDelayedTask(
-      FROM_HERE, reduce_memory_usage_callback_,
-      reduce_memory_usage_time - base::TimeTicks::Now());
-}
-
-void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
-  base::AutoLock lock(lock_);
-
-  reduce_memory_usage_pending_ = false;
-
-  if (free_buffers_.empty() && busy_buffers_.empty())
-    return;
-
-  base::TimeTicks current_time = base::TimeTicks::Now();
-  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
-
-  if (free_buffers_.empty() && busy_buffers_.empty())
-    return;
-
-  reduce_memory_usage_pending_ = true;
-
-  // Schedule another call to ReduceMemoryUsage at the time when the next
-  // buffer should be released.
-  base::TimeTicks reduce_memory_usage_time =
-      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
-  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
-                                reduce_memory_usage_time - current_time);
-}
-
-void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
-    base::TimeTicks time) {
-  lock_.AssertAcquired();
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  {
-    ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-    DCHECK(gl);
-
-    // Note: Front buffer is guaranteed to be LRU so we can stop releasing
-    // buffers as soon as we find a buffer that has been used since |time|.
-    while (!free_buffers_.empty()) {
-      if (free_buffers_.front()->last_usage > time)
-        return;
-
-      free_buffers_.front()->DestroyGLResources(gl);
-      buffers_.erase(free_buffers_.front());
-      free_buffers_.take_front();
-    }
-
-    while (!busy_buffers_.empty()) {
-      if (busy_buffers_.front()->last_usage > time)
-        return;
-
-      busy_buffers_.front()->DestroyGLResources(gl);
-      buffers_.erase(busy_buffers_.front());
-      busy_buffers_.take_front();
-    }
-  }
-}
+void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
+    CopySequenceNumber sequence) {
+  if (last_flushed_copy_operation_ >= sequence)
+    return;
+
+  AdvanceLastIssuedCopyTo(sequence);
+
+  // Flush all issued copy operations.
+  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
+  last_flushed_copy_operation_ = last_issued_copy_operation_;
+}
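AdvanceLastIssuedCopyTo and AdvanceLastFlushedCopyTo implement a monotonic watermark over a queue of pending operations: each operation gets an increasing sequence number, and advancing the watermark issues everything up to that number exactly once, no matter how many callers race to advance it. A compact sketch of the pattern (std::deque and std::function standing in for CopyOperation::Deque; names invented):

    #include <cstdint>
    #include <deque>
    #include <functional>

    using SequenceNumber = uint64_t;

    class WatermarkIssuer {
     public:
      // Queue work; returns the sequence number identifying it.
      SequenceNumber Queue(std::function<void()> op) {
        pending_.push_back(std::move(op));
        return next_sequence_++;
      }

      // Issue every queued operation up to and including |sequence|.
      // A second call with the same number is a no-op.
      void AdvanceTo(SequenceNumber sequence) {
        if (last_issued_ >= sequence)
          return;
        for (SequenceNumber i = last_issued_; i < sequence; ++i) {
          pending_.front()();  // issue in FIFO order
          pending_.pop_front();
        }
        last_issued_ = sequence;
      }

     private:
      std::deque<std::function<void()>> pending_;
      SequenceNumber next_sequence_ = 1;
      SequenceNumber last_issued_ = 0;
    };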
 
 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
   TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
                task_set);
 
   DCHECK(tasks_pending_[task_set]);
   tasks_pending_[task_set] = false;
   if (tasks_pending_.any()) {
     TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                  "state", StateAsValue());
   } else {
     TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
   }
   client_->DidFinishRunningTileTasks(task_set);
 }
 
+void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
+  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
+               count);
+
+  CopyOperation::Deque copy_operations;
+
+  {
+    base::AutoLock lock(lock_);
+
+    for (int64 i = 0; i < count; ++i) {
+      DCHECK(!pending_copy_operations_.empty());
+      copy_operations.push_back(pending_copy_operations_.take_front());
+    }
+
+    // Increment |issued_copy_operation_count_| to reflect the transition of
+    // copy operations from "pending" to "issued" state.
+    issued_copy_operation_count_ += copy_operations.size();
+  }
+
+  while (!copy_operations.empty()) {
+    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
+
+    // Remove the write lock.
+    copy_operation->src_write_lock.reset();
+
+    // Copy contents of source resource to destination resource.
+    resource_provider_->CopyResource(copy_operation->src->id(),
+                                     copy_operation->dst->id(),
+                                     copy_operation->rect);
+  }
+}
+
+void OneCopyTileTaskWorkerPool::
+    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
+        bool wait_if_needed) {
+  lock_.AssertAcquired();
+
+  if (check_for_completed_copy_operations_pending_)
+    return;
+
+  base::TimeTicks now = base::TimeTicks::Now();
+
+  // Schedule a check for completed copy operations as soon as possible but
+  // don't allow two consecutive checks to be scheduled to run less than the
+  // tick rate apart.
+  base::TimeTicks next_check_for_completed_copy_operations_time =
+      std::max(last_check_for_completed_copy_operations_time_ +
+                   base::TimeDelta::FromMilliseconds(
+                       kCheckForCompletedCopyOperationsTickRateMs),
+               now);
+
+  task_runner_->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
+                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
+      next_check_for_completed_copy_operations_time - now);
+
+  last_check_for_completed_copy_operations_time_ =
+      next_check_for_completed_copy_operations_time;
+  check_for_completed_copy_operations_pending_ = true;
+}
+
+void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
+    bool wait_if_needed) {
+  TRACE_EVENT1("cc",
+               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
+               "wait_if_needed", wait_if_needed);
+
+  resource_pool_->CheckBusyResources(wait_if_needed);
+
+  {
+    base::AutoLock lock(lock_);
+
+    DCHECK(check_for_completed_copy_operations_pending_);
+    check_for_completed_copy_operations_pending_ = false;
+
+    // The number of busy resources in the pool reflects the number of issued
+    // copy operations that have not yet completed.
+    issued_copy_operation_count_ = resource_pool_->busy_resource_count();
+
+    // There may be work blocked on too many in-flight copy operations, so wake
+    // up a worker thread.
+    copy_operation_count_cv_.Signal();
+  }
+}
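ScheduleCheckForCompletedCopyOperationsWithLockAcquired rate-limits itself: a new check is never scheduled to run less than one tick after the previously scheduled one, so many blocked workers collapse into at most one pending check. The scheduling arithmetic on its own (std::chrono standing in for base::TimeTicks):

    #include <algorithm>
    #include <chrono>

    using Clock = std::chrono::steady_clock;

    // Returns when the next completion check should run: as soon as
    // possible, but never less than |tick| after the previously scheduled
    // check.
    Clock::time_point NextCheckTime(Clock::time_point last_scheduled,
                                    Clock::duration tick) {
      return std::max(last_scheduled + tick, Clock::now());
    }

Combined with the check_for_completed_copy_operations_pending_ flag above, this caps the check frequency at one per kCheckForCompletedCopyOperationsTickRateMs.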
 
 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
 OneCopyTileTaskWorkerPool::StateAsValue() const {
   scoped_refptr<base::trace_event::TracedValue> state =
       new base::trace_event::TracedValue();
 
   state->BeginArray("tasks_pending");
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
     state->AppendBoolean(tasks_pending_[task_set]);
   state->EndArray();
   state->BeginDictionary("staging_state");
   StagingStateAsValueInto(state.get());
   state->EndDictionary();
 
   return state;
 }
 
 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
     base::trace_event::TracedValue* staging_state) const {
-  base::AutoLock lock(lock_);
-
-  staging_state->SetInteger("staging_buffer_count",
-                            static_cast<int>(buffers_.size()));
-  staging_state->SetInteger("busy_count",
-                            static_cast<int>(busy_buffers_.size()));
-  staging_state->SetInteger("free_count",
-                            static_cast<int>(free_buffers_.size()));
+  staging_state->SetInteger(
+      "staging_resource_count",
+      static_cast<int>(resource_pool_->total_resource_count()));
+  staging_state->SetInteger(
+      "bytes_used_for_staging_resources",
+      static_cast<int>(resource_pool_->total_memory_usage_bytes()));
+  staging_state->SetInteger(
+      "pending_copy_count",
+      static_cast<int>(resource_pool_->total_resource_count() -
+                       resource_pool_->acquired_resource_count()));
+  staging_state->SetInteger(
+      "bytes_pending_copy",
+      static_cast<int>(resource_pool_->total_memory_usage_bytes() -
+                       resource_pool_->acquired_memory_usage_bytes()));
 }
 
 }  // namespace cc