Chromium Code Reviews

Unified Diff: cc/raster/one_copy_tile_task_worker_pool.cc

Issue 1298143003: Revert of Re-land: cc: Use worker context for one-copy tile initialization. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 4 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "cc/raster/one_copy_tile_task_worker_pool.h"
 
 #include <algorithm>
 #include <limits>
 
 #include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "cc/base/math_util.h"
 #include "cc/debug/traced_value.h"
 #include "cc/raster/raster_buffer.h"
 #include "cc/resources/platform_color.h"
-#include "cc/resources/resource_format.h"
-#include "cc/resources/resource_util.h"
+#include "cc/resources/resource_pool.h"
 #include "cc/resources/scoped_resource.h"
-#include "gpu/GLES2/gl2extchromium.h"
 #include "gpu/command_buffer/client/gles2_interface.h"
-#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
 #include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/gpu_memory_buffer.h"
 
 namespace cc {
 namespace {
 
 class RasterBufferImpl : public RasterBuffer {
  public:
   RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                    ResourceProvider* resource_provider,
+                   ResourcePool* resource_pool,
                    ResourceFormat resource_format,
-                   const Resource* resource,
+                   const Resource* output_resource,
                    uint64_t previous_content_id)
       : worker_pool_(worker_pool),
-        resource_(resource),
-        lock_(resource_provider, resource->id()),
-        previous_content_id_(previous_content_id) {}
+        resource_provider_(resource_provider),
+        resource_pool_(resource_pool),
+        output_resource_(output_resource),
+        raster_content_id_(0),
+        sequence_(0) {
+    if (worker_pool->have_persistent_gpu_memory_buffers() &&
+        previous_content_id) {
+      raster_resource_ =
+          resource_pool->TryAcquireResourceWithContentId(previous_content_id);
+    }
+    if (raster_resource_) {
+      raster_content_id_ = previous_content_id;
+      DCHECK_EQ(resource_format, raster_resource_->format());
+      DCHECK_EQ(output_resource->size().ToString(),
+                raster_resource_->size().ToString());
+    } else {
+      raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
+                                                        resource_format);
+    }
 
-  ~RasterBufferImpl() override {}
+    lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
+        resource_provider_, raster_resource_->id()));
+  }
+
+  ~RasterBufferImpl() override {
+    // Release write lock in case a copy was never scheduled.
+    lock_.reset();
+
+    // Make sure any scheduled copy operations are issued before we release the
+    // raster resource.
+    if (sequence_)
+      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);
+
+    // Return resources to pool so they can be used by another RasterBuffer
+    // instance.
+    resource_pool_->ReleaseResource(raster_resource_.Pass(),
+                                    raster_content_id_);
+  }
 
   // Overridden from RasterBuffer:
   void Playback(const RasterSource* raster_source,
                 const gfx::Rect& raster_full_rect,
                 const gfx::Rect& raster_dirty_rect,
                 uint64_t new_content_id,
                 float scale,
                 bool include_images) override {
-    worker_pool_->PlaybackAndCopyOnWorkerThread(
-        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
-        scale, include_images, previous_content_id_, new_content_id);
+    // If there's a raster_content_id_, we are reusing a resource with that
+    // content id.
+    bool reusing_raster_resource = raster_content_id_ != 0;
+    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
+        reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
+        output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
+        scale, include_images);
+    // Store the content id of the resource to return to the pool.
+    raster_content_id_ = new_content_id;
   }
 
  private:
   OneCopyTileTaskWorkerPool* worker_pool_;
-  const Resource* resource_;
-  ResourceProvider::ScopedWriteLockGL lock_;
-  uint64_t previous_content_id_;
+  ResourceProvider* resource_provider_;
+  ResourcePool* resource_pool_;
+  const Resource* output_resource_;
+  uint64_t raster_content_id_;
+  scoped_ptr<ScopedResource> raster_resource_;
+  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
+  CopySequenceNumber sequence_;
 
   DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
 };
 
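[Reviewer note] The new constructor is an acquire-or-allocate pattern: try to reuse the pooled resource tagged with previous_content_id (which is what later lets Playback shrink the playback rect to the dirty region), and fall back to a fresh allocation of the right size and format. A minimal standalone sketch of the pattern, using hypothetical Pool/Buffer types rather than the real cc::ResourcePool API:

    #include <cstdint>
    #include <map>
    #include <memory>

    struct Buffer {
      uint64_t content_id = 0;  // Tag recorded when the buffer is released.
    };

    // Hypothetical pool keyed by content id (stand-in for cc::ResourcePool).
    class Pool {
     public:
      std::unique_ptr<Buffer> TryAcquireWithContentId(uint64_t id) {
        auto it = cache_.find(id);
        if (it == cache_.end())
          return nullptr;  // Previous content no longer pooled.
        std::unique_ptr<Buffer> buffer = std::move(it->second);
        cache_.erase(it);
        return buffer;
      }
      std::unique_ptr<Buffer> Acquire() {
        return std::unique_ptr<Buffer>(new Buffer());
      }
      void Release(std::unique_ptr<Buffer> buffer, uint64_t content_id) {
        buffer->content_id = content_id;
        cache_[content_id] = std::move(buffer);
      }

     private:
      std::map<uint64_t, std::unique_ptr<Buffer>> cache_;
    };

    // Mirrors the constructor above: reuse enables partial raster, otherwise
    // allocate and expect a full raster.
    std::unique_ptr<Buffer> AcquireForRaster(Pool* pool,
                                             uint64_t previous_content_id,
                                             bool* reusing) {
      std::unique_ptr<Buffer> buffer;
      if (previous_content_id)
        buffer = pool->TryAcquireWithContentId(previous_content_id);
      *reusing = (buffer != nullptr);
      if (!buffer)
        buffer = pool->Acquire();
      return buffer;
    }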
-// Delay between checking for query result to be available.
-const int kCheckForQueryResultAvailableTickRateMs = 1;
+// Number of in-flight copy operations to allow.
+const int kMaxCopyOperations = 32;
 
-// Number of attempts to allow before we perform a check that will wait for
-// query to complete.
-const int kMaxCheckForQueryResultAvailableAttempts = 256;
+// Delay between checking for copy operations to complete.
+const int kCheckForCompletedCopyOperationsTickRateMs = 1;
+
+// Number of failed attempts to allow before we perform a check that will
+// wait for copy operations to complete if needed.
+const int kFailedAttemptsBeforeWaitIfNeeded = 256;
 
 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good
 // default batch size for copy operations.
 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
 
-// Delay before a staging buffer might be released.
-const int kStagingBufferExpirationDelayMs = 1000;
+}  // namespace
 
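[Reviewer note] On the 4MiB constant kept above: one 512x512 tile at 4 bytes per pixel (e.g. RGBA_8888) is exactly 1MiB, so the batch limit is four such tiles. A throwaway compile-time check of that arithmetic (hypothetical, not part of the patch):

    // 512 * 512 pixels * 4 bytes/pixel = 1 MiB per tile; 4 tiles = 4 MiB.
    static_assert(4 * (512 * 512 * 4) == 1024 * 1024 * 4,
                  "kMaxBytesPerCopyOperation is four 512x512 4-byte tiles");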
-bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
-  unsigned complete = 1;
-  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
-  return !!complete;
-}
-
-void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
-  TRACE_EVENT0("cc", "WaitForQueryResult");
-
-  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
-  while (attempts_left--) {
-    if (CheckForQueryResult(gl, query_id))
-      break;
-
-    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
-        kCheckForQueryResultAvailableTickRateMs));
-  }
-
-  unsigned result = 0;
-  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
-}
-
-}  // namespace
-
-OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
-    : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}
-
-OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
-  DCHECK_EQ(texture_id, 0u);
-  DCHECK_EQ(image_id, 0u);
-  DCHECK_EQ(query_id, 0u);
-}
-
-void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
-    gpu::gles2::GLES2Interface* gl) {
-  if (query_id) {
-    gl->DeleteQueriesEXT(1, &query_id);
-    query_id = 0;
-  }
-  if (image_id) {
-    gl->DestroyImageCHROMIUM(image_id);
-    image_id = 0;
-  }
-  if (texture_id) {
-    gl->DeleteTextures(1, &texture_id);
-    texture_id = 0;
-  }
-}
-
-void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
-    base::trace_event::ProcessMemoryDump* pmd,
-    ResourceFormat format,
-    bool in_free_list) const {
-  if (!gpu_memory_buffer)
-    return;
-
-  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
-  std::string buffer_dump_name =
-      base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id);
-  base::trace_event::MemoryAllocatorDump* buffer_dump =
-      pmd->CreateAllocatorDump(buffer_dump_name);
-
-  uint64_t buffer_size_in_bytes =
-      ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
-  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
-                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                         buffer_size_in_bytes);
-  buffer_dump->AddScalar("free_size",
-                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                         in_free_list ? buffer_size_in_bytes : 0);
-
-  // Emit an ownership edge towards a global allocator dump node.
-  const uint64 tracing_process_id =
-      base::trace_event::MemoryDumpManager::GetInstance()
-          ->GetTracingProcessId();
-  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
-      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
-  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
-
-  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
-  // the tracing UI will account the effective size of the buffer to the child.
-  const int kImportance = 2;
-  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
-}
+OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
+    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
+    const Resource* src,
+    const Resource* dst,
+    const gfx::Rect& rect)
+    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
+}
+
+OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
+}
 
 // static
 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
     ContextProvider* context_provider,
     ResourceProvider* resource_provider,
+    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool use_persistent_gpu_memory_buffers,
-    int max_staging_buffers) {
+    bool have_persistent_gpu_memory_buffers) {
   return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
-      task_runner, task_graph_runner, resource_provider,
-      max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
-      max_staging_buffers));
+      task_runner, task_graph_runner, context_provider, resource_provider,
+      resource_pool, max_copy_texture_chromium_size,
+      have_persistent_gpu_memory_buffers));
 }
 
 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
     base::SequencedTaskRunner* task_runner,
     TaskGraphRunner* task_graph_runner,
+    ContextProvider* context_provider,
     ResourceProvider* resource_provider,
+    ResourcePool* resource_pool,
     int max_copy_texture_chromium_size,
-    bool use_persistent_gpu_memory_buffers,
-    int max_staging_buffers)
+    bool have_persistent_gpu_memory_buffers)
     : task_runner_(task_runner),
       task_graph_runner_(task_graph_runner),
       namespace_token_(task_graph_runner->GetNamespaceToken()),
+      context_provider_(context_provider),
       resource_provider_(resource_provider),
+      resource_pool_(resource_pool),
       max_bytes_per_copy_operation_(
           max_copy_texture_chromium_size
               ? std::min(kMaxBytesPerCopyOperation,
                          max_copy_texture_chromium_size)
               : kMaxBytesPerCopyOperation),
-      use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
+      have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
+      last_issued_copy_operation_(0),
+      last_flushed_copy_operation_(0),
+      lock_(),
+      copy_operation_count_cv_(&lock_),
       bytes_scheduled_since_last_flush_(0),
-      max_staging_buffers_(max_staging_buffers),
-      staging_buffer_expiration_delay_(
-          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
-      reduce_memory_usage_pending_(false),
+      issued_copy_operation_count_(0),
+      next_copy_operation_sequence_(1),
+      check_for_completed_copy_operations_pending_(false),
+      shutdown_(false),
       weak_ptr_factory_(this),
       task_set_finished_weak_ptr_factory_(this) {
-  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
-      this, base::ThreadTaskRunnerHandle::Get());
-  reduce_memory_usage_callback_ =
-      base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
-                 weak_ptr_factory_.GetWeakPtr());
+  DCHECK(context_provider_);
 }
 
 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
-  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
-      this);
+  DCHECK_EQ(pending_copy_operations_.size(), 0u);
 }
 
 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
   return this;
 }
 
 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
   client_ = client;
 }
 
 void OneCopyTileTaskWorkerPool::Shutdown() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");
 
+  {
+    base::AutoLock lock(lock_);
+
+    shutdown_ = true;
+    copy_operation_count_cv_.Signal();
+  }
+
   TaskGraph empty;
   task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
   task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
-
-  base::AutoLock lock(lock_);
-
-  if (buffers_.empty())
-    return;
-
-  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
 }
 
 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");
 
+#if DCHECK_IS_ON()
+  {
+    base::AutoLock lock(lock_);
+    DCHECK(!shutdown_);
+  }
+#endif
+
   if (tasks_pending_.none())
     TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
 
   // Mark all task sets as pending.
   tasks_pending_.set();
 
   size_t priority = kTileTaskPriorityBase;
 
   graph_.Reset();
 
   // Cancel existing OnTaskSetFinished callbacks.
   task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();
 
   scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];
 
   size_t task_count[kNumberOfTaskSets] = {0};
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
         task_runner_.get(),
         base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                    task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
   }
 
+  resource_pool_->CheckBusyResources(false);
+
   for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
        it != queue->items.end(); ++it) {
     const TileTaskQueue::Item& item = *it;
     RasterTask* task = item.task;
     DCHECK(!task->HasCompleted());
 
     for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
       if (!item.task_sets[task_set])
         continue;
 
       ++task_count[task_set];
 
       graph_.edges.push_back(
           TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
     }
 
     InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
   }
 
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
     InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                       kTaskSetFinishedTaskPriorityBase + task_set,
                       task_count[task_set]);
   }
 
   ScheduleTasksOnOriginThread(this, &graph_);
-
-  // Barrier to sync any new resources to the worker context.
-  resource_provider_->output_surface()
-      ->context_provider()
-      ->ContextGL()
-      ->OrderingBarrierCHROMIUM();
-
   task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
 
   std::copy(new_task_set_finished_tasks,
             new_task_set_finished_tasks + kNumberOfTaskSets,
             task_set_finished_tasks_);
 
+  resource_pool_->ReduceResourceUsage();
+
   TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                                StateAsValue());
 }
 
 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
   TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");
 
   task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                             &completed_tasks_);
 
(...skipping 18 matching lines...)
   return !PlatformColor::SameComponentOrder(GetResourceFormat());
 }
 
 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
     const Resource* resource,
     uint64_t resource_content_id,
     uint64_t previous_content_id) {
   // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
   // the dirty rect.
   DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
-  return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
-      this, resource_provider_, resource_provider_->best_texture_format(),
-      resource, previous_content_id));
+  return make_scoped_ptr<RasterBuffer>(
+      new RasterBufferImpl(this, resource_provider_, resource_pool_,
+                           resource_provider_->best_texture_format(), resource,
+                           previous_content_id));
 }
 
 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
     scoped_ptr<RasterBuffer> buffer) {
   // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
 }
 
-void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
-    const Resource* resource,
-    const ResourceProvider::ScopedWriteLockGL* resource_lock,
+CopySequenceNumber
+OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
+    bool reusing_raster_resource,
+    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
+        raster_resource_write_lock,
+    const Resource* raster_resource,
+    const Resource* output_resource,
     const RasterSource* raster_source,
     const gfx::Rect& raster_full_rect,
     const gfx::Rect& raster_dirty_rect,
     float scale,
-    bool include_images,
-    uint64_t previous_content_id,
-    uint64_t new_content_id) {
+    bool include_images) {
+  gfx::GpuMemoryBuffer* gpu_memory_buffer =
+      raster_resource_write_lock->GetGpuMemoryBuffer();
+  if (gpu_memory_buffer) {
+    DCHECK_EQ(
+        1u, gfx::NumberOfPlanesForBufferFormat(gpu_memory_buffer->GetFormat()));
+    void* data = NULL;
+    bool rv = gpu_memory_buffer->Map(&data);
+    DCHECK(rv);
+    int stride;
+    gpu_memory_buffer->GetStride(&stride);
+    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
+    DCHECK_GE(stride, 0);
+
+    gfx::Rect playback_rect = raster_full_rect;
+    if (reusing_raster_resource) {
+      playback_rect.Intersect(raster_dirty_rect);
+    }
+    DCHECK(!playback_rect.IsEmpty())
+        << "Why are we rastering a tile that's not dirty?";
+    TileTaskWorkerPool::PlaybackToMemory(
+        data, raster_resource->format(), raster_resource->size(),
+        static_cast<size_t>(stride), raster_source, raster_full_rect,
+        playback_rect, scale, include_images);
+    gpu_memory_buffer->Unmap();
+  }
+
   base::AutoLock lock(lock_);
 
-  scoped_ptr<StagingBuffer> staging_buffer =
-      AcquireStagingBuffer(resource, previous_content_id);
-  DCHECK(staging_buffer);
-
-  {
-    base::AutoUnlock unlock(lock_);
-
-    // Allocate GpuMemoryBuffer if necessary.
-    if (!staging_buffer->gpu_memory_buffer) {
-      staging_buffer->gpu_memory_buffer =
-          resource_provider_->gpu_memory_buffer_manager()
-              ->AllocateGpuMemoryBuffer(
-                  staging_buffer->size,
-                  BufferFormat(resource_provider_->best_texture_format()),
-                  use_persistent_gpu_memory_buffers_
-                      ? gfx::BufferUsage::PERSISTENT_MAP
-                      : gfx::BufferUsage::MAP);
-      DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat(
-                    staging_buffer->gpu_memory_buffer->GetFormat()),
-                1u);
-    }
-
-    gfx::Rect playback_rect = raster_full_rect;
-    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
-      // Reduce playback rect to dirty region if the content id of the staging
-      // buffer matches the previous content id.
-      if (previous_content_id == staging_buffer->content_id)
-        playback_rect.Intersect(raster_dirty_rect);
-    }
-
-    if (staging_buffer->gpu_memory_buffer) {
-      void* data = nullptr;
-      bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
-      DCHECK(rv);
-      int stride;
-      staging_buffer->gpu_memory_buffer->GetStride(&stride);
-      // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
-      DCHECK_GE(stride, 0);
-
-      DCHECK(!playback_rect.IsEmpty())
-          << "Why are we rastering a tile that's not dirty?";
-      TileTaskWorkerPool::PlaybackToMemory(
-          data, resource_provider_->best_texture_format(), staging_buffer->size,
-          static_cast<size_t>(stride), raster_source, raster_full_rect,
-          playback_rect, scale, include_images);
-      staging_buffer->gpu_memory_buffer->Unmap();
-      staging_buffer->content_id = new_content_id;
-    }
-  }
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  {
-    ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-    DCHECK(gl);
-
-    unsigned image_target = resource_provider_->GetImageTextureTarget(
-        resource_provider_->best_texture_format());
-
-    // Create and bind staging texture.
-    if (!staging_buffer->texture_id) {
-      gl->GenTextures(1, &staging_buffer->texture_id);
-      gl->BindTexture(image_target, staging_buffer->texture_id);
-      gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-      gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
-      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
-      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-    } else {
-      gl->BindTexture(image_target, staging_buffer->texture_id);
-    }
-
-    // Create and bind image.
-    if (!staging_buffer->image_id) {
-      if (staging_buffer->gpu_memory_buffer) {
-        staging_buffer->image_id = gl->CreateImageCHROMIUM(
-            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
-            staging_buffer->size.width(), staging_buffer->size.height(),
-            GLInternalFormat(resource_provider_->best_texture_format()));
-        gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
-      }
-    } else {
-      gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
-      gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
-    }
-
-    // Unbind staging texture.
-    gl->BindTexture(image_target, 0);
-
-    if (resource_provider_->use_sync_query()) {
-      if (!staging_buffer->query_id)
-        gl->GenQueriesEXT(1, &staging_buffer->query_id);
-
-#if defined(OS_CHROMEOS)
-      // TODO(reveman): This avoids a performance problem on some ChromeOS
-      // devices. This needs to be removed to support native GpuMemoryBuffer
-      // implementations. crbug.com/436314
-      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
-#else
-      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
-                        staging_buffer->query_id);
-#endif
-    }
-
-    int bytes_per_row =
-        (BitsPerPixel(resource_provider_->best_texture_format()) *
-         resource->size().width()) /
-        8;
-    int chunk_size_in_rows =
-        std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
-    // Align chunk size to 4. Required to support compressed texture formats.
-    chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
-    int y = 0;
-    int height = resource->size().height();
-    while (y < height) {
-      // Copy at most |chunk_size_in_rows|.
-      int rows_to_copy = std::min(chunk_size_in_rows, height - y);
-      DCHECK_GT(rows_to_copy, 0);
-
-      gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
-                                 resource_lock->texture_id(), 0, y, 0, y,
-                                 resource->size().width(), rows_to_copy, false,
-                                 false, false);
-      y += rows_to_copy;
-
-      // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
-      // used for this copy operation.
-      bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
-
-      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
-        gl->ShallowFlushCHROMIUM();
-        bytes_scheduled_since_last_flush_ = 0;
-      }
-    }
-
-    if (resource_provider_->use_sync_query()) {
-#if defined(OS_CHROMEOS)
-      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
-#else
-      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
-#endif
-    }
-
-    // Barrier to sync worker context output to cc context.
-    gl->OrderingBarrierCHROMIUM();
-  }
-
-  staging_buffer->last_usage = base::TimeTicks::Now();
-  busy_buffers_.push_back(staging_buffer.Pass());
-
-  ScheduleReduceMemoryUsage();
+  CopySequenceNumber sequence = 0;
+  int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
+                       raster_resource->size().width()) /
+                      8;
+  int chunk_size_in_rows =
+      std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
+  // Align chunk size to 4. Required to support compressed texture formats.
+  chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
+  int y = 0;
+  int height = raster_resource->size().height();
+  while (y < height) {
+    int failed_attempts = 0;
+    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
+           kMaxCopyOperations) {
+      // Ignore limit when shutdown is set.
+      if (shutdown_)
+        break;
+
+      ++failed_attempts;
+
+      // Schedule a check that will also wait for operations to complete
+      // after too many failed attempts.
+      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;
+
+      // Schedule a check for completed copy operations if too many operations
+      // are currently in-flight.
+      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);
+
+      {
+        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");
+
+        // Wait for in-flight copy operations to drop below limit.
+        copy_operation_count_cv_.Wait();
+      }
+    }
+
+    // There may be more work available, so wake up another worker thread.
+    copy_operation_count_cv_.Signal();
+
+    // Copy at most |chunk_size_in_rows|.
+    int rows_to_copy = std::min(chunk_size_in_rows, height - y);
+    DCHECK_GT(rows_to_copy, 0);
+
+    // |raster_resource_write_lock| is passed to the first copy operation as it
+    // needs to be released before we can issue a copy.
+    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
+        raster_resource_write_lock.Pass(), raster_resource, output_resource,
+        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
+    y += rows_to_copy;
+
+    // Acquire a sequence number for this copy operation.
+    sequence = next_copy_operation_sequence_++;
+
+    // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
+    // used for this copy operation.
+    bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;
+
+    // Post task that will advance last flushed copy operation to |sequence|
+    // when |bytes_scheduled_since_last_flush_| has reached
+    // |max_bytes_per_copy_operation_|.
+    if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
+      task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
+                     weak_ptr_factory_.GetWeakPtr(), sequence));
+      bytes_scheduled_since_last_flush_ = 0;
+    }
+  }
+
+  return sequence;
 }
 
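[Reviewer note] The chunking arithmetic is carried over from the old GL path: copies are split into bands of at most max_bytes_per_copy_operation_ bytes, with the row count rounded up to a multiple of 4 for compressed-format compatibility. A standalone sketch of just that loop, using plain ints and a local RoundUp standing in for MathUtil::UncheckedRoundUp:

    #include <algorithm>
    #include <cstdio>

    // Round |value| up to a multiple of |mul| (assumes both are positive).
    int RoundUp(int value, int mul) {
      return ((value + mul - 1) / mul) * mul;
    }

    int main() {
      const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
      const int bits_per_pixel = 32;  // e.g. RGBA_8888
      const int width = 2048;
      const int height = 2048;

      int bytes_per_row = (bits_per_pixel * width) / 8;  // 8192 bytes
      int chunk_size_in_rows =
          std::max(1, kMaxBytesPerCopyOperation / bytes_per_row);  // 512 rows
      // Align chunk size to 4, as the patch does for compressed formats.
      chunk_size_in_rows = RoundUp(chunk_size_in_rows, 4);

      // Walk the resource in bands, like the while (y < height) loop above.
      for (int y = 0; y < height;) {
        int rows_to_copy = std::min(chunk_size_in_rows, height - y);
        std::printf("copy rows [%d, %d)\n", y, y + rows_to_copy);
        y += rows_to_copy;
      }
      return 0;
    }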
-bool OneCopyTileTaskWorkerPool::OnMemoryDump(
-    const base::trace_event::MemoryDumpArgs& args,
-    base::trace_event::ProcessMemoryDump* pmd) {
-  base::AutoLock lock(lock_);
-
-  for (const auto& buffer : buffers_) {
-    buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(),
-                         std::find(free_buffers_.begin(), free_buffers_.end(),
-                                   buffer) != free_buffers_.end());
-  }
-
-  return true;
-}
+void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
+    CopySequenceNumber sequence) {
+  if (last_issued_copy_operation_ >= sequence)
+    return;
+
+  IssueCopyOperations(sequence - last_issued_copy_operation_);
+  last_issued_copy_operation_ = sequence;
+}
 
-scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
-OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
-                                                uint64_t previous_content_id) {
-  lock_.AssertAcquired();
-
-  scoped_ptr<StagingBuffer> staging_buffer;
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-  gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-  DCHECK(gl);
-
-  // Check if any busy buffers have become available.
-  if (resource_provider_->use_sync_query()) {
-    while (!busy_buffers_.empty()) {
-      if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
-        break;
-
-      free_buffers_.push_back(busy_buffers_.take_front());
-    }
-  }
-
-  // Wait for number of non-free buffers to become less than the limit.
-  while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
-    // Stop when there are no more busy buffers to wait for.
-    if (busy_buffers_.empty())
-      break;
-
-    if (resource_provider_->use_sync_query()) {
-      WaitForQueryResult(gl, busy_buffers_.front()->query_id);
-      free_buffers_.push_back(busy_buffers_.take_front());
-    } else {
-      // Fall-back to glFinish if CHROMIUM_sync_query is not available.
-      gl->Finish();
-      while (!busy_buffers_.empty())
-        free_buffers_.push_back(busy_buffers_.take_front());
-    }
-  }
-
-  // Find a staging buffer that allows us to perform partial raster when
-  // using persistent GpuMemoryBuffers.
-  if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
-    StagingBufferDeque::iterator it =
-        std::find_if(free_buffers_.begin(), free_buffers_.end(),
-                     [previous_content_id](const StagingBuffer* buffer) {
-                       return buffer->content_id == previous_content_id;
-                     });
-    if (it != free_buffers_.end())
-      staging_buffer = free_buffers_.take(it);
-  }
-
-  // Find staging buffer of correct size.
-  if (!staging_buffer) {
-    StagingBufferDeque::iterator it =
-        std::find_if(free_buffers_.begin(), free_buffers_.end(),
-                     [resource](const StagingBuffer* buffer) {
-                       return buffer->size == resource->size();
-                     });
-    if (it != free_buffers_.end())
-      staging_buffer = free_buffers_.take(it);
-  }
-
-  // Create new staging buffer if necessary.
-  if (!staging_buffer) {
-    staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
-    buffers_.insert(staging_buffer.get());
-  }
-
-  // Release enough free buffers to stay within the limit.
-  while (buffers_.size() > max_staging_buffers_) {
-    if (free_buffers_.empty())
-      break;
-
-    free_buffers_.front()->DestroyGLResources(gl);
-    buffers_.erase(free_buffers_.front());
-    free_buffers_.take_front();
-  }
-
-  return staging_buffer.Pass();
-}
-
-base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
-  lock_.AssertAcquired();
-
-  if (!free_buffers_.empty())
-    return free_buffers_.front()->last_usage;
-
-  if (!busy_buffers_.empty())
-    return busy_buffers_.front()->last_usage;
-
-  return base::TimeTicks();
-}
-
-void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
-  lock_.AssertAcquired();
-
-  if (reduce_memory_usage_pending_)
-    return;
-
-  reduce_memory_usage_pending_ = true;
-
-  // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
-  // should be released.
-  base::TimeTicks reduce_memory_usage_time =
-      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
-  task_runner_->PostDelayedTask(
-      FROM_HERE, reduce_memory_usage_callback_,
-      reduce_memory_usage_time - base::TimeTicks::Now());
-}
-
-void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
-  base::AutoLock lock(lock_);
-
-  reduce_memory_usage_pending_ = false;
-
-  if (free_buffers_.empty() && busy_buffers_.empty())
-    return;
-
-  base::TimeTicks current_time = base::TimeTicks::Now();
-  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);
-
-  if (free_buffers_.empty() && busy_buffers_.empty())
-    return;
-
-  reduce_memory_usage_pending_ = true;
-
-  // Schedule another call to ReduceMemoryUsage at the time when the next
-  // buffer should be released.
-  base::TimeTicks reduce_memory_usage_time =
-      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
-  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
-                                reduce_memory_usage_time - current_time);
-}
-
-void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
-    base::TimeTicks time) {
-  lock_.AssertAcquired();
-
-  ContextProvider* context_provider =
-      resource_provider_->output_surface()->worker_context_provider();
-  DCHECK(context_provider);
-
-  {
-    ContextProvider::ScopedContextLock scoped_context(context_provider);
-
-    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
-    DCHECK(gl);
-
-    // Note: Front buffer is guaranteed to be LRU so we can stop releasing
-    // buffers as soon as we find a buffer that has been used since |time|.
-    while (!free_buffers_.empty()) {
-      if (free_buffers_.front()->last_usage > time)
-        return;
-
-      free_buffers_.front()->DestroyGLResources(gl);
-      buffers_.erase(free_buffers_.front());
-      free_buffers_.take_front();
-    }
-
-    while (!busy_buffers_.empty()) {
-      if (busy_buffers_.front()->last_usage > time)
-        return;
-
-      busy_buffers_.front()->DestroyGLResources(gl);
-      buffers_.erase(busy_buffers_.front());
-      busy_buffers_.take_front();
-    }
-  }
-}
+void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
+    CopySequenceNumber sequence) {
+  if (last_flushed_copy_operation_ >= sequence)
+    return;
+
+  AdvanceLastIssuedCopyTo(sequence);
+
+  // Flush all issued copy operations.
+  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
+  last_flushed_copy_operation_ = last_issued_copy_operation_;
+}
 
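[Reviewer note] AdvanceLastIssuedCopyTo and AdvanceLastFlushedCopyTo form a pair of monotonic watermarks: callers ask for "everything up to sequence N", and each function early-outs if its watermark is already past N, so a whole batch gets one flush. A minimal single-threaded model of the two counters, with printf stand-ins for the real issue/flush calls:

    #include <cstdint>
    #include <cstdio>

    using CopySequenceNumber = uint64_t;

    class CopyScheduler {
     public:
      // Issue all pending operations up to and including |sequence|.
      void AdvanceLastIssuedCopyTo(CopySequenceNumber sequence) {
        if (last_issued_ >= sequence)
          return;  // Watermark already past |sequence|; nothing to do.
        std::printf("issue %llu operation(s)\n",
                    static_cast<unsigned long long>(sequence - last_issued_));
        last_issued_ = sequence;
      }

      // Issue, then flush the command stream once for the whole batch.
      void AdvanceLastFlushedCopyTo(CopySequenceNumber sequence) {
        if (last_flushed_ >= sequence)
          return;
        AdvanceLastIssuedCopyTo(sequence);
        std::printf("flush\n");  // Stand-in for ShallowFlushCHROMIUM().
        last_flushed_ = sequence;
      }

     private:
      CopySequenceNumber last_issued_ = 0;
      CopySequenceNumber last_flushed_ = 0;
    };

    int main() {
      CopyScheduler scheduler;
      scheduler.AdvanceLastIssuedCopyTo(3);   // issues 3
      scheduler.AdvanceLastFlushedCopyTo(5);  // issues 2 more, then one flush
      scheduler.AdvanceLastFlushedCopyTo(4);  // no-op: watermark already at 5
      return 0;
    }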
 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
   TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
                task_set);
 
   DCHECK(tasks_pending_[task_set]);
   tasks_pending_[task_set] = false;
   if (tasks_pending_.any()) {
     TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                  "state", StateAsValue());
   } else {
     TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
   }
   client_->DidFinishRunningTileTasks(task_set);
 }
 
+void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
+  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
+               count);
+
+  CopyOperation::Deque copy_operations;
+
+  {
+    base::AutoLock lock(lock_);
+
+    for (int64 i = 0; i < count; ++i) {
+      DCHECK(!pending_copy_operations_.empty());
+      copy_operations.push_back(pending_copy_operations_.take_front());
+    }
+
+    // Increment |issued_copy_operation_count_| to reflect the transition of
+    // copy operations from "pending" to "issued" state.
+    issued_copy_operation_count_ += copy_operations.size();
+  }
+
+  while (!copy_operations.empty()) {
+    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();
+
+    // Remove the write lock.
+    copy_operation->src_write_lock.reset();
+
+    // Copy contents of source resource to destination resource.
+    resource_provider_->CopyResource(copy_operation->src->id(),
+                                     copy_operation->dst->id(),
+                                     copy_operation->rect);
+  }
+}
+
+void OneCopyTileTaskWorkerPool::
+    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
+        bool wait_if_needed) {
+  lock_.AssertAcquired();
+
+  if (check_for_completed_copy_operations_pending_)
+    return;
+
+  base::TimeTicks now = base::TimeTicks::Now();
+
+  // Schedule a check for completed copy operations as soon as possible but
+  // don't allow two consecutive checks to be scheduled to run less than the
+  // tick rate apart.
+  base::TimeTicks next_check_for_completed_copy_operations_time =
+      std::max(last_check_for_completed_copy_operations_time_ +
+                   base::TimeDelta::FromMilliseconds(
+                       kCheckForCompletedCopyOperationsTickRateMs),
+               now);
+
+  task_runner_->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
+                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
+      next_check_for_completed_copy_operations_time - now);
+
+  last_check_for_completed_copy_operations_time_ =
+      next_check_for_completed_copy_operations_time;
+  check_for_completed_copy_operations_pending_ = true;
+}
+
+void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
+    bool wait_if_needed) {
+  TRACE_EVENT1("cc",
+               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
+               "wait_if_needed", wait_if_needed);
+
+  resource_pool_->CheckBusyResources(wait_if_needed);
+
+  {
+    base::AutoLock lock(lock_);
+
+    DCHECK(check_for_completed_copy_operations_pending_);
+    check_for_completed_copy_operations_pending_ = false;
+
+    // The number of busy resources in the pool reflects the number of issued
+    // copy operations that have not yet completed.
+    issued_copy_operation_count_ = resource_pool_->busy_resource_count();
+
+    // There may be work blocked on too many in-flight copy operations, so wake
+    // up a worker thread.
+    copy_operation_count_cv_.Signal();
+  }
+}
+
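[Reviewer note] Together with the wait loop in PlaybackAndScheduleCopyOnWorkerThread, the functions above implement bounded in-flight work: raster workers block on the condition variable while pending + issued >= kMaxCopyOperations, and the completion check re-derives the issued count from the pool's busy resources and signals a waiter. A minimal model of that throttle using std::condition_variable (the patch itself uses base::Lock with base::ConditionVariable):

    #include <condition_variable>
    #include <mutex>

    class CopyThrottle {
     public:
      explicit CopyThrottle(int max_in_flight) : max_in_flight_(max_in_flight) {}

      // Worker side: block until there is room for one more copy operation.
      void WaitForRoom() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (pending_ + issued_ >= max_in_flight_)
          cv_.wait(lock);
        ++pending_;
        // Wake another waiting worker, mirroring the Signal() after the
        // wait loop in the patch.
        cv_.notify_one();
      }

      // Scheduler side: |count| pending operations become issued.
      void MarkIssued(int count) {
        std::lock_guard<std::mutex> lock(mutex_);
        pending_ -= count;
        issued_ += count;
      }

      // Completion check: |still_busy| is the number of copies not yet done,
      // like resource_pool_->busy_resource_count() in the patch.
      void OnCompletedCheck(int still_busy) {
        std::lock_guard<std::mutex> lock(mutex_);
        issued_ = still_busy;
        cv_.notify_one();  // Someone may be blocked on the limit.
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      const int max_in_flight_;
      int pending_ = 0;
      int issued_ = 0;
    };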
 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
 OneCopyTileTaskWorkerPool::StateAsValue() const {
   scoped_refptr<base::trace_event::TracedValue> state =
       new base::trace_event::TracedValue();
 
   state->BeginArray("tasks_pending");
   for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
     state->AppendBoolean(tasks_pending_[task_set]);
   state->EndArray();
   state->BeginDictionary("staging_state");
   StagingStateAsValueInto(state.get());
   state->EndDictionary();
 
   return state;
 }
 
 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
     base::trace_event::TracedValue* staging_state) const {
-  base::AutoLock lock(lock_);
-
-  staging_state->SetInteger("staging_buffer_count",
-                            static_cast<int>(buffers_.size()));
-  staging_state->SetInteger("busy_count",
-                            static_cast<int>(busy_buffers_.size()));
-  staging_state->SetInteger("free_count",
-                            static_cast<int>(free_buffers_.size()));
+  staging_state->SetInteger(
+      "staging_resource_count",
+      static_cast<int>(resource_pool_->total_resource_count()));
+  staging_state->SetInteger(
+      "bytes_used_for_staging_resources",
+      static_cast<int>(resource_pool_->total_memory_usage_bytes()));
+  staging_state->SetInteger(
+      "pending_copy_count",
+      static_cast<int>(resource_pool_->total_resource_count() -
+                       resource_pool_->acquired_resource_count()));
+  staging_state->SetInteger(
+      "bytes_pending_copy",
+      static_cast<int>(resource_pool_->total_memory_usage_bytes() -
+                       resource_pool_->acquired_memory_usage_bytes()));
 }
 
 }  // namespace cc