OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/metrics/histogram.h" | |
10 #include "base/strings/stringprintf.h" | 11 #include "base/strings/stringprintf.h" |
11 #include "base/trace_event/trace_event.h" | 12 #include "base/trace_event/trace_event.h" |
12 #include "base/trace_event/trace_event_argument.h" | 13 #include "base/trace_event/trace_event_argument.h" |
13 #include "cc/base/math_util.h" | 14 #include "cc/base/math_util.h" |
14 #include "cc/debug/traced_value.h" | 15 #include "cc/debug/traced_value.h" |
15 #include "cc/raster/raster_buffer.h" | 16 #include "cc/raster/raster_buffer.h" |
16 #include "cc/resources/platform_color.h" | 17 #include "cc/resources/platform_color.h" |
17 #include "cc/resources/resource_pool.h" | |
18 #include "cc/resources/scoped_resource.h" | 18 #include "cc/resources/scoped_resource.h" |
19 #include "gpu/GLES2/gl2extchromium.h" | |
19 #include "gpu/command_buffer/client/gles2_interface.h" | 20 #include "gpu/command_buffer/client/gles2_interface.h" |
20 #include "ui/gfx/gpu_memory_buffer.h" | 21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
21 | 22 |
22 namespace cc { | 23 namespace cc { |
23 namespace { | 24 namespace { |
24 | 25 |
25 class RasterBufferImpl : public RasterBuffer { | 26 class RasterBufferImpl : public RasterBuffer { |
26 public: | 27 public: |
27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 28 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
28 ResourceProvider* resource_provider, | 29 ResourceProvider* resource_provider, |
29 ResourcePool* resource_pool, | |
30 ResourceFormat resource_format, | 30 ResourceFormat resource_format, |
31 const Resource* output_resource, | 31 const Resource* resource, |
32 uint64_t previous_content_id) | 32 uint64_t previous_content_id) |
33 : worker_pool_(worker_pool), | 33 : worker_pool_(worker_pool), |
34 resource_provider_(resource_provider), | 34 resource_(resource), |
35 resource_pool_(resource_pool), | 35 lock_(resource_provider, resource->id()), |
36 output_resource_(output_resource), | 36 previous_content_id_(previous_content_id) {} |
37 raster_content_id_(0), | |
38 sequence_(0) { | |
39 if (worker_pool->have_persistent_gpu_memory_buffers() && | |
40 previous_content_id) { | |
41 raster_resource_ = | |
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id); | |
43 } | |
44 if (raster_resource_) { | |
45 raster_content_id_ = previous_content_id; | |
46 DCHECK_EQ(resource_format, raster_resource_->format()); | |
47 DCHECK_EQ(output_resource->size().ToString(), | |
48 raster_resource_->size().ToString()); | |
49 } else { | |
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), | |
51 resource_format); | |
52 } | |
53 | 37 |
54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 38 ~RasterBufferImpl() override {} |
55 resource_provider_, raster_resource_->id())); | |
56 } | |
57 | |
58 ~RasterBufferImpl() override { | |
59 // Release write lock in case a copy was never scheduled. | |
60 lock_.reset(); | |
61 | |
62 // Make sure any scheduled copy operations are issued before we release the | |
63 // raster resource. | |
64 if (sequence_) | |
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | |
66 | |
67 // Return resources to pool so they can be used by another RasterBuffer | |
68 // instance. | |
69 resource_pool_->ReleaseResource(raster_resource_.Pass(), | |
70 raster_content_id_); | |
71 } | |
72 | 39 |
73 // Overridden from RasterBuffer: | 40 // Overridden from RasterBuffer: |
74 void Playback(const RasterSource* raster_source, | 41 void Playback(const RasterSource* raster_source, |
75 const gfx::Rect& raster_full_rect, | 42 const gfx::Rect& raster_full_rect, |
76 const gfx::Rect& raster_dirty_rect, | 43 const gfx::Rect& raster_dirty_rect, |
77 uint64_t new_content_id, | 44 uint64_t new_content_id, |
78 float scale) override { | 45 float scale) override { |
79 // If there's a raster_content_id_, we are reusing a resource with that | 46 worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( |
80 // content id. | 47 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
81 bool reusing_raster_resource = raster_content_id_ != 0; | 48 scale, previous_content_id_, new_content_id); |
82 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | |
83 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), | |
84 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | |
85 scale); | |
86 // Store the content id of the resource to return to the pool. | |
87 raster_content_id_ = new_content_id; | |
88 } | 49 } |
89 | 50 |
90 private: | 51 private: |
91 OneCopyTileTaskWorkerPool* worker_pool_; | 52 OneCopyTileTaskWorkerPool* worker_pool_; |
92 ResourceProvider* resource_provider_; | 53 const Resource* resource_; |
93 ResourcePool* resource_pool_; | 54 ResourceProvider::ScopedWriteLockGL lock_; |
94 const Resource* output_resource_; | 55 uint64_t previous_content_id_; |
95 uint64_t raster_content_id_; | |
96 scoped_ptr<ScopedResource> raster_resource_; | |
97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | |
98 CopySequenceNumber sequence_; | |
99 | 56 |
100 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 57 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
101 }; | 58 }; |
102 | 59 |
103 // Number of in-flight copy operations to allow. | 60 // Number of staging buffers to use. |
104 const int kMaxCopyOperations = 32; | 61 const size_t kMaxStagingBuffers = 32; |
105 | 62 |
106 // Delay between checking for copy operations to complete. | 63 // Delay between checking for query result to be available. |
107 const int kCheckForCompletedCopyOperationsTickRateMs = 1; | 64 const int kCheckForQueryResultAvailableTickRateMs = 1; |
108 | 65 |
109 // Number of failed attempts to allow before we perform a check that will | 66 // Number of attempts to allow before we perform a check that will wait for |
110 // wait for copy operations to complete if needed. | 67 // query to complete. |
111 const int kFailedAttemptsBeforeWaitIfNeeded = 256; | 68 const int kMaxCheckForQueryResultAvailableAttempts = 256; |
112 | 69 |
113 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 70 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
114 // default batch size for copy operations. | 71 // default batch size for copy operations. |
115 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 72 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
116 | 73 |
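The 4 MiB figure in the comment above checks out for the common RGBA_8888 case: a 512x512 tile at 4 bytes per pixel is exactly 1 MiB, so four such tiles fill the budget. A standalone sketch of how this budget later turns into a per-copy row chunk (the variable names are illustrative, not cc code):

```cpp
// Standalone sketch: how a 4 MiB copy budget maps to row chunks for a
// 512-wide RGBA_8888 buffer. Mirrors the chunking math used later in this
// file; names here are illustrative only.
#include <algorithm>
#include <cstdio>

int main() {
  const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;  // 4 MiB budget.
  const int width = 512;
  const int bytes_per_pixel = 4;                      // RGBA_8888.
  const int bytes_per_row = width * bytes_per_pixel;  // 2048 bytes.
  int chunk_size_in_rows =
      std::max(1, kMaxBytesPerCopyOperation / bytes_per_row);  // 2048 rows.
  // Align to a multiple of 4 rows, as compressed texture formats require.
  chunk_size_in_rows = (chunk_size_in_rows + 3) & ~3;
  std::printf("rows per copy chunk: %d\n", chunk_size_in_rows);
  return 0;
}
```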
74 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { | |
75 TRACE_EVENT0("cc", "WaitForQueryResult"); | |
76 | |
77 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; | |
78 while (attempts_left--) { | |
79 GLuint complete = 1; | |
80 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, | |
81 &complete); | |
82 if (complete) | |
83 break; | |
84 | |
85 usleep(kCheckForQueryResultAvailableTickRateMs * 1000); | |
vmpstr 2015/07/16 22:25:26: PlatformThread::Sleep?
reveman 2015/07/22 22:39:15: Done.
86 } | |
87 | |
88 unsigned time_elapsed_us = 0; | |
89 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &time_elapsed_us); | |
90 UMA_HISTOGRAM_CUSTOM_COUNTS("Renderer4.CopyTextureLatency", time_elapsed_us, | |
vmpstr 2015/07/16 22:25:26: CopyTextureLatencyUS (we usually include the time units, I…
reveman 2015/07/22 22:39:15: This is the same as before but I removed it from l…
91 0, 256000, 50); | |
92 } | |
93 | |
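WaitForQueryResult above polls with a bounded number of 1 ms sleeps, then falls through to the GL_QUERY_RESULT_EXT read, which blocks until the result is ready. A minimal sketch of that bounded-poll-then-block pattern with the GL calls abstracted away (PollUntilAvailable and its parameter are hypothetical names, not Chromium API):

```cpp
// Minimal sketch, assuming nothing Chromium-specific: poll an asynchronous
// completion flag a bounded number of times, then let the caller fall back
// to a blocking read. Constants mirror the ones defined above.
#include <chrono>
#include <functional>
#include <thread>

bool PollUntilAvailable(const std::function<bool()>& is_available) {
  const int kMaxAttempts = 256;  // Bounded busy-wait.
  const auto kTickRate = std::chrono::milliseconds(1);
  for (int attempts_left = kMaxAttempts; attempts_left--;) {
    if (is_available())
      return true;  // Result is ready; a subsequent read won't block.
    std::this_thread::sleep_for(kTickRate);
  }
  return false;  // Still pending; caller issues the blocking query read.
}
```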
117 } // namespace | 94 } // namespace |
118 | 95 |
119 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | 96 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer( |
120 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | 97 scoped_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer, |
121 const Resource* src, | 98 const gfx::Size& size) |
122 const Resource* dst, | 99 : gpu_memory_buffer(gpu_memory_buffer.Pass()), |
123 const gfx::Rect& rect) | 100 size(size), |
124 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | 101 texture_id(0), |
102 image_id(0), | |
103 query_id(0), | |
104 content_id(0), | |
105 sequence_id(0) { | |
125 } | 106 } |
126 | 107 |
127 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | 108 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { |
109 DCHECK_EQ(texture_id, 0u); | |
110 DCHECK_EQ(image_id, 0u); | |
111 DCHECK_EQ(query_id, 0u); | |
112 } | |
113 | |
114 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( | |
115 gpu::gles2::GLES2Interface* gl) { | |
116 if (query_id) { | |
117 gl->DeleteQueriesEXT(1, &query_id); | |
118 query_id = 0; | |
119 } | |
120 if (image_id) { | |
121 gl->DestroyImageCHROMIUM(image_id); | |
122 image_id = 0; | |
123 } | |
124 if (texture_id) { | |
125 gl->DeleteTextures(1, &texture_id); | |
126 texture_id = 0; | |
127 } | |
128 } | 128 } |
129 | 129 |
130 // static | 130 // static |
131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
132 base::SequencedTaskRunner* task_runner, | 132 base::SequencedTaskRunner* task_runner, |
133 TaskGraphRunner* task_graph_runner, | 133 TaskGraphRunner* task_graph_runner, |
134 ContextProvider* context_provider, | 134 ContextProvider* context_provider, |
135 ResourceProvider* resource_provider, | 135 ResourceProvider* resource_provider, |
136 ResourcePool* resource_pool, | |
137 int max_copy_texture_chromium_size, | 136 int max_copy_texture_chromium_size, |
138 bool have_persistent_gpu_memory_buffers) { | 137 bool use_persistent_gpu_memory_buffers, |
138 unsigned image_target) { | |
139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
140 task_runner, task_graph_runner, context_provider, resource_provider, | 140 task_runner, task_graph_runner, resource_provider, |
141 resource_pool, max_copy_texture_chromium_size, | 141 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, |
142 have_persistent_gpu_memory_buffers)); | 142 image_target)); |
143 } | 143 } |
144 | 144 |
145 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 145 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( |
146 base::SequencedTaskRunner* task_runner, | 146 base::SequencedTaskRunner* task_runner, |
147 TaskGraphRunner* task_graph_runner, | 147 TaskGraphRunner* task_graph_runner, |
148 ContextProvider* context_provider, | |
149 ResourceProvider* resource_provider, | 148 ResourceProvider* resource_provider, |
150 ResourcePool* resource_pool, | |
151 int max_copy_texture_chromium_size, | 149 int max_copy_texture_chromium_size, |
152 bool have_persistent_gpu_memory_buffers) | 150 bool use_persistent_gpu_memory_buffers, |
151 unsigned image_target) | |
153 : task_runner_(task_runner), | 152 : task_runner_(task_runner), |
154 task_graph_runner_(task_graph_runner), | 153 task_graph_runner_(task_graph_runner), |
155 namespace_token_(task_graph_runner->GetNamespaceToken()), | 154 namespace_token_(task_graph_runner->GetNamespaceToken()), |
156 context_provider_(context_provider), | |
157 resource_provider_(resource_provider), | 155 resource_provider_(resource_provider), |
158 resource_pool_(resource_pool), | |
159 max_bytes_per_copy_operation_( | 156 max_bytes_per_copy_operation_( |
160 max_copy_texture_chromium_size | 157 max_copy_texture_chromium_size |
161 ? std::min(kMaxBytesPerCopyOperation, | 158 ? std::min(kMaxBytesPerCopyOperation, |
162 max_copy_texture_chromium_size) | 159 max_copy_texture_chromium_size) |
163 : kMaxBytesPerCopyOperation), | 160 : kMaxBytesPerCopyOperation), |
164 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 161 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), |
165 last_issued_copy_operation_(0), | 162 image_target_(image_target), |
166 last_flushed_copy_operation_(0), | 163 next_sequence_id_(1), |
167 lock_(), | |
168 copy_operation_count_cv_(&lock_), | |
169 bytes_scheduled_since_last_flush_(0), | 164 bytes_scheduled_since_last_flush_(0), |
170 issued_copy_operation_count_(0), | |
171 next_copy_operation_sequence_(1), | |
172 check_for_completed_copy_operations_pending_(false), | |
173 shutdown_(false), | |
174 weak_ptr_factory_(this), | 165 weak_ptr_factory_(this), |
175 task_set_finished_weak_ptr_factory_(this) { | 166 task_set_finished_weak_ptr_factory_(this) { |
176 DCHECK(context_provider_); | |
177 } | 167 } |
178 | 168 |
179 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 169 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
180 DCHECK_EQ(pending_copy_operations_.size(), 0u); | 170 } |
171 | |
172 void OneCopyTileTaskWorkerPool::ReleaseFreeMemory() { | |
173 base::AutoLock lock(lock_); | |
174 | |
175 if (free_buffers_.empty() && busy_buffers_.empty()) | |
176 return; | |
177 | |
178 ContextProvider* context_provider = | |
179 resource_provider_->output_surface()->worker_context_provider(); | |
180 DCHECK(context_provider); | |
181 | |
182 { | |
183 ContextProvider::ScopedContextGL scoped_context(context_provider); | |
184 | |
185 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
186 DCHECK(gl); | |
187 | |
188 std::for_each( | |
189 free_buffers_.begin(), free_buffers_.end(), | |
190 [gl](StagingBuffer* buffer) { buffer->DestroyGLResources(gl); }); | |
191 std::for_each( | |
192 busy_buffers_.begin(), busy_buffers_.end(), | |
193 [gl](StagingBuffer* buffer) { buffer->DestroyGLResources(gl); }); | |
194 } | |
181 } | 195 } |
182 | 196 |
183 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 197 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
184 return this; | 198 return this; |
185 } | 199 } |
186 | 200 |
187 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 201 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
188 client_ = client; | 202 client_ = client; |
189 } | 203 } |
190 | 204 |
191 void OneCopyTileTaskWorkerPool::Shutdown() { | 205 void OneCopyTileTaskWorkerPool::Shutdown() { |
192 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 206 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
193 | 207 |
194 { | |
195 base::AutoLock lock(lock_); | |
196 | |
197 shutdown_ = true; | |
198 copy_operation_count_cv_.Signal(); | |
199 } | |
200 | |
201 TaskGraph empty; | 208 TaskGraph empty; |
202 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 209 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
203 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 210 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
211 | |
212 ReleaseFreeMemory(); | |
204 } | 213 } |
205 | 214 |
206 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 215 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
207 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 216 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
208 | 217 |
209 #if DCHECK_IS_ON() | |
210 { | |
211 base::AutoLock lock(lock_); | |
212 DCHECK(!shutdown_); | |
213 } | |
214 #endif | |
215 | |
216 if (tasks_pending_.none()) | 218 if (tasks_pending_.none()) |
217 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 219 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
218 | 220 |
219 // Mark all task sets as pending. | 221 // Mark all task sets as pending. |
220 tasks_pending_.set(); | 222 tasks_pending_.set(); |
221 | 223 |
222 size_t priority = kTileTaskPriorityBase; | 224 size_t priority = kTileTaskPriorityBase; |
223 | 225 |
224 graph_.Reset(); | 226 graph_.Reset(); |
225 | 227 |
226 // Cancel existing OnTaskSetFinished callbacks. | 228 // Cancel existing OnTaskSetFinished callbacks. |
227 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 229 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
228 | 230 |
229 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; | 231 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
230 | 232 |
231 size_t task_count[kNumberOfTaskSets] = {0}; | 233 size_t task_count[kNumberOfTaskSets] = {0}; |
232 | 234 |
233 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 235 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
234 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( | 236 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( |
235 task_runner_.get(), | 237 task_runner_.get(), |
236 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, | 238 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, |
237 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); | 239 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); |
238 } | 240 } |
239 | 241 |
240 resource_pool_->CheckBusyResources(false); | |
241 | |
242 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 242 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
243 it != queue->items.end(); ++it) { | 243 it != queue->items.end(); ++it) { |
244 const TileTaskQueue::Item& item = *it; | 244 const TileTaskQueue::Item& item = *it; |
245 RasterTask* task = item.task; | 245 RasterTask* task = item.task; |
246 DCHECK(!task->HasCompleted()); | 246 DCHECK(!task->HasCompleted()); |
247 | 247 |
248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
249 if (!item.task_sets[task_set]) | 249 if (!item.task_sets[task_set]) |
250 continue; | 250 continue; |
251 | 251 |
252 ++task_count[task_set]; | 252 ++task_count[task_set]; |
253 | 253 |
254 graph_.edges.push_back( | 254 graph_.edges.push_back( |
255 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | 255 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); |
256 } | 256 } |
257 | 257 |
258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
259 } | 259 } |
260 | 260 |
261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), | 262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), |
263 kTaskSetFinishedTaskPriorityBase + task_set, | 263 kTaskSetFinishedTaskPriorityBase + task_set, |
264 task_count[task_set]); | 264 task_count[task_set]); |
265 } | 265 } |
266 | 266 |
267 ScheduleTasksOnOriginThread(this, &graph_); | 267 ScheduleTasksOnOriginThread(this, &graph_); |
268 | |
269 // Barrier to sync any new resources to the worker context. | |
270 resource_provider_->output_surface() | |
271 ->context_provider() | |
272 ->ContextGL() | |
273 ->OrderingBarrierCHROMIUM(); | |
274 | |
268 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 275 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
269 | 276 |
270 std::copy(new_task_set_finished_tasks, | 277 std::copy(new_task_set_finished_tasks, |
271 new_task_set_finished_tasks + kNumberOfTaskSets, | 278 new_task_set_finished_tasks + kNumberOfTaskSets, |
272 task_set_finished_tasks_); | 279 task_set_finished_tasks_); |
273 | 280 |
274 resource_pool_->ReduceResourceUsage(); | |
275 | |
276 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", | 281 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", |
277 StateAsValue()); | 282 StateAsValue()); |
278 } | 283 } |
279 | 284 |
280 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | 285 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { |
281 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | 286 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); |
282 | 287 |
283 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 288 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
284 &completed_tasks_); | 289 &completed_tasks_); |
285 | 290 |
(...skipping 18 matching lines...) |
304 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 309 return !PlatformColor::SameComponentOrder(GetResourceFormat()); |
305 } | 310 } |
306 | 311 |
307 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 312 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
308 const Resource* resource, | 313 const Resource* resource, |
309 uint64_t resource_content_id, | 314 uint64_t resource_content_id, |
310 uint64_t previous_content_id) { | 315 uint64_t previous_content_id) { |
311 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 316 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
312 // the dirty rect. | 317 // the dirty rect. |
313 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | 318 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); |
314 return make_scoped_ptr<RasterBuffer>( | 319 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( |
315 new RasterBufferImpl(this, resource_provider_, resource_pool_, | 320 this, resource_provider_, resource_provider_->best_texture_format(), |
316 resource_provider_->best_texture_format(), resource, | 321 resource, previous_content_id)); |
317 previous_content_id)); | |
318 } | 322 } |
319 | 323 |
320 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 324 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
321 scoped_ptr<RasterBuffer> buffer) { | 325 scoped_ptr<RasterBuffer> buffer) { |
322 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 326 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
323 } | 327 } |
324 | 328 |
325 CopySequenceNumber | 329 void OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( |
vmpstr 2015/07/16 22:25:26: Can you break this function up into smaller logical…
reveman 2015/07/22 22:39:15: This has been completely refactored in latest patch…
326 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 330 const Resource* resource, |
327 bool reusing_raster_resource, | 331 const ResourceProvider::ScopedWriteLockGL* resource_lock, |
328 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | |
329 raster_resource_write_lock, | |
330 const Resource* raster_resource, | |
331 const Resource* output_resource, | |
332 const RasterSource* raster_source, | 332 const RasterSource* raster_source, |
333 const gfx::Rect& raster_full_rect, | 333 const gfx::Rect& raster_full_rect, |
334 const gfx::Rect& raster_dirty_rect, | 334 const gfx::Rect& raster_dirty_rect, |
335 float scale) { | 335 float scale, |
336 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 336 uint64_t previous_content_id, |
337 raster_resource_write_lock->GetGpuMemoryBuffer(); | 337 uint64_t new_content_id) { |
338 if (gpu_memory_buffer) { | |
339 void* data = NULL; | |
340 bool rv = gpu_memory_buffer->Map(&data); | |
341 DCHECK(rv); | |
342 int stride; | |
343 gpu_memory_buffer->GetStride(&stride); | |
344 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | |
345 DCHECK_GE(stride, 0); | |
346 | |
347 gfx::Rect playback_rect = raster_full_rect; | |
348 if (reusing_raster_resource) { | |
349 playback_rect.Intersect(raster_dirty_rect); | |
350 } | |
351 DCHECK(!playback_rect.IsEmpty()) | |
352 << "Why are we rastering a tile that's not dirty?"; | |
353 TileTaskWorkerPool::PlaybackToMemory( | |
354 data, raster_resource->format(), raster_resource->size(), | |
355 static_cast<size_t>(stride), raster_source, raster_full_rect, | |
356 playback_rect, scale); | |
357 gpu_memory_buffer->Unmap(); | |
358 } | |
359 | |
360 base::AutoLock lock(lock_); | 338 base::AutoLock lock(lock_); |
361 | 339 |
362 CopySequenceNumber sequence = 0; | 340 gfx::Size size = resource->size(); |
363 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 341 gfx::Rect playback_rect = raster_full_rect; |
364 raster_resource->size().width()) / | 342 |
365 8; | 343 scoped_ptr<StagingBuffer> staging_buffer; |
366 int chunk_size_in_rows = | 344 |
367 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 345 // Try to find a staging buffer that allows us to perform partial raster when |
368 // Align chunk size to 4. Required to support compressed texture formats. | 346 // using persistent GpuMemoryBuffers. |
369 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | 347 if (use_persistent_gpu_memory_buffers_ && previous_content_id && |
370 int y = 0; | 348 raster_dirty_rect != raster_full_rect) { |
371 int height = raster_resource->size().height(); | 349 for (;;) { |
372 while (y < height) { | 350 StagingBufferVector::iterator it = |
373 int failed_attempts = 0; | 351 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
374 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 352 [previous_content_id](const StagingBuffer* buffer) { |
375 kMaxCopyOperations) { | 353 return buffer->content_id == previous_content_id; |
376 // Ignore limit when shutdown is set. | 354 }); |
377 if (shutdown_) | 355 if (it != free_buffers_.end()) { |
378 break; | 356 std::swap(*it, free_buffers_.back()); |
379 | 357 staging_buffer = make_scoped_ptr(free_buffers_.back()); |
380 ++failed_attempts; | 358 free_buffers_.weak_erase(free_buffers_.end() - 1); |
381 | 359 DCHECK_EQ(staging_buffer->size.ToString(), size.ToString()); |
382 // Schedule a check that will also wait for operations to complete | 360 break; |
383 // after too many failed attempts. | 361 } |
384 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | 362 |
385 | 363 // Fall back to full raster if sync queries are not available. |
386 // Schedule a check for completed copy operations if too many operations | 364 if (!resource_provider_->use_sync_query()) |
387 // are currently in-flight. | 365 break; |
388 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | 366 |
367 // Fall back to full raster if a staging buffer with |previous_content_id| |
368 // doesn't exist. | |
369 if (std::find_if(busy_buffers_.begin(), busy_buffers_.end(), | |
370 [previous_content_id](const StagingBuffer* buffer) { | |
371 return buffer->content_id == previous_content_id; | |
372 }) == busy_buffers_.end()) { | |
373 break; | |
374 } | |
375 | |
376 ContextProvider* context_provider = | |
377 resource_provider_->output_surface()->worker_context_provider(); | |
378 DCHECK(context_provider); | |
389 | 379 |
390 { | 380 { |
391 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | 381 ContextProvider::ScopedContextGL scoped_context(context_provider); |
392 | 382 |
393 // Wait for in-flight copy operations to drop below limit. | 383 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
394 copy_operation_count_cv_.Wait(); | 384 DCHECK(gl); |
395 } | 385 |
396 } | 386 DCHECK(!busy_buffers_.empty()); |
397 | 387 WaitForQueryResult(gl, busy_buffers_.front()->query_id); |
vmpstr 2015/07/16 22:25:26: Are we waiting for the first one, because they are…
reveman 2015/07/22 22:39:15: The idea is that it would provide more predictable…
398 // There may be more work available, so wake up another worker thread. | 388 free_buffers_.push_back(busy_buffers_.take_front()); |
399 copy_operation_count_cv_.Signal(); | 389 } |
400 | 390 } |
401 // Copy at most |chunk_size_in_rows|. | 391 } |
402 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 392 |
403 DCHECK_GT(rows_to_copy, 0); | 393 if (!staging_buffer) { |
404 | 394 for (;;) { |
405 // |raster_resource_write_lock| is passed to the first copy operation as it | 395 // Sort free buffers based on usage. MRU buffer first. |
406 // needs to be released before we can issue a copy. | 396 std::sort(free_buffers_.begin(), free_buffers_.end(), |
vmpstr 2015/07/16 22:25:26: Should we have some sort of a dirty flag to ensure…
reveman 2015/07/22 22:39:15: removed this code from latest patch.
407 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 397 [](const StagingBuffer* a, const StagingBuffer* b) { |
408 raster_resource_write_lock.Pass(), raster_resource, output_resource, | 398 return a->sequence_id > b->sequence_id; |
409 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | 399 }); |
410 y += rows_to_copy; | 400 |
411 | 401 // Find MRU buffer of correct size. |
412 // Acquire a sequence number for this copy operation. | 402 StagingBufferVector::iterator it = std::find_if( |
413 sequence = next_copy_operation_sequence_++; | 403 free_buffers_.begin(), free_buffers_.end(), |
414 | 404 [size](const StagingBuffer* buffer) { return buffer->size == size; }); |
415 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 405 if (it != free_buffers_.end()) { |
416 // used for this copy operation. | 406 std::swap(*it, free_buffers_.back()); |
417 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 407 staging_buffer = make_scoped_ptr(free_buffers_.back()); |
418 | 408 free_buffers_.weak_erase(free_buffers_.end() - 1); |
419 // Post task that will advance last flushed copy operation to |sequence| | 409 break; |
420 // when |bytes_scheduled_since_last_flush_| has reached | 410 } |
421 // |max_bytes_per_copy_operation_|. | 411 |
422 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 412 ContextProvider* context_provider = |
423 task_runner_->PostTask( | 413 resource_provider_->output_surface()->worker_context_provider(); |
424 FROM_HERE, | 414 DCHECK(context_provider); |
425 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | 415 |
426 weak_ptr_factory_.GetWeakPtr(), sequence)); | 416 { |
427 bytes_scheduled_since_last_flush_ = 0; | 417 ContextProvider::ScopedContextGL scoped_context(context_provider); |
428 } | 418 |
429 } | 419 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
430 | 420 DCHECK(gl); |
431 return sequence; | 421 |
422 // First check if the query result for the next busy buffer is already | |
423 // available. | |
424 if (!busy_buffers_.empty() && resource_provider_->use_sync_query()) { | |
425 GLuint complete = 1; | |
426 gl->GetQueryObjectuivEXT(busy_buffers_.front()->query_id, | |
427 GL_QUERY_RESULT_AVAILABLE_EXT, &complete); | |
428 if (complete) { | |
429 free_buffers_.push_back(busy_buffers_.take_front()); | |
430 continue; | |
431 } | |
432 } | |
433 | |
434 // If we haven't reached the staging buffer limit then prefer to | |
435 // allocate a new buffer instead of releasing a free buffer or waiting | |
436 // for a busy buffer to become available. | |
437 if ((free_buffers_.size() + busy_buffers_.size()) < kMaxStagingBuffers) | |
438 break; | |
439 | |
440 // Release LRU buffer instead of waiting for a busy buffer to become | |
441 // available. | |
442 if (!free_buffers_.empty()) { | |
443 free_buffers_.back()->DestroyGLResources(gl); | |
444 free_buffers_.pop_back(); | |
445 // Note: we 'continue' here in case the number of staging buffers is | |
446 // above the limit. | |
447 continue; | |
448 } | |
449 | |
450 DCHECK(!busy_buffers_.empty()); | |
451 | |
452 // Use CHROMIUM_sync_query if available, otherwise fall back to glFinish. |
453 if (resource_provider_->use_sync_query()) { | |
454 WaitForQueryResult(gl, busy_buffers_.front()->query_id); | |
455 free_buffers_.push_back(busy_buffers_.take_front()); | |
456 } else { | |
457 gl->Finish(); | |
458 while (!busy_buffers_.empty()) | |
459 free_buffers_.push_back(busy_buffers_.take_front()); | |
460 } | |
461 } | |
462 } | |
463 } | |
464 | |
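The allocation loop above encodes a simple recency policy: free buffers are sorted by a monotonically increasing stamp (the real code uses StagingBuffer's sequence_id, assigned from next_sequence_id_++ on every use) so the most recently used buffer is preferred for reuse while the least recently used one is the first candidate for destruction. A small sketch of just that ordering, under the assumption that the stamps never wrap in practice (BufferInfo is a hypothetical stand-in):

```cpp
// Illustrative sketch of the MRU-first ordering used above.
#include <algorithm>
#include <cstdint>
#include <vector>

struct BufferInfo {
  uint64_t sequence_id = 0;  // Higher means used more recently.
};

void SortMruFirst(std::vector<BufferInfo>& free_buffers) {
  std::sort(free_buffers.begin(), free_buffers.end(),
            [](const BufferInfo& a, const BufferInfo& b) {
              return a.sequence_id > b.sequence_id;
            });
  // front(): most recently used, preferred for reuse.
  // back(): least recently used, destroyed first under memory pressure.
}
```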
465 { | |
466 base::AutoUnlock unlock(lock_); | |
467 | |
468 // Allocate new staging buffer if necessary. | |
469 if (!staging_buffer) { | |
470 staging_buffer = make_scoped_ptr(new StagingBuffer( | |
471 resource_provider_->gpu_memory_buffer_manager() | |
472 ->AllocateGpuMemoryBuffer( | |
473 size, ToGpuMemoryBufferFormat( | |
474 resource_provider_->best_texture_format()), | |
475 use_persistent_gpu_memory_buffers_ | |
476 ? gfx::GpuMemoryBuffer::PERSISTENT_MAP | |
477 : gfx::GpuMemoryBuffer::MAP), | |
478 size)); | |
479 } | |
480 | |
481 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { | |
482 // Reduce playback rect to dirty region if the content id of the staging | |
483 // buffer matches the previous content id. |
484 if (previous_content_id == staging_buffer->content_id) | |
485 playback_rect.Intersect(raster_dirty_rect); | |
486 } | |
487 | |
488 if (staging_buffer->gpu_memory_buffer) { | |
489 void* data = NULL; | |
490 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); | |
491 DCHECK(rv); | |
492 int stride; | |
493 staging_buffer->gpu_memory_buffer->GetStride(&stride); | |
494 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | |
495 DCHECK_GE(stride, 0); | |
496 | |
497 DCHECK(!playback_rect.IsEmpty()) | |
498 << "Why are we rastering a tile that's not dirty?"; | |
499 TileTaskWorkerPool::PlaybackToMemory( | |
500 data, resource_provider_->best_texture_format(), staging_buffer->size, | |
501 static_cast<size_t>(stride), raster_source, raster_full_rect, | |
502 playback_rect, scale); | |
503 staging_buffer->gpu_memory_buffer->Unmap(); | |
504 } | |
505 } | |
506 | |
507 ContextProvider* context_provider = | |
508 resource_provider_->output_surface()->worker_context_provider(); | |
509 | |
510 { | |
511 ContextProvider::ScopedContextGL scoped_context(context_provider); | |
512 | |
513 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
514 DCHECK(gl); | |
515 | |
516 if (!staging_buffer->texture_id) { | |
517 gl->GenTextures(1, &staging_buffer->texture_id); | |
518 gl->BindTexture(image_target_, staging_buffer->texture_id); | |
519 gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
520 gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | |
521 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | |
522 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | |
523 if (staging_buffer->gpu_memory_buffer) { | |
524 staging_buffer->image_id = gl->CreateImageCHROMIUM( | |
525 staging_buffer->gpu_memory_buffer->AsClientBuffer(), | |
526 staging_buffer->size.width(), staging_buffer->size.height(), | |
527 GLInternalFormat(resource_provider_->best_texture_format())); | |
528 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); | |
529 } | |
530 } else { | |
531 gl->BindTexture(image_target_, staging_buffer->texture_id); | |
532 if (staging_buffer->image_id) { | |
533 gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); | |
534 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); | |
535 } | |
536 } | |
537 | |
538 if (resource_provider_->use_sync_query()) { | |
539 if (!staging_buffer->query_id) | |
540 gl->GenQueriesEXT(1, &staging_buffer->query_id); | |
541 | |
542 #if defined(OS_CHROMEOS) | |
543 // TODO(reveman): This avoids a performance problem on some ChromeOS | |
544 // devices. This needs to be removed to support native GpuMemoryBuffer | |
545 // implementations. crbug.com/436314 | |
546 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); | |
547 #else | |
548 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, | |
549 staging_buffer->query_id); | |
550 #endif | |
551 } | |
552 | |
553 int bytes_per_row = | |
554 (BitsPerPixel(resource_provider_->best_texture_format()) * | |
555 size.width()) / | |
556 8; | |
557 int chunk_size_in_rows = | |
558 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | |
559 // Align chunk size to 4. Required to support compressed texture formats. | |
560 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | |
561 int y = 0; | |
562 int height = size.height(); | |
563 while (y < height) { | |
564 // Copy at most |chunk_size_in_rows|. | |
565 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | |
566 DCHECK_GT(rows_to_copy, 0); | |
567 | |
568 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, | |
569 resource_lock->texture_id(), 0, y, 0, y, | |
570 size.width(), rows_to_copy, false, false, | |
571 false); | |
572 y += rows_to_copy; | |
573 | |
574 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | |
575 // used for this copy operation. | |
576 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | |
577 | |
578 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | |
579 gl->ShallowFlushCHROMIUM(); | |
580 bytes_scheduled_since_last_flush_ = 0; | |
581 } | |
582 } | |
583 | |
584 if (resource_provider_->use_sync_query()) { | |
585 #if defined(OS_CHROMEOS) | |
586 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); | |
587 #else | |
588 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); | |
589 #endif | |
590 } | |
591 | |
592 // Barrier to sync worker context output to cc context. | |
593 gl->OrderingBarrierCHROMIUM(); | |
594 } | |
595 | |
596 staging_buffer->content_id = new_content_id; | |
597 staging_buffer->sequence_id = next_sequence_id_++; | |
598 | |
599 busy_buffers_.push_back(staging_buffer.Pass()); | |
432 } | 600 } |
433 | 601 |
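The copy loop near the end of the function interleaves CopySubTextureCHROMIUM calls with shallow flushes once max_bytes_per_copy_operation_ worth of work has been issued, so a large tile never queues one monolithic copy. A standalone sketch of the batching logic, with the GL calls abstracted into callbacks (CopyInChunks and both callbacks are hypothetical names):

```cpp
// Standalone sketch, assuming copy_rows/flush stand in for
// CopySubTextureCHROMIUM and ShallowFlushCHROMIUM: copy |height| rows in
// 4-row-aligned chunks and flush whenever the issued bytes cross the budget.
#include <algorithm>
#include <functional>

void CopyInChunks(int height, int bytes_per_row, int max_bytes_per_copy,
                  const std::function<void(int y, int rows)>& copy_rows,
                  const std::function<void()>& flush) {
  int chunk_size_in_rows = std::max(1, max_bytes_per_copy / bytes_per_row);
  chunk_size_in_rows = (chunk_size_in_rows + 3) & ~3;  // Align to 4 rows.
  int bytes_since_last_flush = 0;
  int y = 0;
  while (y < height) {
    const int rows_to_copy = std::min(chunk_size_in_rows, height - y);
    copy_rows(y, rows_to_copy);
    y += rows_to_copy;
    bytes_since_last_flush += rows_to_copy * bytes_per_row;
    if (bytes_since_last_flush >= max_bytes_per_copy) {
      flush();  // Keeps GPU-side latency bounded for large tiles.
      bytes_since_last_flush = 0;
    }
  }
}
```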
434 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | |
435 CopySequenceNumber sequence) { | |
436 if (last_issued_copy_operation_ >= sequence) | |
437 return; | |
438 | |
439 IssueCopyOperations(sequence - last_issued_copy_operation_); | |
440 last_issued_copy_operation_ = sequence; | |
441 } | |
442 | |
443 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | |
444 CopySequenceNumber sequence) { | |
445 if (last_flushed_copy_operation_ >= sequence) | |
446 return; | |
447 | |
448 AdvanceLastIssuedCopyTo(sequence); | |
449 | |
450 // Flush all issued copy operations. | |
451 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | |
452 last_flushed_copy_operation_ = last_issued_copy_operation_; | |
453 } | |
454 | |
455 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 602 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
456 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 603 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
457 task_set); | 604 task_set); |
458 | 605 |
459 DCHECK(tasks_pending_[task_set]); | 606 DCHECK(tasks_pending_[task_set]); |
460 tasks_pending_[task_set] = false; | 607 tasks_pending_[task_set] = false; |
461 if (tasks_pending_.any()) { | 608 if (tasks_pending_.any()) { |
462 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 609 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
463 "state", StateAsValue()); | 610 "state", StateAsValue()); |
464 } else { | 611 } else { |
465 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 612 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
466 } | 613 } |
467 client_->DidFinishRunningTileTasks(task_set); | 614 client_->DidFinishRunningTileTasks(task_set); |
468 } | 615 } |
469 | 616 |
470 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | |
471 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | |
472 count); | |
473 | |
474 CopyOperation::Deque copy_operations; | |
475 | |
476 { | |
477 base::AutoLock lock(lock_); | |
478 | |
479 for (int64 i = 0; i < count; ++i) { | |
480 DCHECK(!pending_copy_operations_.empty()); | |
481 copy_operations.push_back(pending_copy_operations_.take_front()); | |
482 } | |
483 | |
484 // Increment |issued_copy_operation_count_| to reflect the transition of | |
485 // copy operations from "pending" to "issued" state. | |
486 issued_copy_operation_count_ += copy_operations.size(); | |
487 } | |
488 | |
489 while (!copy_operations.empty()) { | |
490 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | |
491 | |
492 // Remove the write lock. | |
493 copy_operation->src_write_lock.reset(); | |
494 | |
495 // Copy contents of source resource to destination resource. | |
496 resource_provider_->CopyResource(copy_operation->src->id(), | |
497 copy_operation->dst->id(), | |
498 copy_operation->rect); | |
499 } | |
500 } | |
501 | |
502 void OneCopyTileTaskWorkerPool:: | |
503 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | |
504 bool wait_if_needed) { | |
505 lock_.AssertAcquired(); | |
506 | |
507 if (check_for_completed_copy_operations_pending_) | |
508 return; | |
509 | |
510 base::TimeTicks now = base::TimeTicks::Now(); | |
511 | |
512 // Schedule a check for completed copy operations as soon as possible but | |
513 // don't allow two consecutive checks to be scheduled to run less than the | |
514 // tick rate apart. | |
515 base::TimeTicks next_check_for_completed_copy_operations_time = | |
516 std::max(last_check_for_completed_copy_operations_time_ + | |
517 base::TimeDelta::FromMilliseconds( | |
518 kCheckForCompletedCopyOperationsTickRateMs), | |
519 now); | |
520 | |
521 task_runner_->PostDelayedTask( | |
522 FROM_HERE, | |
523 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | |
524 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | |
525 next_check_for_completed_copy_operations_time - now); | |
526 | |
527 last_check_for_completed_copy_operations_time_ = | |
528 next_check_for_completed_copy_operations_time; | |
529 check_for_completed_copy_operations_pending_ = true; | |
530 } | |
531 | |
532 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | |
533 bool wait_if_needed) { | |
534 TRACE_EVENT1("cc", | |
535 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | |
536 "wait_if_needed", wait_if_needed); | |
537 | |
538 resource_pool_->CheckBusyResources(wait_if_needed); | |
539 | |
540 { | |
541 base::AutoLock lock(lock_); | |
542 | |
543 DCHECK(check_for_completed_copy_operations_pending_); | |
544 check_for_completed_copy_operations_pending_ = false; | |
545 | |
546 // The number of busy resources in the pool reflects the number of issued | |
547 // copy operations that have not yet completed. | |
548 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | |
549 | |
550 // There may be work blocked on too many in-flight copy operations, so wake | |
551 // up a worker thread. | |
552 copy_operation_count_cv_.Signal(); | |
553 } | |
554 } | |
555 | |
556 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 617 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
557 OneCopyTileTaskWorkerPool::StateAsValue() const { | 618 OneCopyTileTaskWorkerPool::StateAsValue() const { |
558 scoped_refptr<base::trace_event::TracedValue> state = | 619 scoped_refptr<base::trace_event::TracedValue> state = |
559 new base::trace_event::TracedValue(); | 620 new base::trace_event::TracedValue(); |
560 | 621 |
561 state->BeginArray("tasks_pending"); | 622 state->BeginArray("tasks_pending"); |
562 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 623 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
563 state->AppendBoolean(tasks_pending_[task_set]); | 624 state->AppendBoolean(tasks_pending_[task_set]); |
564 state->EndArray(); | 625 state->EndArray(); |
565 state->BeginDictionary("staging_state"); | 626 state->BeginDictionary("staging_state"); |
566 StagingStateAsValueInto(state.get()); | 627 StagingStateAsValueInto(state.get()); |
567 state->EndDictionary(); | 628 state->EndDictionary(); |
568 | 629 |
569 return state; | 630 return state; |
570 } | 631 } |
571 | 632 |
572 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | 633 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( |
573 base::trace_event::TracedValue* staging_state) const { | 634 base::trace_event::TracedValue* staging_state) const { |
635 base::AutoLock lock(lock_); | |
636 | |
574 staging_state->SetInteger( | 637 staging_state->SetInteger( |
575 "staging_resource_count", | 638 "staging_resource_count", |
576 static_cast<int>(resource_pool_->total_resource_count())); | 639 static_cast<int>(free_buffers_.size() + busy_buffers_.size())); |
577 staging_state->SetInteger( | 640 staging_state->SetInteger("pending_copy_count", |
578 "bytes_used_for_staging_resources", | 641 static_cast<int>(busy_buffers_.size())); |
579 static_cast<int>(resource_pool_->total_memory_usage_bytes())); | |
580 staging_state->SetInteger( | |
581 "pending_copy_count", | |
582 static_cast<int>(resource_pool_->total_resource_count() - | |
583 resource_pool_->acquired_resource_count())); | |
584 staging_state->SetInteger( | |
585 "bytes_pending_copy", | |
586 static_cast<int>(resource_pool_->total_memory_usage_bytes() - | |
587 resource_pool_->acquired_memory_usage_bytes())); | |
588 } | 642 } |
589 | 643 |
590 } // namespace cc | 644 } // namespace cc |