OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| 11 #include "base/thread_task_runner_handle.h" |
| 12 #include "base/trace_event/memory_dump_manager.h" |
11 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
12 #include "base/trace_event/trace_event_argument.h" | 14 #include "base/trace_event/trace_event_argument.h" |
13 #include "cc/base/math_util.h" | 15 #include "cc/base/math_util.h" |
14 #include "cc/debug/traced_value.h" | 16 #include "cc/debug/traced_value.h" |
15 #include "cc/raster/raster_buffer.h" | 17 #include "cc/raster/raster_buffer.h" |
16 #include "cc/resources/platform_color.h" | 18 #include "cc/resources/platform_color.h" |
17 #include "cc/resources/resource_pool.h" | |
18 #include "cc/resources/scoped_resource.h" | 19 #include "cc/resources/scoped_resource.h" |
| 20 #include "gpu/GLES2/gl2extchromium.h" |
19 #include "gpu/command_buffer/client/gles2_interface.h" | 21 #include "gpu/command_buffer/client/gles2_interface.h" |
20 #include "ui/gfx/gpu_memory_buffer.h" | 22 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
21 | 23 |
22 namespace cc { | 24 namespace cc { |
23 namespace { | 25 namespace { |
24 | 26 |
25 class RasterBufferImpl : public RasterBuffer { | 27 class RasterBufferImpl : public RasterBuffer { |
26 public: | 28 public: |
27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 29 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
28 ResourceProvider* resource_provider, | 30 ResourceProvider* resource_provider, |
29 ResourcePool* resource_pool, | |
30 ResourceFormat resource_format, | 31 ResourceFormat resource_format, |
31 const Resource* output_resource, | 32 const Resource* resource, |
32 uint64_t previous_content_id) | 33 uint64_t previous_content_id) |
33 : worker_pool_(worker_pool), | 34 : worker_pool_(worker_pool), |
34 resource_provider_(resource_provider), | 35 resource_(resource), |
35 resource_pool_(resource_pool), | 36 lock_(resource_provider, resource->id()), |
36 output_resource_(output_resource), | 37 previous_content_id_(previous_content_id) {} |
37 raster_content_id_(0), | |
38 sequence_(0) { | |
39 if (worker_pool->have_persistent_gpu_memory_buffers() && | |
40 previous_content_id) { | |
41 raster_resource_ = | |
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id); | |
43 } | |
44 if (raster_resource_) { | |
45 raster_content_id_ = previous_content_id; | |
46 DCHECK_EQ(resource_format, raster_resource_->format()); | |
47 DCHECK_EQ(output_resource->size().ToString(), | |
48 raster_resource_->size().ToString()); | |
49 } else { | |
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), | |
51 resource_format); | |
52 } | |
53 | 38 |
54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 39 ~RasterBufferImpl() override {} |
55 resource_provider_, raster_resource_->id())); | |
56 } | |
57 | |
58 ~RasterBufferImpl() override { | |
59 // Release write lock in case a copy was never scheduled. | |
60 lock_.reset(); | |
61 | |
62 // Make sure any scheduled copy operations are issued before we release the | |
63 // raster resource. | |
64 if (sequence_) | |
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | |
66 | |
67 // Return resources to pool so they can be used by another RasterBuffer | |
68 // instance. | |
69 resource_pool_->ReleaseResource(raster_resource_.Pass(), | |
70 raster_content_id_); | |
71 } | |
72 | 40 |
73 // Overridden from RasterBuffer: | 41 // Overridden from RasterBuffer: |
74 void Playback(const RasterSource* raster_source, | 42 void Playback(const RasterSource* raster_source, |
75 const gfx::Rect& raster_full_rect, | 43 const gfx::Rect& raster_full_rect, |
76 const gfx::Rect& raster_dirty_rect, | 44 const gfx::Rect& raster_dirty_rect, |
77 uint64_t new_content_id, | 45 uint64_t new_content_id, |
78 float scale) override { | 46 float scale) override { |
79 // If there's a raster_content_id_, we are reusing a resource with that | 47 worker_pool_->PlaybackAndCopyOnWorkerThread( |
80 // content id. | 48 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
81 bool reusing_raster_resource = raster_content_id_ != 0; | 49 scale, previous_content_id_, new_content_id); |
82 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | |
83 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), | |
84 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | |
85 scale); | |
86 // Store the content id of the resource to return to the pool. | |
87 raster_content_id_ = new_content_id; | |
88 } | 50 } |
89 | 51 |
90 private: | 52 private: |
91 OneCopyTileTaskWorkerPool* worker_pool_; | 53 OneCopyTileTaskWorkerPool* worker_pool_; |
92 ResourceProvider* resource_provider_; | 54 const Resource* resource_; |
93 ResourcePool* resource_pool_; | 55 ResourceProvider::ScopedWriteLockGL lock_; |
94 const Resource* output_resource_; | 56 uint64_t previous_content_id_; |
95 uint64_t raster_content_id_; | |
96 scoped_ptr<ScopedResource> raster_resource_; | |
97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | |
98 CopySequenceNumber sequence_; | |
99 | 57 |
100 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 58 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
101 }; | 59 }; |
102 | 60 |
103 // Number of in-flight copy operations to allow. | 61 // Delay between checking for query result to be available. |
104 const int kMaxCopyOperations = 32; | 62 const int kCheckForQueryResultAvailableTickRateMs = 1; |
105 | 63 |
106 // Delay between checking for copy operations to complete. | 64 // Number of attempts to allow before we perform a check that will wait for |
107 const int kCheckForCompletedCopyOperationsTickRateMs = 1; | 65 // query to complete. |
108 | 66 const int kMaxCheckForQueryResultAvailableAttempts = 256; |
109 // Number of failed attempts to allow before we perform a check that will | |
110 // wait for copy operations to complete if needed. | |
111 const int kFailedAttemptsBeforeWaitIfNeeded = 256; | |
112 | 67 |
113 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 68 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
114 // default batch size for copy operations. | 69 // default batch size for copy operations. |
115 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 70 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
116 | 71 |
| 72 // Delay before a staging buffer might be released. |
| 73 const int kStagingBufferExpirationDelayMs = 1000; |
| 74 |
| 75 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 76 GLuint complete = 1; |
| 77 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); |
| 78 return complete; |
| 79 } |
| 80 |
| 81 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 82 TRACE_EVENT0("cc", "WaitForQueryResult"); |
| 83 |
| 84 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; |
| 85 while (attempts_left--) { |
| 86 if (CheckForQueryResult(gl, query_id)) |
| 87 break; |
| 88 |
| 89 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( |
| 90 kCheckForQueryResultAvailableTickRateMs)); |
| 91 } |
| 92 |
| 93 unsigned result = 0; |
| 94 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); |
| 95 } |
| 96 |
117 } // namespace | 97 } // namespace |
118 | 98 |
119 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | 99 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size) |
120 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | 100 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {} |
121 const Resource* src, | 101 |
122 const Resource* dst, | 102 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { |
123 const gfx::Rect& rect) | 103 DCHECK_EQ(texture_id, 0u); |
124 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | 104 DCHECK_EQ(image_id, 0u); |
| 105 DCHECK_EQ(query_id, 0u); |
125 } | 106 } |
126 | 107 |
127 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | 108 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( |
| 109 gpu::gles2::GLES2Interface* gl) { |
| 110 if (query_id) { |
| 111 gl->DeleteQueriesEXT(1, &query_id); |
| 112 query_id = 0; |
| 113 } |
| 114 if (image_id) { |
| 115 gl->DestroyImageCHROMIUM(image_id); |
| 116 image_id = 0; |
| 117 } |
| 118 if (texture_id) { |
| 119 gl->DeleteTextures(1, &texture_id); |
| 120 texture_id = 0; |
| 121 } |
| 122 } |
| 123 |
| 124 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump( |
| 125 base::trace_event::ProcessMemoryDump* pmd, |
| 126 ResourceFormat format) const { |
| 127 if (!gpu_memory_buffer) |
| 128 return; |
| 129 |
| 130 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); |
| 131 std::string buffer_dump_name = |
| 132 base::StringPrintf("gpumemorybuffer/buffer_%d", buffer_id); |
| 133 base::trace_event::MemoryAllocatorDump* buffer_dump = |
| 134 pmd->CreateAllocatorDump(buffer_dump_name); |
| 135 |
| 136 size_t buffer_size_in_bytes = |
| 137 Resource::UncheckedMemorySizeBytes(size, format); |
| 138 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
| 139 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 140 static_cast<uint64_t>(buffer_size_in_bytes)); |
| 141 |
| 142 // Emit an ownership edge towards a global allocator dump node. |
| 143 const uint64 tracing_process_id = |
| 144 base::trace_event::MemoryDumpManager::GetInstance()->tracing_process_id(); |
| 145 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid = |
| 146 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id); |
| 147 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); |
| 148 |
| 149 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps) |
| 150 // the tracing UI will account the effective size of the buffer to the child. |
| 151 const int kImportance = 2; |
| 152 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance); |
128 } | 153 } |
129 | 154 |
130 // static | 155 // static |
131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 156 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
132 base::SequencedTaskRunner* task_runner, | 157 base::SequencedTaskRunner* task_runner, |
133 TaskGraphRunner* task_graph_runner, | 158 TaskGraphRunner* task_graph_runner, |
134 ContextProvider* context_provider, | 159 ContextProvider* context_provider, |
135 ResourceProvider* resource_provider, | 160 ResourceProvider* resource_provider, |
136 ResourcePool* resource_pool, | |
137 int max_copy_texture_chromium_size, | 161 int max_copy_texture_chromium_size, |
138 bool have_persistent_gpu_memory_buffers) { | 162 bool use_persistent_gpu_memory_buffers, |
| 163 unsigned image_target, |
| 164 int max_staging_buffers) { |
139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 165 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
140 task_runner, task_graph_runner, context_provider, resource_provider, | 166 task_runner, task_graph_runner, resource_provider, |
141 resource_pool, max_copy_texture_chromium_size, | 167 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, |
142 have_persistent_gpu_memory_buffers)); | 168 image_target, max_staging_buffers)); |
143 } | 169 } |
144 | 170 |
145 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 171 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( |
146 base::SequencedTaskRunner* task_runner, | 172 base::SequencedTaskRunner* task_runner, |
147 TaskGraphRunner* task_graph_runner, | 173 TaskGraphRunner* task_graph_runner, |
148 ContextProvider* context_provider, | |
149 ResourceProvider* resource_provider, | 174 ResourceProvider* resource_provider, |
150 ResourcePool* resource_pool, | |
151 int max_copy_texture_chromium_size, | 175 int max_copy_texture_chromium_size, |
152 bool have_persistent_gpu_memory_buffers) | 176 bool use_persistent_gpu_memory_buffers, |
| 177 unsigned image_target, |
| 178 int max_staging_buffers) |
153 : task_runner_(task_runner), | 179 : task_runner_(task_runner), |
154 task_graph_runner_(task_graph_runner), | 180 task_graph_runner_(task_graph_runner), |
155 namespace_token_(task_graph_runner->GetNamespaceToken()), | 181 namespace_token_(task_graph_runner->GetNamespaceToken()), |
156 context_provider_(context_provider), | |
157 resource_provider_(resource_provider), | 182 resource_provider_(resource_provider), |
158 resource_pool_(resource_pool), | |
159 max_bytes_per_copy_operation_( | 183 max_bytes_per_copy_operation_( |
160 max_copy_texture_chromium_size | 184 max_copy_texture_chromium_size |
161 ? std::min(kMaxBytesPerCopyOperation, | 185 ? std::min(kMaxBytesPerCopyOperation, |
162 max_copy_texture_chromium_size) | 186 max_copy_texture_chromium_size) |
163 : kMaxBytesPerCopyOperation), | 187 : kMaxBytesPerCopyOperation), |
164 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 188 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), |
165 last_issued_copy_operation_(0), | 189 image_target_(image_target), |
166 last_flushed_copy_operation_(0), | |
167 lock_(), | |
168 copy_operation_count_cv_(&lock_), | |
169 bytes_scheduled_since_last_flush_(0), | 190 bytes_scheduled_since_last_flush_(0), |
170 issued_copy_operation_count_(0), | 191 max_staging_buffers_(max_staging_buffers), |
171 next_copy_operation_sequence_(1), | 192 staging_buffer_expiration_delay_( |
172 check_for_completed_copy_operations_pending_(false), | 193 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), |
173 shutdown_(false), | 194 reduce_memory_usage_pending_(false), |
174 weak_ptr_factory_(this), | 195 weak_ptr_factory_(this), |
175 task_set_finished_weak_ptr_factory_(this) { | 196 task_set_finished_weak_ptr_factory_(this) { |
176 DCHECK(context_provider_); | 197 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
| 198 this, base::ThreadTaskRunnerHandle::Get()); |
| 199 reduce_memory_usage_callback_ = |
| 200 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage, |
| 201 weak_ptr_factory_.GetWeakPtr()); |
177 } | 202 } |
178 | 203 |
179 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 204 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
180 DCHECK_EQ(pending_copy_operations_.size(), 0u); | 205 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( |
| 206 this); |
181 } | 207 } |
182 | 208 |
183 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 209 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
184 return this; | 210 return this; |
185 } | 211 } |
186 | 212 |
187 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 213 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
188 client_ = client; | 214 client_ = client; |
189 } | 215 } |
190 | 216 |
191 void OneCopyTileTaskWorkerPool::Shutdown() { | 217 void OneCopyTileTaskWorkerPool::Shutdown() { |
192 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 218 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
193 | 219 |
194 { | |
195 base::AutoLock lock(lock_); | |
196 | |
197 shutdown_ = true; | |
198 copy_operation_count_cv_.Signal(); | |
199 } | |
200 | |
201 TaskGraph empty; | 220 TaskGraph empty; |
202 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 221 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
203 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 222 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
| 223 |
| 224 base::AutoLock lock(lock_); |
| 225 |
| 226 if (buffers_.empty()) |
| 227 return; |
| 228 |
| 229 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); |
204 } | 230 } |
205 | 231 |
206 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 232 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
207 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 233 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
208 | 234 |
209 #if DCHECK_IS_ON() | |
210 { | |
211 base::AutoLock lock(lock_); | |
212 DCHECK(!shutdown_); | |
213 } | |
214 #endif | |
215 | |
216 if (tasks_pending_.none()) | 235 if (tasks_pending_.none()) |
217 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 236 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
218 | 237 |
219 // Mark all task sets as pending. | 238 // Mark all task sets as pending. |
220 tasks_pending_.set(); | 239 tasks_pending_.set(); |
221 | 240 |
222 size_t priority = kTileTaskPriorityBase; | 241 size_t priority = kTileTaskPriorityBase; |
223 | 242 |
224 graph_.Reset(); | 243 graph_.Reset(); |
225 | 244 |
226 // Cancel existing OnTaskSetFinished callbacks. | 245 // Cancel existing OnTaskSetFinished callbacks. |
227 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 246 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
228 | 247 |
229 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; | 248 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
230 | 249 |
231 size_t task_count[kNumberOfTaskSets] = {0}; | 250 size_t task_count[kNumberOfTaskSets] = {0}; |
232 | 251 |
233 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 252 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
234 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( | 253 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( |
235 task_runner_.get(), | 254 task_runner_.get(), |
236 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, | 255 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, |
237 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); | 256 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); |
238 } | 257 } |
239 | 258 |
240 resource_pool_->CheckBusyResources(false); | |
241 | |
242 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 259 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
243 it != queue->items.end(); ++it) { | 260 it != queue->items.end(); ++it) { |
244 const TileTaskQueue::Item& item = *it; | 261 const TileTaskQueue::Item& item = *it; |
245 RasterTask* task = item.task; | 262 RasterTask* task = item.task; |
246 DCHECK(!task->HasCompleted()); | 263 DCHECK(!task->HasCompleted()); |
247 | 264 |
248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 265 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
249 if (!item.task_sets[task_set]) | 266 if (!item.task_sets[task_set]) |
250 continue; | 267 continue; |
251 | 268 |
252 ++task_count[task_set]; | 269 ++task_count[task_set]; |
253 | 270 |
254 graph_.edges.push_back( | 271 graph_.edges.push_back( |
255 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | 272 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); |
256 } | 273 } |
257 | 274 |
258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 275 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
259 } | 276 } |
260 | 277 |
261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 278 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), | 279 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), |
263 kTaskSetFinishedTaskPriorityBase + task_set, | 280 kTaskSetFinishedTaskPriorityBase + task_set, |
264 task_count[task_set]); | 281 task_count[task_set]); |
265 } | 282 } |
266 | 283 |
267 ScheduleTasksOnOriginThread(this, &graph_); | 284 ScheduleTasksOnOriginThread(this, &graph_); |
| 285 |
| 286 // Barrier to sync any new resources to the worker context. |
| 287 resource_provider_->output_surface() |
| 288 ->context_provider() |
| 289 ->ContextGL() |
| 290 ->OrderingBarrierCHROMIUM(); |
| 291 |
268 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 292 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
269 | 293 |
270 std::copy(new_task_set_finished_tasks, | 294 std::copy(new_task_set_finished_tasks, |
271 new_task_set_finished_tasks + kNumberOfTaskSets, | 295 new_task_set_finished_tasks + kNumberOfTaskSets, |
272 task_set_finished_tasks_); | 296 task_set_finished_tasks_); |
273 | 297 |
274 resource_pool_->ReduceResourceUsage(); | |
275 | |
276 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", | 298 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", |
277 StateAsValue()); | 299 StateAsValue()); |
278 } | 300 } |
279 | 301 |
280 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | 302 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { |
281 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | 303 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); |
282 | 304 |
283 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 305 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
284 &completed_tasks_); | 306 &completed_tasks_); |
285 | 307 |
(...skipping 18 matching lines...) |
304 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 326 return !PlatformColor::SameComponentOrder(GetResourceFormat()); |
305 } | 327 } |
306 | 328 |
307 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 329 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
308 const Resource* resource, | 330 const Resource* resource, |
309 uint64_t resource_content_id, | 331 uint64_t resource_content_id, |
310 uint64_t previous_content_id) { | 332 uint64_t previous_content_id) { |
311 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 333 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
312 // the dirty rect. | 334 // the dirty rect. |
313 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | 335 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); |
314 return make_scoped_ptr<RasterBuffer>( | 336 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( |
315 new RasterBufferImpl(this, resource_provider_, resource_pool_, | 337 this, resource_provider_, resource_provider_->best_texture_format(), |
316 resource_provider_->best_texture_format(), resource, | 338 resource, previous_content_id)); |
317 previous_content_id)); | |
318 } | 339 } |
319 | 340 |
320 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 341 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
321 scoped_ptr<RasterBuffer> buffer) { | 342 scoped_ptr<RasterBuffer> buffer) { |
322 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 343 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
323 } | 344 } |
324 | 345 |
325 CopySequenceNumber | 346 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( |
326 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 347 const Resource* resource, |
327 bool reusing_raster_resource, | 348 const ResourceProvider::ScopedWriteLockGL* resource_lock, |
328 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | |
329 raster_resource_write_lock, | |
330 const Resource* raster_resource, | |
331 const Resource* output_resource, | |
332 const RasterSource* raster_source, | 349 const RasterSource* raster_source, |
333 const gfx::Rect& raster_full_rect, | 350 const gfx::Rect& raster_full_rect, |
334 const gfx::Rect& raster_dirty_rect, | 351 const gfx::Rect& raster_dirty_rect, |
335 float scale) { | 352 float scale, |
336 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 353 uint64_t previous_content_id, |
337 raster_resource_write_lock->GetGpuMemoryBuffer(); | 354 uint64_t new_content_id) { |
338 if (gpu_memory_buffer) { | 355 base::AutoLock lock(lock_); |
339 void* data = NULL; | 356 |
340 bool rv = gpu_memory_buffer->Map(&data); | 357 scoped_ptr<StagingBuffer> staging_buffer = |
341 DCHECK(rv); | 358 AcquireStagingBuffer(resource, previous_content_id); |
342 int stride; | 359 DCHECK(staging_buffer); |
343 gpu_memory_buffer->GetStride(&stride); | 360 |
344 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | 361 { |
345 DCHECK_GE(stride, 0); | 362 base::AutoUnlock unlock(lock_); |
| 363 |
| 364 // Allocate GpuMemoryBuffer if necessary. |
| 365 if (!staging_buffer->gpu_memory_buffer) { |
| 366 staging_buffer->gpu_memory_buffer = |
| 367 resource_provider_->gpu_memory_buffer_manager() |
| 368 ->AllocateGpuMemoryBuffer( |
| 369 staging_buffer->size, |
| 370 ToGpuMemoryBufferFormat( |
| 371 resource_provider_->best_texture_format()), |
| 372 use_persistent_gpu_memory_buffers_ |
| 373 ? gfx::GpuMemoryBuffer::PERSISTENT_MAP |
| 374 : gfx::GpuMemoryBuffer::MAP); |
| 375 } |
346 | 376 |
347 gfx::Rect playback_rect = raster_full_rect; | 377 gfx::Rect playback_rect = raster_full_rect; |
348 if (reusing_raster_resource) { | 378 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
349 playback_rect.Intersect(raster_dirty_rect); | 379 // Reduce playback rect to dirty region if the content id of the staging |
350 } | 380 // buffer matches the previous content id. |
351 DCHECK(!playback_rect.IsEmpty()) | 381 if (previous_content_id == staging_buffer->content_id) |
352 << "Why are we rastering a tile that's not dirty?"; | 382 playback_rect.Intersect(raster_dirty_rect); |
353 TileTaskWorkerPool::PlaybackToMemory( | 383 } |
354 data, raster_resource->format(), raster_resource->size(), | 384 |
355 static_cast<size_t>(stride), raster_source, raster_full_rect, | 385 if (staging_buffer->gpu_memory_buffer) { |
356 playback_rect, scale); | 386 void* data = nullptr; |
357 gpu_memory_buffer->Unmap(); | 387 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); |
358 } | 388 DCHECK(rv); |
359 | 389 int stride; |
| 390 staging_buffer->gpu_memory_buffer->GetStride(&stride); |
| 391 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. |
| 392 DCHECK_GE(stride, 0); |
| 393 |
| 394 DCHECK(!playback_rect.IsEmpty()) |
| 395 << "Why are we rastering a tile that's not dirty?"; |
| 396 TileTaskWorkerPool::PlaybackToMemory( |
| 397 data, resource_provider_->best_texture_format(), staging_buffer->size, |
| 398 static_cast<size_t>(stride), raster_source, raster_full_rect, |
| 399 playback_rect, scale); |
| 400 staging_buffer->gpu_memory_buffer->Unmap(); |
| 401 staging_buffer->content_id = new_content_id; |
| 402 } |
| 403 } |
| 404 |
| 405 ContextProvider* context_provider = |
| 406 resource_provider_->output_surface()->worker_context_provider(); |
| 407 DCHECK(context_provider); |
| 408 |
| 409 { |
| 410 ContextProvider::ScopedContextGL scoped_context(context_provider); |
| 411 |
| 412 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 413 DCHECK(gl); |
| 414 |
| 415 if (!staging_buffer->texture_id) { |
| 416 gl->GenTextures(1, &staging_buffer->texture_id); |
| 417 gl->BindTexture(image_target_, staging_buffer->texture_id); |
| 418 gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
| 419 gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
| 420 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 421 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 422 if (staging_buffer->gpu_memory_buffer) { |
| 423 DCHECK(!staging_buffer->image_id); |
| 424 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
| 425 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
| 426 staging_buffer->size.width(), staging_buffer->size.height(), |
| 427 GLInternalFormat(resource_provider_->best_texture_format())); |
| 428 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 429 } else { |
| 430 gl->BindTexture(image_target_, staging_buffer->texture_id); |
| 431 gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 432 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 433 } |
| 434 } |
| 435 |
| 436 if (resource_provider_->use_sync_query()) { |
| 437 if (!staging_buffer->query_id) |
| 438 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
| 439 |
| 440 #if defined(OS_CHROMEOS) |
| 441 // TODO(reveman): This avoids a performance problem on some ChromeOS |
| 442 // devices. This needs to be removed to support native GpuMemoryBuffer |
| 443 // implementations. crbug.com/436314 |
| 444 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
| 445 #else |
| 446 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
| 447 staging_buffer->query_id); |
| 448 #endif |
| 449 } |
| 450 |
| 451 int bytes_per_row = |
| 452 (BitsPerPixel(resource_provider_->best_texture_format()) * |
| 453 resource->size().width()) / |
| 454 8; |
| 455 int chunk_size_in_rows = |
| 456 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
| 457 // Align chunk size to 4. Required to support compressed texture formats. |
| 458 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); |
| 459 int y = 0; |
| 460 int height = resource->size().height(); |
| 461 while (y < height) { |
| 462 // Copy at most |chunk_size_in_rows|. |
| 463 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
| 464 DCHECK_GT(rows_to_copy, 0); |
| 465 |
| 466 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, |
| 467 resource_lock->texture_id(), 0, y, 0, y, |
| 468 resource->size().width(), rows_to_copy, false, |
| 469 false, false); |
| 470 y += rows_to_copy; |
| 471 |
| 472 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
| 473 // used for this copy operation. |
| 474 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
| 475 |
| 476 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { |
| 477 gl->ShallowFlushCHROMIUM(); |
| 478 bytes_scheduled_since_last_flush_ = 0; |
| 479 } |
| 480 } |
| 481 |
| 482 if (resource_provider_->use_sync_query()) { |
| 483 #if defined(OS_CHROMEOS) |
| 484 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); |
| 485 #else |
| 486 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); |
| 487 #endif |
| 488 } |
| 489 |
| 490 // Barrier to sync worker context output to cc context. |
| 491 gl->OrderingBarrierCHROMIUM(); |
| 492 } |
| 493 |
| 494 staging_buffer->last_usage = base::TimeTicks::Now(); |
| 495 busy_buffers_.push_back(staging_buffer.Pass()); |
| 496 |
| 497 ScheduleReduceMemoryUsage(); |
| 498 } |
| 499 |
| 500 bool OneCopyTileTaskWorkerPool::OnMemoryDump( |
| 501 base::trace_event::ProcessMemoryDump* pmd) { |
360 base::AutoLock lock(lock_); | 502 base::AutoLock lock(lock_); |
361 | 503 |
362 CopySequenceNumber sequence = 0; | 504 ResourceFormat format = resource_provider_->best_texture_format(); |
363 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 505 std::for_each(buffers_.begin(), buffers_.end(), |
364 raster_resource->size().width()) / | 506 [pmd, format](const StagingBuffer* buffer) { |
365 8; | 507 buffer->OnMemoryDump(pmd, format); |
366 int chunk_size_in_rows = | 508 }); |
367 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 509 |
368 // Align chunk size to 4. Required to support compressed texture formats. | 510 return true; |
369 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | 511 } |
370 int y = 0; | 512 |
371 int height = raster_resource->size().height(); | 513 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> |
372 while (y < height) { | 514 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, |
373 int failed_attempts = 0; | 515 uint64_t previous_content_id) { |
374 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 516 lock_.AssertAcquired(); |
375 kMaxCopyOperations) { | 517 |
376 // Ignore limit when shutdown is set. | 518 scoped_ptr<StagingBuffer> staging_buffer; |
377 if (shutdown_) | 519 |
| 520 ContextProvider* context_provider = |
| 521 resource_provider_->output_surface()->worker_context_provider(); |
| 522 DCHECK(context_provider); |
| 523 |
| 524 ContextProvider::ScopedContextGL scoped_context(context_provider); |
| 525 |
| 526 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 527 DCHECK(gl); |
| 528 |
| 529 // Check if any busy buffers have become available. |
| 530 if (resource_provider_->use_sync_query()) { |
| 531 while (!busy_buffers_.empty()) { |
| 532 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) |
378 break; | 533 break; |
379 | 534 |
380 ++failed_attempts; | 535 free_buffers_.push_back(busy_buffers_.take_front()); |
381 | 536 } |
382 // Schedule a check that will also wait for operations to complete | 537 } |
383 // after too many failed attempts. | 538 |
384 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | 539 // Wait for number of non-free buffers to become less than the limit. |
385 | 540 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) { |
386 // Schedule a check for completed copy operations if too many operations | 541 // Stop when there are no more busy buffers to wait for. |
387 // are currently in-flight. | 542 if (busy_buffers_.empty()) |
388 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | 543 break; |
389 | 544 |
390 { | 545 if (resource_provider_->use_sync_query()) { |
391 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | 546 WaitForQueryResult(gl, busy_buffers_.front()->query_id); |
392 | 547 free_buffers_.push_back(busy_buffers_.take_front()); |
393 // Wait for in-flight copy operations to drop below limit. | 548 } else { |
394 copy_operation_count_cv_.Wait(); | 549 // Fall-back to glFinish if CHROMIUM_sync_query is not available. |
395 } | 550 gl->Finish(); |
396 } | 551 while (!busy_buffers_.empty()) |
397 | 552 free_buffers_.push_back(busy_buffers_.take_front()); |
398 // There may be more work available, so wake up another worker thread. | 553 } |
399 copy_operation_count_cv_.Signal(); | 554 } |
400 | 555 |
401 // Copy at most |chunk_size_in_rows|. | 556 // Find a staging buffer that allows us to perform partial raster when |
402 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 557 // using persistent GpuMemoryBuffers. |
403 DCHECK_GT(rows_to_copy, 0); | 558 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
404 | 559 StagingBufferDeque::iterator it = |
405 // |raster_resource_write_lock| is passed to the first copy operation as it | 560 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
406 // needs to be released before we can issue a copy. | 561 [previous_content_id](const StagingBuffer* buffer) { |
407 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 562 return buffer->content_id == previous_content_id; |
408 raster_resource_write_lock.Pass(), raster_resource, output_resource, | 563 }); |
409 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | 564 if (it != free_buffers_.end()) |
410 y += rows_to_copy; | 565 staging_buffer = free_buffers_.take(it); |
411 | 566 } |
412 // Acquire a sequence number for this copy operation. | 567 |
413 sequence = next_copy_operation_sequence_++; | 568 // Find staging buffer of correct size. |
414 | 569 if (!staging_buffer) { |
415 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 570 StagingBufferDeque::iterator it = |
416 // used for this copy operation. | 571 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
417 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 572 [resource](const StagingBuffer* buffer) { |
418 | 573 return buffer->size == resource->size(); |
419 // Post task that will advance last flushed copy operation to |sequence| | 574 }); |
420 // when |bytes_scheduled_since_last_flush_| has reached | 575 if (it != free_buffers_.end()) |
421 // |max_bytes_per_copy_operation_|. | 576 staging_buffer = free_buffers_.take(it); |
422 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 577 } |
423 task_runner_->PostTask( | 578 |
424 FROM_HERE, | 579 // Create new staging buffer if necessary. |
425 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | 580 if (!staging_buffer) { |
426 weak_ptr_factory_.GetWeakPtr(), sequence)); | 581 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); |
427 bytes_scheduled_since_last_flush_ = 0; | 582 buffers_.insert(staging_buffer.get()); |
428 } | 583 } |
429 } | 584 |
430 | 585 // Release enough free buffers to stay within the limit. |
431 return sequence; | 586 while (buffers_.size() > max_staging_buffers_) { |
432 } | 587 if (free_buffers_.empty()) |
433 | 588 break; |
434 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | 589 |
435 CopySequenceNumber sequence) { | 590 free_buffers_.front()->DestroyGLResources(gl); |
436 if (last_issued_copy_operation_ >= sequence) | 591 buffers_.erase(free_buffers_.front()); |
| 592 free_buffers_.take_front(); |
| 593 } |
| 594 |
| 595 return staging_buffer.Pass(); |
| 596 } |
| 597 |
| 598 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() { |
| 599 lock_.AssertAcquired(); |
| 600 |
| 601 if (!free_buffers_.empty()) |
| 602 return free_buffers_.front()->last_usage; |
| 603 |
| 604 if (!busy_buffers_.empty()) |
| 605 return busy_buffers_.front()->last_usage; |
| 606 |
| 607 return base::TimeTicks(); |
| 608 } |
| 609 |
| 610 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() { |
| 611 lock_.AssertAcquired(); |
| 612 |
| 613 if (reduce_memory_usage_pending_) |
437 return; | 614 return; |
438 | 615 |
439 IssueCopyOperations(sequence - last_issued_copy_operation_); | 616 reduce_memory_usage_pending_ = true; |
440 last_issued_copy_operation_ = sequence; | 617 |
441 } | 618 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer |
442 | 619 // should be released. |
443 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | 620 base::TimeTicks reduce_memory_usage_time = |
444 CopySequenceNumber sequence) { | 621 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
445 if (last_flushed_copy_operation_ >= sequence) | 622 task_runner_->PostDelayedTask( |
| 623 FROM_HERE, reduce_memory_usage_callback_, |
| 624 reduce_memory_usage_time - base::TimeTicks::Now()); |
| 625 } |
| 626 |
| 627 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() { |
| 628 base::AutoLock lock(lock_); |
| 629 |
| 630 reduce_memory_usage_pending_ = false; |
| 631 |
| 632 if (free_buffers_.empty() && busy_buffers_.empty()) |
446 return; | 633 return; |
447 | 634 |
448 AdvanceLastIssuedCopyTo(sequence); | 635 base::TimeTicks current_time = base::TimeTicks::Now(); |
449 | 636 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_); |
450 // Flush all issued copy operations. | 637 |
451 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | 638 if (free_buffers_.empty() && busy_buffers_.empty()) |
452 last_flushed_copy_operation_ = last_issued_copy_operation_; | 639 return; |
| 640 |
| 641 reduce_memory_usage_pending_ = true; |
| 642 |
| 643 // Schedule another call to ReduceMemoryUsage at the time when the next |
| 644 // buffer should be released. |
| 645 base::TimeTicks reduce_memory_usage_time = |
| 646 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
| 647 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_, |
| 648 reduce_memory_usage_time - current_time); |
| 649 } |
| 650 |
| 651 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince( |
| 652 base::TimeTicks time) { |
| 653 lock_.AssertAcquired(); |
| 654 |
| 655 ContextProvider* context_provider = |
| 656 resource_provider_->output_surface()->worker_context_provider(); |
| 657 DCHECK(context_provider); |
| 658 |
| 659 { |
| 660 ContextProvider::ScopedContextGL scoped_context(context_provider); |
| 661 |
| 662 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 663 DCHECK(gl); |
| 664 |
| 665 // Note: Front buffer is guaranteed to be LRU so we can stop releasing |
| 666 // buffers as soon as we find a buffer that has been used since |time|. |
| 667 while (!free_buffers_.empty()) { |
| 668 if (free_buffers_.front()->last_usage > time) |
| 669 return; |
| 670 |
| 671 free_buffers_.front()->DestroyGLResources(gl); |
| 672 buffers_.erase(free_buffers_.front()); |
| 673 free_buffers_.take_front(); |
| 674 } |
| 675 |
| 676 while (!busy_buffers_.empty()) { |
| 677 if (busy_buffers_.front()->last_usage > time) |
| 678 return; |
| 679 |
| 680 busy_buffers_.front()->DestroyGLResources(gl); |
| 681 buffers_.erase(busy_buffers_.front()); |
| 682 busy_buffers_.take_front(); |
| 683 } |
| 684 } |
453 } | 685 } |
454 | 686 |
455 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 687 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
456 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 688 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
457 task_set); | 689 task_set); |
458 | 690 |
459 DCHECK(tasks_pending_[task_set]); | 691 DCHECK(tasks_pending_[task_set]); |
460 tasks_pending_[task_set] = false; | 692 tasks_pending_[task_set] = false; |
461 if (tasks_pending_.any()) { | 693 if (tasks_pending_.any()) { |
462 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 694 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
463 "state", StateAsValue()); | 695 "state", StateAsValue()); |
464 } else { | 696 } else { |
465 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 697 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
466 } | 698 } |
467 client_->DidFinishRunningTileTasks(task_set); | 699 client_->DidFinishRunningTileTasks(task_set); |
468 } | 700 } |
469 | 701 |
470 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | |
471 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | |
472 count); | |
473 | |
474 CopyOperation::Deque copy_operations; | |
475 | |
476 { | |
477 base::AutoLock lock(lock_); | |
478 | |
479 for (int64 i = 0; i < count; ++i) { | |
480 DCHECK(!pending_copy_operations_.empty()); | |
481 copy_operations.push_back(pending_copy_operations_.take_front()); | |
482 } | |
483 | |
484 // Increment |issued_copy_operation_count_| to reflect the transition of | |
485 // copy operations from "pending" to "issued" state. | |
486 issued_copy_operation_count_ += copy_operations.size(); | |
487 } | |
488 | |
489 while (!copy_operations.empty()) { | |
490 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | |
491 | |
492 // Remove the write lock. | |
493 copy_operation->src_write_lock.reset(); | |
494 | |
495 // Copy contents of source resource to destination resource. | |
496 resource_provider_->CopyResource(copy_operation->src->id(), | |
497 copy_operation->dst->id(), | |
498 copy_operation->rect); | |
499 } | |
500 } | |
501 | |
502 void OneCopyTileTaskWorkerPool:: | |
503 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | |
504 bool wait_if_needed) { | |
505 lock_.AssertAcquired(); | |
506 | |
507 if (check_for_completed_copy_operations_pending_) | |
508 return; | |
509 | |
510 base::TimeTicks now = base::TimeTicks::Now(); | |
511 | |
512 // Schedule a check for completed copy operations as soon as possible but | |
513 // don't allow two consecutive checks to be scheduled to run less than the | |
514 // tick rate apart. | |
515 base::TimeTicks next_check_for_completed_copy_operations_time = | |
516 std::max(last_check_for_completed_copy_operations_time_ + | |
517 base::TimeDelta::FromMilliseconds( | |
518 kCheckForCompletedCopyOperationsTickRateMs), | |
519 now); | |
520 | |
521 task_runner_->PostDelayedTask( | |
522 FROM_HERE, | |
523 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | |
524 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | |
525 next_check_for_completed_copy_operations_time - now); | |
526 | |
527 last_check_for_completed_copy_operations_time_ = | |
528 next_check_for_completed_copy_operations_time; | |
529 check_for_completed_copy_operations_pending_ = true; | |
530 } | |
531 | |
532 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | |
533 bool wait_if_needed) { | |
534 TRACE_EVENT1("cc", | |
535 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | |
536 "wait_if_needed", wait_if_needed); | |
537 | |
538 resource_pool_->CheckBusyResources(wait_if_needed); | |
539 | |
540 { | |
541 base::AutoLock lock(lock_); | |
542 | |
543 DCHECK(check_for_completed_copy_operations_pending_); | |
544 check_for_completed_copy_operations_pending_ = false; | |
545 | |
546 // The number of busy resources in the pool reflects the number of issued | |
547 // copy operations that have not yet completed. | |
548 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | |
549 | |
550 // There may be work blocked on too many in-flight copy operations, so wake | |
551 // up a worker thread. | |
552 copy_operation_count_cv_.Signal(); | |
553 } | |
554 } | |
555 | |
556 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 702 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
557 OneCopyTileTaskWorkerPool::StateAsValue() const { | 703 OneCopyTileTaskWorkerPool::StateAsValue() const { |
558 scoped_refptr<base::trace_event::TracedValue> state = | 704 scoped_refptr<base::trace_event::TracedValue> state = |
559 new base::trace_event::TracedValue(); | 705 new base::trace_event::TracedValue(); |
560 | 706 |
561 state->BeginArray("tasks_pending"); | 707 state->BeginArray("tasks_pending"); |
562 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 708 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
563 state->AppendBoolean(tasks_pending_[task_set]); | 709 state->AppendBoolean(tasks_pending_[task_set]); |
564 state->EndArray(); | 710 state->EndArray(); |
565 state->BeginDictionary("staging_state"); | 711 state->BeginDictionary("staging_state"); |
566 StagingStateAsValueInto(state.get()); | 712 StagingStateAsValueInto(state.get()); |
567 state->EndDictionary(); | 713 state->EndDictionary(); |
568 | 714 |
569 return state; | 715 return state; |
570 } | 716 } |
571 | 717 |
572 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | 718 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( |
573 base::trace_event::TracedValue* staging_state) const { | 719 base::trace_event::TracedValue* staging_state) const { |
574 staging_state->SetInteger( | 720 base::AutoLock lock(lock_); |
575 "staging_resource_count", | 721 |
576 static_cast<int>(resource_pool_->total_resource_count())); | 722 staging_state->SetInteger("staging_buffer_count", |
577 staging_state->SetInteger( | 723 static_cast<int>(buffers_.size())); |
578 "bytes_used_for_staging_resources", | 724 staging_state->SetInteger("busy_count", |
579 static_cast<int>(resource_pool_->total_memory_usage_bytes())); | 725 static_cast<int>(busy_buffers_.size())); |
580 staging_state->SetInteger( | 726 staging_state->SetInteger("free_count", |
581 "pending_copy_count", | 727 static_cast<int>(free_buffers_.size())); |
582 static_cast<int>(resource_pool_->total_resource_count() - | |
583 resource_pool_->acquired_resource_count())); | |
584 staging_state->SetInteger( | |
585 "bytes_pending_copy", | |
586 static_cast<int>(resource_pool_->total_memory_usage_bytes() - | |
587 resource_pool_->acquired_memory_usage_bytes())); | |
588 } | 728 } |
589 | 729 |
590 } // namespace cc | 730 } // namespace cc |
OLD | NEW |
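
A minimal, self-contained sketch of the row-chunking arithmetic used by PlaybackAndCopyOnWorkerThread above, for readers who want to see it outside the diff. ComputeCopyBands and RowBand are hypothetical names introduced here for illustration only; the round-up to a multiple of 4 is inlined rather than calling MathUtil::RoundUp, and the real code issues a CopySubTextureCHROMIUM per band plus a ShallowFlushCHROMIUM once max_bytes_per_copy_operation_ bytes have accumulated, rather than returning a list of bands.

#include <algorithm>
#include <vector>

struct RowBand {
  int y;     // First row of the band.
  int rows;  // Number of rows copied by this band.
};

// Mirrors the chunking in PlaybackAndCopyOnWorkerThread: each band is at
// most |max_bytes_per_copy_operation| bytes, and the band height is rounded
// up to a multiple of 4 so compressed texture formats stay block aligned.
std::vector<RowBand> ComputeCopyBands(int width_px,
                                      int height_px,
                                      int bits_per_pixel,
                                      int max_bytes_per_copy_operation) {
  int bytes_per_row = (bits_per_pixel * width_px) / 8;
  int chunk_size_in_rows =
      std::max(1, max_bytes_per_copy_operation / bytes_per_row);
  chunk_size_in_rows = ((chunk_size_in_rows + 3) / 4) * 4;  // Round up to 4.

  std::vector<RowBand> bands;
  int y = 0;
  while (y < height_px) {
    int rows_to_copy = std::min(chunk_size_in_rows, height_px - y);
    bands.push_back({y, rows_to_copy});
    y += rows_to_copy;
  }
  return bands;
}

With the 4 MiB default and a 512x512 RGBA tile (32 bits per pixel), bytes_per_row is 2048 and chunk_size_in_rows works out to 2048, so a whole tile is copied in a single band and the flush threshold is reached after roughly four such tiles; that is where the "4 512x512 tiles" comment on kMaxBytesPerCopyOperation comes from.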