OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| 11 #include "base/thread_task_runner_handle.h" |
| 12 #include "base/trace_event/memory_dump_manager.h" |
11 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
12 #include "base/trace_event/trace_event_argument.h" | 14 #include "base/trace_event/trace_event_argument.h" |
13 #include "cc/base/math_util.h" | 15 #include "cc/base/math_util.h" |
14 #include "cc/debug/traced_value.h" | 16 #include "cc/debug/traced_value.h" |
15 #include "cc/raster/raster_buffer.h" | 17 #include "cc/raster/raster_buffer.h" |
16 #include "cc/resources/platform_color.h" | 18 #include "cc/resources/platform_color.h" |
17 #include "cc/resources/resource_pool.h" | |
18 #include "cc/resources/scoped_resource.h" | 19 #include "cc/resources/scoped_resource.h" |
| 20 #include "gpu/GLES2/gl2extchromium.h" |
19 #include "gpu/command_buffer/client/gles2_interface.h" | 21 #include "gpu/command_buffer/client/gles2_interface.h" |
20 #include "ui/gfx/gpu_memory_buffer.h" | 22 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
21 | 23 |
22 namespace cc { | 24 namespace cc { |
23 namespace { | 25 namespace { |
24 | 26 |
25 class RasterBufferImpl : public RasterBuffer { | 27 class RasterBufferImpl : public RasterBuffer { |
26 public: | 28 public: |
27 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, | 29 RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool, |
28 ResourceProvider* resource_provider, | 30 ResourceProvider* resource_provider, |
29 ResourcePool* resource_pool, | |
30 ResourceFormat resource_format, | 31 ResourceFormat resource_format, |
31 const Resource* output_resource, | 32 const Resource* resource, |
32 uint64_t previous_content_id) | 33 uint64_t previous_content_id) |
33 : worker_pool_(worker_pool), | 34 : worker_pool_(worker_pool), |
34 resource_provider_(resource_provider), | 35 resource_(resource), |
35 resource_pool_(resource_pool), | 36 lock_(resource_provider, resource->id()), |
36 output_resource_(output_resource), | 37 previous_content_id_(previous_content_id) {} |
37 raster_content_id_(0), | |
38 sequence_(0) { | |
39 if (worker_pool->have_persistent_gpu_memory_buffers() && | |
40 previous_content_id) { | |
41 raster_resource_ = | |
42 resource_pool->TryAcquireResourceWithContentId(previous_content_id); | |
43 } | |
44 if (raster_resource_) { | |
45 raster_content_id_ = previous_content_id; | |
46 DCHECK_EQ(resource_format, raster_resource_->format()); | |
47 DCHECK_EQ(output_resource->size().ToString(), | |
48 raster_resource_->size().ToString()); | |
49 } else { | |
50 raster_resource_ = resource_pool->AcquireResource(output_resource->size(), | |
51 resource_format); | |
52 } | |
53 | 38 |
54 lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer( | 39 ~RasterBufferImpl() override {} |
55 resource_provider_, raster_resource_->id())); | |
56 } | |
57 | |
58 ~RasterBufferImpl() override { | |
59 // Release write lock in case a copy was never scheduled. | |
60 lock_.reset(); | |
61 | |
62 // Make sure any scheduled copy operations are issued before we release the | |
63 // raster resource. | |
64 if (sequence_) | |
65 worker_pool_->AdvanceLastIssuedCopyTo(sequence_); | |
66 | |
67 // Return resources to pool so they can be used by another RasterBuffer | |
68 // instance. | |
69 resource_pool_->ReleaseResource(raster_resource_.Pass(), | |
70 raster_content_id_); | |
71 } | |
72 | 40 |
73 // Overridden from RasterBuffer: | 41 // Overridden from RasterBuffer: |
74 void Playback(const RasterSource* raster_source, | 42 void Playback(const RasterSource* raster_source, |
75 const gfx::Rect& raster_full_rect, | 43 const gfx::Rect& raster_full_rect, |
76 const gfx::Rect& raster_dirty_rect, | 44 const gfx::Rect& raster_dirty_rect, |
77 uint64_t new_content_id, | 45 uint64_t new_content_id, |
78 float scale) override { | 46 float scale) override { |
79 // If there's a raster_content_id_, we are reusing a resource with that | 47 worker_pool_->PlaybackAndCopyOnWorkerThread( |
80 // content id. | 48 resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect, |
81 bool reusing_raster_resource = raster_content_id_ != 0; | 49 scale, previous_content_id_, new_content_id); |
82 sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread( | |
83 reusing_raster_resource, lock_.Pass(), raster_resource_.get(), | |
84 output_resource_, raster_source, raster_full_rect, raster_dirty_rect, | |
85 scale); | |
86 // Store the content id of the resource to return to the pool. | |
87 raster_content_id_ = new_content_id; | |
88 } | 50 } |
89 | 51 |
90 private: | 52 private: |
91 OneCopyTileTaskWorkerPool* worker_pool_; | 53 OneCopyTileTaskWorkerPool* worker_pool_; |
92 ResourceProvider* resource_provider_; | 54 const Resource* resource_; |
93 ResourcePool* resource_pool_; | 55 ResourceProvider::ScopedWriteLockGL lock_; |
94 const Resource* output_resource_; | 56 uint64_t previous_content_id_; |
95 uint64_t raster_content_id_; | |
96 scoped_ptr<ScopedResource> raster_resource_; | |
97 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_; | |
98 CopySequenceNumber sequence_; | |
99 | 57 |
100 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); | 58 DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl); |
101 }; | 59 }; |
102 | 60 |
103 // Number of in-flight copy operations to allow. | 61 // Delay between checking for query result to be available. |
104 const int kMaxCopyOperations = 32; | 62 const int kCheckForQueryResultAvailableTickRateMs = 1; |
105 | 63 |
106 // Delay between checking for copy operations to complete. | 64 // Number of attempts to allow before we perform a check that will wait for |
107 const int kCheckForCompletedCopyOperationsTickRateMs = 1; | 65 // query to complete. |
108 | 66 const int kMaxCheckForQueryResultAvailableAttempts = 256; |
109 // Number of failed attempts to allow before we perform a check that will | |
110 // wait for copy operations to complete if needed. | |
111 const int kFailedAttemptsBeforeWaitIfNeeded = 256; | |
112 | 67 |
113 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good | 68 // 4MiB is the size of 4 512x512 tiles, which has proven to be a good |
114 // default batch size for copy operations. | 69 // default batch size for copy operations. |
115 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; | 70 const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4; |
116 | 71 |
| 72 // Delay before a staging buffer might be released. |
| 73 const int kStagingBufferExpirationDelayMs = 1000; |
| 74 |
| 75 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 76 unsigned complete = 1; |
| 77 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); |
| 78 return complete; |
| 79 } |
| 80 |
| 81 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { |
| 82 TRACE_EVENT0("cc", "WaitForQueryResult"); |
| 83 |
| 84 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; |
| 85 while (attempts_left--) { |
| 86 if (CheckForQueryResult(gl, query_id)) |
| 87 break; |
| 88 |
| 89 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( |
| 90 kCheckForQueryResultAvailableTickRateMs)); |
| 91 } |
| 92 |
| 93 unsigned result = 0; |
| 94 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); |
| 95 } |
| 96 |
117 } // namespace | 97 } // namespace |
118 | 98 |
119 OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation( | 99 OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size) |
120 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock, | 100 : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {} |
121 const Resource* src, | 101 |
122 const Resource* dst, | 102 OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() { |
123 const gfx::Rect& rect) | 103 DCHECK_EQ(texture_id, 0u); |
124 : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) { | 104 DCHECK_EQ(image_id, 0u); |
| 105 DCHECK_EQ(query_id, 0u); |
125 } | 106 } |
126 | 107 |
127 OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() { | 108 void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources( |
| 109 gpu::gles2::GLES2Interface* gl) { |
| 110 if (query_id) { |
| 111 gl->DeleteQueriesEXT(1, &query_id); |
| 112 query_id = 0; |
| 113 } |
| 114 if (image_id) { |
| 115 gl->DestroyImageCHROMIUM(image_id); |
| 116 image_id = 0; |
| 117 } |
| 118 if (texture_id) { |
| 119 gl->DeleteTextures(1, &texture_id); |
| 120 texture_id = 0; |
| 121 } |
| 122 } |
| 123 |
| 124 void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump( |
| 125 base::trace_event::ProcessMemoryDump* pmd, |
| 126 ResourceFormat format, |
| 127 bool in_free_list) const { |
| 128 if (!gpu_memory_buffer) |
| 129 return; |
| 130 |
| 131 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); |
| 132 std::string buffer_dump_name = |
| 133 base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id); |
| 134 base::trace_event::MemoryAllocatorDump* buffer_dump = |
| 135 pmd->CreateAllocatorDump(buffer_dump_name); |
| 136 |
| 137 size_t buffer_size_in_bytes = |
| 138 Resource::UncheckedMemorySizeBytes(size, format); |
| 139 buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
| 140 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 141 static_cast<uint64_t>(buffer_size_in_bytes)); |
| 142 buffer_dump->AddScalar("free_size", |
| 143 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 144 in_free_list ? buffer_size_in_bytes : 0); |
| 145 |
| 146 // Emit an ownership edge towards a global allocator dump node. |
| 147 const uint64 tracing_process_id = |
| 148 base::trace_event::MemoryDumpManager::GetInstance()->tracing_process_id(); |
| 149 base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid = |
| 150 gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id); |
| 151 pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid); |
| 152 |
| 153 // By creating an edge with a higher |importance| (w.r.t. browser-side dumps) |
| 154 // the tracing UI will account the effective size of the buffer to the child. |
| 155 const int kImportance = 2; |
| 156 pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance); |
128 } | 157 } |
129 | 158 |
130 // static | 159 // static |
131 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( | 160 scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create( |
132 base::SequencedTaskRunner* task_runner, | 161 base::SequencedTaskRunner* task_runner, |
133 TaskGraphRunner* task_graph_runner, | 162 TaskGraphRunner* task_graph_runner, |
134 ContextProvider* context_provider, | 163 ContextProvider* context_provider, |
135 ResourceProvider* resource_provider, | 164 ResourceProvider* resource_provider, |
136 ResourcePool* resource_pool, | |
137 int max_copy_texture_chromium_size, | 165 int max_copy_texture_chromium_size, |
138 bool have_persistent_gpu_memory_buffers) { | 166 bool use_persistent_gpu_memory_buffers, |
| 167 unsigned image_target, |
| 168 int max_staging_buffers) { |
139 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( | 169 return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool( |
140 task_runner, task_graph_runner, context_provider, resource_provider, | 170 task_runner, task_graph_runner, resource_provider, |
141 resource_pool, max_copy_texture_chromium_size, | 171 max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers, |
142 have_persistent_gpu_memory_buffers)); | 172 image_target, max_staging_buffers)); |
143 } | 173 } |
144 | 174 |
145 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( | 175 OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool( |
146 base::SequencedTaskRunner* task_runner, | 176 base::SequencedTaskRunner* task_runner, |
147 TaskGraphRunner* task_graph_runner, | 177 TaskGraphRunner* task_graph_runner, |
148 ContextProvider* context_provider, | |
149 ResourceProvider* resource_provider, | 178 ResourceProvider* resource_provider, |
150 ResourcePool* resource_pool, | |
151 int max_copy_texture_chromium_size, | 179 int max_copy_texture_chromium_size, |
152 bool have_persistent_gpu_memory_buffers) | 180 bool use_persistent_gpu_memory_buffers, |
| 181 unsigned image_target, |
| 182 int max_staging_buffers) |
153 : task_runner_(task_runner), | 183 : task_runner_(task_runner), |
154 task_graph_runner_(task_graph_runner), | 184 task_graph_runner_(task_graph_runner), |
155 namespace_token_(task_graph_runner->GetNamespaceToken()), | 185 namespace_token_(task_graph_runner->GetNamespaceToken()), |
156 context_provider_(context_provider), | |
157 resource_provider_(resource_provider), | 186 resource_provider_(resource_provider), |
158 resource_pool_(resource_pool), | |
159 max_bytes_per_copy_operation_( | 187 max_bytes_per_copy_operation_( |
160 max_copy_texture_chromium_size | 188 max_copy_texture_chromium_size |
161 ? std::min(kMaxBytesPerCopyOperation, | 189 ? std::min(kMaxBytesPerCopyOperation, |
162 max_copy_texture_chromium_size) | 190 max_copy_texture_chromium_size) |
163 : kMaxBytesPerCopyOperation), | 191 : kMaxBytesPerCopyOperation), |
164 have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers), | 192 use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers), |
165 last_issued_copy_operation_(0), | 193 image_target_(image_target), |
166 last_flushed_copy_operation_(0), | |
167 lock_(), | |
168 copy_operation_count_cv_(&lock_), | |
169 bytes_scheduled_since_last_flush_(0), | 194 bytes_scheduled_since_last_flush_(0), |
170 issued_copy_operation_count_(0), | 195 max_staging_buffers_(max_staging_buffers), |
171 next_copy_operation_sequence_(1), | 196 staging_buffer_expiration_delay_( |
172 check_for_completed_copy_operations_pending_(false), | 197 base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)), |
173 shutdown_(false), | 198 reduce_memory_usage_pending_(false), |
174 weak_ptr_factory_(this), | 199 weak_ptr_factory_(this), |
175 task_set_finished_weak_ptr_factory_(this) { | 200 task_set_finished_weak_ptr_factory_(this) { |
176 DCHECK(context_provider_); | 201 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider( |
| 202 this, base::ThreadTaskRunnerHandle::Get()); |
| 203 reduce_memory_usage_callback_ = |
| 204 base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage, |
| 205 weak_ptr_factory_.GetWeakPtr()); |
177 } | 206 } |
178 | 207 |
179 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { | 208 OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() { |
180 DCHECK_EQ(pending_copy_operations_.size(), 0u); | 209 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( |
| 210 this); |
181 } | 211 } |
182 | 212 |
183 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { | 213 TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() { |
184 return this; | 214 return this; |
185 } | 215 } |
186 | 216 |
187 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { | 217 void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) { |
188 client_ = client; | 218 client_ = client; |
189 } | 219 } |
190 | 220 |
191 void OneCopyTileTaskWorkerPool::Shutdown() { | 221 void OneCopyTileTaskWorkerPool::Shutdown() { |
192 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); | 222 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown"); |
193 | 223 |
194 { | |
195 base::AutoLock lock(lock_); | |
196 | |
197 shutdown_ = true; | |
198 copy_operation_count_cv_.Signal(); | |
199 } | |
200 | |
201 TaskGraph empty; | 224 TaskGraph empty; |
202 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); | 225 task_graph_runner_->ScheduleTasks(namespace_token_, &empty); |
203 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | 226 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); |
| 227 |
| 228 base::AutoLock lock(lock_); |
| 229 |
| 230 if (buffers_.empty()) |
| 231 return; |
| 232 |
| 233 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); |
204 } | 234 } |
205 | 235 |
206 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { | 236 void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { |
207 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); | 237 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks"); |
208 | 238 |
209 #if DCHECK_IS_ON() | |
210 { | |
211 base::AutoLock lock(lock_); | |
212 DCHECK(!shutdown_); | |
213 } | |
214 #endif | |
215 | |
216 if (tasks_pending_.none()) | 239 if (tasks_pending_.none()) |
217 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); | 240 TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this); |
218 | 241 |
219 // Mark all task sets as pending. | 242 // Mark all task sets as pending. |
220 tasks_pending_.set(); | 243 tasks_pending_.set(); |
221 | 244 |
222 size_t priority = kTileTaskPriorityBase; | 245 size_t priority = kTileTaskPriorityBase; |
223 | 246 |
224 graph_.Reset(); | 247 graph_.Reset(); |
225 | 248 |
226 // Cancel existing OnTaskSetFinished callbacks. | 249 // Cancel existing OnTaskSetFinished callbacks. |
227 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); | 250 task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs(); |
228 | 251 |
229 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; | 252 scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets]; |
230 | 253 |
231 size_t task_count[kNumberOfTaskSets] = {0}; | 254 size_t task_count[kNumberOfTaskSets] = {0}; |
232 | 255 |
233 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 256 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
234 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( | 257 new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask( |
235 task_runner_.get(), | 258 task_runner_.get(), |
236 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, | 259 base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished, |
237 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); | 260 task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set)); |
238 } | 261 } |
239 | 262 |
240 resource_pool_->CheckBusyResources(false); | |
241 | |
242 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); | 263 for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin(); |
243 it != queue->items.end(); ++it) { | 264 it != queue->items.end(); ++it) { |
244 const TileTaskQueue::Item& item = *it; | 265 const TileTaskQueue::Item& item = *it; |
245 RasterTask* task = item.task; | 266 RasterTask* task = item.task; |
246 DCHECK(!task->HasCompleted()); | 267 DCHECK(!task->HasCompleted()); |
247 | 268 |
248 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 269 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
249 if (!item.task_sets[task_set]) | 270 if (!item.task_sets[task_set]) |
250 continue; | 271 continue; |
251 | 272 |
252 ++task_count[task_set]; | 273 ++task_count[task_set]; |
253 | 274 |
254 graph_.edges.push_back( | 275 graph_.edges.push_back( |
255 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); | 276 TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get())); |
256 } | 277 } |
257 | 278 |
258 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); | 279 InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++); |
259 } | 280 } |
260 | 281 |
261 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { | 282 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { |
262 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), | 283 InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), |
263 kTaskSetFinishedTaskPriorityBase + task_set, | 284 kTaskSetFinishedTaskPriorityBase + task_set, |
264 task_count[task_set]); | 285 task_count[task_set]); |
265 } | 286 } |
266 | 287 |
267 ScheduleTasksOnOriginThread(this, &graph_); | 288 ScheduleTasksOnOriginThread(this, &graph_); |
| 289 |
| 290 // Barrier to sync any new resources to the worker context. |
| 291 resource_provider_->output_surface() |
| 292 ->context_provider() |
| 293 ->ContextGL() |
| 294 ->OrderingBarrierCHROMIUM(); |
| 295 |
268 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | 296 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); |
269 | 297 |
270 std::copy(new_task_set_finished_tasks, | 298 std::copy(new_task_set_finished_tasks, |
271 new_task_set_finished_tasks + kNumberOfTaskSets, | 299 new_task_set_finished_tasks + kNumberOfTaskSets, |
272 task_set_finished_tasks_); | 300 task_set_finished_tasks_); |
273 | 301 |
274 resource_pool_->ReduceResourceUsage(); | |
275 | |
276 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", | 302 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state", |
277 StateAsValue()); | 303 StateAsValue()); |
278 } | 304 } |
279 | 305 |
280 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { | 306 void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() { |
281 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); | 307 TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks"); |
282 | 308 |
283 task_graph_runner_->CollectCompletedTasks(namespace_token_, | 309 task_graph_runner_->CollectCompletedTasks(namespace_token_, |
284 &completed_tasks_); | 310 &completed_tasks_); |
285 | 311 |
(...skipping 18 matching lines...) |
304 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 330 return !PlatformColor::SameComponentOrder(GetResourceFormat()); |
305 } | 331 } |
306 | 332 |
307 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 333 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
308 const Resource* resource, | 334 const Resource* resource, |
309 uint64_t resource_content_id, | 335 uint64_t resource_content_id, |
310 uint64_t previous_content_id) { | 336 uint64_t previous_content_id) { |
311 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 337 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
312 // the dirty rect. | 338 // the dirty rect. |
313 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); | 339 DCHECK_EQ(resource->format(), resource_provider_->best_texture_format()); |
314 return make_scoped_ptr<RasterBuffer>( | 340 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( |
315 new RasterBufferImpl(this, resource_provider_, resource_pool_, | 341 this, resource_provider_, resource_provider_->best_texture_format(), |
316 resource_provider_->best_texture_format(), resource, | 342 resource, previous_content_id)); |
317 previous_content_id)); | |
318 } | 343 } |
319 | 344 |
320 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 345 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
321 scoped_ptr<RasterBuffer> buffer) { | 346 scoped_ptr<RasterBuffer> buffer) { |
322 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 347 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
323 } | 348 } |
324 | 349 |
325 CopySequenceNumber | 350 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( |
326 OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread( | 351 const Resource* resource, |
327 bool reusing_raster_resource, | 352 const ResourceProvider::ScopedWriteLockGL* resource_lock, |
328 scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> | |
329 raster_resource_write_lock, | |
330 const Resource* raster_resource, | |
331 const Resource* output_resource, | |
332 const RasterSource* raster_source, | 353 const RasterSource* raster_source, |
333 const gfx::Rect& raster_full_rect, | 354 const gfx::Rect& raster_full_rect, |
334 const gfx::Rect& raster_dirty_rect, | 355 const gfx::Rect& raster_dirty_rect, |
335 float scale) { | 356 float scale, |
336 gfx::GpuMemoryBuffer* gpu_memory_buffer = | 357 uint64_t previous_content_id, |
337 raster_resource_write_lock->GetGpuMemoryBuffer(); | 358 uint64_t new_content_id) { |
338 if (gpu_memory_buffer) { | 359 base::AutoLock lock(lock_); |
339 void* data = NULL; | 360 |
340 bool rv = gpu_memory_buffer->Map(&data); | 361 scoped_ptr<StagingBuffer> staging_buffer = |
341 DCHECK(rv); | 362 AcquireStagingBuffer(resource, previous_content_id); |
342 int stride; | 363 DCHECK(staging_buffer); |
343 gpu_memory_buffer->GetStride(&stride); | 364 |
344 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | 365 { |
345 DCHECK_GE(stride, 0); | 366 base::AutoUnlock unlock(lock_); |
| 367 |
| 368 // Allocate GpuMemoryBuffer if necessary. |
| 369 if (!staging_buffer->gpu_memory_buffer) { |
| 370 staging_buffer->gpu_memory_buffer = |
| 371 resource_provider_->gpu_memory_buffer_manager() |
| 372 ->AllocateGpuMemoryBuffer( |
| 373 staging_buffer->size, |
| 374 ToGpuMemoryBufferFormat( |
| 375 resource_provider_->best_texture_format()), |
| 376 use_persistent_gpu_memory_buffers_ |
| 377 ? gfx::GpuMemoryBuffer::PERSISTENT_MAP |
| 378 : gfx::GpuMemoryBuffer::MAP); |
| 379 } |
346 | 380 |
347 gfx::Rect playback_rect = raster_full_rect; | 381 gfx::Rect playback_rect = raster_full_rect; |
348 if (reusing_raster_resource) { | 382 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
349 playback_rect.Intersect(raster_dirty_rect); | 383 // Reduce playback rect to dirty region if the content id of the staging |
350 } | 384 // buffer matches the previous content id. |
351 DCHECK(!playback_rect.IsEmpty()) | 385 if (previous_content_id == staging_buffer->content_id) |
352 << "Why are we rastering a tile that's not dirty?"; | 386 playback_rect.Intersect(raster_dirty_rect); |
353 TileTaskWorkerPool::PlaybackToMemory( | 387 } |
354 data, raster_resource->format(), raster_resource->size(), | 388 |
355 static_cast<size_t>(stride), raster_source, raster_full_rect, | 389 if (staging_buffer->gpu_memory_buffer) { |
356 playback_rect, scale); | 390 void* data = nullptr; |
357 gpu_memory_buffer->Unmap(); | 391 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); |
358 } | 392 DCHECK(rv); |
359 | 393 int stride; |
| 394 staging_buffer->gpu_memory_buffer->GetStride(&stride); |
| 395 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. |
| 396 DCHECK_GE(stride, 0); |
| 397 |
| 398 DCHECK(!playback_rect.IsEmpty()) |
| 399 << "Why are we rastering a tile that's not dirty?"; |
| 400 TileTaskWorkerPool::PlaybackToMemory( |
| 401 data, resource_provider_->best_texture_format(), staging_buffer->size, |
| 402 static_cast<size_t>(stride), raster_source, raster_full_rect, |
| 403 playback_rect, scale); |
| 404 staging_buffer->gpu_memory_buffer->Unmap(); |
| 405 staging_buffer->content_id = new_content_id; |
| 406 } |
| 407 } |
| 408 |
| 409 ContextProvider* context_provider = |
| 410 resource_provider_->output_surface()->worker_context_provider(); |
| 411 DCHECK(context_provider); |
| 412 |
| 413 { |
| 414 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 415 |
| 416 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 417 DCHECK(gl); |
| 418 |
| 419 if (!staging_buffer->texture_id) { |
| 420 gl->GenTextures(1, &staging_buffer->texture_id); |
| 421 gl->BindTexture(image_target_, staging_buffer->texture_id); |
| 422 gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
| 423 gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
| 424 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 425 gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 426 if (staging_buffer->gpu_memory_buffer) { |
| 427 DCHECK(!staging_buffer->image_id); |
| 428 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
| 429 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
| 430 staging_buffer->size.width(), staging_buffer->size.height(), |
| 431 GLInternalFormat(resource_provider_->best_texture_format())); |
| 432 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 433 } else { |
| 434 gl->BindTexture(image_target_, staging_buffer->texture_id); |
| 435 gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 436 gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id); |
| 437 } |
| 438 } |
| 439 |
| 440 if (resource_provider_->use_sync_query()) { |
| 441 if (!staging_buffer->query_id) |
| 442 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
| 443 |
| 444 #if defined(OS_CHROMEOS) |
| 445 // TODO(reveman): This avoids a performance problem on some ChromeOS |
| 446 // devices. This needs to be removed to support native GpuMemoryBuffer |
| 447 // implementations. crbug.com/436314 |
| 448 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
| 449 #else |
| 450 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
| 451 staging_buffer->query_id); |
| 452 #endif |
| 453 } |
| 454 |
| 455 int bytes_per_row = |
| 456 (BitsPerPixel(resource_provider_->best_texture_format()) * |
| 457 resource->size().width()) / |
| 458 8; |
| 459 int chunk_size_in_rows = |
| 460 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
| 461 // Align chunk size to 4. Required to support compressed texture formats. |
| 462 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); |
| 463 int y = 0; |
| 464 int height = resource->size().height(); |
| 465 while (y < height) { |
| 466 // Copy at most |chunk_size_in_rows|. |
| 467 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
| 468 DCHECK_GT(rows_to_copy, 0); |
| 469 |
| 470 gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id, |
| 471 resource_lock->texture_id(), 0, y, 0, y, |
| 472 resource->size().width(), rows_to_copy, false, |
| 473 false, false); |
| 474 y += rows_to_copy; |
| 475 |
| 476 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory |
| 477 // used for this copy operation. |
| 478 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; |
| 479 |
| 480 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { |
| 481 gl->ShallowFlushCHROMIUM(); |
| 482 bytes_scheduled_since_last_flush_ = 0; |
| 483 } |
| 484 } |
| 485 |
| 486 if (resource_provider_->use_sync_query()) { |
| 487 #if defined(OS_CHROMEOS) |
| 488 gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM); |
| 489 #else |
| 490 gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM); |
| 491 #endif |
| 492 } |
| 493 |
| 494 // Barrier to sync worker context output to cc context. |
| 495 gl->OrderingBarrierCHROMIUM(); |
| 496 } |
| 497 |
| 498 staging_buffer->last_usage = base::TimeTicks::Now(); |
| 499 busy_buffers_.push_back(staging_buffer.Pass()); |
| 500 |
| 501 ScheduleReduceMemoryUsage(); |
| 502 } |
| 503 |
| 504 bool OneCopyTileTaskWorkerPool::OnMemoryDump( |
| 505 base::trace_event::ProcessMemoryDump* pmd) { |
360 base::AutoLock lock(lock_); | 506 base::AutoLock lock(lock_); |
361 | 507 |
362 CopySequenceNumber sequence = 0; | 508 for (const auto& buffer : buffers_) { |
363 int bytes_per_row = (BitsPerPixel(raster_resource->format()) * | 509 buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(), |
364 raster_resource->size().width()) / | 510 std::find(free_buffers_.begin(), free_buffers_.end(), |
365 8; | 511 buffer) != free_buffers_.end()); |
366 int chunk_size_in_rows = | 512 } |
367 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 513 |
368 // Align chunk size to 4. Required to support compressed texture formats. | 514 return true; |
369 chunk_size_in_rows = MathUtil::RoundUp(chunk_size_in_rows, 4); | 515 } |
370 int y = 0; | 516 |
371 int height = raster_resource->size().height(); | 517 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> |
372 while (y < height) { | 518 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, |
373 int failed_attempts = 0; | 519 uint64_t previous_content_id) { |
374 while ((pending_copy_operations_.size() + issued_copy_operation_count_) >= | 520 lock_.AssertAcquired(); |
375 kMaxCopyOperations) { | 521 |
376 // Ignore limit when shutdown is set. | 522 scoped_ptr<StagingBuffer> staging_buffer; |
377 if (shutdown_) | 523 |
| 524 ContextProvider* context_provider = |
| 525 resource_provider_->output_surface()->worker_context_provider(); |
| 526 DCHECK(context_provider); |
| 527 |
| 528 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 529 |
| 530 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 531 DCHECK(gl); |
| 532 |
| 533 // Check if any busy buffers have become available. |
| 534 if (resource_provider_->use_sync_query()) { |
| 535 while (!busy_buffers_.empty()) { |
| 536 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) |
378 break; | 537 break; |
379 | 538 |
380 ++failed_attempts; | 539 free_buffers_.push_back(busy_buffers_.take_front()); |
381 | 540 } |
382 // Schedule a check that will also wait for operations to complete | 541 } |
383 // after too many failed attempts. | 542 |
384 bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded; | 543 // Wait for number of non-free buffers to become less than the limit. |
385 | 544 while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) { |
386 // Schedule a check for completed copy operations if too many operations | 545 // Stop when there are no more busy buffers to wait for. |
387 // are currently in-flight. | 546 if (busy_buffers_.empty()) |
388 ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed); | 547 break; |
389 | 548 |
390 { | 549 if (resource_provider_->use_sync_query()) { |
391 TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete"); | 550 WaitForQueryResult(gl, busy_buffers_.front()->query_id); |
392 | 551 free_buffers_.push_back(busy_buffers_.take_front()); |
393 // Wait for in-flight copy operations to drop below limit. | 552 } else { |
394 copy_operation_count_cv_.Wait(); | 553 // Fall-back to glFinish if CHROMIUM_sync_query is not available. |
395 } | 554 gl->Finish(); |
396 } | 555 while (!busy_buffers_.empty()) |
397 | 556 free_buffers_.push_back(busy_buffers_.take_front()); |
398 // There may be more work available, so wake up another worker thread. | 557 } |
399 copy_operation_count_cv_.Signal(); | 558 } |
400 | 559 |
401 // Copy at most |chunk_size_in_rows|. | 560 // Find a staging buffer that allows us to perform partial raster when |
402 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 561 // using persistent GpuMemoryBuffers. |
403 DCHECK_GT(rows_to_copy, 0); | 562 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
404 | 563 StagingBufferDeque::iterator it = |
405 // |raster_resource_write_lock| is passed to the first copy operation as it | 564 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
406 // needs to be released before we can issue a copy. | 565 [previous_content_id](const StagingBuffer* buffer) { |
407 pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation( | 566 return buffer->content_id == previous_content_id; |
408 raster_resource_write_lock.Pass(), raster_resource, output_resource, | 567 }); |
409 gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy)))); | 568 if (it != free_buffers_.end()) |
410 y += rows_to_copy; | 569 staging_buffer = free_buffers_.take(it); |
411 | 570 } |
412 // Acquire a sequence number for this copy operation. | 571 |
413 sequence = next_copy_operation_sequence_++; | 572 // Find staging buffer of correct size. |
414 | 573 if (!staging_buffer) { |
415 // Increment |bytes_scheduled_since_last_flush_| by the amount of memory | 574 StagingBufferDeque::iterator it = |
416 // used for this copy operation. | 575 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
417 bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row; | 576 [resource](const StagingBuffer* buffer) { |
418 | 577 return buffer->size == resource->size(); |
419 // Post task that will advance last flushed copy operation to |sequence| | 578 }); |
420 // when |bytes_scheduled_since_last_flush_| has reached | 579 if (it != free_buffers_.end()) |
421 // |max_bytes_per_copy_operation_|. | 580 staging_buffer = free_buffers_.take(it); |
422 if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) { | 581 } |
423 task_runner_->PostTask( | 582 |
424 FROM_HERE, | 583 // Create new staging buffer if necessary. |
425 base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo, | 584 if (!staging_buffer) { |
426 weak_ptr_factory_.GetWeakPtr(), sequence)); | 585 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); |
427 bytes_scheduled_since_last_flush_ = 0; | 586 buffers_.insert(staging_buffer.get()); |
428 } | 587 } |
429 } | 588 |
430 | 589 // Release enough free buffers to stay within the limit. |
431 return sequence; | 590 while (buffers_.size() > max_staging_buffers_) { |
432 } | 591 if (free_buffers_.empty()) |
433 | 592 break; |
434 void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo( | 593 |
435 CopySequenceNumber sequence) { | 594 free_buffers_.front()->DestroyGLResources(gl); |
436 if (last_issued_copy_operation_ >= sequence) | 595 buffers_.erase(free_buffers_.front()); |
| 596 free_buffers_.take_front(); |
| 597 } |
| 598 |
| 599 return staging_buffer.Pass(); |
| 600 } |
| 601 |
| 602 base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() { |
| 603 lock_.AssertAcquired(); |
| 604 |
| 605 if (!free_buffers_.empty()) |
| 606 return free_buffers_.front()->last_usage; |
| 607 |
| 608 if (!busy_buffers_.empty()) |
| 609 return busy_buffers_.front()->last_usage; |
| 610 |
| 611 return base::TimeTicks(); |
| 612 } |
| 613 |
| 614 void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() { |
| 615 lock_.AssertAcquired(); |
| 616 |
| 617 if (reduce_memory_usage_pending_) |
437 return; | 618 return; |
438 | 619 |
439 IssueCopyOperations(sequence - last_issued_copy_operation_); | 620 reduce_memory_usage_pending_ = true; |
440 last_issued_copy_operation_ = sequence; | 621 |
441 } | 622 // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer |
442 | 623 // should be released. |
443 void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo( | 624 base::TimeTicks reduce_memory_usage_time = |
444 CopySequenceNumber sequence) { | 625 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
445 if (last_flushed_copy_operation_ >= sequence) | 626 task_runner_->PostDelayedTask( |
| 627 FROM_HERE, reduce_memory_usage_callback_, |
| 628 reduce_memory_usage_time - base::TimeTicks::Now()); |
| 629 } |
| 630 |
| 631 void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() { |
| 632 base::AutoLock lock(lock_); |
| 633 |
| 634 reduce_memory_usage_pending_ = false; |
| 635 |
| 636 if (free_buffers_.empty() && busy_buffers_.empty()) |
446 return; | 637 return; |
447 | 638 |
448 AdvanceLastIssuedCopyTo(sequence); | 639 base::TimeTicks current_time = base::TimeTicks::Now(); |
449 | 640 ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_); |
450 // Flush all issued copy operations. | 641 |
451 context_provider_->ContextGL()->ShallowFlushCHROMIUM(); | 642 if (free_buffers_.empty() && busy_buffers_.empty()) |
452 last_flushed_copy_operation_ = last_issued_copy_operation_; | 643 return; |
| 644 |
| 645 reduce_memory_usage_pending_ = true; |
| 646 |
| 647 // Schedule another call to ReduceMemoryUsage at the time when the next |
| 648 // buffer should be released. |
| 649 base::TimeTicks reduce_memory_usage_time = |
| 650 GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_; |
| 651 task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_, |
| 652 reduce_memory_usage_time - current_time); |
| 653 } |
| 654 |
| 655 void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince( |
| 656 base::TimeTicks time) { |
| 657 lock_.AssertAcquired(); |
| 658 |
| 659 ContextProvider* context_provider = |
| 660 resource_provider_->output_surface()->worker_context_provider(); |
| 661 DCHECK(context_provider); |
| 662 |
| 663 { |
| 664 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 665 |
| 666 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 667 DCHECK(gl); |
| 668 |
| 669 // Note: Front buffer is guaranteed to be LRU so we can stop releasing |
| 670 // buffers as soon as we find a buffer that has been used since |time|. |
| 671 while (!free_buffers_.empty()) { |
| 672 if (free_buffers_.front()->last_usage > time) |
| 673 return; |
| 674 |
| 675 free_buffers_.front()->DestroyGLResources(gl); |
| 676 buffers_.erase(free_buffers_.front()); |
| 677 free_buffers_.take_front(); |
| 678 } |
| 679 |
| 680 while (!busy_buffers_.empty()) { |
| 681 if (busy_buffers_.front()->last_usage > time) |
| 682 return; |
| 683 |
| 684 busy_buffers_.front()->DestroyGLResources(gl); |
| 685 buffers_.erase(busy_buffers_.front()); |
| 686 busy_buffers_.take_front(); |
| 687 } |
| 688 } |
453 } | 689 } |
454 | 690 |
455 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { | 691 void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) { |
456 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", | 692 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set", |
457 task_set); | 693 task_set); |
458 | 694 |
459 DCHECK(tasks_pending_[task_set]); | 695 DCHECK(tasks_pending_[task_set]); |
460 tasks_pending_[task_set] = false; | 696 tasks_pending_[task_set] = false; |
461 if (tasks_pending_.any()) { | 697 if (tasks_pending_.any()) { |
462 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", | 698 TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", |
463 "state", StateAsValue()); | 699 "state", StateAsValue()); |
464 } else { | 700 } else { |
465 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); | 701 TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this); |
466 } | 702 } |
467 client_->DidFinishRunningTileTasks(task_set); | 703 client_->DidFinishRunningTileTasks(task_set); |
468 } | 704 } |
469 | 705 |
470 void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) { | |
471 TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count", | |
472 count); | |
473 | |
474 CopyOperation::Deque copy_operations; | |
475 | |
476 { | |
477 base::AutoLock lock(lock_); | |
478 | |
479 for (int64 i = 0; i < count; ++i) { | |
480 DCHECK(!pending_copy_operations_.empty()); | |
481 copy_operations.push_back(pending_copy_operations_.take_front()); | |
482 } | |
483 | |
484 // Increment |issued_copy_operation_count_| to reflect the transition of | |
485 // copy operations from "pending" to "issued" state. | |
486 issued_copy_operation_count_ += copy_operations.size(); | |
487 } | |
488 | |
489 while (!copy_operations.empty()) { | |
490 scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front(); | |
491 | |
492 // Remove the write lock. | |
493 copy_operation->src_write_lock.reset(); | |
494 | |
495 // Copy contents of source resource to destination resource. | |
496 resource_provider_->CopyResource(copy_operation->src->id(), | |
497 copy_operation->dst->id(), | |
498 copy_operation->rect); | |
499 } | |
500 } | |
501 | |
502 void OneCopyTileTaskWorkerPool:: | |
503 ScheduleCheckForCompletedCopyOperationsWithLockAcquired( | |
504 bool wait_if_needed) { | |
505 lock_.AssertAcquired(); | |
506 | |
507 if (check_for_completed_copy_operations_pending_) | |
508 return; | |
509 | |
510 base::TimeTicks now = base::TimeTicks::Now(); | |
511 | |
512 // Schedule a check for completed copy operations as soon as possible but | |
513 // don't allow two consecutive checks to be scheduled to run less than the | |
514 // tick rate apart. | |
515 base::TimeTicks next_check_for_completed_copy_operations_time = | |
516 std::max(last_check_for_completed_copy_operations_time_ + | |
517 base::TimeDelta::FromMilliseconds( | |
518 kCheckForCompletedCopyOperationsTickRateMs), | |
519 now); | |
520 | |
521 task_runner_->PostDelayedTask( | |
522 FROM_HERE, | |
523 base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations, | |
524 weak_ptr_factory_.GetWeakPtr(), wait_if_needed), | |
525 next_check_for_completed_copy_operations_time - now); | |
526 | |
527 last_check_for_completed_copy_operations_time_ = | |
528 next_check_for_completed_copy_operations_time; | |
529 check_for_completed_copy_operations_pending_ = true; | |
530 } | |
531 | |
532 void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations( | |
533 bool wait_if_needed) { | |
534 TRACE_EVENT1("cc", | |
535 "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations", | |
536 "wait_if_needed", wait_if_needed); | |
537 | |
538 resource_pool_->CheckBusyResources(wait_if_needed); | |
539 | |
540 { | |
541 base::AutoLock lock(lock_); | |
542 | |
543 DCHECK(check_for_completed_copy_operations_pending_); | |
544 check_for_completed_copy_operations_pending_ = false; | |
545 | |
546 // The number of busy resources in the pool reflects the number of issued | |
547 // copy operations that have not yet completed. | |
548 issued_copy_operation_count_ = resource_pool_->busy_resource_count(); | |
549 | |
550 // There may be work blocked on too many in-flight copy operations, so wake | |
551 // up a worker thread. | |
552 copy_operation_count_cv_.Signal(); | |
553 } | |
554 } | |
555 | |
556 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 706 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
557 OneCopyTileTaskWorkerPool::StateAsValue() const { | 707 OneCopyTileTaskWorkerPool::StateAsValue() const { |
558 scoped_refptr<base::trace_event::TracedValue> state = | 708 scoped_refptr<base::trace_event::TracedValue> state = |
559 new base::trace_event::TracedValue(); | 709 new base::trace_event::TracedValue(); |
560 | 710 |
561 state->BeginArray("tasks_pending"); | 711 state->BeginArray("tasks_pending"); |
562 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) | 712 for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) |
563 state->AppendBoolean(tasks_pending_[task_set]); | 713 state->AppendBoolean(tasks_pending_[task_set]); |
564 state->EndArray(); | 714 state->EndArray(); |
565 state->BeginDictionary("staging_state"); | 715 state->BeginDictionary("staging_state"); |
566 StagingStateAsValueInto(state.get()); | 716 StagingStateAsValueInto(state.get()); |
567 state->EndDictionary(); | 717 state->EndDictionary(); |
568 | 718 |
569 return state; | 719 return state; |
570 } | 720 } |
571 | 721 |
572 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( | 722 void OneCopyTileTaskWorkerPool::StagingStateAsValueInto( |
573 base::trace_event::TracedValue* staging_state) const { | 723 base::trace_event::TracedValue* staging_state) const { |
574 staging_state->SetInteger( | 724 base::AutoLock lock(lock_); |
575 "staging_resource_count", | 725 |
576 static_cast<int>(resource_pool_->total_resource_count())); | 726 staging_state->SetInteger("staging_buffer_count", |
577 staging_state->SetInteger( | 727 static_cast<int>(buffers_.size())); |
578 "bytes_used_for_staging_resources", | 728 staging_state->SetInteger("busy_count", |
579 static_cast<int>(resource_pool_->total_memory_usage_bytes())); | 729 static_cast<int>(busy_buffers_.size())); |
580 staging_state->SetInteger( | 730 staging_state->SetInteger("free_count", |
581 "pending_copy_count", | 731 static_cast<int>(free_buffers_.size())); |
582 static_cast<int>(resource_pool_->total_resource_count() - | |
583 resource_pool_->acquired_resource_count())); | |
584 staging_state->SetInteger( | |
585 "bytes_pending_copy", | |
586 static_cast<int>(resource_pool_->total_memory_usage_bytes() - | |
587 resource_pool_->acquired_memory_usage_bytes())); | |
588 } | 732 } |
589 | 733 |
590 } // namespace cc | 734 } // namespace cc |
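The new ReduceMemoryUsage / ReleaseBuffersNotUsedSince path in this change keeps free and busy staging buffers in least-recently-used order and drops any buffer idle for longer than kStagingBufferExpirationDelayMs, rescheduling itself for when the next LRU buffer would expire. Below is a minimal standalone sketch of that expiration pattern, using std::deque and std::chrono in place of cc's ScopedPtrDeque and base::TimeTicks; all names in the sketch are placeholders rather than symbols from this change, and the GL resource teardown is elided.

#include <chrono>
#include <deque>
#include <memory>

using Clock = std::chrono::steady_clock;

struct StagingBufferSketch {
  Clock::time_point last_usage;  // Updated each time the buffer is handed back.
};

using BufferDeque = std::deque<std::unique_ptr<StagingBufferSketch>>;

// Front of each deque is the least recently used entry, so releasing can stop
// at the first buffer that was used after |time| (mirrors
// ReleaseBuffersNotUsedSince above, minus DestroyGLResources()).
void ReleaseNotUsedSince(BufferDeque* free_buffers,
                         BufferDeque* busy_buffers,
                         Clock::time_point time) {
  while (!free_buffers->empty()) {
    if (free_buffers->front()->last_usage > time)
      return;
    free_buffers->pop_front();  // GL resource destruction would go here.
  }
  while (!busy_buffers->empty()) {
    if (busy_buffers->front()->last_usage > time)
      return;
    busy_buffers->pop_front();
  }
}

// The next cleanup is scheduled for when the current LRU buffer expires,
// i.e. its last_usage plus the 1000 ms expiration delay.
Clock::time_point NextCleanupTime(const BufferDeque& free_buffers,
                                  const BufferDeque& busy_buffers) {
  constexpr std::chrono::milliseconds kExpirationDelay{1000};
  Clock::time_point lru_usage{};
  if (!free_buffers.empty())
    lru_usage = free_buffers.front()->last_usage;
  else if (!busy_buffers.empty())
    lru_usage = busy_buffers.front()->last_usage;
  return lru_usage + kExpirationDelay;
}

int main() {
  BufferDeque free_buffers, busy_buffers;
  auto buffer = std::make_unique<StagingBufferSketch>();
  buffer->last_usage = Clock::now();
  free_buffers.push_back(std::move(buffer));
  // Nothing is released: the buffer was used after the cutoff time.
  ReleaseNotUsedSince(&free_buffers, &busy_buffers,
                      Clock::now() - std::chrono::seconds(1));
  return free_buffers.size() == 1 ? 0 : 1;
}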