// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/staging_buffer_pool.h"
prashant.n
2016/04/08 16:58:54
Should this be in cc/resources, or is cc/raster okay?

#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "cc/base/container_util.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"

namespace cc {
namespace {

// Delay between checks for the query result to become available.
const int kCheckForQueryResultAvailableTickRateMs = 1;

// Number of attempts to allow before we perform a check that will wait for
// the query to complete.
const int kMaxCheckForQueryResultAvailableAttempts = 256;

// Delay before a staging buffer might be released.
const int kStagingBufferExpirationDelayMs = 1000;

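// Returns true, without blocking, if the result of |query_id| is available.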
bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  unsigned complete = 1;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
  return !!complete;
}

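// Polls until the result of |query_id| is available, sleeping between checks.
// After kMaxCheckForQueryResultAvailableAttempts attempts, falls through to a
// blocking read of the result.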
void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  TRACE_EVENT0("cc", "WaitForQueryResult");

  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
  while (attempts_left--) {
    if (CheckForQueryResult(gl, query_id))
      break;

    // We have to flush the context to be guaranteed that a query result will
    // be available in a finite amount of time.
    gl->ShallowFlushCHROMIUM();

    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
        kCheckForQueryResultAvailableTickRateMs));
  }

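  // Reading GL_QUERY_RESULT_EXT blocks until the result is available, so this
  // waits out any attempts that were exhausted above.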
  unsigned result = 0;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
}

}  // namespace

StagingBuffer::StagingBuffer(const gfx::Size& size, ResourceFormat format)
    : size(size),
      format(format),
      texture_id(0),
      image_id(0),
      query_id(0),
      content_id(0) {}

StagingBuffer::~StagingBuffer() {
  DCHECK_EQ(texture_id, 0u);
  DCHECK_EQ(image_id, 0u);
  DCHECK_EQ(query_id, 0u);
}

void StagingBuffer::DestroyGLResources(gpu::gles2::GLES2Interface* gl) {
  if (query_id) {
    gl->DeleteQueriesEXT(1, &query_id);
    query_id = 0;
  }
  if (image_id) {
    gl->DestroyImageCHROMIUM(image_id);
    image_id = 0;
  }
  if (texture_id) {
    gl->DeleteTextures(1, &texture_id);
    texture_id = 0;
  }
}

void StagingBuffer::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
                                 ResourceFormat format,
                                 bool in_free_list) const {
  if (!gpu_memory_buffer)
    return;

  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
  std::string buffer_dump_name =
      base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id);
  base::trace_event::MemoryAllocatorDump* buffer_dump =
      pmd->CreateAllocatorDump(buffer_dump_name);

  uint64_t buffer_size_in_bytes =
      ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         buffer_size_in_bytes);
  buffer_dump->AddScalar("free_size",
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         in_free_list ? buffer_size_in_bytes : 0);

  // Emit an ownership edge towards a global allocator dump node.
  const uint64_t tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();
  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);

  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
  // the tracing UI will account the effective size of the buffer to the child.
  const int kImportance = 2;
  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
}

// static
scoped_ptr<StagingBufferPool> StagingBufferPool::Create(
    base::SequencedTaskRunner* task_runner,
    ResourceProvider* resource_provider,
    bool use_partial_raster,
    int max_staging_buffer_usage_in_bytes) {
  return make_scoped_ptr<StagingBufferPool>(
      new StagingBufferPool(task_runner, resource_provider, use_partial_raster,
                            max_staging_buffer_usage_in_bytes));
}

StagingBufferPool::StagingBufferPool(base::SequencedTaskRunner* task_runner,
                                     ResourceProvider* resource_provider,
                                     bool use_partial_raster,
                                     int max_staging_buffer_usage_in_bytes)
    : task_runner_(task_runner),
      resource_provider_(resource_provider),
      use_partial_raster_(use_partial_raster),
      max_staging_buffer_usage_in_bytes_(max_staging_buffer_usage_in_bytes),
      staging_buffer_usage_in_bytes_(0),
      free_staging_buffer_usage_in_bytes_(0),
      staging_buffer_expiration_delay_(
          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
      reduce_memory_usage_pending_(false),
      weak_ptr_factory_(this) {
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "cc::StagingBufferPool", base::ThreadTaskRunnerHandle::Get());
  reduce_memory_usage_callback_ = base::Bind(
      &StagingBufferPool::ReduceMemoryUsage, weak_ptr_factory_.GetWeakPtr());
}

StagingBufferPool::~StagingBufferPool() {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

void StagingBufferPool::Shutdown() {
  base::AutoLock lock(lock_);
  if (buffers_.empty())
    return;

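  // Release all buffers, regardless of how recently they were last used.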
  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
  DCHECK_EQ(staging_buffer_usage_in_bytes_, 0);
  DCHECK_EQ(free_staging_buffer_usage_in_bytes_, 0);
}

void StagingBufferPool::ReleaseStagingBuffer(
    scoped_ptr<StagingBuffer> staging_buffer) {
  base::AutoLock lock(lock_);

  staging_buffer->last_usage = base::TimeTicks::Now();
  busy_buffers_.push_back(std::move(staging_buffer));

  ScheduleReduceMemoryUsage();
}

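// MemoryDumpProvider implementation; called by the MemoryDumpManager on the
// task runner this pool registered with.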
bool StagingBufferPool::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  base::AutoLock lock(lock_);

  for (const auto* buffer : buffers_) {
    auto in_free_buffers =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [buffer](const scoped_ptr<StagingBuffer>& b) {
                       return b.get() == buffer;
                     });
    buffer->OnMemoryDump(pmd, buffer->format,
                         in_free_buffers != free_buffers_.end());
  }

  return true;
}

void StagingBufferPool::AddStagingBuffer(const StagingBuffer* staging_buffer,
                                         ResourceFormat format) {
  lock_.AssertAcquired();

  DCHECK(buffers_.find(staging_buffer) == buffers_.end());
  buffers_.insert(staging_buffer);
  int buffer_usage_in_bytes =
      ResourceUtil::UncheckedSizeInBytes<int>(staging_buffer->size, format);
  staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
}

void StagingBufferPool::RemoveStagingBuffer(
    const StagingBuffer* staging_buffer) {
  lock_.AssertAcquired();

  DCHECK(buffers_.find(staging_buffer) != buffers_.end());
  buffers_.erase(staging_buffer);
  int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
      staging_buffer->size, staging_buffer->format);
  DCHECK_GE(staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
  staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
}

void StagingBufferPool::MarkStagingBufferAsFree(
    const StagingBuffer* staging_buffer) {
  lock_.AssertAcquired();

  int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
      staging_buffer->size, staging_buffer->format);
  free_staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes;
}

void StagingBufferPool::MarkStagingBufferAsBusy(
    const StagingBuffer* staging_buffer) {
  lock_.AssertAcquired();

  int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>(
      staging_buffer->size, staging_buffer->format);
  DCHECK_GE(free_staging_buffer_usage_in_bytes_, buffer_usage_in_bytes);
  free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes;
}

scoped_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer(
    const Resource* resource,
    uint64_t previous_content_id) {
  base::AutoLock lock(lock_);

  scoped_ptr<StagingBuffer> staging_buffer;

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  ContextProvider::ScopedContextLock scoped_context(context_provider);

  gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
  DCHECK(gl);

  // Check if any busy buffers have become available.
  if (resource_provider_->use_sync_query()) {
    while (!busy_buffers_.empty()) {
      if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
        break;

      MarkStagingBufferAsFree(busy_buffers_.front().get());
      free_buffers_.push_back(PopFront(&busy_buffers_));
    }
  }

  // Wait for memory usage of non-free buffers to become less than the limit.
  while (
      (staging_buffer_usage_in_bytes_ - free_staging_buffer_usage_in_bytes_) >=
      max_staging_buffer_usage_in_bytes_) {
    // Stop when there are no more busy buffers to wait for.
    if (busy_buffers_.empty())
      break;

    if (resource_provider_->use_sync_query()) {
      WaitForQueryResult(gl, busy_buffers_.front()->query_id);
      MarkStagingBufferAsFree(busy_buffers_.front().get());
      free_buffers_.push_back(PopFront(&busy_buffers_));
    } else {
      // Fall back to glFinish if CHROMIUM_sync_query is not available.
      gl->Finish();
      while (!busy_buffers_.empty()) {
        MarkStagingBufferAsFree(busy_buffers_.front().get());
        free_buffers_.push_back(PopFront(&busy_buffers_));
      }
    }
  }

  // Find a staging buffer that allows us to perform partial raster when
  // using persistent GpuMemoryBuffers.
  if (use_partial_raster_ && previous_content_id) {
    StagingBufferDeque::iterator it = std::find_if(
        free_buffers_.begin(), free_buffers_.end(),
        [previous_content_id](const scoped_ptr<StagingBuffer>& buffer) {
          return buffer->content_id == previous_content_id;
        });
    if (it != free_buffers_.end()) {
      staging_buffer = std::move(*it);
      free_buffers_.erase(it);
      MarkStagingBufferAsBusy(staging_buffer.get());
    }
  }

  // Find a staging buffer of the correct size and format.
  if (!staging_buffer) {
    StagingBufferDeque::iterator it =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [resource](const scoped_ptr<StagingBuffer>& buffer) {
                       return buffer->size == resource->size() &&
                              buffer->format == resource->format();
                     });
    if (it != free_buffers_.end()) {
      staging_buffer = std::move(*it);
      free_buffers_.erase(it);
      MarkStagingBufferAsBusy(staging_buffer.get());
    }
  }

  // Create a new staging buffer if necessary.
  if (!staging_buffer) {
    staging_buffer = make_scoped_ptr(
        new StagingBuffer(resource->size(), resource->format()));
    AddStagingBuffer(staging_buffer.get(), resource->format());
  }

  // Release enough free buffers to stay within the limit.
  while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) {
    if (free_buffers_.empty())
      break;

    free_buffers_.front()->DestroyGLResources(gl);
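    // Marking the buffer as busy before removing it keeps
    // |free_staging_buffer_usage_in_bytes_| in sync with |free_buffers_|.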
    MarkStagingBufferAsBusy(free_buffers_.front().get());
    RemoveStagingBuffer(free_buffers_.front().get());
    free_buffers_.pop_front();
  }

  return staging_buffer;
}

base::TimeTicks StagingBufferPool::GetUsageTimeForLRUBuffer() {
  lock_.AssertAcquired();

  if (!free_buffers_.empty())
    return free_buffers_.front()->last_usage;

  if (!busy_buffers_.empty())
    return busy_buffers_.front()->last_usage;

  return base::TimeTicks();
}

void StagingBufferPool::ScheduleReduceMemoryUsage() {
  lock_.AssertAcquired();

  if (reduce_memory_usage_pending_)
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
  // should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(
      FROM_HERE, reduce_memory_usage_callback_,
      reduce_memory_usage_time - base::TimeTicks::Now());
}

void StagingBufferPool::ReduceMemoryUsage() {
  base::AutoLock lock(lock_);

  reduce_memory_usage_pending_ = false;

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  base::TimeTicks current_time = base::TimeTicks::Now();
  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule another call to ReduceMemoryUsage at the time when the next
  // buffer should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
                                reduce_memory_usage_time - current_time);
}

void StagingBufferPool::ReleaseBuffersNotUsedSince(base::TimeTicks time) {
  lock_.AssertAcquired();

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    // Note: The front buffer is guaranteed to be LRU, so we can stop releasing
    // buffers as soon as we find a buffer that has been used since |time|.
    while (!free_buffers_.empty()) {
      if (free_buffers_.front()->last_usage > time)
        return;

      free_buffers_.front()->DestroyGLResources(gl);
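      // As above, mark the buffer busy first so the free-usage accounting
      // stays balanced.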
      MarkStagingBufferAsBusy(free_buffers_.front().get());
      RemoveStagingBuffer(free_buffers_.front().get());
      free_buffers_.pop_front();
    }

    while (!busy_buffers_.empty()) {
      if (busy_buffers_.front()->last_usage > time)
        return;

      busy_buffers_.front()->DestroyGLResources(gl);
      RemoveStagingBuffer(busy_buffers_.front().get());
      busy_buffers_.pop_front();
    }
  }
}

}  // namespace cc