Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/raster/staging_buffer_pool.h" | 5 #include "cc/raster/staging_buffer_pool.h" |
| 6 | 6 |
| 7 #include <memory> | 7 #include <memory> |
| 8 | 8 |
| 9 #include "base/memory/memory_coordinator_client_registry.h" | 9 #include "base/memory/memory_coordinator_client_registry.h" |
| 10 #include "base/memory/ptr_util.h" | 10 #include "base/memory/ptr_util.h" |
| 11 #include "base/strings/stringprintf.h" | 11 #include "base/strings/stringprintf.h" |
| 12 #include "base/threading/thread_task_runner_handle.h" | 12 #include "base/threading/thread_task_runner_handle.h" |
| 13 #include "base/trace_event/memory_dump_manager.h" | 13 #include "base/trace_event/memory_dump_manager.h" |
| 14 #include "cc/base/container_util.h" | 14 #include "cc/base/container_util.h" |
| 15 #include "cc/debug/traced_value.h" | 15 #include "cc/debug/traced_value.h" |
| 16 #include "cc/resources/scoped_resource.h" | 16 #include "cc/resources/scoped_resource.h" |
| 17 #include "gpu/command_buffer/client/gles2_interface.h" | 17 #include "gpu/command_buffer/client/gles2_interface.h" |
| 18 #include "ui/gfx/gpu_fence.h" | |
| 18 #include "ui/gfx/gpu_memory_buffer_tracing.h" | 19 #include "ui/gfx/gpu_memory_buffer_tracing.h" |
| 19 | 20 |
| 20 namespace cc { | 21 namespace cc { |
| 21 namespace { | 22 namespace { |
| 22 | 23 |
| 23 // Delay between checking for query result to be available. | 24 // Maximum amount of time to wait for fence before we call glFinish(). |
| 24 const int kCheckForQueryResultAvailableTickRateMs = 1; | 25 const int kWaitForFenceMaxTimeMs = 256; |
| 25 | |
| 26 // Number of attempts to allow before we perform a check that will wait for | |
| 27 // query to complete. | |
| 28 const int kMaxCheckForQueryResultAvailableAttempts = 256; | |
| 29 | 26 |
| 30 // Delay before a staging buffer might be released. | 27 // Delay before a staging buffer might be released. |
| 31 const int kStagingBufferExpirationDelayMs = 1000; | 28 const int kStagingBufferExpirationDelayMs = 1000; |
| 32 | 29 |
| 33 bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { | 30 bool WaitForGpuFence(gfx::GpuFence* gpu_fence) { |
| 34 unsigned complete = 1; | 31 TRACE_EVENT0("cc", "WaitForGpuFence"); |
| 35 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete); | |
| 36 return !!complete; | |
| 37 } | |
| 38 | 32 |
| 39 void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) { | 33 base::TimeTicks now = base::TimeTicks::Now(); |
| 40 TRACE_EVENT0("cc", "WaitForQueryResult"); | 34 base::TimeTicks end = |
| 41 | 35 now + base::TimeDelta::FromMilliseconds(kWaitForFenceMaxTimeMs); |
| 42 int attempts_left = kMaxCheckForQueryResultAvailableAttempts; | 36 while (now < end) { |
| 43 while (attempts_left--) { | 37 if (gpu_fence->Wait(end - now)) |
|
ericrk
2017/01/10 03:50:31
nit: I understand that things like this sometimes
reveman
2017/01/10 19:37:55
That was my initial approach but I decided it was
| |
| 44 if (CheckForQueryResult(gl, query_id)) | 38 return true; |
| 45 break; | 39 now = base::TimeTicks::Now(); |
| 46 | |
| 47 // We have to flush the context to be guaranteed that a query result will | |
| 48 // be available in a finite amount of time. | |
| 49 gl->ShallowFlushCHROMIUM(); | |
| 50 | |
| 51 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds( | |
| 52 kCheckForQueryResultAvailableTickRateMs)); | |
| 53 } | 40 } |
| 54 | 41 return false; |
| 55 unsigned result = 0; | |
| 56 gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result); | |
| 57 } | 42 } |
| 58 | 43 |
| 59 } // namespace | 44 } // namespace |
| 60 | 45 |
| 61 StagingBuffer::StagingBuffer(const gfx::Size& size, ResourceFormat format) | 46 StagingBuffer::StagingBuffer(const gfx::Size& size, ResourceFormat format) |
| 62 : size(size), | 47 : size(size), format(format), image_id(0), fence_id(0), content_id(0) {} |
| 63 format(format), | |
| 64 texture_id(0), | |
| 65 image_id(0), | |
| 66 query_id(0), | |
| 67 content_id(0) {} | |
| 68 | 48 |
| 69 StagingBuffer::~StagingBuffer() { | 49 StagingBuffer::~StagingBuffer() { |
| 70 DCHECK_EQ(texture_id, 0u); | |
| 71 DCHECK_EQ(image_id, 0u); | 50 DCHECK_EQ(image_id, 0u); |
| 72 DCHECK_EQ(query_id, 0u); | 51 DCHECK_EQ(fence_id, 0u); |
| 73 } | 52 } |
| 74 | 53 |
| 75 void StagingBuffer::DestroyGLResources(gpu::gles2::GLES2Interface* gl) { | 54 void StagingBuffer::DestroyGLResources(gpu::gles2::GLES2Interface* gl) { |
| 76 if (query_id) { | 55 if (fence_id) { |
| 77 gl->DeleteQueriesEXT(1, &query_id); | 56 gl->DestroyFenceCHROMIUM(fence_id); |
| 78 query_id = 0; | 57 fence_id = 0; |
| 79 } | 58 } |
| 80 if (image_id) { | 59 if (image_id) { |
| 81 gl->DestroyImageCHROMIUM(image_id); | 60 gl->DestroyImageCHROMIUM(image_id); |
| 82 image_id = 0; | 61 image_id = 0; |
| 83 } | 62 } |
| 84 if (texture_id) { | |
| 85 gl->DeleteTextures(1, &texture_id); | |
| 86 texture_id = 0; | |
| 87 } | |
| 88 } | 63 } |
| 89 | 64 |
| 90 void StagingBuffer::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd, | 65 void StagingBuffer::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd, |
| 91 ResourceFormat format, | 66 ResourceFormat format, |
| 92 bool in_free_list) const { | 67 bool in_free_list) const { |
| 93 if (!gpu_memory_buffer) | 68 if (!gpu_memory_buffer) |
| 94 return; | 69 return; |
| 95 | 70 |
| 96 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); | 71 gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId(); |
| 97 std::string buffer_dump_name = | 72 std::string buffer_dump_name = |
| (...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 242 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; | 217 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; |
| 243 } | 218 } |
| 244 | 219 |
| 245 std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer( | 220 std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer( |
| 246 const Resource* resource, | 221 const Resource* resource, |
| 247 uint64_t previous_content_id) { | 222 uint64_t previous_content_id) { |
| 248 base::AutoLock lock(lock_); | 223 base::AutoLock lock(lock_); |
| 249 | 224 |
| 250 std::unique_ptr<StagingBuffer> staging_buffer; | 225 std::unique_ptr<StagingBuffer> staging_buffer; |
| 251 | 226 |
| 252 ContextProvider::ScopedContextLock scoped_context(worker_context_provider_); | 227 // Check if any busy buffers have become available. |
| 228 while (!busy_buffers_.empty()) { | |
| 229 if (!busy_buffers_.front()->gpu_fence->IsSignaled()) | |
| 230 break; | |
| 253 | 231 |
| 254 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | 232 busy_buffers_.front()->gpu_fence->Reset(); |
| 255 DCHECK(gl); | 233 MarkStagingBufferAsFree(busy_buffers_.front().get()); |
| 234 free_buffers_.push_back(PopFront(&busy_buffers_)); | |
| 235 } | |
| 256 | 236 |
| 257 // Check if any busy buffers have become available. | 237 bool flushed = false; |
| 258 if (resource_provider_->use_sync_query()) { | |
| 259 while (!busy_buffers_.empty()) { | |
| 260 if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id)) | |
| 261 break; | |
| 262 | |
| 263 MarkStagingBufferAsFree(busy_buffers_.front().get()); | |
| 264 free_buffers_.push_back(PopFront(&busy_buffers_)); | |
| 265 } | |
| 266 } | |
| 267 | 238 |
| 268 // Wait for memory usage of non-free buffers to become less than the limit. | 239 // Wait for memory usage of non-free buffers to become less than the limit. |
| 269 while ( | 240 while ( |
| 270 (staging_buffer_usage_in_bytes_ - free_staging_buffer_usage_in_bytes_) >= | 241 (staging_buffer_usage_in_bytes_ - free_staging_buffer_usage_in_bytes_) >= |
| 271 max_staging_buffer_usage_in_bytes_) { | 242 max_staging_buffer_usage_in_bytes_) { |
| 272 // Stop when there are no more busy buffers to wait for. | 243 // Stop when there are no more busy buffers to wait for. |
| 273 if (busy_buffers_.empty()) | 244 if (busy_buffers_.empty()) |
| 274 break; | 245 break; |
| 275 | 246 |
| 276 if (resource_provider_->use_sync_query()) { | 247 if (!flushed) { |
| 277 WaitForQueryResult(gl, busy_buffers_.front()->query_id); | 248 ContextProvider::ScopedContextLock scoped_context( |
| 249 worker_context_provider_); | |
| 250 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 251 DCHECK(gl); | |
| 252 | |
|  | 253 // We have to flush the context to be guaranteed that the fence will signal in |
| 254 // a finite amount of time. | |
| 255 gl->ShallowFlushCHROMIUM(); | |
| 256 flushed = true; | |
| 257 } | |
| 258 | |
| 259 if (WaitForGpuFence(busy_buffers_.front()->gpu_fence.get())) { | |
| 260 busy_buffers_.front()->gpu_fence->Reset(); | |
| 278 MarkStagingBufferAsFree(busy_buffers_.front().get()); | 261 MarkStagingBufferAsFree(busy_buffers_.front().get()); |
| 279 free_buffers_.push_back(PopFront(&busy_buffers_)); | 262 free_buffers_.push_back(PopFront(&busy_buffers_)); |
| 280 } else { | 263 } else { |
| 281 // Fall-back to glFinish if CHROMIUM_sync_query is not available. | 264 ContextProvider::ScopedContextLock scoped_context( |
| 265 worker_context_provider_); | |
| 266 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 267 DCHECK(gl); | |
| 268 | |
| 269 // glFinish() should result in the fence signaling or the context is lost | |
| 270 // and fence will never signal. | |
| 282 gl->Finish(); | 271 gl->Finish(); |
| 283 while (!busy_buffers_.empty()) { | 272 while (!busy_buffers_.empty()) { |
| 273 busy_buffers_.front()->gpu_fence->Reset(); | |
| 284 MarkStagingBufferAsFree(busy_buffers_.front().get()); | 274 MarkStagingBufferAsFree(busy_buffers_.front().get()); |
| 285 free_buffers_.push_back(PopFront(&busy_buffers_)); | 275 free_buffers_.push_back(PopFront(&busy_buffers_)); |
| 286 } | 276 } |
| 287 } | 277 } |
| 288 } | 278 } |
| 289 | 279 |
| 290 // Find a staging buffer that allows us to perform partial raster when | 280 // Find a staging buffer that allows us to perform partial raster when |
| 291 // using persistent GpuMemoryBuffers. | 281 // using persistent GpuMemoryBuffers. |
| 292 if (use_partial_raster_ && previous_content_id) { | 282 if (use_partial_raster_ && previous_content_id) { |
| 293 StagingBufferDeque::iterator it = std::find_if( | 283 StagingBufferDeque::iterator it = std::find_if( |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 322 staging_buffer = | 312 staging_buffer = |
| 323 base::MakeUnique<StagingBuffer>(resource->size(), resource->format()); | 313 base::MakeUnique<StagingBuffer>(resource->size(), resource->format()); |
| 324 AddStagingBuffer(staging_buffer.get(), resource->format()); | 314 AddStagingBuffer(staging_buffer.get(), resource->format()); |
| 325 } | 315 } |
| 326 | 316 |
| 327 // Release enough free buffers to stay within the limit. | 317 // Release enough free buffers to stay within the limit. |
| 328 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) { | 318 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) { |
| 329 if (free_buffers_.empty()) | 319 if (free_buffers_.empty()) |
| 330 break; | 320 break; |
| 331 | 321 |
| 332 free_buffers_.front()->DestroyGLResources(gl); | 322 { |
| 323 ContextProvider::ScopedContextLock scoped_context( | |
|
ericrk
2017/01/10 03:50:31
not sure what the overhead of locking/re-locking,
reveman
2017/01/10 19:37:55
Makes sense. I'll address this as I rebase this patch
| |
| 324 worker_context_provider_); | |
| 325 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | |
| 326 DCHECK(gl); | |
| 327 | |
| 328 free_buffers_.front()->DestroyGLResources(gl); | |
| 329 } | |
| 330 | |
| 333 MarkStagingBufferAsBusy(free_buffers_.front().get()); | 331 MarkStagingBufferAsBusy(free_buffers_.front().get()); |
| 334 RemoveStagingBuffer(free_buffers_.front().get()); | 332 RemoveStagingBuffer(free_buffers_.front().get()); |
| 335 free_buffers_.pop_front(); | 333 free_buffers_.pop_front(); |
| 336 } | 334 } |
| 337 | 335 |
| 338 return staging_buffer; | 336 return staging_buffer; |
| 339 } | 337 } |
| 340 | 338 |
| 341 base::TimeTicks StagingBufferPool::GetUsageTimeForLRUBuffer() { | 339 base::TimeTicks StagingBufferPool::GetUsageTimeForLRUBuffer() { |
| 342 lock_.AssertAcquired(); | 340 lock_.AssertAcquired(); |
| (...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 437 // Release all buffers, regardless of how recently they were used. | 435 // Release all buffers, regardless of how recently they were used. |
| 438 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); | 436 ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max()); |
| 439 } break; | 437 } break; |
| 440 case base::MemoryState::UNKNOWN: | 438 case base::MemoryState::UNKNOWN: |
| 441 // NOT_REACHED. | 439 // NOT_REACHED. |
| 442 break; | 440 break; |
| 443 } | 441 } |
| 444 } | 442 } |
| 445 | 443 |
| 446 } // namespace cc | 444 } // namespace cc |
| OLD | NEW |