| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpuCommandBuffer.h" | 8 #include "GrVkGpuCommandBuffer.h" |
| 9 | 9 |
| 10 #include "GrMesh.h" | 10 #include "GrMesh.h" |
| (...skipping 277 matching lines...) |
| 288 } | 288 } |
| 289 | 289 |
| 290 //////////////////////////////////////////////////////////////////////////////// | 290 //////////////////////////////////////////////////////////////////////////////// |
| 291 | 291 |
| 292 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc, | 292 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc, |
| 293 const GrNonInstancedMesh& mesh) { | 293 const GrNonInstancedMesh& mesh) { |
| 294 // There is no need to put any memory barriers to make sure host writes have finished here. | 294 // There is no need to put any memory barriers to make sure host writes have finished here. |
| 295 // When a command buffer is submitted to a queue, there is an implicit memory barrier that | 295 // When a command buffer is submitted to a queue, there is an implicit memory barrier that |
| 296 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of | 296 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of |
| 297 // an active RenderPass. | 297 // an active RenderPass. |
|  | 298 SkASSERT(!mesh.vertexBuffer()->isCPUBacked()); |
| 298 GrVkVertexBuffer* vbuf; | 299 GrVkVertexBuffer* vbuf; |
| 299 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); | 300 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); |
| 300 SkASSERT(vbuf); | 301 SkASSERT(vbuf); |
| 301 SkASSERT(!vbuf->isMapped()); | 302 SkASSERT(!vbuf->isMapped()); |
| 302 | 303 |
| 303 fCommandBuffer->bindVertexBuffer(fGpu, vbuf); | 304 fCommandBuffer->bindVertexBuffer(fGpu, vbuf); |
| 304 | 305 |
| 305 if (mesh.isIndexed()) { | 306 if (mesh.isIndexed()) { |
|  | 307 SkASSERT(!mesh.indexBuffer()->isCPUBacked()); |
| 306 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); | 308 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); |
| 307 SkASSERT(ibuf); | 309 SkASSERT(ibuf); |
| 308 SkASSERT(!ibuf->isMapped()); | 310 SkASSERT(!ibuf->isMapped()); |
| 309 | 311 |
| 310 fCommandBuffer->bindIndexBuffer(fGpu, ibuf); | 312 fCommandBuffer->bindIndexBuffer(fGpu, ibuf); |
| 311 } | 313 } |
| 312 } | 314 } |
| 313 | 315 |
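Note on the barrier comment and the new asserts above: per the Vulkan spec, vkQueueSubmit performs an implicit memory domain operation covering all host writes made visible before the submit, which is why bindGeometry can skip an explicit barrier; and a buffer barrier could not be recorded here anyway, since vkCmdPipelineBarrier with VkBufferMemoryBarriers is disallowed inside a render pass instance. The new SkASSERT(!...->isCPUBacked()) lines enforce that only GPU-backed buffers ever reach this point. For reference, a minimal sketch (plain Vulkan, not Skia code; recordHostWriteBarrier is a hypothetical helper) of the barrier that would otherwise have to be recorded before vkCmdBeginRenderPass:

    #include <vulkan/vulkan.h>

    // Records the host-write -> vertex-read barrier that would be needed if
    // vkQueueSubmit did not already provide this guarantee. Must be called
    // outside a render pass instance; cmdBuf is assumed to be recording.
    static void recordHostWriteBarrier(VkCommandBuffer cmdBuf, VkBuffer buf) {
        VkBufferMemoryBarrier barrier = {};
        barrier.sType               = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
        barrier.srcAccessMask       = VK_ACCESS_HOST_WRITE_BIT;
        barrier.dstAccessMask       = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.buffer              = buf;
        barrier.offset              = 0;
        barrier.size                = VK_WHOLE_SIZE;
        vkCmdPipelineBarrier(cmdBuf,
                             VK_PIPELINE_STAGE_HOST_BIT,         // producer: host writes
                             VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // consumer: vertex fetch
                             0,                                  // no dependency flags
                             0, nullptr,                         // no global memory barriers
                             1, &barrier,                        // the buffer barrier above
                             0, nullptr);                        // no image barriers
    }

Because submission already provides the host-write guarantee, the only remaining hazard is a still-mapped or CPU-backed buffer, which is exactly what the asserts catch.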
| 314 sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState( | 316 sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState( |
| 315 const GrPipeline& pipeline, | 317 const GrPipeline& pipeline, |
| (...skipping 112 matching lines...) |
| 428 fGpu->stats()->incNumDraws(); | 430 fGpu->stats()->incNumDraws(); |
| 429 } while ((nonIdxMesh = iter.next())); | 431 } while ((nonIdxMesh = iter.next())); |
| 430 } | 432 } |
| 431 | 433 |
| 432 // Technically we don't have to call this here (since there is a safety check in | 434 // Technically we don't have to call this here (since there is a safety check in |
| 433 // pipelineState::setData) but this will allow for quicker freeing of resources if the | 435 // pipelineState::setData) but this will allow for quicker freeing of resources if the |
| 434 // pipelineState sits in a cache for a while. | 436 // pipelineState sits in a cache for a while. |
| 435 pipelineState->freeTempResources(fGpu); | 437 pipelineState->freeTempResources(fGpu); |
| 436 } | 438 } |
| 437 | 439 |
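The eager freeTempResources call above is a simple pattern: release per-draw staging resources as soon as the draw that consumes them has been recorded, rather than waiting for the safety check in setData the next time this state is pulled from the cache. A rough sketch of the idea, using hypothetical placeholder types (Gpu, StagingBuffer, CachedPipelineState are illustrative, not Skia's API):

    #include <memory>

    struct Gpu {};            // placeholder for the device abstraction
    struct StagingBuffer {};  // placeholder for a transient upload buffer

    // Hypothetical cached object: temp upload resources live only until the
    // draw that consumes them is recorded, not until the next cache hit.
    class CachedPipelineState {
    public:
        void setData(Gpu* gpu) {
            this->freeTempResources(gpu);  // the "safety check" the comment refers to
            fTempUpload = std::make_unique<StagingBuffer>();
            // ... write uniform data into fTempUpload ...
        }

        // Called right after the draw is recorded so staging memory is
        // returned now, even if this object then sits in the cache a while.
        void freeTempResources(Gpu* /*gpu*/) {
            fTempUpload.reset();
        }

    private:
        std::unique_ptr<StagingBuffer> fTempUpload;
    };

The trade-off is one extra call per draw in exchange for not pinning staging memory for the lifetime of a cache entry.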