Index: src/gpu/vk/GrVkGpu.cpp
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 8083509877105c8c68bde6279db2afb0870f730f..91c8756073706002660e708f07bde870fe91678b 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -710,18 +710,15 @@ GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                           const GrNonInstancedMesh& mesh) {
+    // There is no need to put any memory barriers to make sure host writes have finished here.
+    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
+    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
+    // an active RenderPass.
    GrVkVertexBuffer* vbuf;
    vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());
-    vbuf->addMemoryBarrier(this,
-                           VK_ACCESS_HOST_WRITE_BIT,
-                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
-                           VK_PIPELINE_STAGE_HOST_BIT,
-                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
-                           false);
-
    fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
    if (mesh.isIndexed()) {
@@ -729,13 +726,6 @@ void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());
-        ibuf->addMemoryBarrier(this,
-                               VK_ACCESS_HOST_WRITE_BIT,
-                               VK_ACCESS_INDEX_READ_BIT,
-                               VK_PIPELINE_STAGE_HOST_BIT,
-                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
-                               false);
-
        fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
    }
}
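
For reference, each deleted addMemoryBarrier call boiled down to recording an explicit host-write barrier in raw Vulkan. A minimal sketch of that barrier, assuming plain Vulkan handles (cmdBuf and buffer are hypothetical illustration names, not Skia members), shows exactly the work the new comment explains is redundant:

    #include <vulkan/vulkan.h>

    // Sketch (assumption): the explicit host-write -> vertex-input barrier the
    // deleted code issued. It is redundant because vkQueueSubmit already performs
    // an implicit memory barrier for prior host writes, and buffer barriers are
    // disallowed inside an active render pass anyway.
    static void hostWriteBarrier(VkCommandBuffer cmdBuf, VkBuffer buffer) {
        VkBufferMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
        barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;  // INDEX_READ for index buffers
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.buffer = buffer;   // the vertex (or index) VkBuffer
        barrier.offset = 0;
        barrier.size = VK_WHOLE_SIZE;
        vkCmdPipelineBarrier(cmdBuf,
                             VK_PIPELINE_STAGE_HOST_BIT,          // producer: host writes
                             VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,  // consumer: vertex fetch
                             0, 0, nullptr, 1, &barrier, 0, nullptr);
    }
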
@@ -874,6 +864,7 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;
+    info->fFormat = pixelFormat;
    return (GrBackendObject)info;
}
@@ -1024,8 +1015,7 @@ void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bo
    VkImageLayout origDstLayout = vkStencil->currentLayout();
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
    VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    VkPipelineStageFlags srcStageMask =
-        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
@@ -1035,6 +1025,21 @@ void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bo
                              dstStageMask,
                              false);
+    // Change layout of our render target so it can be used as the color attachment. This is what
+    // the render pass expects when it begins.
+    VkImageLayout layout = vkRT->currentLayout();
+    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+    dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+    dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    vkRT->setImageLayout(this,
+                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                         srcAccessMask,
+                         dstAccessMask,
+                         srcStageMask,
+                         dstStageMask,
+                         false);
+
    VkClearRect clearRect;
    // Flip rect if necessary
    SkIRect vkRect = rect;
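
setImageLayout() is Skia's own wrapper; the new block above simply hands it the masks the render pass expects. As a rough sketch, assuming the wrapper ultimately records a VkImageMemoryBarrier (cmdBuf and image are hypothetical handles, and the access/stage parameters mirror the variables in the hunk), the transition to COLOR_ATTACHMENT_OPTIMAL would look like:

    // Sketch (assumption): the barrier a setImageLayout()-style helper records.
    static void toColorAttachment(VkCommandBuffer cmdBuf, VkImage image,
                                  VkImageLayout oldLayout,
                                  VkAccessFlags srcAccessMask,
                                  VkPipelineStageFlags srcStageMask) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = srcAccessMask;  // flush writes made under the old layout
        barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        barrier.oldLayout = oldLayout;          // vkRT->currentLayout()
        barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
        vkCmdPipelineBarrier(cmdBuf, srcStageMask,
                             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // dstStageMask in the hunk
                             0, 0, nullptr, 0, nullptr, 1, &barrier);
    }
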
@@ -1081,8 +1086,7 @@ void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color
    if (rect.width() != target->width() || rect.height() != target->height()) {
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
        VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-        VkPipelineStageFlags srcStageMask =
-            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        vkRT->setImageLayout(this,
                             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
@@ -1092,6 +1096,25 @@ void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color
                             dstStageMask,
                             false);
+        // If we are using a stencil attachment we also need to change its layout to what the render
+        // pass is expecting.
+        if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
+            GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+            origDstLayout = vkStencil->currentLayout();
+            srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+            dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+            srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+            dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+            vkStencil->setImageLayout(this,
+                                      VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                                      srcAccessMask,
+                                      dstAccessMask,
+                                      srcStageMask,
+                                      dstStageMask,
+                                      false);
+        }
+
        VkClearRect clearRect;
        // Flip rect if necessary
        SkIRect vkRect = rect;
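
The stencil transition added here differs from the color sketch above only in its target layout, aspect mask, and access flags; the dstAccessMask also includes READ because the stencil test reads existing values. Continuing the previous sketch under the same assumptions, only these fields change:

    // Sketch (assumption): fields that differ for the depth/stencil transition.
    barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;  // stencil test reads too
    barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;    // plus DEPTH if present
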
@@ -1574,7 +1597,6 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
    const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
    SkASSERT(renderPass);
-    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
@@ -1587,8 +1609,6 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
    // Change layout of our render target so it can be used as the color attachment
    VkImageLayout layout = vkRT->currentLayout();
-    // Our color attachment is purely a destination and won't be read so don't need to flush or
-    // invalidate any caches
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
@@ -1602,8 +1622,7 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                         false);
    // If we are using a stencil attachment we also need to update its layout
-    if (!pipeline.getStencil().isDisabled()) {
-        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
+    if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
        GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
        VkImageLayout origDstLayout = vkStencil->currentLayout();
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
@@ -1621,6 +1640,7 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                                  false);
    }
+    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
    for (int i = 0; i < meshCount; ++i) {
        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
@@ -1638,6 +1658,10 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
            pipelineState->freeTempResources(this);
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = nonIdxMesh->primitiveType();
+            // It is illegal to record the memory barriers needed when we write and update the
+            // uniform buffers in prepareDrawState while inside an active render pass. Thus we
+            // must end the current render pass and then start it up again.
+            fCurrentCmdBuffer->endRenderPass(this);
[Review comment] jvanverth1, 2016/04/21 13:25:24:
    On the first iteration, it doesn't look like we do…
[Reply] egdaniel, 2016/04/21 14:09:20:
    Well we should never get here on the first iterati…
            pipelineState = this->prepareDrawState(pipeline,
                                                   primProc,
                                                   primitiveType,
@@ -1645,6 +1669,7 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
            if (!pipelineState) {
                return;
            }
+            fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
        }
        SkASSERT(pipelineState);
        this->bindGeometry(primProc, *nonIdxMesh);
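
Taken together, the reordering means the render pass now begins only after every attachment is already in its expected layout, and the pass is torn down and restarted whenever a new pipeline state must be prepared mid-draw. A raw-Vulkan sketch of that restart shape, assuming hypothetical handles and extents (none of these names are Skia APIs):

    // Sketch (assumption): command-stream shape when the primitive type changes
    // mid-draw. Uniform-buffer barriers are illegal inside a render pass, so the
    // pass is ended, the new pipeline state is prepared, then the pass restarts.
    static void restartRenderPass(VkCommandBuffer cmdBuf, VkRenderPass renderPass,
                                  VkFramebuffer framebuffer, VkExtent2D extent) {
        vkCmdEndRenderPass(cmdBuf);

        // ... outside the pass: write the uniforms and record the required
        // VkBufferMemoryBarriers (the role prepareDrawState plays here) ...

        VkRenderPassBeginInfo beginInfo = {};
        beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
        beginInfo.renderPass = renderPass;
        beginInfo.framebuffer = framebuffer;
        beginInfo.renderArea = { { 0, 0 }, extent };
        vkCmdBeginRenderPass(cmdBuf, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
    }
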