Index: src/gpu/vk/GrVkCommandBuffer.cpp
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
index bc8c20f016d19feaef9b5a4b6198845d8e3a531f..9604355aad2bf5a388897f1afacff0d77280af0a 100644
--- a/src/gpu/vk/GrVkCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -16,24 +16,6 @@
#include "GrVkTransferBuffer.h"
#include "GrVkUtil.h"
-GrVkCommandBuffer* GrVkCommandBuffer::Create(const GrVkGpu* gpu, VkCommandPool cmdPool) {
-    const VkCommandBufferAllocateInfo cmdInfo = {
-        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
-        NULL,                                             // pNext
-        cmdPool,                                          // commandPool
-        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
-        1                                                 // bufferCount
-    };
-
-    VkCommandBuffer cmdBuffer;
-    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
-                                                                         &cmdInfo,
-                                                                         &cmdBuffer));
-    if (err) {
-        return nullptr;
-    }
-    return new GrVkCommandBuffer(cmdBuffer);
-}
GrVkCommandBuffer::~GrVkCommandBuffer() {
    // Should have ended any render pass we're in the middle of
@@ -79,7 +61,206 @@ void GrVkCommandBuffer::abandonSubResources() const {
    }
}
-void GrVkCommandBuffer::begin(const GrVkGpu* gpu) {
+////////////////////////////////////////////////////////////////////////////////
+// CommandBuffer commands
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
+                                        VkPipelineStageFlags srcStageMask,
+                                        VkPipelineStageFlags dstStageMask,
+                                        bool byRegion,
+                                        BarrierType barrierType,
+                                        void* barrier) const {
+    SkASSERT(fIsActive);
+    // Image barriers are legal inside a render pass, but only when the subpass declares a
+    // self-dependency; we do not support that yet. Buffer barriers are never legal inside a
+    // render pass. For now, assert that we are not in a render pass at all.
+    SkASSERT(!fActiveRenderPass);
+    VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
+
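+    // vkCmdPipelineBarrier takes three parallel barrier arrays (memory, buffer, image).
+    // Dispatch on barrierType so the single barrier lands in the matching array and the
+    // other two counts stay zero.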
+    switch (barrierType) {
+        case kMemory_BarrierType: {
+            const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
+            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+                                                              dstStageMask, dependencyFlags,
+                                                              1, barrierPtr,
+                                                              0, nullptr,
+                                                              0, nullptr));
+            break;
+        }
+
+        case kBufferMemory_BarrierType: {
+            const VkBufferMemoryBarrier* barrierPtr =
+                    reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+                                                              dstStageMask, dependencyFlags,
+                                                              0, nullptr,
+                                                              1, barrierPtr,
+                                                              0, nullptr));
+            break;
+        }
+
+        case kImageMemory_BarrierType: {
+            const VkImageMemoryBarrier* barrierPtr =
+                    reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+                                                              dstStageMask, dependencyFlags,
+                                                              0, nullptr,
+                                                              0, nullptr,
+                                                              1, barrierPtr));
+            break;
+        }
+    }
+}
+
+void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
+                                         int numAttachments,
+                                         const VkClearAttachment* attachments,
+                                         int numRects,
+                                         const VkClearRect* clearRects) const {
+    SkASSERT(fIsActive);
+    SkASSERT(fActiveRenderPass);
+    SkASSERT(numAttachments > 0);
+    SkASSERT(numRects > 0);
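+    // In debug builds, check that each color clear targets the attachment index that the
+    // active render pass actually uses for its color attachment.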
+#ifdef SK_DEBUG
+    for (int i = 0; i < numAttachments; ++i) {
+        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+            uint32_t testIndex;
+            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
+            SkASSERT(testIndex == attachments[i].colorAttachment);
+        }
+    }
+#endif
+    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
+                                                       numAttachments,
+                                                       attachments,
+                                                       numRects,
+                                                       clearRects));
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+                                           GrVkPipelineState* pipelineState,
+                                           VkPipelineLayout layout,
+                                           uint32_t firstSet,
+                                           uint32_t setCount,
+                                           const VkDescriptorSet* descriptorSets,
+                                           uint32_t dynamicOffsetCount,
+                                           const uint32_t* dynamicOffsets) {
+    SkASSERT(fIsActive);
+    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                                         layout,
+                                                         firstSet,
+                                                         setCount,
+                                                         descriptorSets,
+                                                         dynamicOffsetCount,
+                                                         dynamicOffsets));
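+    // Track the pipeline state's uniform resources so they stay alive until this command
+    // buffer has finished executing on the GPU.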
+    pipelineState->addUniformResources(*this);
+}
+
+void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
+    SkASSERT(fIsActive);
+    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
+                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                                   pipeline->pipeline()));
+    addResource(pipeline);
+}
+
+void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
+                                    uint32_t indexCount,
+                                    uint32_t instanceCount,
+                                    uint32_t firstIndex,
+                                    int32_t vertexOffset,
+                                    uint32_t firstInstance) const {
+    SkASSERT(fIsActive);
+    SkASSERT(fActiveRenderPass);
+    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
+                                                  indexCount,
+                                                  instanceCount,
+                                                  firstIndex,
+                                                  vertexOffset,
+                                                  firstInstance));
+}
+
+void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
+                             uint32_t vertexCount,
+                             uint32_t instanceCount,
+                             uint32_t firstVertex,
+                             uint32_t firstInstance) const {
+    SkASSERT(fIsActive);
+    SkASSERT(fActiveRenderPass);
+    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
+                                           vertexCount,
+                                           instanceCount,
+                                           firstVertex,
+                                           firstInstance));
+}
+
+void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
+                                    uint32_t firstViewport,
+                                    uint32_t viewportCount,
+                                    const VkViewport* viewports) {
+    SkASSERT(fIsActive);
+    SkASSERT(1 == viewportCount);
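+    // Only record the command when the state actually changes; redundant dynamic-state
+    // commands are elided by comparing against the cached value.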
+    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
+        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
+                                                      firstViewport,
+                                                      viewportCount,
+                                                      viewports));
+        fCachedViewport = viewports[0];
+    }
+}
+
+void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
+                                   uint32_t firstScissor,
+                                   uint32_t scissorCount,
+                                   const VkRect2D* scissors) {
+    SkASSERT(fIsActive);
+    SkASSERT(1 == scissorCount);
+    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
+        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
+                                                     firstScissor,
+                                                     scissorCount,
+                                                     scissors));
+        fCachedScissor = scissors[0];
+    }
+}
+
+void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
+                                          const float blendConstants[4]) {
+    SkASSERT(fIsActive);
+    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
+        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
+        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// PrimaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
+GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
+                                                           VkCommandPool cmdPool) {
+    const VkCommandBufferAllocateInfo cmdInfo = {
+        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
+        NULL,                                             // pNext
+        cmdPool,                                          // commandPool
+        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
+        1                                                 // commandBufferCount
+    };
+
+    VkCommandBuffer cmdBuffer;
+    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+                                                                         &cmdInfo,
+                                                                         &cmdBuffer));
+    if (err) {
+        return nullptr;
+    }
+    return new GrVkPrimaryCommandBuffer(cmdBuffer);
+}
+
+void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
@@ -93,7 +274,7 @@ void GrVkCommandBuffer::begin(const GrVkGpu* gpu) {
    fIsActive = true;
}
-void GrVkCommandBuffer::end(const GrVkGpu* gpu) {
+void GrVkPrimaryCommandBuffer::end(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
@@ -101,9 +282,7 @@ void GrVkCommandBuffer::end(const GrVkGpu* gpu) {
    fIsActive = false;
}
-///////////////////////////////////////////////////////////////////////////////
-
-void GrVkCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
+void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
                                        const GrVkRenderPass* renderPass,
                                        const GrVkRenderTarget& target) {
    SkASSERT(fIsActive);
@@ -117,14 +296,26 @@ void GrVkCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
    target.addResources(*this);
}
-void GrVkCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
+void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}
-void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync) {
+void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
+                                               const GrVkSecondaryCommandBuffer* buffer) {
+    SkASSERT(fIsActive);
+    SkASSERT(fActiveRenderPass);
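+    // vkCmdExecuteCommands requires that the secondary buffer was recorded against a render
+    // pass compatible with the render pass that is currently active.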
+    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));
+
+    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
+    this->addResource(buffer);
+}
+
+void GrVkPrimaryCommandBuffer::submitToQueue(const GrVkGpu* gpu,
+                                             VkQueue queue,
+                                             GrVkGpu::SyncQueue sync) {
    SkASSERT(!fIsActive);
    VkResult err;
@@ -163,7 +354,7 @@ void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu
    }
}
-bool GrVkCommandBuffer::finished(const GrVkGpu* gpu) const {
+bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) const {
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }
@@ -185,67 +376,13 @@ bool GrVkCommandBuffer::finished(const GrVkGpu* gpu) const {
    return false;
}
-////////////////////////////////////////////////////////////////////////////////
-// CommandBuffer commands
-////////////////////////////////////////////////////////////////////////////////
-
-void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
-                                        VkPipelineStageFlags srcStageMask,
-                                        VkPipelineStageFlags dstStageMask,
-                                        bool byRegion,
-                                        BarrierType barrierType,
-                                        void* barrier) const {
-    SkASSERT(fIsActive);
-    // For images we can have barriers inside of render passes but they require us to add more
-    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
-    // never have buffer barriers inside of a render pass. For now we will just assert that we are
-    // not in a render pass.
-    SkASSERT(!fActiveRenderPass);
-    VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
-
-    switch (barrierType) {
-        case kMemory_BarrierType: {
-            const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
-            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
-                                                              dstStageMask, dependencyFlags,
-                                                              1, barrierPtr,
-                                                              0, nullptr,
-                                                              0, nullptr));
-            break;
-        }
-
-        case kBufferMemory_BarrierType: {
-            const VkBufferMemoryBarrier* barrierPtr =
-                    reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
-            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
-                                                              dstStageMask, dependencyFlags,
-                                                              0, nullptr,
-                                                              1, barrierPtr,
-                                                              0, nullptr));
-            break;
-        }
-
-        case kImageMemory_BarrierType: {
-            const VkImageMemoryBarrier* barrierPtr =
-                    reinterpret_cast<VkImageMemoryBarrier*>(barrier);
-            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
-                                                              dstStageMask, dependencyFlags,
-                                                              0, nullptr,
-                                                              0, nullptr,
-                                                              1, barrierPtr));
-            break;
-        }
-    }
-
-}
-
-void GrVkCommandBuffer::copyImage(const GrVkGpu* gpu,
-                                  GrVkImage* srcImage,
-                                  VkImageLayout srcLayout,
-                                  GrVkImage* dstImage,
-                                  VkImageLayout dstLayout,
-                                  uint32_t copyRegionCount,
-                                  const VkImageCopy* copyRegions) {
+void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
+                                         GrVkImage* srcImage,
+                                         VkImageLayout srcLayout,
+                                         GrVkImage* dstImage,
+                                         VkImageLayout dstLayout,
+                                         uint32_t copyRegionCount,
+                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
@@ -259,16 +396,16 @@ void GrVkCommandBuffer::copyImage(const GrVkGpu* gpu,
                                                copyRegions));
}
-void GrVkCommandBuffer::blitImage(const GrVkGpu* gpu,
-                                  const GrVkResource* srcResource,
-                                  VkImage srcImage,
-                                  VkImageLayout srcLayout,
-                                  const GrVkResource* dstResource,
-                                  VkImage dstImage,
-                                  VkImageLayout dstLayout,
-                                  uint32_t blitRegionCount,
-                                  const VkImageBlit* blitRegions,
-                                  VkFilter filter) {
+void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
+                                         const GrVkResource* srcResource,
+                                         VkImage srcImage,
+                                         VkImageLayout srcLayout,
+                                         const GrVkResource* dstResource,
+                                         VkImage dstImage,
+                                         VkImageLayout dstLayout,
+                                         uint32_t blitRegionCount,
+                                         const VkImageBlit* blitRegions,
+                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcResource);
@@ -283,12 +420,12 @@ void GrVkCommandBuffer::blitImage(const GrVkGpu* gpu,
                                              filter));
}
-void GrVkCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
-                                          GrVkImage* srcImage,
-                                          VkImageLayout srcLayout,
-                                          GrVkTransferBuffer* dstBuffer,
-                                          uint32_t copyRegionCount,
-                                          const VkBufferImageCopy* copyRegions) {
+void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
+                                                 GrVkImage* srcImage,
+                                                 VkImageLayout srcLayout,
+                                                 GrVkTransferBuffer* dstBuffer,
+                                                 uint32_t copyRegionCount,
+                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
@@ -301,12 +438,12 @@ void GrVkCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                      copyRegions));
}
-void GrVkCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
-                                          GrVkTransferBuffer* srcBuffer,
-                                          GrVkImage* dstImage,
-                                          VkImageLayout dstLayout,
-                                          uint32_t copyRegionCount,
-                                          const VkBufferImageCopy* copyRegions) {
+void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
+                                                 GrVkTransferBuffer* srcBuffer,
+                                                 GrVkImage* dstImage,
+                                                 VkImageLayout dstLayout,
+                                                 uint32_t copyRegionCount,
+                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcBuffer->resource());
@@ -319,11 +456,11 @@ void GrVkCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                      copyRegions));
}
-void GrVkCommandBuffer::clearColorImage(const GrVkGpu* gpu,
-                                        GrVkImage* image,
-                                        const VkClearColorValue* color,
-                                        uint32_t subRangeCount,
-                                        const VkImageSubresourceRange* subRanges) {
+void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
+                                               GrVkImage* image,
+                                               const VkClearColorValue* color,
+                                               uint32_t subRangeCount,
+                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
@@ -335,11 +472,11 @@ void GrVkCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                                    subRanges));
}
-void GrVkCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
-                                               GrVkImage* image,
-                                               const VkClearDepthStencilValue* color,
-                                               uint32_t subRangeCount,
-                                               const VkImageSubresourceRange* subRanges) {
+void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
+                                                      GrVkImage* image,
+                                                      const VkClearDepthStencilValue* color,
+                                                      uint32_t subRangeCount,
+                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
@@ -351,124 +488,65 @@ void GrVkCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                           subRanges));
}
-void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
-                                         int numAttachments,
-                                         const VkClearAttachment* attachments,
-                                         int numRects,
-                                         const VkClearRect* clearRects) const {
-    SkASSERT(fIsActive);
-    SkASSERT(fActiveRenderPass);
-    SkASSERT(numAttachments > 0);
-    SkASSERT(numRects > 0);
-#ifdef SK_DEBUG
-    for (int i = 0; i < numAttachments; ++i) {
-        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
-            uint32_t testIndex;
-            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
-            SkASSERT(testIndex == attachments[i].colorAttachment);
-        }
-    }
-#endif
-    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
-                                                       numAttachments,
-                                                       attachments,
-                                                       numRects,
-                                                       clearRects));
-}
+////////////////////////////////////////////////////////////////////////////////
+// SecondaryCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
-void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
-                                           GrVkPipelineState* pipelineState,
-                                           VkPipelineLayout layout,
-                                           uint32_t firstSet,
-                                           uint32_t setCount,
-                                           const VkDescriptorSet* descriptorSets,
-                                           uint32_t dynamicOffsetCount,
-                                           const uint32_t* dynamicOffsets) {
-    SkASSERT(fIsActive);
-    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
-                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                                         layout,
-                                                         firstSet,
-                                                         setCount,
-                                                         descriptorSets,
-                                                         dynamicOffsetCount,
-                                                         dynamicOffsets));
-    pipelineState->addUniformResources(*this);
-}
+GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
+                                             const GrVkGpu* gpu,
+                                             VkCommandPool cmdPool,
+                                             const GrVkRenderPass* compatibleRenderPass) {
+    const VkCommandBufferAllocateInfo cmdInfo = {
+        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
+        NULL,                                             // pNext
+        cmdPool,                                          // commandPool
+        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
+        1                                                 // commandBufferCount
+    };
-void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
-    SkASSERT(fIsActive);
-    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
-                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                                   pipeline->pipeline()));
-    addResource(pipeline);
+    VkCommandBuffer cmdBuffer;
+    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+                                                                         &cmdInfo,
+                                                                         &cmdBuffer));
+    if (err) {
+        return nullptr;
+    }
+    return new GrVkSecondaryCommandBuffer(cmdBuffer, compatibleRenderPass);
}
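+// Expected lifecycle (a sketch of intended use, not enforced by this class):
+//   GrVkSecondaryCommandBuffer* sb = GrVkSecondaryCommandBuffer::Create(gpu, pool, rp);
+//   sb->begin(gpu, framebuffer);       // record inside the compatible render pass
+//   ... bind/draw calls ...
+//   sb->end(gpu);
+//   primary->executeCommands(gpu, sb);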
-void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
-                                    uint32_t indexCount,
-                                    uint32_t instanceCount,
-                                    uint32_t firstIndex,
-                                    int32_t vertexOffset,
-                                    uint32_t firstInstance) const {
-    SkASSERT(fIsActive);
-    SkASSERT(fActiveRenderPass);
-    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
-                                                  indexCount,
-                                                  instanceCount,
-                                                  firstIndex,
-                                                  vertexOffset,
-                                                  firstInstance));
-}
-void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
-                             uint32_t vertexCount,
-                             uint32_t instanceCount,
-                             uint32_t firstVertex,
-                             uint32_t firstInstance) const {
-    SkASSERT(fIsActive);
+void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer) {
+    SkASSERT(!fIsActive);
    SkASSERT(fActiveRenderPass);
-    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
-                                           vertexCount,
-                                           instanceCount,
-                                           firstVertex,
-                                           firstInstance));
-}
-void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
-                                    uint32_t firstViewport,
-                                    uint32_t viewportCount,
-                                    const VkViewport* viewports) {
-    SkASSERT(fIsActive);
-    SkASSERT(1 == viewportCount);
-    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
-        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
-                                                      firstViewport,
-                                                      viewportCount,
-                                                      viewports));
-        fCachedViewport = viewports[0];
-    }
-}
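+    // A secondary buffer that executes inside a render pass must declare, up front, the
+    // render pass it will be compatible with (and the framebuffer, if already known) via
+    // VkCommandBufferInheritanceInfo.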
+    VkCommandBufferInheritanceInfo inheritanceInfo;
+    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
+    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
+    inheritanceInfo.pNext = nullptr;
+    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
+    inheritanceInfo.subpass = 0;   // we currently use a single subpass per render pass
+    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
+    inheritanceInfo.occlusionQueryEnable = false;
+    inheritanceInfo.queryFlags = 0;
+    inheritanceInfo.pipelineStatistics = 0;
-void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
-                                   uint32_t firstScissor,
-                                   uint32_t scissorCount,
-                                   const VkRect2D* scissors) {
-    SkASSERT(fIsActive);
-    SkASSERT(1 == scissorCount);
-    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
-        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
-                                                     firstScissor,
-                                                     scissorCount,
-                                                     scissors));
-        fCachedScissor = scissors[0];
-    }
+    VkCommandBufferBeginInfo cmdBufferBeginInfo;
+    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+    cmdBufferBeginInfo.pNext = nullptr;
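+    // RENDER_PASS_CONTINUE_BIT marks this buffer as executing entirely inside a render
+    // pass; ONE_TIME_SUBMIT_BIT notes that each recording is executed only once before the
+    // buffer is rerecorded.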
+    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
+                               VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;
+
+    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+                                                               &cmdBufferBeginInfo));
+    fIsActive = true;
}
-void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
-                                          const float blendConstants[4]) {
+void GrVkSecondaryCommandBuffer::end(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
-    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
-        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
-        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
-    }
+    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+    this->invalidateState();
+    fIsActive = false;
}