OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkGpuCommandBuffer.h" | 8 #include "GrVkGpuCommandBuffer.h" |
9 | 9 |
| 10 #include "GrMesh.h" |
| 11 #include "GrPipeline.h" |
| 12 #include "GrRenderTargetPriv.h" |
| 13 #include "GrTextureAccess.h" |
| 14 #include "GrTexturePriv.h" |
10 #include "GrVkCommandBuffer.h" | 15 #include "GrVkCommandBuffer.h" |
11 #include "GrVkGpu.h" | 16 #include "GrVkGpu.h" |
| 17 #include "GrVkPipeline.h" |
12 #include "GrVkRenderPass.h" | 18 #include "GrVkRenderPass.h" |
13 #include "GrVkRenderTarget.h" | 19 #include "GrVkRenderTarget.h" |
14 #include "GrVkResourceProvider.h" | 20 #include "GrVkResourceProvider.h" |
| 21 #include "GrVkTexture.h" |
15 | 22 |
16 void get_vk_load_store_ops(GrGpuCommandBuffer::LoadAndStoreOp op, | 23 void get_vk_load_store_ops(GrGpuCommandBuffer::LoadAndStoreOp op, |
17 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { | 24 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { |
18 switch (op) { | 25 switch (op) { |
19 case GrGpuCommandBuffer::kLoadAndStore_LoadAndStoreOp: | 26 case GrGpuCommandBuffer::kLoadAndStore_LoadAndStoreOp: |
20 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 27 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
21 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 28 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
22 break; | 29 break; |
23 case GrGpuCommandBuffer::kLoadAndDiscard_LoadAndStoreOp: | 30 case GrGpuCommandBuffer::kLoadAndDiscard_LoadAndStoreOp: |
24 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 31 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
(...skipping 17 matching lines...) |
42 break; | 49 break; |
43 default: | 50 default: |
44 SK_ABORT("Invalid LoadAndStoreOp"); | 51 SK_ABORT("Invalid LoadAndStoreOp"); |
45 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 52 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
46 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 53 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
47 break; | 54 break; |
48 } | 55 } |
49 } | 56 } |
50 | 57 |
51 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, | 58 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, |
52 const GrVkRenderTarget& target, | 59 GrVkRenderTarget* target, |
53 LoadAndStoreOp colorOp, GrColor colorClear, | 60 LoadAndStoreOp colorOp, GrColor colorClear, |
54 LoadAndStoreOp stencilOp, GrColor stencilClear) | 61 LoadAndStoreOp stencilOp, GrColor stencilClear) |
55 : fGpu(gpu) { | 62 : fGpu(gpu) |
| 63 , fRenderTarget(target) |
| 64 , fIsEmpty(true) { |
56 VkAttachmentLoadOp vkLoadOp; | 65 VkAttachmentLoadOp vkLoadOp; |
57 VkAttachmentStoreOp vkStoreOp; | 66 VkAttachmentStoreOp vkStoreOp; |
58 | 67 |
59 get_vk_load_store_ops(colorOp, &vkLoadOp, &vkStoreOp); | 68 get_vk_load_store_ops(colorOp, &vkLoadOp, &vkStoreOp); |
60 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); | 69 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); |
61 | 70 |
62 get_vk_load_store_ops(stencilOp, &vkLoadOp, &vkStoreOp); | 71 get_vk_load_store_ops(stencilOp, &vkLoadOp, &vkStoreOp); |
63 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); | 72 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); |
64 | 73 |
65 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, | 74 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
66 VK_ATTACHMENT_STORE_OP_STORE); | 75 VK_ATTACHMENT_STORE_OP_STORE); |
67 | 76 |
68 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target.compatibleRenderPassHandle(); | 77 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle(); |
69 if (rpHandle.isValid()) { | 78 if (rpHandle.isValid()) { |
70 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, | 79 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, |
71 vkColorOps, | 80 vkColorOps, |
72 vkResolveOps, | 81 vkResolveOps, |
73 vkStencilOps); | 82 vkStencilOps); |
74 } else { | 83 } else { |
75 fRenderPass = fGpu->resourceProvider().findRenderPass(target, | 84 fRenderPass = fGpu->resourceProvider().findRenderPass(*target, |
76 vkColorOps, | 85 vkColorOps, |
77 vkResolveOps, | 86 vkResolveOps, |
78 vkStencilOps); | 87 vkStencilOps); |
79 } | 88 } |
80 | 89 |
| 90 GrColorToRGBAFloat(colorClear, fColorClearValue.color.float32); |
| 91 |
81 fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); | 92 fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); |
82 fCommandBuffer->begin(gpu, target.framebuffer()); | 93 fCommandBuffer->begin(gpu, target->framebuffer()); |
83 } | 94 } |
84 | 95 |
85 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { | 96 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { |
86 fCommandBuffer->unref(fGpu); | 97 fCommandBuffer->unref(fGpu); |
87 fRenderPass->unref(fGpu); | 98 fRenderPass->unref(fGpu); |
88 } | 99 } |
89 | 100 |
| 101 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; } |
| 102 |
90 void GrVkGpuCommandBuffer::end() { | 103 void GrVkGpuCommandBuffer::end() { |
91 fCommandBuffer->end(fGpu); | 104 fCommandBuffer->end(fGpu); |
92 } | 105 } |
93 | 106 |
94 void GrVkGpuCommandBuffer::submit() { | 107 void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) { |
95 fGpu->submitSecondaryCommandBuffer(fCommandBuffer); | 108 // Change layout of our render target so it can be used as the color attachment |
96 } | 109 fRenderTarget->setImageLayout(fGpu, |
97 | 110 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 111 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, |
| 112 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 113 false); |
| 114 |
| 115 // If we are using a stencil attachment we also need to update its layout |
| 116 if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) { |
| 117 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; |
| 118 vkStencil->setImageLayout(fGpu, |
| 119 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, |
| 120 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | |
| 121 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, |
| 122 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 123 false); |
| 124 } |
| 125 |
| 126 for (int i = 0; i < fSampledImages.count(); ++i) { |
| 127 fSampledImages[i]->setImageLayout(fGpu, |
| 128 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, |
| 129 VK_ACCESS_SHADER_READ_BIT, |
| 130 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, |
| 131 false); |
| 132 } |
| 133 |
| 134 fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, &fColorClearValue, |
| 135 fRenderTarget, bounds); |
| 136 } |
| 137 |
| 138 void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* target, |
| 139 const SkIRect& rect, |
| 140 bool insideClip) { |
| 141 SkASSERT(target); |
| 142 |
| 143 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); |
| 144 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); |
| 145 // this should only be called internally when we know we have a |
| 146 // stencil buffer. |
| 147 SkASSERT(sb); |
| 148 int stencilBitCount = sb->bits(); |
| 149 |
| 150 // The contract with the callers does not guarantee that we preserve all bits in the stencil |
| 151 // during this clear. Thus we will clear the entire stencil to the desired value. |
| 152 |
| 153 VkClearDepthStencilValue vkStencilColor; |
| 154 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue)); |
| 155 if (insideClip) { |
| 156 vkStencilColor.stencil = (1 << (stencilBitCount - 1)); |
| 157 } else { |
| 158 vkStencilColor.stencil = 0; |
| 159 } |
| 160 |
| 161 VkClearRect clearRect; |
| 162 // Flip rect if necessary |
| 163 SkIRect vkRect = rect; |
| 164 |
| 165 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { |
| 166 vkRect.fTop = vkRT->height() - rect.fBottom; |
| 167 vkRect.fBottom = vkRT->height() - rect.fTop; |
| 168 } |
| 169 |
| 170 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; |
| 171 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; |
| 172 |
| 173 clearRect.baseArrayLayer = 0; |
| 174 clearRect.layerCount = 1; |
| 175 |
| 176 uint32_t stencilIndex; |
| 177 SkAssertResult(fRenderPass->stencilAttachmentIndex(&stencilIndex)); |
| 178 |
| 179 VkClearAttachment attachment; |
| 180 attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; |
| 181 attachment.colorAttachment = 0; // this value shouldn't matter |
| 182 attachment.clearValue.depthStencil = vkStencilColor; |
| 183 |
| 184 fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); |
| 185 fIsEmpty = false; |
| 186 } |
| 187 |
| 188 void GrVkGpuCommandBuffer::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { |
| 189 // parent class should never let us get here with no RT |
| 190 SkASSERT(target); |
| 191 |
| 192 VkClearColorValue vkColor; |
| 193 GrColorToRGBAFloat(color, vkColor.float32); |
| 194 |
| 195 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); |
| 196 |
| 197 if (fIsEmpty) { |
| 198 // We will change the render pass to do a clear load instead |
| 199 GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR, |
| 200 VK_ATTACHMENT_STORE_OP_STORE); |
| 201 GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
| 202 VK_ATTACHMENT_STORE_OP_STORE); |
| 203 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
| 204 VK_ATTACHMENT_STORE_OP_STORE); |
| 205 |
| 206 const GrVkRenderPass* oldRP = fRenderPass; |
| 207 |
| 208 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = |
| 209 vkRT->compatibleRenderPassHandle(); |
| 210 if (rpHandle.isValid()) { |
| 211 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, |
| 212 vkColorOps, |
| 213 vkResolveOps, |
| 214 vkStencilOps); |
| 215 } else { |
| 216 fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT, |
| 217 vkColorOps, |
| 218 vkResolveOps, |
| 219 vkStencilOps); |
| 220 } |
| 221 |
| 222 SkASSERT(fRenderPass->isCompatible(*oldRP)); |
| 223 oldRP->unref(fGpu); |
| 224 |
| 225 GrColorToRGBAFloat(color, fColorClearValue.color.float32); |
| 226 return; |
| 227 } |
| 228 |
| 229 // We always do a sub rect clear with clearAttachments since we are inside a render pass |
| 230 VkClearRect clearRect; |
| 231 // Flip rect if necessary |
| 232 SkIRect vkRect = rect; |
| 233 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { |
| 234 vkRect.fTop = vkRT->height() - rect.fBottom; |
| 235 vkRect.fBottom = vkRT->height() - rect.fTop; |
| 236 } |
| 237 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; |
| 238 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; |
| 239 clearRect.baseArrayLayer = 0; |
| 240 clearRect.layerCount = 1; |
| 241 |
| 242 uint32_t colorIndex; |
| 243 SkAssertResult(fRenderPass->colorAttachmentIndex(&colorIndex)); |
| 244 |
| 245 VkClearAttachment attachment; |
| 246 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; |
| 247 attachment.colorAttachment = colorIndex; |
| 248 attachment.clearValue.color = vkColor; |
| 249 |
| 250 fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); |
| 251 fIsEmpty = false; |
| 252 return; |
| 253 } |
| 254 |
| 255 //////////////////////////////////////////////////////////////////////////////// |
| 256 |
| 257 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc, |
| 258 const GrNonInstancedMesh& mesh) { |
| 259 // There is no need to put any memory barriers to make sure host writes have finished here. |
| 260 // When a command buffer is submitted to a queue, there is an implicit memory barrier that |
| 261 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of |
| 262 // an active RenderPass. |
| 263 GrVkVertexBuffer* vbuf; |
| 264 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); |
| 265 SkASSERT(vbuf); |
| 266 SkASSERT(!vbuf->isMapped()); |
| 267 |
| 268 fCommandBuffer->bindVertexBuffer(fGpu, vbuf); |
| 269 |
| 270 if (mesh.isIndexed()) { |
| 271 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); |
| 272 SkASSERT(ibuf); |
| 273 SkASSERT(!ibuf->isMapped()); |
| 274 |
| 275 fCommandBuffer->bindIndexBuffer(fGpu, ibuf); |
| 276 } |
| 277 } |
| 278 |
| 279 sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState( |
| 280 const GrPipeline& pipeline, |
| 281 const GrPrimitiveProcessor& primProc, |
| 282 GrPrimitiveType primitiveType, |
| 283 const GrVkRenderPass& renderPass) { |
| 284 sk_sp<GrVkPipelineState> pipelineState = |
| 285 fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline, |
| 286 primProc, |
| 287 primitiveType, |
| 288 renderPass); |
| 289 if (!pipelineState) { |
| 290 return pipelineState; |
| 291 } |
| 292 |
| 293 pipelineState->setData(fGpu, primProc, pipeline); |
| 294 |
| 295 pipelineState->bind(fGpu, fCommandBuffer); |
| 296 |
| 297 GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline); |
| 298 |
| 299 return pipelineState; |
| 300 } |
| 301 |
| 302 static void append_sampled_images(const GrProcessor& processor, |
| 303 const GrVkGpu* gpu, |
| 304 SkTArray<GrVkImage*>* sampledImages) { |
| 305 if (int numTextures = processor.numTextures()) { |
| 306 GrVkImage** images = sampledImages->push_back_n(numTextures); |
| 307 int i = 0; |
| 308 do { |
| 309 const GrTextureAccess& texAccess = processor.textureAccess(i); |
| 310 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i)); |
| 311 SkASSERT(vkTexture); |
| 312 const GrTextureParams& params = texAccess.getParams(); |
| 313 // Check if we need to regenerate any mip maps |
| 314 if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) { |
| 315 if (vkTexture->texturePriv().mipMapsAreDirty()) { |
| 316 gpu->generateMipmap(vkTexture); |
| 317 vkTexture->texturePriv().dirtyMipMaps(false); |
| 318 } |
| 319 } |
| 320 |
| 321 images[i] = vkTexture; |
| 322 } while (++i < numTextures); |
| 323 |
| 324 } |
| 325 } |
| 326 |
| 327 void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline, |
| 328 const GrPrimitiveProcessor& primProc, |
| 329 const GrMesh* meshes, |
| 330 int meshCount) { |
| 331 if (!meshCount) { |
| 332 return; |
| 333 } |
| 334 GrRenderTarget* rt = pipeline.getRenderTarget(); |
| 335 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 336 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); |
| 337 SkASSERT(renderPass); |
| 338 |
| 339 GrPrimitiveType primitiveType = meshes[0].primitiveType(); |
| 340 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, |
| 341 primProc, |
| 342 primitiveType, |
| 343 *renderPass); |
| 344 if (!pipelineState) { |
| 345 return; |
| 346 } |
| 347 |
| 348 append_sampled_images(primProc, fGpu, &fSampledImages); |
| 349 for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) { |
| 350 append_sampled_images(pipeline.getFragmentProcessor(i), fGpu, &fSampledImages); |
| 351 } |
| 352 append_sampled_images(pipeline.getXferProcessor(), fGpu, &fSampledImages); |
| 353 |
| 354 for (int i = 0; i < meshCount; ++i) { |
| 355 const GrMesh& mesh = meshes[i]; |
| 356 GrMesh::Iterator iter; |
| 357 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh); |
| 358 do { |
| 359 if (nonIdxMesh->primitiveType() != primitiveType) { |
| 360 // Technically we don't have to call this here (since there is a safety check in |
| 361 // pipelineState:setData) but this will allow for quicker freeing of resources if the |
| 362 // pipelineState sits in a cache for a while. |
| 363 pipelineState->freeTempResources(fGpu); |
| 364 SkDEBUGCODE(pipelineState = nullptr); |
| 365 primitiveType = nonIdxMesh->primitiveType(); |
| 366 pipelineState = this->prepareDrawState(pipeline, |
| 367 primProc, |
| 368 primitiveType, |
| 369 *renderPass); |
| 370 if (!pipelineState) { |
| 371 return; |
| 372 } |
| 373 } |
| 374 SkASSERT(pipelineState); |
| 375 this->bindGeometry(primProc, *nonIdxMesh); |
| 376 |
| 377 if (nonIdxMesh->isIndexed()) { |
| 378 fCommandBuffer->drawIndexed(fGpu, |
| 379 nonIdxMesh->indexCount(), |
| 380 1, |
| 381 nonIdxMesh->startIndex(), |
| 382 nonIdxMesh->startVertex(), |
| 383 0); |
| 384 } else { |
| 385 fCommandBuffer->draw(fGpu, |
| 386 nonIdxMesh->vertexCount(), |
| 387 1, |
| 388 nonIdxMesh->startVertex(), |
| 389 0); |
| 390 } |
| 391 fIsEmpty = false; |
| 392 |
| 393 fGpu->stats()->incNumDraws(); |
| 394 } while ((nonIdxMesh = iter.next())); |
| 395 } |
| 396 |
| 397 // Technically we don't have to call this here (since there is a safety check in |
| 398 // pipelineState:setData) but this will allow for quicker freeing of resources if the |
| 399 // pipelineState sits in a cache for a while. |
| 400 pipelineState->freeTempResources(fGpu); |
| 401 } |
| 402 |