| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpuCommandBuffer.h" | 8 #include "GrVkGpuCommandBuffer.h" |
| 9 | 9 |
| 10 #include "GrFixedClip.h" | 10 #include "GrFixedClip.h" |
| (...skipping 85 matching lines...) |
| 96 | 96 |
| 97 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; } | 97 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; } |
| 98 | 98 |
| 99 void GrVkGpuCommandBuffer::end() { | 99 void GrVkGpuCommandBuffer::end() { |
| 100 fCommandBuffer->end(fGpu); | 100 fCommandBuffer->end(fGpu); |
| 101 } | 101 } |
| 102 | 102 |
| 103 void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) { | 103 void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) { |
| 104 // Change layout of our render target so it can be used as the color attachment. Currently | 104 // Change layout of our render target so it can be used as the color attachment. Currently |
| 105 // we don't attach the resolve to the framebuffer so no need to change its layout. | 105 // we don't attach the resolve to the framebuffer so no need to change its layout. |
| 106 GrVkImage* targetImage = fRenderTarget->msaaImage() ? fRenderTarget->msaaImage() | 106 GrVkImage* targetImage = fRenderTarget->msaaImage() ? fRenderTarget->msaaImage() |
| 107 : fRenderTarget; | 107 : fRenderTarget; |
| 108 targetImage->setImageLayout(fGpu, | 108 targetImage->setImageLayout(fGpu, |
| 109 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, | 109 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 110 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, | 110 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, |
| 111 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | 111 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 112 false); | 112 false); |
| 113 | 113 |
| 114 // If we are using a stencil attachment we also need to update its layout | 114 // If we are using a stencil attachment we also need to update its layout |
| 115 if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) { | 115 if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) { |
| 116 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; | 116 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; |
| 117 vkStencil->setImageLayout(fGpu, | 117 vkStencil->setImageLayout(fGpu, |
| 118 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, | 118 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, |
| 119 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | | 119 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | |
| 120 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, | 120 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, |
| 121 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | 121 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 122 false); | 122 false); |
| 123 } | 123 } |
| 124 | 124 |
| 125 for (int i = 0; i < fSampledImages.count(); ++i) { | |
| 126 fSampledImages[i]->setImageLayout(fGpu, | |
| 127 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, | |
| 128 VK_ACCESS_SHADER_READ_BIT, | |
| 129 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, | |
| 130 false); | |
| 131 } | |
| 132 | |
| 133 fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, &fColorClearValue, | 125 fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, &fColorClearValue, |
| 134 fRenderTarget, bounds); | 126 fRenderTarget, bounds); |
| 135 } | 127 } |
| 136 | 128 |
| 137 void GrVkGpuCommandBuffer::discard(GrRenderTarget* target) { | 129 void GrVkGpuCommandBuffer::discard(GrRenderTarget* target) { |
| 138 if (fIsEmpty) { | 130 if (fIsEmpty) { |
| 139 // We will change the render pass to do a clear load instead | 131 // We will change the render pass to do a clear load instead |
| 140 GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE, | 132 GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE, |
| 141 VK_ATTACHMENT_STORE_OP_STORE); | 133 VK_ATTACHMENT_STORE_OP_STORE); |
| 142 GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE, | 134 GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE, |
| (...skipping 191 matching lines...) |
| 334 | 326 |
| 335 pipelineState->setData(fGpu, primProc, pipeline); | 327 pipelineState->setData(fGpu, primProc, pipeline); |
| 336 | 328 |
| 337 pipelineState->bind(fGpu, fCommandBuffer); | 329 pipelineState->bind(fGpu, fCommandBuffer); |
| 338 | 330 |
| 339 GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline); | 331 GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline); |
| 340 | 332 |
| 341 return pipelineState; | 333 return pipelineState; |
| 342 } | 334 } |
| 343 | 335 |
| 344 static void append_sampled_images(const GrProcessor& processor, | 336 static void prepare_sampled_images(const GrProcessor& processor, GrVkGpu* gpu) { |
| 345 GrVkGpu* gpu, | 337 for (int i = 0; i < processor.numTextures(); ++i) { |
| 346 SkTArray<GrVkImage*>* sampledImages) { | 338 const GrTextureAccess& texAccess = processor.textureAccess(i); |
| 347 if (int numTextures = processor.numTextures()) { | 339 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i)); |
| 348 GrVkImage** images = sampledImages->push_back_n(numTextures); | 340 SkASSERT(vkTexture); |
| 349 int i = 0; | |
| 350 do { | |
| 351 const GrTextureAccess& texAccess = processor.textureAccess(i); | |
| 352 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i)); | |
| 353 SkASSERT(vkTexture); | |
| 354 | 341 |
| 355 // We may need to resolve the texture first if it is also a render target | 342 // We may need to resolve the texture first if it is also a render target |
| 356 GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget()); | 343 GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget()); |
| 357 if (texRT) { | 344 if (texRT) { |
| 358 gpu->onResolveRenderTarget(texRT); | 345 gpu->onResolveRenderTarget(texRT); |
| 346 } |
| 347 |
| 348 const GrTextureParams& params = texAccess.getParams(); |
| 349 // Check if we need to regenerate any mip maps |
| 350 if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) { |
| 351 if (vkTexture->texturePriv().mipMapsAreDirty()) { |
| 352 gpu->generateMipmap(vkTexture); |
| 353 vkTexture->texturePriv().dirtyMipMaps(false); |
| 359 } | 354 } |
| 355 } |
| 360 | 356 |
| 361 const GrTextureParams& params = texAccess.getParams(); | 357 // TODO: If we ever decide to create the secondary command buffers ahead of time before we |
| 362 // Check if we need to regenerate any mip maps | 358 // are actually going to submit them, we will need to track the sampled images and delay |
| 363 if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) { | 359 // adding the layout change/barrier until we are ready to submit. |
| 364 if (vkTexture->texturePriv().mipMapsAreDirty()) { | 360 vkTexture->setImageLayout(gpu, |
| 365 gpu->generateMipmap(vkTexture); | 361 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, |
| 366 vkTexture->texturePriv().dirtyMipMaps(false); | 362 VK_ACCESS_SHADER_READ_BIT, |
| 367 } | 363 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, |
| 368 } | 364 false); |
| 369 | |
| 370 images[i] = vkTexture; | |
| 371 } while (++i < numTextures); | |
| 372 | |
| 373 } | 365 } |
| 374 } | 366 } |
| 375 | 367 |
| 376 void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline, | 368 void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline, |
| 377 const GrPrimitiveProcessor& primProc, | 369 const GrPrimitiveProcessor& primProc, |
| 378 const GrMesh* meshes, | 370 const GrMesh* meshes, |
| 379 int meshCount) { | 371 int meshCount) { |
| 380 if (!meshCount) { | 372 if (!meshCount) { |
| 381 return; | 373 return; |
| 382 } | 374 } |
| 383 GrRenderTarget* rt = pipeline.getRenderTarget(); | 375 GrRenderTarget* rt = pipeline.getRenderTarget(); |
| 384 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); | 376 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 385 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); | 377 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); |
| 386 SkASSERT(renderPass); | 378 SkASSERT(renderPass); |
| 387 | 379 |
| 388 append_sampled_images(primProc, fGpu, &fSampledImages); | 380 prepare_sampled_images(primProc, fGpu); |
| 389 for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) { | 381 for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) { |
| 390 append_sampled_images(pipeline.getFragmentProcessor(i), fGpu, &fSampledImages); | 382 prepare_sampled_images(pipeline.getFragmentProcessor(i), fGpu); |
| 391 } | 383 } |
| 392 append_sampled_images(pipeline.getXferProcessor(), fGpu, &fSampledImages); | 384 prepare_sampled_images(pipeline.getXferProcessor(), fGpu); |
| 393 | 385 |
| 394 GrPrimitiveType primitiveType = meshes[0].primitiveType(); | 386 GrPrimitiveType primitiveType = meshes[0].primitiveType(); |
| 395 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, | 387 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, |
| 396 primProc, | 388 primProc, |
| 397 primitiveType, | 389 primitiveType, |
| 398 *renderPass); | 390 *renderPass); |
| 399 if (!pipelineState) { | 391 if (!pipelineState) { |
| 400 return; | 392 return; |
| 401 } | 393 } |
| 402 | 394 |
| (...skipping 39 matching lines...) |
| 442 fGpu->stats()->incNumDraws(); | 434 fGpu->stats()->incNumDraws(); |
| 443 } while ((nonIdxMesh = iter.next())); | 435 } while ((nonIdxMesh = iter.next())); |
| 444 } | 436 } |
| 445 | 437 |
| 446 // Technically we don't have to call this here (since there is a safety check in | 438 // Technically we don't have to call this here (since there is a safety check in |
| 447 // pipelineState:setData but this will allow for quicker freeing of resources if the | 439 // pipelineState:setData but this will allow for quicker freeing of resources if the |
| 448 // pipelineState sits in a cache for a while. | 440 // pipelineState sits in a cache for a while. |
| 449 pipelineState->freeTempResources(fGpu); | 441 pipelineState->freeTempResources(fGpu); |
| 450 } | 442 } |
| 451 | 443 |
| OLD | NEW |
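For context, the NEW column replaces the OLD deferred approach (append_sampled_images() collecting textures into fSampledImages, with layouts fixed up in onSubmit) with an eager transition while each draw is recorded: resolve the texture if it doubles as a render target, regenerate dirty mips if a mip filter is in use, then move it to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL. The sketch below restates that control flow in a minimal, self-contained form; the Texture and Gpu types here are hypothetical stand-ins for illustration, not Skia's real classes.

```cpp
#include <vector>

// Hypothetical stand-ins; only the control flow mirrors prepare_sampled_images()
// from the NEW column above.
struct Texture {
    bool isRenderTarget = false;  // the texture is also drawn to as a render target
    bool usesMipFilter  = false;  // sampled with a mip-map filter mode
    bool mipsDirty      = false;  // mip chain needs regeneration
    int  layout         = 0;      // 0 = undefined, 1 = shader-read-only
};

struct Gpu {
    void resolve(Texture*)                      { /* resolve MSAA into the texture */ }
    void regenerateMips(Texture* t)             { t->mipsDirty = false; }
    void transition(Texture* t, int newLayout)  { t->layout = newLayout; }
};

// Eagerly put every sampled texture into a shader-readable state at draw-record
// time, instead of tracking the textures and transitioning them at submit time
// (the OLD column's approach, and what the NEW column's TODO would reintroduce
// if secondary command buffers were ever built ahead of submission).
void prepareSampledTextures(Gpu* gpu, const std::vector<Texture*>& sampled) {
    for (Texture* tex : sampled) {
        if (tex->isRenderTarget) {
            gpu->resolve(tex);         // must resolve before sampling
        }
        if (tex->usesMipFilter && tex->mipsDirty) {
            gpu->regenerateMips(tex);  // mips must be valid before sampling
        }
        gpu->transition(tex, /*shader-read-only=*/1);
    }
}
```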