Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpuCommandBuffer.h" | 8 #include "GrVkGpuCommandBuffer.h" |
| 9 | 9 |
| 10 #include "GrMesh.h" | |
| 11 #include "GrPipeline.h" | |
| 12 #include "GrRenderTargetPriv.h" | |
| 13 #include "GrTextureAccess.h" | |
| 14 #include "GrTexturePriv.h" | |
| 10 #include "GrVkCommandBuffer.h" | 15 #include "GrVkCommandBuffer.h" |
| 11 #include "GrVkGpu.h" | 16 #include "GrVkGpu.h" |
| 17 #include "GrVkPipeline.h" | |
| 12 #include "GrVkRenderPass.h" | 18 #include "GrVkRenderPass.h" |
| 13 #include "GrVkRenderTarget.h" | 19 #include "GrVkRenderTarget.h" |
| 14 #include "GrVkResourceProvider.h" | 20 #include "GrVkResourceProvider.h" |
| 21 #include "GrVkTexture.h" | |
| 15 | 22 |
| 16 void get_vk_load_store_ops(GrGpuCommandBuffer::LoadAndStoreOp op, | 23 void get_vk_load_store_ops(GrGpuCommandBuffer::LoadAndStoreOp op, |
| 17 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { | 24 VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { |
| 18 switch (op) { | 25 switch (op) { |
| 19 case GrGpuCommandBuffer::kLoadAndStore_LoadAndStoreOp: | 26 case GrGpuCommandBuffer::kLoadAndStore_LoadAndStoreOp: |
| 20 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 27 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
| 21 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 28 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
| 22 break; | 29 break; |
| 23 case GrGpuCommandBuffer::kLoadAndDiscard_LoadAndStoreOp: | 30 case GrGpuCommandBuffer::kLoadAndDiscard_LoadAndStoreOp: |
| 24 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 31 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
| (...skipping 17 matching lines...) | |
| 42 break; | 49 break; |
| 43 default: | 50 default: |
| 44 SK_ABORT("Invalid LoadAndStoreOp"); | 51 SK_ABORT("Invalid LoadAndStoreOp"); |
| 45 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 52 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
| 46 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 53 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
| 47 break; | 54 break; |
| 48 } | 55 } |
| 49 } | 56 } |
| 50 | 57 |
| 51 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, | 58 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, |
| 52 const GrVkRenderTarget& target, | 59 GrVkRenderTarget* target, |
| 53 LoadAndStoreOp colorOp, GrColor colorClear, | 60 LoadAndStoreOp colorOp, GrColor colorClear, |
| 54 LoadAndStoreOp stencilOp, GrColor stencilClear) | 61 LoadAndStoreOp stencilOp, GrColor stencilClear) |
| 55 : fGpu(gpu) { | 62 : fGpu(gpu) |
| 63 , fRenderTarget(target) { | |
|
jvanverth1
2016/06/17 15:25:16
Why a non-const pointer rather than a const reference?
egdaniel
2016/06/17 15:57:35
setImageLayout call on the RT is non const
jvanverth1
2016/06/17 16:49:13
Acknowledged.
| |
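
For context on the thread above: a GrVkImage-style wrapper has to remember which layout its VkImage was last left in, so a layout transition necessarily mutates the wrapper's tracked state. A minimal sketch of that idea, with hypothetical names (not Skia's actual fields):

```cpp
#include <vulkan/vulkan.h>

// Hypothetical, simplified image wrapper: setImageLayout must update the
// tracked state, which is why callers need a non-const pointer to it.
class TrackedImage {
public:
    void setImageLayout(VkImageLayout newLayout,
                        VkAccessFlags dstAccessMask,
                        VkPipelineStageFlags dstStageMask) {
        // ... record a VkImageMemoryBarrier from the old state here ...
        fCurrentLayout = newLayout;          // mutation: can't be const
        fCurrentAccessMask = dstAccessMask;
        fCurrentStageMask = dstStageMask;
    }

private:
    VkImageLayout        fCurrentLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkAccessFlags        fCurrentAccessMask = 0;
    VkPipelineStageFlags fCurrentStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
};
```
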
| 56 VkAttachmentLoadOp vkLoadOp; | 64 VkAttachmentLoadOp vkLoadOp; |
| 57 VkAttachmentStoreOp vkStoreOp; | 65 VkAttachmentStoreOp vkStoreOp; |
| 58 | 66 |
| 59 get_vk_load_store_ops(colorOp, &vkLoadOp, &vkStoreOp); | 67 get_vk_load_store_ops(colorOp, &vkLoadOp, &vkStoreOp); |
| 60 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); | 68 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); |
| 61 | 69 |
| 62 get_vk_load_store_ops(stencilOp, &vkLoadOp, &vkStoreOp); | 70 get_vk_load_store_ops(stencilOp, &vkLoadOp, &vkStoreOp); |
| 63 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); | 71 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); |
| 64 | 72 |
| 65 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, | 73 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
| 66 VK_ATTACHMENT_STORE_OP_STORE); | 74 VK_ATTACHMENT_STORE_OP_STORE); |
| 67 | 75 |
| 68 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target.compatibleRenderPassHandle(); | 76 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle(); |
| 69 if (rpHandle.isValid()) { | 77 if (rpHandle.isValid()) { |
| 70 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, | 78 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, |
| 71 vkColorOps, | 79 vkColorOps, |
| 72 vkResolveOps, | 80 vkResolveOps, |
| 73 vkStencilOps); | 81 vkStencilOps); |
| 74 } else { | 82 } else { |
| 75 fRenderPass = fGpu->resourceProvider().findRenderPass(target, | 83 fRenderPass = fGpu->resourceProvider().findRenderPass(*target, |
| 76 vkColorOps, | 84 vkColorOps, |
| 77 vkResolveOps, | 85 vkResolveOps, |
| 78 vkStencilOps); | 86 vkStencilOps); |
| 79 } | 87 } |
| 80 | 88 |
| 81 fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); | 89 fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); |
| 82 fCommandBuffer->begin(gpu, target.framebuffer()); | 90 fCommandBuffer->begin(gpu, target->framebuffer()); |
| 83 } | 91 } |
| 84 | 92 |
| 85 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { | 93 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { |
| 86 fCommandBuffer->unref(fGpu); | 94 fCommandBuffer->unref(fGpu); |
| 87 fRenderPass->unref(fGpu); | 95 fRenderPass->unref(fGpu); |
| 88 } | 96 } |
| 89 | 97 |
| 98 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; } | |
| 99 | |
| 90 void GrVkGpuCommandBuffer::end() { | 100 void GrVkGpuCommandBuffer::end() { |
| 91 fCommandBuffer->end(fGpu); | 101 fCommandBuffer->end(fGpu); |
| 92 } | 102 } |
| 93 | 103 |
| 94 void GrVkGpuCommandBuffer::submit() { | 104 void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) { |
| 95 fGpu->submitSecondaryCommandBuffer(fCommandBuffer); | 105 // Change layout of our render target so it can be used as the color attachment |
| 96 } | 106 fRenderTarget->setImageLayout(fGpu, |
| 97 | 107 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 108 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, | |
| 109 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | |
| 110 false); | |
| 111 | |
| 112 // If we are using a stencil attachment we also need to update its layout | |
| 113 if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) { | |
| 114 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; | |
| 115 vkStencil->setImageLayout(fGpu, | |
| 116 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, | |
| 117 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | | |
| 118 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, | |
| 119 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | |
| 120 false); | |
| 121 } | |
| 122 | |
| 123 for (int i = 0; i < fSampledImages.count(); ++i) { | |
| 124 fSampledImages[i]->setImageLayout(fGpu, | |
| 125 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, | |
| 126 VK_ACCESS_SHADER_READ_BIT, | |
| 127 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, | |
| 128 false); | |
| 129 } | |
| 130 | |
| 131 fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, fRenderTarget, bounds); | |
| 132 } | |
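
Each setImageLayout call above ultimately boils down to recording a VkImageMemoryBarrier into a command buffer. A minimal sketch of such a transition, assuming the old layout/access/stage are tracked on the image as in the earlier sketch (the helper name is hypothetical; the trailing `false` in the calls above plausibly maps to a by-region dependency flag):

```cpp
#include <vulkan/vulkan.h>

// Hypothetical helper: transition an image from its tracked old state to the
// state onSubmit needs (e.g. COLOR_ATTACHMENT_OPTIMAL before rendering).
void transition_image_layout(VkCommandBuffer cmd, VkImage image,
                             VkImageLayout oldLayout, VkAccessFlags srcAccess,
                             VkPipelineStageFlags srcStage,
                             VkImageLayout newLayout, VkAccessFlags dstAccess,
                             VkPipelineStageFlags dstStage) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = srcAccess;
    barrier.dstAccessMask = dstAccess;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    // aspectMask, baseMipLevel, levelCount, baseArrayLayer, layerCount
    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

    // 0 dependency flags = a global (not by-region) dependency.
    vkCmdPipelineBarrier(cmd, srcStage, dstStage, 0,
                         0, nullptr, 0, nullptr, 1, &barrier);
}
```
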
| 133 | |
| 134 void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* target, | |
| 135 const SkIRect& rect, | |
| 136 bool insideClip) { | |
| 137 SkASSERT(target); | |
| 138 | |
| 139 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); | |
| 140 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); | |
| 141 // this should only be called internally when we know we have a | |
| 142 // stencil buffer. | |
| 143 SkASSERT(sb); | |
| 144 int stencilBitCount = sb->bits(); | |
| 145 | |
| 146 // The contract with the callers does not guarantee that we preserve all bits in the stencil | |
| 147 // during this clear. Thus we will clear the entire stencil to the desired value. | |
| 148 | |
| 149 VkClearDepthStencilValue vkStencilColor; | |
| 150 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue)); | |
| 151 if (insideClip) { | |
| 152 vkStencilColor.stencil = (1 << (stencilBitCount - 1)); | |
| 153 } else { | |
| 154 vkStencilColor.stencil = 0; | |
| 155 } | |
| 156 | |
| 157 VkClearRect clearRect; | |
| 158 // Flip rect if necessary | |
| 159 SkIRect vkRect = rect; | |
| 160 | |
| 161 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { | |
| 162 vkRect.fTop = vkRT->height() - rect.fBottom; | |
| 163 vkRect.fBottom = vkRT->height() - rect.fTop; | |
| 164 } | |
| 165 | |
| 166 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; | |
| 167 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; | |
| 168 | |
| 169 clearRect.baseArrayLayer = 0; | |
| 170 clearRect.layerCount = 1; | |
| 171 | |
| 172 uint32_t stencilIndex; | |
| 173 SkAssertResult(fRenderPass->stencilAttachmentIndex(&stencilIndex)); | |
| 174 | |
| 175 VkClearAttachment attachment; | |
| 176 attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; | |
| 177 attachment.colorAttachment = 0; // this value shouldn't matter | |
| 178 attachment.clearValue.depthStencil = vkStencilColor; | |
| 179 | |
| 180 fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); | |
| 181 } | |
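
Two details in the function above are worth calling out: the inside-clip value sets only the top stencil bit (`1 << (stencilBitCount - 1)`), and rects are flipped because Skia's kBottomLeft_GrSurfaceOrigin differs from Vulkan's top-left origin. A minimal sketch of the flip, under the same conventions:

```cpp
#include "SkRect.h"

// Convert a rect from a bottom-left-origin coordinate space (Skia's
// kBottomLeft_GrSurfaceOrigin) to Vulkan's top-left origin: the vertical
// span [top, bottom) maps to [height - bottom, height - top).
static SkIRect flip_rect_for_vulkan(const SkIRect& rect, int rtHeight) {
    return SkIRect::MakeLTRB(rect.fLeft, rtHeight - rect.fBottom,
                             rect.fRight, rtHeight - rect.fTop);
}
```

For example, a rect with top=10, bottom=30 on a 100-pixel-tall target becomes top=70, bottom=90; the width is untouched.
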
| 182 | |
| 183 void GrVkGpuCommandBuffer::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { | |
| 184 // Currently we should not see clears in vulkan since we are converting them all to draws. | |
| 185 // We do this since some clears currently can come in the must happen outside a render pass | |
|
jvanverth1
2016/06/17 15:25:16
Nit: that must happen
Also: seems like this might be a perf hit?
egdaniel
2016/06/17 15:57:35
Yes I believe this is a perf hit (though it hopefully …
jvanverth1
2016/06/17 16:49:13
Acknowledged.
egdaniel
2016/06/17 18:49:14
Done.
| |
| 186 // and we assume all commands in this buffer are inside a renderpass. | |
| 187 SkASSERT(false); | |
| 188 #if 0 | |
| 189 // parent class should never let us get here with no RT | |
| 190 SkASSERT(target); | |
| 191 | |
| 192 VkClearColorValue vkColor; | |
| 193 GrColorToRGBAFloat(color, vkColor.float32); | |
| 194 | |
| 195 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); | |
| 196 | |
| 197 if (rect.width() != target->width() || rect.height() != target->height()) { | |
| 198 vkRT->setImageLayout(fGpu, | |
| 199 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, | |
| 200 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, | |
| 201 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | |
| 202 false); | |
| 203 | |
| 204 // If we are using a stencil attachment we also need to change its layout to what the render | |
| 205 // pass is expecting. | |
| 206 if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) { | |
| 207 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; | |
| 208 vkStencil->setImageLayout(fGpu, | |
| 209 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, | |
| 210 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | | |
| 211 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, | |
| 212 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | |
| 213 false); | |
| 214 } | |
| 215 | |
| 216 VkClearRect clearRect; | |
| 217 // Flip rect if necessary | |
| 218 SkIRect vkRect = rect; | |
| 219 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { | |
| 220 vkRect.fTop = vkRT->height() - rect.fBottom; | |
| 221 vkRect.fBottom = vkRT->height() - rect.fTop; | |
| 222 } | |
| 223 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; | |
| 224 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; | |
| 225 clearRect.baseArrayLayer = 0; | |
| 226 clearRect.layerCount = 1; | |
| 227 | |
| 228 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); | |
| 229 SkASSERT(renderPass); | |
| 230 fCommandBuffer->beginRenderPass(fGpu, renderPass, *vkRT); | |
| 231 | |
| 232 uint32_t colorIndex; | |
| 233 SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex)); | |
| 234 | |
| 235 VkClearAttachment attachment; | |
| 236 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; | |
| 237 attachment.colorAttachment = colorIndex; | |
| 238 attachment.clearValue.color = vkColor; | |
| 239 | |
| 240 fCurrentCmdBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); | |
| 241 fCurrentCmdBuffer->endRenderPass(fGpu); | |
| 242 return; | |
| 243 } | |
| 244 | |
| 245 vkRT->setImageLayout(this, | |
| 246 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | |
| 247 VK_ACCESS_TRANSFER_WRITE_BIT, | |
| 248 VK_PIPELINE_STAGE_TRANSFER_BIT, | |
| 249 false); | |
| 250 | |
| 251 VkImageSubresourceRange subRange; | |
| 252 memset(&subRange, 0, sizeof(VkImageSubresourceRange)); | |
| 253 subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; | |
| 254 subRange.baseMipLevel = 0; | |
| 255 subRange.levelCount = 1; | |
| 256 subRange.baseArrayLayer = 0; | |
| 257 subRange.layerCount = 1; | |
| 258 | |
| 259 // In the future we may not actually be doing this type of clear at all. If we are inside a | |
| 260 // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more | |
| 261 // common use case will be clearing an attachment at the start of a render pass, in which case | |
| 262 // we will use the clear load ops. | |
| 263 fCommandBuffer->clearColorImage(this, | |
| 264 vkRT, | |
| 265 &vkColor, | |
| 266 1, &subRange); | |
| 267 #endif | |
| 268 } | |
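
The constraint the comments and the review thread above describe comes from Vulkan itself: the two clear entry points have opposite render-pass requirements. vkCmdClearAttachments may only be recorded inside an active render pass, while vkCmdClearColorImage may only be recorded outside one, so a full-image clear would force the render pass to be broken. A minimal sketch of both raw calls, assuming a command buffer in the appropriate state:

```cpp
#include <vulkan/vulkan.h>

// Inside an active render pass: clear a region of a bound color attachment.
void clear_in_pass(VkCommandBuffer cmd, uint32_t colorIndex,
                   VkClearColorValue color, VkClearRect rect) {
    VkClearAttachment att = {};
    att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    att.colorAttachment = colorIndex;
    att.clearValue.color = color;
    vkCmdClearAttachments(cmd, 1, &att, 1, &rect);
}

// Outside any render pass: clear the whole image (the image must be in
// GENERAL or TRANSFER_DST_OPTIMAL layout).
void clear_out_of_pass(VkCommandBuffer cmd, VkImage image,
                       VkClearColorValue color) {
    VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         &color, 1, &range);
}
```

Converting clears to draws keeps everything on the first path, so the secondary command buffer never has to leave its render pass.
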
| 269 | |
| 270 //////////////////////////////////////////////////////////////////////////////// | |
| 271 | |
| 272 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc, | |
| 273 const GrNonInstancedMesh& mesh) { | |
| 274 // There is no need to put any memory barriers to make sure host writes have finished here. | |
| 275 // When a command buffer is submitted to a queue, there is an implicit memory barrier that | |
| 276 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of | |
| 277 // an active RenderPass. | |
| 278 GrVkVertexBuffer* vbuf; | |
| 279 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); | |
| 280 SkASSERT(vbuf); | |
| 281 SkASSERT(!vbuf->isMapped()); | |
| 282 | |
| 283 fCommandBuffer->bindVertexBuffer(fGpu, vbuf); | |
| 284 | |
| 285 if (mesh.isIndexed()) { | |
| 286 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); | |
| 287 SkASSERT(ibuf); | |
| 288 SkASSERT(!ibuf->isMapped()); | |
| 289 | |
| 290 fCommandBuffer->bindIndexBuffer(fGpu, ibuf); | |
| 291 } | |
| 292 } | |
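
The bindVertexBuffer/bindIndexBuffer calls above are presumably thin wrappers over the raw Vulkan binds; a minimal sketch of the underlying calls (binding slot, offsets, and the 16-bit index type are illustrative assumptions):

```cpp
#include <vulkan/vulkan.h>

// Bind one vertex buffer at binding 0 and an index buffer, both at offset 0.
void bind_geometry(VkCommandBuffer cmd, VkBuffer vertexBuffer,
                   VkBuffer indexBuffer) {
    VkDeviceSize offset = 0;
    vkCmdBindVertexBuffers(cmd, /*firstBinding=*/0, /*bindingCount=*/1,
                           &vertexBuffer, &offset);
    vkCmdBindIndexBuffer(cmd, indexBuffer, /*offset=*/0, VK_INDEX_TYPE_UINT16);
}
```
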
| 293 | |
| 294 sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState( | |
| 295 const GrPipeline& pipeline, | |
| 296 const GrPrimitiveProcessor& primProc, | |
| 297 GrPrimitiveType primitiveType, | |
| 298 const GrVkRenderPass& renderPass) { | |
| 299 sk_sp<GrVkPipelineState> pipelineState = | |
| 300 fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline, | |
| 301 primProc, | |
| 302 primitiveType, | |
| 303 renderPass); | |
| 304 if (!pipelineState) { | |
| 305 return pipelineState; | |
| 306 } | |
| 307 | |
| 308 pipelineState->setData(fGpu, primProc, pipeline); | |
| 309 | |
| 310 pipelineState->bind(fGpu, fCommandBuffer); | |
| 311 | |
| 312 GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline); | |
| 313 | |
| 314 return pipelineState; | |
| 315 } | |
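
findOrCreateCompatiblePipelineState follows the usual cache-then-build pattern: key the full pipeline description, return a cached pipeline on a hit, and compile otherwise. A minimal sketch of that pattern with hypothetical names (not Skia's actual cache; a real key must capture everything that affects the compiled pipeline):

```cpp
#include <vulkan/vulkan.h>
#include <unordered_map>

// Hypothetical cache keyed by a hash of pipeline / primProc / primitiveType /
// renderPass state.
class PipelineCache {
public:
    VkPipeline findOrCreate(uint64_t stateKey) {
        auto it = fMap.find(stateKey);
        if (it != fMap.end()) {
            return it->second;  // cache hit: reuse the compiled pipeline
        }
        VkPipeline pipeline = this->buildPipeline(stateKey);  // expensive
        fMap.emplace(stateKey, pipeline);
        return pipeline;
    }

private:
    VkPipeline buildPipeline(uint64_t key);  // vkCreateGraphicsPipelines(...)
    std::unordered_map<uint64_t, VkPipeline> fMap;
};
```
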
| 316 | |
| 317 static void append_sampled_images(const GrProcessor& processor, | |
| 318 const GrVkGpu* gpu, | |
| 319 SkTArray<GrVkImage*>* sampledImages) { | |
| 320 if (int numTextures = processor.numTextures()) { | |
| 321 GrVkImage** images = sampledImages->push_back_n(numTextures); | |
| 322 int i = 0; | |
| 323 do { | |
| 324 const GrTextureAccess& texAccess = processor.textureAccess(i); | |
| 325 GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i)); | |
| 326 SkASSERT(vkTexture); | |
| 327 const GrTextureParams& params = texAccess.getParams(); | |
| 328 // Check if we need to regenerate any mip maps | |
| 329 if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) { | |
| 330 if (vkTexture->texturePriv().mipMapsAreDirty()) { | |
| 331 gpu->generateMipmap(vkTexture); | |
| 332 vkTexture->texturePriv().dirtyMipMaps(false); | |
| 333 } | |
| 334 } | |
| 335 | |
| 336 images[i] = vkTexture; | |
| 337 } while (++i < numTextures); | |
| 338 | |
| 339 } | |
| 340 } | |
| 341 | |
| 342 void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline, | |
| 343 const GrPrimitiveProcessor& primProc, | |
| 344 const GrMesh* meshes, | |
| 345 int meshCount) { | |
| 346 if (!meshCount) { | |
| 347 return; | |
| 348 } | |
| 349 GrRenderTarget* rt = pipeline.getRenderTarget(); | |
| 350 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); | |
| 351 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); | |
| 352 SkASSERT(renderPass); | |
| 353 | |
| 354 GrPrimitiveType primitiveType = meshes[0].primitiveType(); | |
| 355 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, | |
| 356 primProc, | |
| 357 primitiveType, | |
| 358 *renderPass); | |
| 359 if (!pipelineState) { | |
| 360 return; | |
| 361 } | |
| 362 | |
| 363 append_sampled_images(primProc, fGpu, &fSampledImages); | |
| 364 for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) { | |
| 365 append_sampled_images(pipeline.getFragmentProcessor(i), fGpu, &fSampledImages); | |
| 366 } | |
| 367 append_sampled_images(pipeline.getXferProcessor(), fGpu, &fSampledImages); | |
| 368 | |
| 369 for (int i = 0; i < meshCount; ++i) { | |
| 370 const GrMesh& mesh = meshes[i]; | |
| 371 GrMesh::Iterator iter; | |
| 372 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh); | |
| 373 do { | |
| 374 if (nonIdxMesh->primitiveType() != primitiveType) { | |
| 375 // Technically we don't have to call this here (since there is a safety check in | |
| 376 // pipelineState:setData but this will allow for quicker freeing of resources if the | |
| 377 // pipelineState sits in a cache for a while. | |
| 378 pipelineState->freeTempResources(fGpu); | |
| 379 SkDEBUGCODE(pipelineState = nullptr); | |
| 380 primitiveType = nonIdxMesh->primitiveType(); | |
| 381 pipelineState = this->prepareDrawState(pipeline, | |
| 382 primProc, | |
| 383 primitiveType, | |
| 384 *renderPass); | |
| 385 if (!pipelineState) { | |
| 386 return; | |
| 387 } | |
| 388 } | |
| 389 SkASSERT(pipelineState); | |
| 390 this->bindGeometry(primProc, *nonIdxMesh); | |
| 391 | |
| 392 if (nonIdxMesh->isIndexed()) { | |
| 393 fCommandBuffer->drawIndexed(fGpu, | |
| 394 nonIdxMesh->indexCount(), | |
| 395 1, | |
| 396 nonIdxMesh->startIndex(), | |
| 397 nonIdxMesh->startVertex(), | |
| 398 0); | |
| 399 } else { | |
| 400 fCommandBuffer->draw(fGpu, | |
| 401 nonIdxMesh->vertexCount(), | |
| 402 1, | |
| 403 nonIdxMesh->startVertex(), | |
| 404 0); | |
| 405 } | |
| 406 | |
| 407 fGpu->stats()->incNumDraws(); | |
| 408 } while ((nonIdxMesh = iter.next())); | |
| 409 } | |
| 410 | |
| 411 // Technically we don't have to call this here (since there is a safety check in | |
| 412 // pipelineState:setData but this will allow for quicker freeing of resources if the | |
| 413 // pipelineState sits in a cache for a while. | |
| 414 pipelineState->freeTempResources(fGpu); | |
| 415 } | |
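
The drawIndexed/draw wrappers above map directly onto the raw Vulkan draw commands, with instanceCount fixed at 1; a minimal sketch of the argument order they match:

```cpp
#include <vulkan/vulkan.h>

// How the two draw paths above plausibly bottom out, one instance each.
void issue_draw(VkCommandBuffer cmd, bool indexed, uint32_t count,
                uint32_t startIndex, int32_t startVertex) {
    if (indexed) {
        // (indexCount, instanceCount, firstIndex, vertexOffset, firstInstance)
        vkCmdDrawIndexed(cmd, count, 1, startIndex, startVertex, 0);
    } else {
        // (vertexCount, instanceCount, firstVertex, firstInstance)
        vkCmdDraw(cmd, count, 1, (uint32_t)startVertex, 0);
    }
}
```
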
| 416 | |