OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkGpuCommandBuffer.h" | 8 #include "GrVkGpuCommandBuffer.h" |
9 | 9 |
| 10 #include "GrMesh.h" |
| 11 #include "GrPipeline.h" |
| 12 #include "GrRenderTargetPriv.h" |
| 13 #include "GrTextureAccess.h" |
| 14 #include "GrTexturePriv.h" |
10 #include "GrVkCommandBuffer.h" | 15 #include "GrVkCommandBuffer.h" |
11 #include "GrVkGpu.h" | 16 #include "GrVkGpu.h" |
| 17 #include "GrVkPipeline.h" |
12 #include "GrVkRenderPass.h" | 18 #include "GrVkRenderPass.h" |
13 #include "GrVkRenderTarget.h" | 19 #include "GrVkRenderTarget.h" |
14 #include "GrVkResourceProvider.h" | 20 #include "GrVkResourceProvider.h" |
| 21 #include "GrVkTexture.h" |
15 | 22 |
16 void get_vk_load_store_ops(GrGpuCommandBuffer::LoadAndStoreOp op, | 23 void get_vk_load_store_ops(const GrGpuCommandBuffer::LoadAndStoreInfo& info, |
17                            VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { | 24                            VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) { |
18 switch (op) { | 25 switch (info.fLoadOp) { |
19 case GrGpuCommandBuffer::kLoadAndStore_LoadAndStoreOp: | 26 case GrGpuCommandBuffer::kLoad_LoadOp: |
20 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | 27 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
| 28 break; |
| 29 case GrGpuCommandBuffer::kClear_LoadOp: |
| 30 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; |
| 31 break; |
| 32 case GrGpuCommandBuffer::kDiscard_LoadOp: |
| 33 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; |
| 34 break; |
| 35 default: |
| 36 SK_ABORT("Invalid LoadOp"); |
| 37 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; |
| 38 } |
| 39 |
| 40 switch (info.fStoreOp) { |
| 41 case GrGpuCommandBuffer::kStore_StoreOp: |
21 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 42 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
22 break; | 43 break; |
23 case GrGpuCommandBuffer::kLoadAndDiscard_LoadAndStoreOp: | 44 case GrGpuCommandBuffer::kDiscard_StoreOp: |
24 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | |
25 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; | |
26 break; | |
27 case GrGpuCommandBuffer::kClearAndStore_LoadAndStoreOp: | |
28 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; | |
29 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | |
30 break; | |
31 case GrGpuCommandBuffer::kClearAndDiscard_LoadAndStoreOp: | |
32 *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; | |
33 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; | |
34 break; | |
35 case GrGpuCommandBuffer::kDiscardAndStore_LoadAndStoreOp: | |
36 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; | |
37 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | |
38 break; | |
39 case GrGpuCommandBuffer::kDiscardAndDiscard_LoadAndStoreOp: | |
40 *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; | |
41 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; | 45 *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; |
42 break; | 46 break; |
43 default: | 47 default: |
44 SK_ABORT("Invalid LoadAndStoreOp"); | 48 SK_ABORT("Invalid StoreOp"); |
45 *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; | |
46 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; | 49 *storeOp = VK_ATTACHMENT_STORE_OP_STORE; |
47 break; | |
48 } | 50 } |
49 } | 51 } |
50 | 52 |
51 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, | 53 GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu, |
52 const GrVkRenderTarget& target, | 54 GrVkRenderTarget* target, |
53                                            LoadAndStoreOp colorOp, GrColor colorClear, | 55                                            const LoadAndStoreInfo& colorInfo, |
54                                            LoadAndStoreOp stencilOp, GrColor stencilClear) | 56                                            const LoadAndStoreInfo& stencilInfo) |
55 : fGpu(gpu) { | 57 : fGpu(gpu) |
| 58 , fRenderTarget(target) |
| 59 , fIsEmpty(true) { |
56 VkAttachmentLoadOp vkLoadOp; | 60 VkAttachmentLoadOp vkLoadOp; |
57 VkAttachmentStoreOp vkStoreOp; | 61 VkAttachmentStoreOp vkStoreOp; |
58 | 62 |
59 get_vk_load_store_ops(colorOp, &vkLoadOp, &vkStoreOp); | 63 get_vk_load_store_ops(colorInfo, &vkLoadOp, &vkStoreOp); |
60 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); | 64 GrVkRenderPass::LoadStoreOps vkColorOps(vkLoadOp, vkStoreOp); |
61 | 65 |
62 get_vk_load_store_ops(stencilOp, &vkLoadOp, &vkStoreOp); | 66 get_vk_load_store_ops(stencilInfo, &vkLoadOp, &vkStoreOp); |
63 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); | 67 GrVkRenderPass::LoadStoreOps vkStencilOps(vkLoadOp, vkStoreOp); |
64 | 68 |
65 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, | 69 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
66 VK_ATTACHMENT_STORE_OP_STORE); | 70 VK_ATTACHMENT_STORE_OP_STORE); |
67 | 71 |
68   const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target.compatibleRenderPassHandle(); | 72   const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle(); |
69 if (rpHandle.isValid()) { | 73 if (rpHandle.isValid()) { |
70 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, | 74 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, |
71 vkColorOps, | 75 vkColorOps, |
72 vkResolveOps, | 76 vkResolveOps, |
73 vkStencilOps); | 77 vkStencilOps); |
74 } else { | 78 } else { |
75 fRenderPass = fGpu->resourceProvider().findRenderPass(target, | 79 fRenderPass = fGpu->resourceProvider().findRenderPass(*target, |
76 vkColorOps, | 80 vkColorOps, |
77 vkResolveOps, | 81 vkResolveOps, |
78 vkStencilOps); | 82 vkStencilOps); |
79 } | 83 } |
80 | 84 |
| 85 GrColorToRGBAFloat(colorInfo.fClearColor, fColorClearValue.color.float32); |
| 86 |
81   fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); | 87   fCommandBuffer = GrVkSecondaryCommandBuffer::Create(gpu, gpu->cmdPool(), fRenderPass); |
82 fCommandBuffer->begin(gpu, target.framebuffer()); | 88 fCommandBuffer->begin(gpu, target->framebuffer()); |
83 } | 89 } |
84 | 90 |
85 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { | 91 GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() { |
86 fCommandBuffer->unref(fGpu); | 92 fCommandBuffer->unref(fGpu); |
87 fRenderPass->unref(fGpu); | 93 fRenderPass->unref(fGpu); |
88 } | 94 } |
89 | 95 |
| 96 GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; } |
| 97 |
90 void GrVkGpuCommandBuffer::end() { | 98 void GrVkGpuCommandBuffer::end() { |
91 fCommandBuffer->end(fGpu); | 99 fCommandBuffer->end(fGpu); |
92 } | 100 } |
93 | 101 |
94 void GrVkGpuCommandBuffer::submit() { | 102 void GrVkGpuCommandBuffer::onSubmit(const SkIRect& bounds) { |
95   fGpu->submitSecondaryCommandBuffer(fCommandBuffer); | 103   // Change layout of our render target so it can be used as the color attachment |
96 } | 104 fRenderTarget->setImageLayout(fGpu, |
97 | 105 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 106 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, |
| 107 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 108 false); |
| 109 |
| 110 // If we are using a stencil attachment we also need to update its layout |
| 111   if (GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment()) { |
| 112 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; |
| 113 vkStencil->setImageLayout(fGpu, |
| 114                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, |
| 115 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | |
| 116 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, |
| 117 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 118 false); |
| 119 } |
| 120 |
| 121 for (int i = 0; i < fSampledImages.count(); ++i) { |
| 122 fSampledImages[i]->setImageLayout(fGpu, |
| 123                                       VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, |
| 124 VK_ACCESS_SHADER_READ_BIT, |
| 125 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, |
| 126 false); |
| 127 } |
| 128 |
| 129   fGpu->submitSecondaryCommandBuffer(fCommandBuffer, fRenderPass, &fColorClearValue, |
| 130 fRenderTarget, bounds); |
| 131 } |
| 132 |
| 133 void GrVkGpuCommandBuffer::onClearStencilClip(GrRenderTarget* target, |
| 134 const SkIRect& rect, |
| 135 bool insideClip) { |
| 136 SkASSERT(target); |
| 137 |
| 138 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); |
| 139 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); |
| 140 // this should only be called internally when we know we have a |
| 141 // stencil buffer. |
| 142 SkASSERT(sb); |
| 143 int stencilBitCount = sb->bits(); |
| 144 |
| 145   // The contract with the callers does not guarantee that we preserve all bits in the stencil |
| 146   // during this clear. Thus we will clear the entire stencil to the desired value. |
| 147 |
| 148 VkClearDepthStencilValue vkStencilColor; |
| 149 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue)); |
| 150 if (insideClip) { |
| 151 vkStencilColor.stencil = (1 << (stencilBitCount - 1)); |
| 152 } else { |
| 153 vkStencilColor.stencil = 0; |
| 154 } |
| 155 |
| 156 VkClearRect clearRect; |
| 157 // Flip rect if necessary |
| 158 SkIRect vkRect = rect; |
| 159 |
| 160 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { |
| 161 vkRect.fTop = vkRT->height() - rect.fBottom; |
| 162 vkRect.fBottom = vkRT->height() - rect.fTop; |
| 163 } |
| 164 |
| 165 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; |
| 166   clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; |
| 167 |
| 168 clearRect.baseArrayLayer = 0; |
| 169 clearRect.layerCount = 1; |
| 170 |
| 171 uint32_t stencilIndex; |
| 172 SkAssertResult(fRenderPass->stencilAttachmentIndex(&stencilIndex)); |
| 173 |
| 174 VkClearAttachment attachment; |
| 175 attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; |
| 176 attachment.colorAttachment = 0; // this value shouldn't matter |
| 177 attachment.clearValue.depthStencil = vkStencilColor; |
| 178 |
| 179 fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); |
| 180 fIsEmpty = false; |
| 181 } |
| 182 |
| 183 void GrVkGpuCommandBuffer::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { |
| 184 // parent class should never let us get here with no RT |
| 185 SkASSERT(target); |
| 186 |
| 187 VkClearColorValue vkColor; |
| 188 GrColorToRGBAFloat(color, vkColor.float32); |
| 189 |
| 190 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); |
| 191 |
| 192 if (fIsEmpty) { |
| 193 // We will change the render pass to do a clear load instead |
| 194 GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR, |
| 195 VK_ATTACHMENT_STORE_OP_STORE); |
| 196 GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
| 197 VK_ATTACHMENT_STORE_OP_STORE); |
| 198 GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD, |
| 199 VK_ATTACHMENT_STORE_OP_STORE); |
| 200 |
| 201 const GrVkRenderPass* oldRP = fRenderPass; |
| 202 |
| 203 const GrVkResourceProvider::CompatibleRPHandle& rpHandle = |
| 204 vkRT->compatibleRenderPassHandle(); |
| 205 if (rpHandle.isValid()) { |
| 206 fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle, |
| 207 vkColorOps, |
| 208 vkResolveOps, |
| 209 vkStencilOps); |
| 210 } else { |
| 211 fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT, |
| 212 vkColorOps, |
| 213 vkResolveOps, |
| 214 vkStencilOps); |
| 215 } |
| 216 |
| 217 SkASSERT(fRenderPass->isCompatible(*oldRP)); |
| 218 oldRP->unref(fGpu); |
| 219 |
| 220 GrColorToRGBAFloat(color, fColorClearValue.color.float32); |
| 221 return; |
| 222 } |
| 223 |
| 224   // We always do a sub rect clear with clearAttachments since we are inside a render pass |
| 225 VkClearRect clearRect; |
| 226 // Flip rect if necessary |
| 227 SkIRect vkRect = rect; |
| 228 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { |
| 229 vkRect.fTop = vkRT->height() - rect.fBottom; |
| 230 vkRect.fBottom = vkRT->height() - rect.fTop; |
| 231 } |
| 232 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; |
| 233   clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; |
| 234 clearRect.baseArrayLayer = 0; |
| 235 clearRect.layerCount = 1; |
| 236 |
| 237 uint32_t colorIndex; |
| 238 SkAssertResult(fRenderPass->colorAttachmentIndex(&colorIndex)); |
| 239 |
| 240 VkClearAttachment attachment; |
| 241 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; |
| 242 attachment.colorAttachment = colorIndex; |
| 243 attachment.clearValue.color = vkColor; |
| 244 |
| 245 fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect); |
| 246 fIsEmpty = false; |
| 247 return; |
| 248 } |
| 249 |
| 250 //////////////////////////////////////////////////////////////////////////////// |
| 251 |
| 252 void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc, |
| 253 const GrNonInstancedMesh& mesh) { |
| 254   // There is no need to put any memory barriers to make sure host writes have finished here. |
| 255   // When a command buffer is submitted to a queue, there is an implicit memory barrier that |
| 256   // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of |
| 257 // an active RenderPass. |
| 258 GrVkVertexBuffer* vbuf; |
| 259 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); |
| 260 SkASSERT(vbuf); |
| 261 SkASSERT(!vbuf->isMapped()); |
| 262 |
| 263 fCommandBuffer->bindVertexBuffer(fGpu, vbuf); |
| 264 |
| 265 if (mesh.isIndexed()) { |
| 266 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); |
| 267 SkASSERT(ibuf); |
| 268 SkASSERT(!ibuf->isMapped()); |
| 269 |
| 270 fCommandBuffer->bindIndexBuffer(fGpu, ibuf); |
| 271 } |
| 272 } |
| 273 |
| 274 sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState( |
| 275                                                               const GrPipeline& pipeline, |
| 276                                                               const GrPrimitiveProcessor& primProc, |
| 277                                                               GrPrimitiveType primitiveType, |
| 278                                                               const GrVkRenderPass& renderPass) { |
| 279 sk_sp<GrVkPipelineState> pipelineState = |
| 280 fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline, |
| 281 primProc, |
| 282                                                                  primitiveType, |
| 283                                                                  renderPass); |
| 284 if (!pipelineState) { |
| 285 return pipelineState; |
| 286 } |
| 287 |
| 288 pipelineState->setData(fGpu, primProc, pipeline); |
| 289 |
| 290 pipelineState->bind(fGpu, fCommandBuffer); |
| 291 |
| 292 GrVkPipeline::SetDynamicState(fGpu, fCommandBuffer, pipeline); |
| 293 |
| 294 return pipelineState; |
| 295 } |
| 296 |
| 297 static void append_sampled_images(const GrProcessor& processor, |
| 298 const GrVkGpu* gpu, |
| 299 SkTArray<GrVkImage*>* sampledImages) { |
| 300 if (int numTextures = processor.numTextures()) { |
| 301 GrVkImage** images = sampledImages->push_back_n(numTextures); |
| 302 int i = 0; |
| 303 do { |
| 304 const GrTextureAccess& texAccess = processor.textureAccess(i); |
| 305       GrVkTexture* vkTexture = static_cast<GrVkTexture*>(processor.texture(i)); |
| 306 SkASSERT(vkTexture); |
| 307 const GrTextureParams& params = texAccess.getParams(); |
| 308 // Check if we need to regenerate any mip maps |
| 309 if (GrTextureParams::kMipMap_FilterMode == params.filterMode()) { |
| 310 if (vkTexture->texturePriv().mipMapsAreDirty()) { |
| 311 gpu->generateMipmap(vkTexture); |
| 312 vkTexture->texturePriv().dirtyMipMaps(false); |
| 313 } |
| 314 } |
| 315 |
| 316 images[i] = vkTexture; |
| 317 } while (++i < numTextures); |
| 318 |
| 319 } |
| 320 } |
| 321 |
| 322 void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline, |
| 323 const GrPrimitiveProcessor& primProc, |
| 324 const GrMesh* meshes, |
| 325 int meshCount) { |
| 326 if (!meshCount) { |
| 327 return; |
| 328 } |
| 329 GrRenderTarget* rt = pipeline.getRenderTarget(); |
| 330 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 331 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); |
| 332 SkASSERT(renderPass); |
| 333 |
| 334 GrPrimitiveType primitiveType = meshes[0].primitiveType(); |
| 335 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, |
| 336 primProc, |
| 337                                                                  primitiveType, |
| 338                                                                  *renderPass); |
| 339 if (!pipelineState) { |
| 340 return; |
| 341 } |
| 342 |
| 343 append_sampled_images(primProc, fGpu, &fSampledImages); |
| 344 for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) { |
| 345     append_sampled_images(pipeline.getFragmentProcessor(i), fGpu, &fSampledImages); |
| 346 } |
| 347 append_sampled_images(pipeline.getXferProcessor(), fGpu, &fSampledImages); |
| 348 |
| 349 for (int i = 0; i < meshCount; ++i) { |
| 350 const GrMesh& mesh = meshes[i]; |
| 351 GrMesh::Iterator iter; |
| 352 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh); |
| 353 do { |
| 354 if (nonIdxMesh->primitiveType() != primitiveType) { |
| 355         // Technically we don't have to call this here (since there is a safety check in |
| 356         // pipelineState::setData) but this will allow for quicker freeing of resources if the |
| 357         // pipelineState sits in a cache for a while. |
| 358 pipelineState->freeTempResources(fGpu); |
| 359 SkDEBUGCODE(pipelineState = nullptr); |
| 360 primitiveType = nonIdxMesh->primitiveType(); |
| 361 pipelineState = this->prepareDrawState(pipeline, |
| 362 primProc, |
| 363 primitiveType, |
| 364 *renderPass); |
| 365 if (!pipelineState) { |
| 366 return; |
| 367 } |
| 368 } |
| 369 SkASSERT(pipelineState); |
| 370 this->bindGeometry(primProc, *nonIdxMesh); |
| 371 |
| 372 if (nonIdxMesh->isIndexed()) { |
| 373 fCommandBuffer->drawIndexed(fGpu, |
| 374 nonIdxMesh->indexCount(), |
| 375 1, |
| 376 nonIdxMesh->startIndex(), |
| 377 nonIdxMesh->startVertex(), |
| 378 0); |
| 379 } else { |
| 380 fCommandBuffer->draw(fGpu, |
| 381 nonIdxMesh->vertexCount(), |
| 382 1, |
| 383 nonIdxMesh->startVertex(), |
| 384 0); |
| 385 } |
| 386 fIsEmpty = false; |
| 387 |
| 388 fGpu->stats()->incNumDraws(); |
| 389 } while ((nonIdxMesh = iter.next())); |
| 390 } |
| 391 |
| 392   // Technically we don't have to call this here (since there is a safety check in |
| 393   // pipelineState::setData) but this will allow for quicker freeing of resources if the |
| 394 // pipelineState sits in a cache for a while. |
| 395 pipelineState->freeTempResources(fGpu); |
| 396 } |
| 397 |
OLD | NEW |
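
Note on onSubmit() (new lines 102-131): before the secondary buffer is handed to the GPU, the render target, any stencil attachment, and every sampled texture are transitioned into the layouts the render pass and shaders expect. setImageLayout() is Skia's wrapper; in raw Vulkan terms each transition amounts to an image memory barrier roughly like the sketch below. This is illustrative only, not the CL's code: the helper name is ours, and the top-of-pipe destination stage simply mirrors what the CL passes.

    #include <vulkan/vulkan.h>

    // Transition `image` to COLOR_ATTACHMENT_OPTIMAL, matching the access mask
    // and pipeline stages that onSubmit() uses for the color attachment.
    void transition_to_color_attachment(VkCommandBuffer cmd, VkImage image,
                                        VkImageLayout oldLayout) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.oldLayout = oldLayout;
        barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
        barrier.srcAccessMask = 0;
        barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

        vkCmdPipelineBarrier(cmd,
                             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // src stage
                             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // dst stage, as in the CL
                             0,                                  // dependency flags
                             0, nullptr,                         // memory barriers
                             0, nullptr,                         // buffer barriers
                             1, &barrier);                       // image barrier
    }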
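
Note on the clear paths: onClearStencilClip() and onClear() both flip the clear rect for bottom-left-origin targets (Skia's y axis points up, Vulkan's points down), and the stencil clip value lives in the most significant stencil bit. A standalone C++ sketch of both conventions, with hypothetical helper names of our own:

    #include <cassert>
    #include <cstdint>

    struct IRect { int32_t fLeft, fTop, fRight, fBottom; };

    // Re-measure the top and bottom edges from the opposite side, as the CL
    // does with vkRect when the render target origin is bottom-left.
    static IRect flip_for_vulkan(const IRect& r, int32_t rtHeight) {
        return { r.fLeft, rtHeight - r.fBottom, r.fRight, rtHeight - r.fTop };
    }

    // Inside the clip, the clear value sets only the high stencil bit; outside
    // it clears to zero, matching onClearStencilClip().
    static uint32_t clip_clear_value(int stencilBitCount, bool insideClip) {
        return insideClip ? (1u << (stencilBitCount - 1)) : 0u;
    }

    int main() {
        // On a 10px-tall bottom-left-origin target, y = [2, 5) measured from
        // the bottom becomes y = [5, 8) measured from the top.
        IRect flipped = flip_for_vulkan({0, 2, 4, 5}, 10);
        assert(flipped.fTop == 5 && flipped.fBottom == 8);
        // With an 8-bit stencil buffer the clip bit is 0x80.
        assert(clip_clear_value(8, true) == 0x80u);
        assert(clip_clear_value(8, false) == 0u);
        return 0;
    }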
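
Note on fIsEmpty: it drives the main optimization in onClear(). While nothing has been recorded yet, the clear is folded into the render pass itself by swapping to a compatible pass whose color attachment uses VK_ATTACHMENT_LOAD_OP_CLEAR, with the color stashed in fColorClearValue for onSubmit(); once any command has been recorded, an explicit clearAttachments sub-rect clear is issued instead. A minimal sketch of that decision (names illustrative, not Skia API):

    enum class ClearStrategy {
        kLoadOpClear,       // rewrite the render pass; clear happens at pass begin
        kClearAttachments   // record an explicit clear inside the active pass
    };

    // Mirrors the branch at the top of onClear(): an empty secondary command
    // buffer means no recorded work depends on the old contents, so the
    // whole-attachment clear rides along for free when the pass begins.
    static ClearStrategy choose_clear_strategy(bool commandBufferIsEmpty) {
        return commandBufferIsEmpty ? ClearStrategy::kLoadOpClear
                                    : ClearStrategy::kClearAttachments;
    }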