Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrTargetCommands.h" | 8 #include "GrTargetCommands.h" |
| 9 | 9 |
| 10 #include "GrColor.h" | 10 #include "GrColor.h" |
| (...skipping 29 matching lines...) | |
| 40 SkASSERT(!fCmdBuffer.empty()); | 40 SkASSERT(!fCmdBuffer.empty()); |
| 41 SkASSERT(info.isInstanced()); | 41 SkASSERT(info.isInstanced()); |
| 42 | 42 |
| 43 const GrIndexBuffer* ib; | 43 const GrIndexBuffer* ib; |
| 44 if (!iodb->canConcatToIndexBuffer(&ib)) { | 44 if (!iodb->canConcatToIndexBuffer(&ib)) { |
| 45 return 0; | 45 return 0; |
| 46 } | 46 } |
| 47 | 47 |
| 48 // Check if there is a compatible draw info that uses the same VB from the pool and | 48 // Check if there is a compatible draw info that uses the same VB from the pool and |
| 49 // the same IB | 49 // the same IB |
| 50 if (Cmd::kDraw_CmdType != fCmdBuffer.back().type()) { | 50 CmdBuffer::ReverseIter reverseIter(fCmdBuffer); |
| 51 if (Cmd::kBlendBarrier_CmdType == reverseIter->type()) { | |
| 52 // Instanced draw commands can collapse through a blend barrier if they don't overlap. | |
| 53 // TODO: We could also collapse through a dst texture setup if they had a dedicated command. | |
| 54 SkAssertResult(reverseIter.previous()); | |
| 55 SkASSERT(Cmd::kBlendBarrier_CmdType != reverseIter->type()); | |
| 56 } | |
| 57 | |
| 58 if (Cmd::kDraw_CmdType != reverseIter->type()) { | |
| 51 return 0; | 59 return 0; |
| 52 } | 60 } |
| 53 | 61 |
| 54 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); | 62 Draw* draw = static_cast<Draw*>(reverseIter.get()); |
| 63 bool allowOverlap = (draw == &fCmdBuffer.back()); | |
| 55 | 64 |
| 56 if (!draw->fInfo.isInstanced() || | 65 if (!draw->fInfo.isInstanced() || |
| 57 draw->fInfo.primitiveType() != info.primitiveType() || | 66 draw->fInfo.primitiveType() != info.primitiveType() || |
| 58 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || | 67 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || |
| 59 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || | 68 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || |
| 60 draw->fInfo.vertexBuffer() != info.vertexBuffer() || | 69 draw->fInfo.vertexBuffer() != info.vertexBuffer() || |
| 61 draw->fInfo.indexBuffer() != ib) { | 70 draw->fInfo.indexBuffer() != ib) { |
| 62 return 0; | 71 return 0; |
| 63 } | 72 } |
| 64 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { | 73 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { |
| 65 return 0; | 74 return 0; |
| 66 } | 75 } |
| 76 if (!allowOverlap) { | |
| 77 if (draw->fInfo.getDevBounds()->isEmpty() || info.getDevBounds()->isEmpty()) { | |
| 78 // Draw bounds are unknown. | |
| 79 return 0; | |
| 80 } | |
| 81 SkIRect existingBounds, incomingBounds; | |
| 82 draw->fInfo.getDevBounds()->roundOut(&existingBounds); | |
| 83 info.getDevBounds()->roundOut(&incomingBounds); | |
| 84 if (SkIRect::Intersects(existingBounds, incomingBounds)) { | |
| 85 // Draw bounds overlap. | |
| 86 return 0; | |
| 87 } | |
| 88 } | |
| 67 | 89 |
| 68 // how many instances can be concat'ed onto draw given the size of the index buffer | 90 // how many instances can be concat'ed onto draw given the size of the index buffer |
| 69 int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance(); | 91 int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance(); |
| 70 instancesToConcat -= draw->fInfo.instanceCount(); | 92 instancesToConcat -= draw->fInfo.instanceCount(); |
| 71 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); | 93 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); |
| 72 | 94 |
| 73 draw->fInfo.adjustInstanceCount(instancesToConcat); | 95 draw->fInfo.adjustInstanceCount(instancesToConcat); |
| 74 | 96 |
| 97 // Join the draw bounds. | |
| 98 if (!draw->fInfo.getDevBounds()->isEmpty()) { | |
| 99 if (info.getDevBounds()->isEmpty()) { | |
| 100 draw->fInfo.setDevBounds(SkRect::MakeEmpty()); | |
| 101 } else { | |
| 102 SkRect newBounds(*draw->fInfo.getDevBounds()); | |
| 103 newBounds.join(*info.getDevBounds()); | |
| 104 draw->fInfo.setDevBounds(newBounds); | |
| 105 } | |
| 106 } | |
| 107 | |
| 108 // Remove the blend barrier, if any. | |
| 109 if (Cmd::kBlendBarrier_CmdType == fCmdBuffer.back().type()) { | |
| 110 fCmdBuffer.pop_back(); | |

Mark Kilgard, 2015/04/02 23:02:38: does something avoid all this checking to eliminat

| 111 } | |
| 112 | |
| 75 // update last fGpuCmdMarkers to include any additional trace markers that have been added | 113 // update last fGpuCmdMarkers to include any additional trace markers that have been added |
| 76 iodb->recordTraceMarkersIfNecessary(draw); | 114 iodb->recordTraceMarkersIfNecessary(draw); |
| 77 return instancesToConcat; | 115 return instancesToConcat; |
| 78 } | 116 } |
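
A note on the collapse-through-barrier rule in the hunk above: the new code only concatenates an instanced draw across a trailing blend barrier when both draws have known device bounds and those bounds don't touch after rounding out to whole pixels. Below is a minimal sketch of that test, assuming Skia's SkRect/SkIRect API; the helper name is illustrative and not part of this CL.

```cpp
#include "SkRect.h"

// Illustrative helper, not part of this CL: the overlap test that gates
// collapsing an instanced draw through a blend barrier. Empty device bounds
// mean "bounds unknown" and must be treated as overlapping, which keeps the
// barrier in place.
static bool draws_may_overlap(const SkRect& existing, const SkRect& incoming) {
    if (existing.isEmpty() || incoming.isEmpty()) {
        return true;  // unknown bounds: assume overlap, don't collapse
    }
    SkIRect a, b;
    existing.roundOut(&a);   // conservative: round out to whole pixels
    incoming.roundOut(&b);
    return SkIRect::Intersects(a, b);
}
```
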
| 79 | 117 |
| 80 GrTargetCommands::Cmd* GrTargetCommands::recordDraw( | 118 GrTargetCommands::Cmd* GrTargetCommands::recordDraw( |
| 81 GrInOrderDrawBuffer* iodb, | 119 GrInOrderDrawBuffer* iodb, |
| 82 const GrGeometryProcessor* gp, | 120 const GrGeometryProcessor* gp, |
| 83 const GrDrawTarget::DrawInfo& info, | 121 const GrDrawTarget::DrawInfo& info, |
| 84 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 122 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
| (...skipping 23 matching lines...) | |
| 108 GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch( | 146 GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch( |
| 109 GrInOrderDrawBuffer* iodb, | 147 GrInOrderDrawBuffer* iodb, |
| 110 GrBatch* batch, | 148 GrBatch* batch, |
| 111 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 149 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
| 112 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) { | 150 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) { |
| 113 return NULL; | 151 return NULL; |
| 114 } | 152 } |
| 115 | 153 |
| 116 // Check if there is a Batch Draw we can batch with | 154 // Check if there is a Batch Draw we can batch with |
| 117 if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) { | 155 if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) { |
| 156 // TODO: Batch through blend barriers and dst texture setups when there is no overlap. | |
| 118 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); | 157 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
| 119 return fDrawBatch; | 158 return fDrawBatch; |
| 120 } | 159 } |
| 121 | 160 |
| 122 SkASSERT(&fCmdBuffer.back() == fDrawBatch); | 161 SkASSERT(&fCmdBuffer.back() == fDrawBatch); |
| 123 if (!fDrawBatch->fBatch->combineIfPossible(batch)) { | 162 if (!fDrawBatch->fBatch->combineIfPossible(batch)) { |
| 124 this->closeBatch(); | 163 this->closeBatch(); |
| 125 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); | 164 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
| 126 } | 165 } |
| 127 | 166 |
| (...skipping 18 matching lines...) | |
| 146 sp->fStencil = stencilSettings; | 185 sp->fStencil = stencilSettings; |
| 147 return sp; | 186 return sp; |
| 148 } | 187 } |
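
For readers skimming the recordDrawBatch hunk above: the control flow is "fold into the open tail DrawBatch if combineIfPossible succeeds, otherwise close it and append a fresh one." Here is a self-contained sketch of that recording pattern with toy types; nothing below is the real GrBatch/DrawBatch API.

```cpp
#include <memory>
#include <vector>

// Toy types standing in for GrBatch/DrawBatch; only the recording pattern of
// recordDrawBatch is reproduced here.
struct Batch {
    int fVertexCount;
    bool combineIfPossible(const Batch& that) {
        fVertexCount += that.fVertexCount;  // this toy batch always combines
        return true;
    }
};

struct Recorder {
    std::vector<std::unique_ptr<Batch>> fCmds;  // recorded commands
    Batch* fDrawBatch = nullptr;                // open tail batch, if any

    Batch* recordDrawBatch(const Batch& incoming) {
        if (fDrawBatch && fDrawBatch->combineIfPossible(incoming)) {
            return fDrawBatch;                  // folded into the open batch
        }
        // "Close" the old batch and start a new one at the tail.
        fCmds.push_back(std::make_unique<Batch>(incoming));
        fDrawBatch = fCmds.back().get();
        return fDrawBatch;
    }
};
```
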
| 149 | 188 |
| 150 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath( | 189 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath( |
| 151 GrInOrderDrawBuffer* iodb, | 190 GrInOrderDrawBuffer* iodb, |
| 152 const GrPathProcessor* pathProc, | 191 const GrPathProcessor* pathProc, |
| 153 const GrPath* path, | 192 const GrPath* path, |
| 154 const GrStencilSettings& stencilSettings, | 193 const GrStencilSettings& stencilSettings, |
| 155 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 194 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
| 195 // Path commands don't require a preceding blend barrier (per the spec). | |
| 196 SkASSERT(fCmdBuffer.empty() || Cmd::kBlendBarrier_CmdType != fCmdBuffer.back().type()); | |
| 197 | |
| 156 this->closeBatch(); | 198 this->closeBatch(); |
| 157 | 199 |
| 158 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering? | 200 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering? |
| 159 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { | 201 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { |
| 160 return NULL; | 202 return NULL; |
| 161 } | 203 } |
| 162 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); | 204 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); |
| 163 dp->fStencilSettings = stencilSettings; | 205 dp->fStencilSettings = stencilSettings; |
| 164 return dp; | 206 return dp; |
| 165 } | 207 } |
| 166 | 208 |
| 167 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths( | 209 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths( |
| 168 GrInOrderDrawBuffer* iodb, | 210 GrInOrderDrawBuffer* iodb, |
| 169 const GrPathProcessor* pathProc, | 211 const GrPathProcessor* pathProc, |
| 170 const GrPathRange* pathRange, | 212 const GrPathRange* pathRange, |
| 171 const void* indexValues, | 213 const void* indexValues, |
| 172 GrDrawTarget::PathIndexType indexType, | 214 GrDrawTarget::PathIndexType indexType, |
| 173 const float transformValues[], | 215 const float transformValues[], |
| 174 GrDrawTarget::PathTransformType transformType, | 216 GrDrawTarget::PathTransformType transformType, |
| 175 int count, | 217 int count, |
| 176 const GrStencilSettings& stencilSettings, | 218 const GrStencilSettings& stencilSettings, |
| 177 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 219 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
| 178 SkASSERT(pathRange); | 220 SkASSERT(pathRange); |
| 179 SkASSERT(indexValues); | 221 SkASSERT(indexValues); |
| 180 SkASSERT(transformValues); | 222 SkASSERT(transformValues); |
| 223 | |
| 224 // Path commands don't require a preceding blend barrier (per the spec). | |
| 225 SkASSERT(fCmdBuffer.empty() || Cmd::kBlendBarrier_CmdType != fCmdBuffer.back().type()); | |
| 226 | |
| 181 this->closeBatch(); | 227 this->closeBatch(); |
| 182 | 228 |
| 183 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { | 229 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { |
| 184 return NULL; | 230 return NULL; |
| 185 } | 231 } |
| 186 | 232 |
| 187 char* savedIndices; | 233 char* savedIndices; |
| 188 float* savedTransforms; | 234 float* savedTransforms; |
| 189 | 235 |
| 190 iodb->appendIndicesAndTransforms(indexValues, indexType, | 236 iodb->appendIndicesAndTransforms(indexValues, indexType, |
| 191 transformValues, transformType, | 237 transformValues, transformType, |
| 192 count, &savedIndices, &savedTransforms); | 238 count, &savedIndices, &savedTransforms); |
| 193 | 239 |
| 194 if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) { | 240 if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) { |
| 195 // The previous command was also DrawPaths. Try to collapse this call into the one | 241 // The previous command was also DrawPaths. Try to collapse this call into the one |
| 196 // before. Note that stenciling all the paths at once, then covering, may not be | 242 // before. Note that when there is overlap, this is not equivalent to two separate |
| 197 // equivalent to two separate draw calls if there is overlap. Blending won't work, | 243 // draw calls. We don't worry about this case because instanced path commands are |
| 198 // and the combined calls may also cancel each other's winding numbers in some | 244 // only used for text. (It also means we get to combine calls when the blend mode |
| 199 // places. For now the winding numbers are only an issue if the fill is even/odd, | 245 // doesn't allow overlap; a single cover pass is non-overlapping by definition.) |
| 200 // because DrawPaths is currently only used for glyphs, and glyphs in the same | |
| 201 // font tend to all wind in the same direction. | |
| 202 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | 246 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); |
| 203 if (pathRange == previous->pathRange() && | 247 if (pathRange == previous->pathRange() && |
| 204 indexType == previous->fIndexType && | 248 indexType == previous->fIndexType && |
| 205 transformType == previous->fTransformType && | 249 transformType == previous->fTransformType && |
| 206 stencilSettings == previous->fStencilSettings && | 250 stencilSettings == previous->fStencilSettings && |
| 207 path_fill_type_is_winding(stencilSettings) && | 251 path_fill_type_is_winding(stencilSettings)) { // Even/odd could cancel on overlap. |
| 208 !pipelineInfo.willBlendWithDst(pathProc)) { | 252 const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType); |
| 209 const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType); | 253 const int xformSize = GrPathRendering::PathTransformSize(transformType); |
| 210 const int xformSize = GrPathRendering::PathTransformSize(transformType); | 254 if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices && |
| 211 if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices && | 255 (0 == xformSize || |
| 212 (0 == xformSize || | 256 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) { |
| 213 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) { | 257 // Fold this DrawPaths call into the one previous. |
| 214 // Fold this DrawPaths call into the one previous. | 258 previous->fCount += count; |
| 215 previous->fCount += count; | 259 return NULL; |
| 216 return NULL; | 260 } |
| 217 } | |
| 218 } | 261 } |
| 219 } | 262 } |
| 220 | 263 |
| 221 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); | 264 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); |
| 222 dp->fIndices = savedIndices; | 265 dp->fIndices = savedIndices; |
| 223 dp->fIndexType = indexType; | 266 dp->fIndexType = indexType; |
| 224 dp->fTransforms = savedTransforms; | 267 dp->fTransforms = savedTransforms; |
| 225 dp->fTransformType = transformType; | 268 dp->fTransformType = transformType; |
| 226 dp->fCount = count; | 269 dp->fCount = count; |
| 227 dp->fStencilSettings = stencilSettings; | 270 dp->fStencilSettings = stencilSettings; |
| (...skipping 167 matching lines...) | |
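
One detail of the DrawPaths fold above worth spelling out: it is only legal because appendIndicesAndTransforms stores the new indices and transforms immediately after the previous command's data, so bumping previous->fCount still describes one contiguous range. A sketch of that contiguity condition follows, with an illustrative signature; the real check is written inline in recordDrawPaths.

```cpp
// Illustrative helper, not part of this CL: the incoming DrawPaths data must
// begin exactly where the previous command's data ends for the two commands
// to merge into a single contiguous draw.
static bool appended_data_is_contiguous(const char* prevIndices, const float* prevTransforms,
                                        int prevCount, int indexBytes, int xformSize,
                                        const char* newIndices, const float* newTransforms) {
    if (newIndices != prevIndices + prevCount * indexBytes) {
        return false;  // indices were saved somewhere else; can't merge
    }
    // A transform size of 0 means the command carries no per-path transforms.
    return 0 == xformSize || newTransforms == prevTransforms + prevCount * xformSize;
}
```
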
| 395 } | 438 } |
| 396 | 439 |
| 397 void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) { | 440 void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) { |
| 398 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); | 441 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); |
| 399 } | 442 } |
| 400 | 443 |
| 401 void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) { | 444 void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) { |
| 402 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 445 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
| 403 } | 446 } |
| 404 | 447 |
| 448 void GrTargetCommands::BlendBarrier::execute(GrGpu* gpu, const SetState*) { | |
| 449 gpu->blendBarrier(); | |
| 450 } | |
| 451 | |
| 405 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb, | 452 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb, |
| 406 GrSurface* dst, | 453 GrSurface* dst, |
| 407 GrSurface* src, | 454 GrSurface* src, |
| 407 const SkIRect& srcRect, | 454 const SkIRect& srcRect, |
| 408 const SkIPoint& dstPoint) { | 455 const SkIPoint& dstPoint) { |
| 410 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { | 457 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { |
| 411 this->closeBatch(); | 458 this->closeBatch(); |
| 412 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | 459 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); |
| 413 cs->fSrcRect = srcRect; | 460 cs->fSrcRect = srcRect; |
| 414 cs->fDstPoint = dstPoint; | 461 cs->fDstPoint = dstPoint; |
| 415 return cs; | 462 return cs; |
| 416 } | 463 } |
| 417 return NULL; | 464 return NULL; |
| 418 } | 465 } |
| 419 | 466 |
| 467 GrTargetCommands::Cmd* GrTargetCommands::recordBlendBarrier(GrInOrderDrawBuffer* iodb) { | |
| 468 if (!fCmdBuffer.empty() && Cmd::kBlendBarrier_CmdType == fCmdBuffer.back().type()) { | |
| 469 return NULL; | |
| 470 } | |
| 471 this->closeBatch(); | |
| 472 return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, BlendBarrier, ()); | |
| 473 } | |
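
recordBlendBarrier deliberately keeps at most one barrier at the tail of the command buffer: a second request with no intervening draw returns NULL and reuses the pending one. A minimal sketch of that dedupe rule over a plain command-type vector (illustrative types, not the real CmdBuffer):

```cpp
#include <vector>

// Illustrative command stream; only the dedupe rule of recordBlendBarrier is shown.
enum class CmdType { kDraw, kBlendBarrier };

static bool record_blend_barrier(std::vector<CmdType>* cmds) {
    if (!cmds->empty() && CmdType::kBlendBarrier == cmds->back()) {
        return false;  // a barrier is already pending; don't record another
    }
    cmds->push_back(CmdType::kBlendBarrier);
    return true;
}
```
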
| 474 | |
| 420 bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb, | 475 bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb, |
| 421 const GrPrimitiveProcessor* primProc, | 476 const GrPrimitiveProcessor* primProc, |
| 422 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 477 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
| 423 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc)); | 478 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc)); |
| 424 iodb->setupPipeline(pipelineInfo, ss->pipelineLocation()); | 479 iodb->setupPipeline(pipelineInfo, ss->pipelineLocation()); |
| 425 | 480 |
| 426 if (ss->getPipeline()->mustSkip()) { | 481 if (ss->getPipeline()->mustSkip()) { |
| 427 fCmdBuffer.pop_back(); | 482 fCmdBuffer.pop_back(); |
| 428 return false; | 483 return false; |
| 429 } | 484 } |
| (...skipping 31 matching lines...) | |
| 461 fPrevState->getPipeline()->isEqual(*ss->getPipeline())) { | 516 fPrevState->getPipeline()->isEqual(*ss->getPipeline())) { |
| 462 fCmdBuffer.pop_back(); | 517 fCmdBuffer.pop_back(); |
| 463 } else { | 518 } else { |
| 464 this->closeBatch(); | 519 this->closeBatch(); |
| 465 fPrevState = ss; | 520 fPrevState = ss; |
| 466 iodb->recordTraceMarkersIfNecessary(ss); | 521 iodb->recordTraceMarkersIfNecessary(ss); |
| 467 } | 522 } |
| 468 return true; | 523 return true; |
| 469 } | 524 } |
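
setupPipelineAndShouldDraw, shown only in part above (a 31-line hunk is collapsed), speculatively appends a SetState and then pops it again if the pipeline must be skipped or is identical to the previously emitted state. The sketch below shows the same redundant-state elimination with toy types, checking before appending rather than append-then-pop; nothing here is Skia's SetState/GrPipeline API.

```cpp
#include <vector>

// Toy pipeline state; isEqual/mustSkip stand in for the GrPipeline queries.
struct State {
    int  fKey;
    bool fMustSkip;
    bool isEqual(const State& that) const { return fKey == that.fKey; }
    bool mustSkip() const { return fMustSkip; }
};

struct Recorder {
    std::vector<State> fCmds;
    int fPrevState = -1;  // index of the last emitted state, -1 if none

    // Returns false if nothing should be drawn; true if drawing may proceed,
    // either with a newly recorded state or by reusing the previous one.
    bool setupStateAndShouldDraw(const State& s) {
        if (s.mustSkip()) {
            return false;                               // draw would be a no-op
        }
        if (fPrevState >= 0 && fCmds[fPrevState].isEqual(s)) {
            return true;                                // reuse the previous state
        }
        fCmds.push_back(s);
        fPrevState = static_cast<int>(fCmds.size()) - 1;
        return true;
    }
};
```
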
| 470 | 525 |