| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrTargetCommands.h" | 8 #include "GrTargetCommands.h" |
| 9 | 9 |
| 10 #include "GrColor.h" | |
| 11 #include "GrDefaultGeoProcFactory.h" | |
| 12 #include "GrInOrderDrawBuffer.h" | 10 #include "GrInOrderDrawBuffer.h" |
| 13 #include "GrTemplates.h" | |
| 14 #include "SkPoint.h" | |
| 15 | |
| 16 static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) { | |
| 17 static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face; | |
| 18 bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace); | |
| 19 if (isWinding) { | |
| 20 // Double check that it is in fact winding. | |
| 21 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace)); | |
| 22 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace)); | |
| 23 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace)); | |
| 24 SkASSERT(!pathStencilSettings.isTwoSided()); | |
| 25 } | |
| 26 return isWinding; | |
| 27 } | |
| 28 | |
| 29 GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(State* state, GrBatch* batch) { | |
| 30 // Check if there is a Batch Draw we can batch with | |
| 31 if (!fCmdBuffer.empty() && Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) { | |
| 32 DrawBatch* previous = static_cast<DrawBatch*>(&fCmdBuffer.back()); | |
| 33 if (previous->fState == state && previous->fBatch->combineIfPossible(batch)) { | |
| 34 return NULL; | |
| 35 } | |
| 36 } | |
| 37 | |
| 38 return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (state, batch, &fBatchTarget)); | |
| 39 } | |
| 40 | |
| 41 GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath( | |
| 42 const GrPipelineBuilder& pipelineBuilder, | |
| 43 const GrPathProcessor* pathProc, | |
| 44 const GrPath* path, | |
| 45 const GrScissorState& scissorState, | |
| 46 const GrStencilSettings& stencilSettings) { | |
| 47 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, | |
| 48 (path, pipelineBuilder.getRenderTarget())); | |
| 49 | |
| 50 sp->fScissor = scissorState; | |
| 51 sp->fUseHWAA = pipelineBuilder.isHWAntialias(); | |
| 52 sp->fViewMatrix = pathProc->viewMatrix(); | |
| 53 sp->fStencil = stencilSettings; | |
| 54 return sp; | |
| 55 } | |
| 56 | |
| 57 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath( | |
| 58 State* state, | |
| 59 const GrPathProcessor* pathProc, | |
| 60 const GrPath* path, | |
| 61 const GrStencilSettings& stencilSettings) { | |
| 62 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (state, path)); | |
| 63 dp->fStencilSettings = stencilSettings; | |
| 64 return dp; | |
| 65 } | |
| 66 | |
| 67 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths( | |
| 68 State* state, | |
| 69 GrInOrderDrawBuffer* iodb, | |
| 70 const GrPathProcessor* pathProc, | |
| 71 const GrPathRange* pathRange, | |
| 72 const void* indexValues, | |
| 73 GrDrawTarget::PathIndexType indexType, | |
| 74 const float transformValues[], | |
| 75 GrDrawTarget::PathTransformType transformType, | |
| 76 int count, | |
| 77 const GrStencilSettings& stencilSettings, | |
| 78 const GrDrawTarget::PipelineInfo& pipelineInfo) { | |
| 79 SkASSERT(pathRange); | |
| 80 SkASSERT(indexValues); | |
| 81 SkASSERT(transformValues); | |
| 82 | |
| 83 char* savedIndices; | |
| 84 float* savedTransforms; | |
| 85 | |
| 86 iodb->appendIndicesAndTransforms(indexValues, indexType, | |
| 87 transformValues, transformType, | |
| 88 count, &savedIndices, &savedTransforms); | |
| 89 | |
| 90 if (!fCmdBuffer.empty() && Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) { | |
| 91 // The previous command was also DrawPaths. Try to collapse this call into the one | |
| 92 // before. Note that stenciling all the paths at once, then covering, may not be | |
| 93 // equivalent to two separate draw calls if there is overlap. Blending won't work, | |
| 94 // and the combined calls may also cancel each other's winding numbers in some | |
| 95 // places. For now the winding numbers are only an issue if the fill is even/odd, | |
| 96 // because DrawPaths is currently only used for glyphs, and glyphs in the same | |
| 97 // font tend to all wind in the same direction. | |
| 98 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | |
| 99 if (pathRange == previous->pathRange() && | |
| 100 indexType == previous->fIndexType && | |
| 101 transformType == previous->fTransformType && | |
| 102 stencilSettings == previous->fStencilSettings && | |
| 103 path_fill_type_is_winding(stencilSettings) && | |
| 104 !pipelineInfo.willBlendWithDst(pathProc) && | |
| 105 previous->fState == state) { | |
| 106 const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType); | |
| 107 const int xformSize = GrPathRendering::PathTransformSize(transformType); | |
| 108 if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices && | |
| 109 (0 == xformSize || | |
| 110 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) { | |
| 111 // Fold this DrawPaths call into the one previous. | |
| 112 previous->fCount += count; | |
| 113 return NULL; | |
| 114 } | |
| 115 } | |
| 116 } | |
| 117 | |
| 118 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (state, pathRange)); | |
| 119 dp->fIndices = savedIndices; | |
| 120 dp->fIndexType = indexType; | |
| 121 dp->fTransforms = savedTransforms; | |
| 122 dp->fTransformType = transformType; | |
| 123 dp->fCount = count; | |
| 124 dp->fStencilSettings = stencilSettings; | |
| 125 return dp; | |
| 126 } | |
| 127 | |
| 128 GrTargetCommands::Cmd* GrTargetCommands::recordClear(const SkIRect* rect, | |
| 129 GrColor color, | |
| 130 bool canIgnoreRect, | |
| 131 GrRenderTarget* renderTarget) { | |
| 132 SkASSERT(renderTarget); | |
| 133 | |
| 134 SkIRect r; | |
| 135 if (NULL == rect) { | |
| 136 // We could do something smart and remove previous draws and clears to | |
| 137 // the current render target. If we get that smart we have to make sure | |
| 138 // those draws aren't read before this clear (render-to-texture). | |
| 139 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); | |
| 140 rect = &r; | |
| 141 } | |
| 142 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | |
| 143 GrColorIsPMAssert(color); | |
| 144 clr->fColor = color; | |
| 145 clr->fRect = *rect; | |
| 146 clr->fCanIgnoreRect = canIgnoreRect; | |
| 147 return clr; | |
| 148 } | |
| 149 | |
| 150 GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(const SkIRect& rect, | |
| 151 bool insideClip, | |
| 152 GrRenderTarget* renderTarget) { | |
| 153 SkASSERT(renderTarget); | |
| 154 | |
| 155 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget)); | |
| 156 clr->fRect = rect; | |
| 157 clr->fInsideClip = insideClip; | |
| 158 return clr; | |
| 159 } | |
| 160 | |
| 161 GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrRenderTarget* renderTarget) { | |
| 162 SkASSERT(renderTarget); | |
| 163 | |
| 164 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | |
| 165 clr->fColor = GrColor_ILLEGAL; | |
| 166 return clr; | |
| 167 } | |
| 168 | 11 |
| 169 void GrTargetCommands::reset() { | 12 void GrTargetCommands::reset() { |
| 170 fCmdBuffer.reset(); | 13 fCmdBuffer.reset(); |
| 171 } | 14 } |
| 172 | 15 |
| 173 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) { | 16 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) { |
| 174 if (fCmdBuffer.empty()) { | 17 if (fCmdBuffer.empty()) { |
| 175 return; | 18 return; |
| 176 } | 19 } |
| 177 | 20 |
| (...skipping 86 matching lines...) | |
| 264 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); | 107 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); |
| 265 } | 108 } |
| 266 | 109 |
| 267 void GrTargetCommands::CopySurface::execute(GrGpu* gpu) { | 110 void GrTargetCommands::CopySurface::execute(GrGpu* gpu) { |
| 268 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 111 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
| 269 } | 112 } |
| 270 | 113 |
| 271 void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) { | 114 void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) { |
| 272 gpu->xferBarrier(fBarrierType); | 115 gpu->xferBarrier(fBarrierType); |
| 273 } | 116 } |
| 274 | |
| 275 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrSurface* dst, | |
| 276 GrSurface* src, | |
| 277 const SkIRect& srcRect, | |
| 278 const SkIPoint& dstPoint) { | |
| 279 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | |
| 280 cs->fSrcRect = srcRect; | |
| 281 cs->fDstPoint = dstPoint; | |
| 282 return cs; | |
| 283 } | |
| 284 | |
| 285 void GrTargetCommands::recordXferBarrierIfNecessary(const GrPipeline& pipeline, | |
| 286 GrInOrderDrawBuffer* iodb) { | |
| 287 const GrXferProcessor& xp = *pipeline.getXferProcessor(); | |
| 288 GrRenderTarget* rt = pipeline.getRenderTarget(); | |
| 289 | |
| 290 GrXferBarrierType barrierType; | |
| 291 if (!xp.willNeedXferBarrier(rt, *iodb->caps(), &barrierType)) { | |
| 292 return; | |
| 293 } | |
| 294 | |
| 295 XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, ()); | |
| 296 xb->fBarrierType = barrierType; | |
| 297 | |
| 298 iodb->recordTraceMarkersIfNecessary(xb); | |
| 299 } | |
| 300 | |
| OLD | NEW |
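
Reviewer note: the deleted record* methods above all follow one pattern — peek at the last command in fCmdBuffer, fold the new work into it when the two are compatible (same state, combinable batch, contiguous index/transform storage), and only append a fresh command otherwise. The standalone C++ sketch below is a minimal model of that coalescing logic, not Skia code; CommandBuffer, DrawCmd and the other names are hypothetical stand-ins for GrTargetCommands, DrawBatch/DrawPaths, etc.

// Minimal sketch of the "combine with the previous command if possible,
// otherwise append" pattern used by recordDrawBatch()/recordDrawPaths().
// All names here are hypothetical; this is not Skia API.
#include <cstdio>
#include <vector>

struct DrawCmd {
    int stateID;   // stands in for the pipeline/state pointer compared above
    int count;     // number of items this command will draw
};

class CommandBuffer {
public:
    // Returns true if the draw was folded into the previous command.
    bool record(int stateID, int count) {
        if (!fCmds.empty() && fCmds.back().stateID == stateID) {
            // Same state as the last command: extend it instead of appending
            // a second command (recordDrawPaths bumps fCount the same way
            // when the saved index/transform arrays are contiguous).
            fCmds.back().count += count;
            return true;
        }
        fCmds.push_back(DrawCmd{stateID, count});
        return false;
    }
    size_t cmdCount() const { return fCmds.size(); }
private:
    std::vector<DrawCmd> fCmds;
};

int main() {
    CommandBuffer buf;
    buf.record(/*stateID=*/1, 4);
    buf.record(1, 2);   // folded into the previous command
    buf.record(2, 3);   // different state -> new command appended
    std::printf("%zu commands recorded\n", buf.cmdCount()); // prints "2 commands recorded"
    return 0;
}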