OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrTargetCommands.h" | 8 #include "GrTargetCommands.h" |
9 | 9 |
10 #include "GrColor.h" | 10 #include "GrColor.h" |
11 #include "GrDefaultGeoProcFactory.h" | 11 #include "GrDefaultGeoProcFactory.h" |
12 #include "GrInOrderDrawBuffer.h" | 12 #include "GrInOrderDrawBuffer.h" |
13 #include "GrTemplates.h" | 13 #include "GrTemplates.h" |
14 #include "SkPoint.h" | 14 #include "SkPoint.h" |
15 | 15 |
16 void GrTargetCommands::closeBatch() { | |
17 if (fDrawBatch) { | |
18 fBatchTarget.resetNumberOfDraws(); | |
19 fDrawBatch->execute(NULL, fPrevState); | |
20 fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws()); | |
21 fDrawBatch = NULL; | |
22 } | |
23 } | |
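For context, the deleted closeBatch() above is a close-and-execute pattern: flush the currently open draw batch, record how many GPU draws it produced, and drop the pointer so a new batch can be opened. Below is a minimal standalone sketch of that pattern; Recorder, PendingBatch, and BatchTarget are simplified stand-ins invented for illustration, not the actual Skia classes.

    // Sketch only: simplified stand-ins, not the real GrTargetCommands API.
    struct BatchTarget {
        int fDraws = 0;
        void resetNumberOfDraws() { fDraws = 0; }
        int numberOfDraws() const { return fDraws; }
    };

    struct PendingBatch {
        int fNumberOfDraws = 0;
        void execute(BatchTarget* target) { target->fDraws += 1; }  // emits draws
        void setNumberOfDraws(int n) { fNumberOfDraws = n; }
    };

    struct Recorder {
        BatchTarget fBatchTarget;
        PendingBatch* fDrawBatch = nullptr;

        // Mirrors the deleted closeBatch(): execute the open batch, remember how
        // many draws it generated, then clear it.
        void closeBatch() {
            if (fDrawBatch) {
                fBatchTarget.resetNumberOfDraws();
                fDrawBatch->execute(&fBatchTarget);
                fDrawBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
                fDrawBatch = nullptr;
            }
        }
    };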
24 | |
25 static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) { | 16 static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) { |
26 static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face; | 17 static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face; |
27 bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace); | 18 bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace); |
28 if (isWinding) { | 19 if (isWinding) { |
29 // Double check that it is in fact winding. | 20 // Double check that it is in fact winding. |
30 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace)); | 21 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace)); |
31 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace)); | 22 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace)); |
32 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace)); | 23 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace)); |
33 SkASSERT(!pathStencilSettings.isTwoSided()); | 24 SkASSERT(!pathStencilSettings.isTwoSided()); |
34 } | 25 } |
35 return isWinding; | 26 return isWinding; |
36 } | 27 } |
37 | 28 |
38 int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb, | |
39 const GrDrawTarget::DrawInfo& info) { | |
40 SkASSERT(!fCmdBuffer.empty()); | |
41 SkASSERT(info.isInstanced()); | |
42 | |
43 const GrIndexBuffer* ib; | |
44 if (!iodb->canConcatToIndexBuffer(&ib)) { | |
45 return 0; | |
46 } | |
47 | |
48 // Check if there is a draw info that is compatible that uses the same VB from the pool and | |
49 // the same IB | |
50 if (Cmd::kDraw_CmdType != fCmdBuffer.back().type()) { | |
51 return 0; | |
52 } | |
53 | |
54 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); | |
55 | |
56 if (!draw->fInfo.isInstanced() || | |
57 draw->fInfo.primitiveType() != info.primitiveType() || | |
58 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || | |
59 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || | |
60 draw->fInfo.vertexBuffer() != info.vertexBuffer() || | |
61 draw->fInfo.indexBuffer() != ib) { | |
62 return 0; | |
63 } | |
64 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { | |
65 return 0; | |
66 } | |
67 | |
68 // how many instances can be concat'ed onto draw given the size of the index buffer | |
69 int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance(); | |
70 instancesToConcat -= draw->fInfo.instanceCount(); | |
71 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); | |
72 | |
73 draw->fInfo.adjustInstanceCount(instancesToConcat); | |
74 | |
75 // update last fGpuCmdMarkers to include any additional trace markers that have been added | |
76 iodb->recordTraceMarkersIfNecessary(draw); | |
77 return instancesToConcat; | |
78 } | |
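The deleted concatInstancedDraw() above folds an incoming instanced draw into the trailing Draw command when both use the same buffers and primitive layout, clamped by how many instances still fit in the shared index buffer. A small runnable sketch of that clamp follows; the numbers are made up for illustration.

    // Sketch of the instance clamp performed by the deleted concatInstancedDraw().
    #include <algorithm>
    #include <cassert>

    int main() {
        int indexCountInCurrentSource = 6000;  // indices available in the shared IB
        int indicesPerInstance = 6;            // e.g. two triangles per quad
        int existingInstanceCount = 900;       // instances already in the trailing Draw
        int incomingInstanceCount = 200;       // instances in the new draw

        int instancesToConcat = indexCountInCurrentSource / indicesPerInstance;  // 1000
        instancesToConcat -= existingInstanceCount;                              // 100
        instancesToConcat = std::min(instancesToConcat, incomingInstanceCount);  // 100

        // 100 instances fold into the prior Draw; the other 100 need a new Draw cmd.
        assert(instancesToConcat == 100);
        return 0;
    }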
79 | |
80 GrTargetCommands::Cmd* GrTargetCommands::recordDraw( | |
81 GrInOrderDrawBuffer* iodb, | |
82 const GrGeometryProcessor* gp, | |
83 const GrDrawTarget::DrawInfo& info, | |
84 const GrDrawTarget::PipelineInfo& pipelineInfo) { | |
85 #ifdef USE_BITMAP_TEXTBLOBS | |
86 SkFAIL("Non-batch no longer supported\n"); | |
87 #endif | |
88 SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer())); | |
89 CLOSE_BATCH | |
90 | |
91 if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) { | |
92 return NULL; | |
93 } | |
94 | |
95 Draw* draw; | |
96 if (info.isInstanced()) { | |
97 int instancesConcated = this->concatInstancedDraw(iodb, info); | |
98 if (info.instanceCount() > instancesConcated) { | |
99 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | |
100 draw->fInfo.adjustInstanceCount(-instancesConcated); | |
101 } else { | |
102 return NULL; | |
103 } | |
104 } else { | |
105 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | |
106 } | |
107 | |
108 return draw; | |
109 } | |
110 | |
111 GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch( | 29 GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch( |
112 GrInOrderDrawBuffer* iodb, | 30 GrInOrderDrawBuffer* iodb, |
113 GrBatch* batch, | 31 GrBatch* batch, |
114 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 32 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
115 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) { | 33 if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) { |
116 return NULL; | 34 return NULL; |
117 } | 35 } |
118 | 36 |
119 // Check if there is a Batch Draw we can batch with | 37 // Check if there is a Batch Draw we can batch with |
120 if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) { | 38 if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) { |
121 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); | 39 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
122 return fDrawBatch; | 40 return fDrawBatch; |
123 } | 41 } |
124 | 42 |
125 SkASSERT(&fCmdBuffer.back() == fDrawBatch); | 43 SkASSERT(&fCmdBuffer.back() == fDrawBatch); |
126 if (!fDrawBatch->fBatch->combineIfPossible(batch)) { | 44 if (!fDrawBatch->fBatch->combineIfPossible(batch)) { |
127 CLOSE_BATCH | |
128 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); | 45 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
129 } | 46 } |
130 | 47 |
131 return fDrawBatch; | 48 return fDrawBatch; |
132 } | 49 } |
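recordDrawBatch() keeps a single open DrawBatch and asks it to absorb each incoming batch via combineIfPossible(); only when merging fails does it append a fresh command. The sketch below shows that policy with simplified Batch and CommandList types, which are illustrative assumptions rather than the real GrBatch API.

    #include <memory>
    #include <vector>

    struct Batch {
        int fKey;       // stands in for the pipeline/geometry state a real batch compares
        int fCount = 1; // amount of work carried by this batch

        bool combineIfPossible(const Batch& that) {
            if (fKey != that.fKey) {
                return false;          // incompatible: caller records a new command
            }
            fCount += that.fCount;     // fold the new work into this batch
            return true;
        }
    };

    struct CommandList {
        std::vector<std::unique_ptr<Batch>> fCmds;

        Batch* recordDrawBatch(const Batch& b) {
            if (!fCmds.empty() && fCmds.back()->combineIfPossible(b)) {
                return fCmds.back().get();               // merged into the open batch
            }
            fCmds.push_back(std::make_unique<Batch>(b)); // start a new DrawBatch cmd
            return fCmds.back().get();
        }
    };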
133 | 50 |
134 GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath( | 51 GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath( |
135 GrInOrderDrawBuffer* iodb, | 52 GrInOrderDrawBuffer* iodb, |
136 const GrPipelineBuilder& pipelineBuilder, | 53 const GrPipelineBuilder& pipelineBuilder, |
137 const GrPathProcessor* pathProc, | 54 const GrPathProcessor* pathProc, |
138 const GrPath* path, | 55 const GrPath* path, |
139 const GrScissorState& scissorState, | 56 const GrScissorState& scissorState, |
140 const GrStencilSettings& stencilSettings) { | 57 const GrStencilSettings& stencilSettings) { |
141 CLOSE_BATCH | |
142 | |
143 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, | 58 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, |
144 (path, pipelineBuilder.getRenderTarget())); | 59 (path, pipelineBuilder.getRenderTarget())); |
145 | 60 |
146 sp->fScissor = scissorState; | 61 sp->fScissor = scissorState; |
147 sp->fUseHWAA = pipelineBuilder.isHWAntialias(); | 62 sp->fUseHWAA = pipelineBuilder.isHWAntialias(); |
148 sp->fViewMatrix = pathProc->viewMatrix(); | 63 sp->fViewMatrix = pathProc->viewMatrix(); |
149 sp->fStencil = stencilSettings; | 64 sp->fStencil = stencilSettings; |
150 return sp; | 65 return sp; |
151 } | 66 } |
152 | 67 |
153 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath( | 68 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath( |
154 GrInOrderDrawBuffer* iodb, | 69 GrInOrderDrawBuffer* iodb, |
155 const GrPathProcessor* pathProc, | 70 const GrPathProcessor* pathProc, |
156 const GrPath* path, | 71 const GrPath* path, |
157 const GrStencilSettings& stencilSettings, | 72 const GrStencilSettings& stencilSettings, |
158 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 73 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
159 CLOSE_BATCH | |
160 | |
161 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering? | 74 // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering? |
162 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { | 75 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { |
163 return NULL; | 76 return NULL; |
164 } | 77 } |
165 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); | 78 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); |
166 dp->fStencilSettings = stencilSettings; | 79 dp->fStencilSettings = stencilSettings; |
167 return dp; | 80 return dp; |
168 } | 81 } |
169 | 82 |
170 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths( | 83 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths( |
171 GrInOrderDrawBuffer* iodb, | 84 GrInOrderDrawBuffer* iodb, |
172 const GrPathProcessor* pathProc, | 85 const GrPathProcessor* pathProc, |
173 const GrPathRange* pathRange, | 86 const GrPathRange* pathRange, |
174 const void* indexValues, | 87 const void* indexValues, |
175 GrDrawTarget::PathIndexType indexType, | 88 GrDrawTarget::PathIndexType indexType, |
176 const float transformValues[], | 89 const float transformValues[], |
177 GrDrawTarget::PathTransformType transformType, | 90 GrDrawTarget::PathTransformType transformType, |
178 int count, | 91 int count, |
179 const GrStencilSettings& stencilSettings, | 92 const GrStencilSettings& stencilSettings, |
180 const GrDrawTarget::PipelineInfo& pipelineInfo) { | 93 const GrDrawTarget::PipelineInfo& pipelineInfo) { |
181 SkASSERT(pathRange); | 94 SkASSERT(pathRange); |
182 SkASSERT(indexValues); | 95 SkASSERT(indexValues); |
183 SkASSERT(transformValues); | 96 SkASSERT(transformValues); |
184 CLOSE_BATCH | |
185 | 97 |
186 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { | 98 if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) { |
187 return NULL; | 99 return NULL; |
188 } | 100 } |
189 | 101 |
190 char* savedIndices; | 102 char* savedIndices; |
191 float* savedTransforms; | 103 float* savedTransforms; |
192 | 104 |
193 iodb->appendIndicesAndTransforms(indexValues, indexType, | 105 iodb->appendIndicesAndTransforms(indexValues, indexType, |
194 transformValues, transformType, | 106 transformValues, transformType, |
(...skipping 35 matching lines...)
230 dp->fStencilSettings = stencilSettings; | 142 dp->fStencilSettings = stencilSettings; |
231 return dp; | 143 return dp; |
232 } | 144 } |
233 | 145 |
234 GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb, | 146 GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb, |
235 const SkIRect* rect, | 147 const SkIRect* rect, |
236 GrColor color, | 148 GrColor color, |
237 bool canIgnoreRect, | 149 bool canIgnoreRect, |
238 GrRenderTarget* renderTarget) { | 150 GrRenderTarget* renderTarget) { |
239 SkASSERT(renderTarget); | 151 SkASSERT(renderTarget); |
240 CLOSE_BATCH | |
241 | 152 |
242 SkIRect r; | 153 SkIRect r; |
243 if (NULL == rect) { | 154 if (NULL == rect) { |
244 // We could do something smart and remove previous draws and clears to | 155 // We could do something smart and remove previous draws and clears to |
245 // the current render target. If we get that smart we have to make sure | 156 // the current render target. If we get that smart we have to make sure |
246 // those draws aren't read before this clear (render-to-texture). | 157 // those draws aren't read before this clear (render-to-texture). |
247 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); | 158 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); |
248 rect = &r; | 159 rect = &r; |
249 } | 160 } |
250 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | 161 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); |
251 GrColorIsPMAssert(color); | 162 GrColorIsPMAssert(color); |
252 clr->fColor = color; | 163 clr->fColor = color; |
253 clr->fRect = *rect; | 164 clr->fRect = *rect; |
254 clr->fCanIgnoreRect = canIgnoreRect; | 165 clr->fCanIgnoreRect = canIgnoreRect; |
255 return clr; | 166 return clr; |
256 } | 167 } |
257 | 168 |
258 GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb, | 169 GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb, |
259 const SkIRect& rect, | 170 const SkIRect& rect, |
260 bool insideClip, | 171 bool insideClip, |
261 GrRenderTarget* renderTarget) { | 172 GrRenderTarget* renderTarget) { |
262 SkASSERT(renderTarget); | 173 SkASSERT(renderTarget); |
263 CLOSE_BATCH | |
264 | 174 |
265 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget)); | 175 ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget)); |
266 clr->fRect = rect; | 176 clr->fRect = rect; |
267 clr->fInsideClip = insideClip; | 177 clr->fInsideClip = insideClip; |
268 return clr; | 178 return clr; |
269 } | 179 } |
270 | 180 |
271 GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb, | 181 GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb, |
272 GrRenderTarget* renderTarget) { | 182 GrRenderTarget* renderTarget) { |
273 SkASSERT(renderTarget); | 183 SkASSERT(renderTarget); |
274 CLOSE_BATCH | |
275 | 184 |
276 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | 185 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); |
277 clr->fColor = GrColor_ILLEGAL; | 186 clr->fColor = GrColor_ILLEGAL; |
278 return clr; | 187 return clr; |
279 } | 188 } |
280 | 189 |
281 void GrTargetCommands::reset() { | 190 void GrTargetCommands::reset() { |
282 fCmdBuffer.reset(); | 191 fCmdBuffer.reset(); |
283 fPrevState = NULL; | 192 fPrevState = NULL; |
284 fDrawBatch = NULL; | 193 fDrawBatch = NULL; |
285 } | 194 } |
286 | 195 |
287 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) { | 196 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) { |
288 if (fCmdBuffer.empty()) { | 197 if (fCmdBuffer.empty()) { |
289 return; | 198 return; |
290 } | 199 } |
291 | 200 |
292 // TODO this is temporary while batch is being rolled out | |
293 CLOSE_BATCH | |
294 | |
295 // Updated every time we find a set state cmd to reflect the current state in the playback | 201 // Updated every time we find a set state cmd to reflect the current state in the playback |
296 // stream. | 202 // stream. |
297 SetState* currentState = NULL; | 203 SetState* currentState = NULL; |
298 | 204 |
299 GrGpu* gpu = iodb->getGpu(); | 205 GrGpu* gpu = iodb->getGpu(); |
300 | 206 |
301 #ifdef USE_BITMAP_TEXTBLOBS | 207 #ifdef USE_BITMAP_TEXTBLOBS |
302 // Loop over all batches and generate geometry | 208 // Loop over all batches and generate geometry |
303 CmdBuffer::Iter genIter(fCmdBuffer); | 209 CmdBuffer::Iter genIter(fCmdBuffer); |
304 while (genIter.next()) { | 210 while (genIter.next()) { |
(...skipping 129 matching lines...)
434 void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) { | 340 void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) { |
435 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 341 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
436 } | 342 } |
437 | 343 |
438 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb, | 344 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb, |
439 GrSurface* dst, | 345 GrSurface* dst, |
440 GrSurface* src, | 346 GrSurface* src, |
441 const SkIRect& srcRect, | 347 const SkIRect& srcRect, |
442 const SkIPoint& dstPoint) { | 348 const SkIPoint& dstPoint) { |
443 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { | 349 if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { |
444 CLOSE_BATCH | |
445 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | 350 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); |
446 cs->fSrcRect = srcRect; | 351 cs->fSrcRect = srcRect; |
447 cs->fDstPoint = dstPoint; | 352 cs->fDstPoint = dstPoint; |
448 return cs; | 353 return cs; |
449 } | 354 } |
450 return NULL; | 355 return NULL; |
451 } | 356 } |
452 | 357 |
453 bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb, | 358 bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb, |
454 const GrPrimitiveProcessor* primProc, | 359 const GrPrimitiveProcessor* primProc, |
(...skipping 32 matching lines...)
487 fCmdBuffer.pop_back(); | 392 fCmdBuffer.pop_back(); |
488 return false; | 393 return false; |
489 } | 394 } |
490 | 395 |
491 batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker()); | 396 batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker()); |
492 | 397 |
493 if (fPrevState && !fPrevState->fPrimitiveProcessor.get() && | 398 if (fPrevState && !fPrevState->fPrimitiveProcessor.get() && |
494 fPrevState->getPipeline()->isEqual(*ss->getPipeline())) { | 399 fPrevState->getPipeline()->isEqual(*ss->getPipeline())) { |
495 fCmdBuffer.pop_back(); | 400 fCmdBuffer.pop_back(); |
496 } else { | 401 } else { |
497 CLOSE_BATCH | |
498 fPrevState = ss; | 402 fPrevState = ss; |
499 iodb->recordTraceMarkersIfNecessary(ss); | 403 iodb->recordTraceMarkersIfNecessary(ss); |
500 } | 404 } |
501 return true; | 405 return true; |
502 } | 406 } |
503 | 407 |
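setupPipelineAndShouldDraw() (partially elided in the hunk above) records a SetState command and then pops it again when it compares equal to the previous pipeline state, so consecutive draws with identical state share one command. A simplified sketch of that deduplication follows; SetState, Pipeline, and Commands here are invented stand-ins, not the Skia types, and the primitive-processor and optimization checks are omitted.

    #include <deque>

    struct Pipeline {
        int fId;
        bool isEqual(const Pipeline& that) const { return fId == that.fId; }
    };

    struct SetState {
        Pipeline fPipeline;
    };

    struct Commands {
        std::deque<SetState> fCmdBuffer;      // deque: stable references on push_back
        const SetState* fPrevState = nullptr;

        bool setupPipelineAndShouldDraw(const Pipeline& p) {
            fCmdBuffer.push_back(SetState{p});
            if (fPrevState && fPrevState->fPipeline.isEqual(fCmdBuffer.back().fPipeline)) {
                fCmdBuffer.pop_back();        // redundant: reuse the previous SetState
            } else {
                fPrevState = &fCmdBuffer.back();
            }
            return true;                      // the draw itself is still recorded
        }
    };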