Chromium Code Reviews

Unified Diff: src/gpu/GrTargetCommands.cpp

Issue 1119353002: Revert of Move state management to GrInOrderDrawBuffer (Closed)
Base URL: https://skia.googlesource.com/skia.git@batchownsbounds
Patch Set: Created 5 years, 7 months ago
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -1,300 +1,406 @@
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "GrTargetCommands.h"

 #include "GrColor.h"
 #include "GrDefaultGeoProcFactory.h"
 #include "GrInOrderDrawBuffer.h"
 #include "GrTemplates.h"
 #include "SkPoint.h"

 static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
     static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
     bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
     if (isWinding) {
         // Double check that it is in fact winding.
         SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
         SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
         SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
         SkASSERT(!pathStencilSettings.isTwoSided());
     }
     return isWinding;
 }

-GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(State* state, GrBatch* batch) {
+GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
+                                                  GrInOrderDrawBuffer* iodb,
+                                                  GrBatch* batch,
+                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
+    if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
+        return NULL;
+    }
+
     // Check if there is a Batch Draw we can batch with
-    if (!fCmdBuffer.empty() && Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) {
+    if (Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) {
         DrawBatch* previous = static_cast<DrawBatch*>(&fCmdBuffer.back());
-        if (previous->fState == state && previous->fBatch->combineIfPossible(batch)) {
+        if (previous->fBatch->combineIfPossible(batch)) {
             return NULL;
         }
     }

-    return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (state, batch, &fBatchTarget));
+    return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
 }

 GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
+                                                  GrInOrderDrawBuffer* iodb,
                                                   const GrPipelineBuilder& pipelineBuilder,
                                                   const GrPathProcessor* pathProc,
                                                   const GrPath* path,
                                                   const GrScissorState& scissorState,
                                                   const GrStencilSettings& stencilSettings) {
     StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
                                                (path, pipelineBuilder.getRenderTarget()));

     sp->fScissor = scissorState;
     sp->fUseHWAA = pipelineBuilder.isHWAntialias();
     sp->fViewMatrix = pathProc->viewMatrix();
     sp->fStencil = stencilSettings;
     return sp;
 }

 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
-                                                  State* state,
+                                                  GrInOrderDrawBuffer* iodb,
                                                   const GrPathProcessor* pathProc,
                                                   const GrPath* path,
-                                                  const GrStencilSettings& stencilSettings) {
-    DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (state, path));
+                                                  const GrStencilSettings& stencilSettings,
+                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
+    // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
+    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
+        return NULL;
+    }
+    DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
     dp->fStencilSettings = stencilSettings;
     return dp;
 }

 GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
-                                                  State* state,
                                                   GrInOrderDrawBuffer* iodb,
                                                   const GrPathProcessor* pathProc,
                                                   const GrPathRange* pathRange,
                                                   const void* indexValues,
                                                   GrDrawTarget::PathIndexType indexType,
                                                   const float transformValues[],
                                                   GrDrawTarget::PathTransformType transformType,
                                                   int count,
                                                   const GrStencilSettings& stencilSettings,
                                                   const GrDrawTarget::PipelineInfo& pipelineInfo) {
     SkASSERT(pathRange);
     SkASSERT(indexValues);
     SkASSERT(transformValues);

+    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
+        return NULL;
+    }
+
     char* savedIndices;
     float* savedTransforms;

     iodb->appendIndicesAndTransforms(indexValues, indexType,
                                      transformValues, transformType,
                                      count, &savedIndices, &savedTransforms);

-    if (!fCmdBuffer.empty() && Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
+    if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
         // The previous command was also DrawPaths. Try to collapse this call into the one
         // before. Note that stenciling all the paths at once, then covering, may not be
         // equivalent to two separate draw calls if there is overlap. Blending won't work,
         // and the combined calls may also cancel each other's winding numbers in some
         // places. For now the winding numbers are only an issue if the fill is even/odd,
         // because DrawPaths is currently only used for glyphs, and glyphs in the same
         // font tend to all wind in the same direction.
         DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
         if (pathRange == previous->pathRange() &&
             indexType == previous->fIndexType &&
             transformType == previous->fTransformType &&
             stencilSettings == previous->fStencilSettings &&
             path_fill_type_is_winding(stencilSettings) &&
-            !pipelineInfo.willBlendWithDst(pathProc) &&
-            previous->fState == state) {
+            !pipelineInfo.willBlendWithDst(pathProc)) {
             const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
             const int xformSize = GrPathRendering::PathTransformSize(transformType);
             if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
                 (0 == xformSize ||
                  &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
                 // Fold this DrawPaths call into the one previous.
                 previous->fCount += count;
                 return NULL;
             }
         }
     }

-    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (state, pathRange));
+    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
     dp->fIndices = savedIndices;
     dp->fIndexType = indexType;
     dp->fTransforms = savedTransforms;
     dp->fTransformType = transformType;
     dp->fCount = count;
     dp->fStencilSettings = stencilSettings;
     return dp;
 }

-GrTargetCommands::Cmd* GrTargetCommands::recordClear(const SkIRect* rect,
+GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
+                                                     const SkIRect* rect,
                                                      GrColor color,
                                                      bool canIgnoreRect,
                                                      GrRenderTarget* renderTarget) {
     SkASSERT(renderTarget);

     SkIRect r;
     if (NULL == rect) {
         // We could do something smart and remove previous draws and clears to
         // the current render target. If we get that smart we have to make sure
         // those draws aren't read before this clear (render-to-texture).
         r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
         rect = &r;
     }
     Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
     GrColorIsPMAssert(color);
     clr->fColor = color;
     clr->fRect = *rect;
     clr->fCanIgnoreRect = canIgnoreRect;
     return clr;
 }

-GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(const SkIRect& rect,
+GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
+                                                                const SkIRect& rect,
                                                                 bool insideClip,
                                                                 GrRenderTarget* renderTarget) {
     SkASSERT(renderTarget);

     ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
     clr->fRect = rect;
     clr->fInsideClip = insideClip;
     return clr;
 }

-GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrRenderTarget* renderTarget) {
+GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
+                                                       GrRenderTarget* renderTarget) {
     SkASSERT(renderTarget);

     Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
     clr->fColor = GrColor_ILLEGAL;
     return clr;
 }

 void GrTargetCommands::reset() {
     fCmdBuffer.reset();
+    fPrevState = NULL;
 }

 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
     if (fCmdBuffer.empty()) {
         return;
     }

+    // Updated every time we find a set state cmd to reflect the current state in the playback
+    // stream.
+    SetState* currentState = NULL;
+
     GrGpu* gpu = iodb->getGpu();

     // Loop over all batches and generate geometry
     CmdBuffer::Iter genIter(fCmdBuffer);
     while (genIter.next()) {
         if (Cmd::kDrawBatch_CmdType == genIter->type()) {
             DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
             fBatchTarget.resetNumberOfDraws();
-            db->fBatch->generateGeometry(&fBatchTarget, db->fState->getPipeline());
+            db->execute(NULL, currentState);
             db->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
+        } else if (Cmd::kSetState_CmdType == genIter->type()) {
+            SetState* ss = reinterpret_cast<SetState*>(genIter.get());
+
+            ss->execute(gpu, currentState);
+            currentState = ss;
         }
     }

     iodb->getVertexAllocPool()->unmap();
     iodb->getIndexAllocPool()->unmap();
     fBatchTarget.preFlush();

     CmdBuffer::Iter iter(fCmdBuffer);

     while (iter.next()) {
         GrGpuTraceMarker newMarker("", -1);
         SkString traceString;
         if (iter->isTraced()) {
             traceString = iodb->getCmdString(iter->markerID());
             newMarker.fMarker = traceString.c_str();
             gpu->addGpuTraceMarker(&newMarker);
         }

-        iter->execute(gpu);
+        if (Cmd::kDrawBatch_CmdType == iter->type()) {
+            DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
+            fBatchTarget.flushNext(db->fBatch->numberOfDraws());
+
+            if (iter->isTraced()) {
+                gpu->removeGpuTraceMarker(&newMarker);
+            }
+            continue;
+        }
+
+        if (Cmd::kSetState_CmdType == iter->type()) {
+            // TODO this is just until NVPR is in batch
+            SetState* ss = reinterpret_cast<SetState*>(iter.get());
+
+            if (ss->fPrimitiveProcessor) {
+                ss->execute(gpu, currentState);
+            }
+            currentState = ss;
+
+        } else {
+            iter->execute(gpu, currentState);
+        }
+
         if (iter->isTraced()) {
             gpu->removeGpuTraceMarker(&newMarker);
         }
     }

     fBatchTarget.postFlush();
 }

-void GrTargetCommands::StencilPath::execute(GrGpu* gpu) {
+void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
     GrGpu::StencilPathState state;
     state.fRenderTarget = fRenderTarget.get();
     state.fScissor = &fScissor;
     state.fStencil = &fStencil;
     state.fUseHWAA = fUseHWAA;
     state.fViewMatrix = &fViewMatrix;

     gpu->stencilPath(this->path(), state);
 }

-void GrTargetCommands::DrawPath::execute(GrGpu* gpu) {
-    if (!fState->fCompiled) {
-        gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
-                              fState->fBatchTracker);
-        fState->fCompiled = true;
-    }
-    DrawArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
-                  &fState->fDesc, &fState->fBatchTracker);
+void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
+    SkASSERT(state);
+    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
+                  &state->fBatchTracker);
     gpu->drawPath(args, this->path(), fStencilSettings);
 }

-void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
-    if (!fState->fCompiled) {
-        gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
-                              fState->fBatchTracker);
-        fState->fCompiled = true;
-    }
-    DrawArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
-                  &fState->fDesc, &fState->fBatchTracker);
+void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
+    SkASSERT(state);
+    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
+                  &state->fBatchTracker);
     gpu->drawPaths(args, this->pathRange(),
                    fIndices, fIndexType,
                    fTransforms, fTransformType,
                    fCount, fStencilSettings);
 }

-void GrTargetCommands::DrawBatch::execute(GrGpu*) {
-    fBatchTarget->flushNext(fBatch->numberOfDraws());
+void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
+    SkASSERT(state);
+    fBatch->generateGeometry(fBatchTarget, state->getPipeline());
 }

-void GrTargetCommands::Clear::execute(GrGpu* gpu) {
+void GrTargetCommands::SetState::execute(GrGpu* gpu, const SetState*) {
+    // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
+    // will only have GrBatch and we can delete this
+    if (fPrimitiveProcessor) {
+        gpu->buildProgramDesc(&fDesc, *fPrimitiveProcessor, *getPipeline(), fBatchTracker);
+    }
+}
+
+void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
     if (GrColor_ILLEGAL == fColor) {
         gpu->discard(this->renderTarget());
     } else {
         gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
     }
 }

-void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
+void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
     gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
 }

-void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
+void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
     gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
 }

-void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) {
+void GrTargetCommands::XferBarrier::execute(GrGpu* gpu, const SetState* state) {
     gpu->xferBarrier(fBarrierType);
 }

 GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrSurface* dst,
                                                            GrSurface* src,
                                                            const SkIRect& srcRect,
                                                            const SkIPoint& dstPoint) {
     CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
     cs->fSrcRect = srcRect;
     cs->fDstPoint = dstPoint;
     return cs;
 }

-void GrTargetCommands::recordXferBarrierIfNecessary(const GrPipeline& pipeline,
-                                                    GrInOrderDrawBuffer* iodb) {
-    const GrXferProcessor& xp = *pipeline.getXferProcessor();
-    GrRenderTarget* rt = pipeline.getRenderTarget();
+bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
+                                                  const GrPrimitiveProcessor* primProc,
+                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
+    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
+    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
+
+    if (ss->getPipeline()->mustSkip()) {
+        fCmdBuffer.pop_back();
+        return false;
+    }
+
+    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
+                                              ss->getPipeline()->getInitBatchTracker());
+
+    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
+        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
+                                                      *ss->fPrimitiveProcessor,
+                                                      ss->fBatchTracker) &&
+        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
+        fCmdBuffer.pop_back();
+    } else {
+        fPrevState = ss;
+        iodb->recordTraceMarkersIfNecessary(ss);
+    }
+
+    this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
+    return true;
+}
+
+bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
+                                                  GrBatch* batch,
+                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
+    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
+    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
+
+    if (ss->getPipeline()->mustSkip()) {
+        fCmdBuffer.pop_back();
+        return false;
+    }
+
+    batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());
+
+    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
+        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
+        fCmdBuffer.pop_back();
+    } else {
+        fPrevState = ss;
+        iodb->recordTraceMarkersIfNecessary(ss);
+    }
+
+    this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
+    return true;
+}
+
+void GrTargetCommands::recordXferBarrierIfNecessary(GrInOrderDrawBuffer* iodb,
+                                                    const GrDrawTarget::PipelineInfo& info) {
+    SkASSERT(fPrevState);
+    const GrXferProcessor& xp = *fPrevState->getXferProcessor();
+    GrRenderTarget* rt = fPrevState->getRenderTarget();

     GrXferBarrierType barrierType;
     if (!xp.willNeedXferBarrier(rt, *iodb->caps(), &barrierType)) {
         return;
     }

     XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, ());
     xb->fBarrierType = barrierType;

     iodb->recordTraceMarkersIfNecessary(xb);
 }
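
For reference only (not part of this patch): a minimal, self-contained C++ sketch of the SetState bookkeeping pattern that the new side of this diff restores in setupPipelineAndShouldDraw(). The CommandBuffer and State types below are simplified stand-ins for GrTargetCommands' command recorder and SetState; their names, members, and container choices are assumptions made for illustration, not Skia code.

    // Illustrative sketch only -- simplified stand-ins for the SetState
    // deduplication shown in setupPipelineAndShouldDraw() above; not Skia code.
    #include <deque>

    struct State {
        int  pipelineKey;   // stands in for the recorded pipeline contents
        bool mustSkip;      // stands in for GrPipeline::mustSkip()
        bool isEqual(const State& that) const { return pipelineKey == that.pipelineKey; }
    };

    class CommandBuffer {
    public:
        // Record a state up front, then drop it again if the draw must be skipped
        // or if it matches the previously recorded state (mirrors the pop_back()
        // calls in setupPipelineAndShouldDraw()).
        bool setupStateAndShouldDraw(const State& s) {
            fCommands.push_back(s);
            State* ss = &fCommands.back();  // deque keeps this address stable across push_back
            if (ss->mustSkip) {
                fCommands.pop_back();
                return false;               // nothing will be drawn
            }
            if (fPrevState && fPrevState->isEqual(*ss)) {
                fCommands.pop_back();       // reuse the previously recorded state
            } else {
                fPrevState = ss;            // this becomes the current state
            }
            return true;
        }

        void reset() {
            fCommands.clear();
            fPrevState = nullptr;           // matches "fPrevState = NULL" in reset()
        }

    private:
        std::deque<State> fCommands;             // stands in for fCmdBuffer
        State*            fPrevState = nullptr;  // stands in for fPrevState
    };

Recording the state eagerly and popping it back off when it turns out to be redundant (mustSkip, or equal to fPrevState) keeps the replayed command stream free of consecutive identical state changes without a separate lookahead pass.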