Chromium Code Reviews

Side by Side Diff: src/gpu/GrInOrderDrawBuffer.cpp

Issue 654863003: Revert of Create a single command buffer for GrInOrderDrawBuffer (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 6 years, 2 months ago
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrInOrderDrawBuffer.h" 8 #include "GrInOrderDrawBuffer.h"
9 9
10 #include "GrBufferAllocPool.h" 10 #include "GrBufferAllocPool.h"
11 #include "GrDrawTargetCaps.h" 11 #include "GrDrawTargetCaps.h"
12 #include "GrTextStrike.h" 12 #include "GrTextStrike.h"
13 #include "GrGpu.h" 13 #include "GrGpu.h"
14 #include "GrTemplates.h" 14 #include "GrTemplates.h"
15 #include "GrTexture.h" 15 #include "GrTexture.h"
16 16
17 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, 17 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
18 GrVertexBufferAllocPool* vertexPool, 18 GrVertexBufferAllocPool* vertexPool,
19 GrIndexBufferAllocPool* indexPool) 19 GrIndexBufferAllocPool* indexPool)
20 : GrDrawTarget(gpu->getContext()) 20 : GrDrawTarget(gpu->getContext())
21 , fCmdBuffer(kCmdBufferInitialSizeInBytes)
22 , fLastState(NULL)
23 , fLastClip(NULL)
24 , fDstGpu(gpu) 21 , fDstGpu(gpu)
25 , fClipSet(true) 22 , fClipSet(true)
26 , fClipProxyState(kUnknown_ClipProxyState) 23 , fClipProxyState(kUnknown_ClipProxyState)
27 , fVertexPool(*vertexPool) 24 , fVertexPool(*vertexPool)
28 , fIndexPool(*indexPool) 25 , fIndexPool(*indexPool)
29 , fFlushing(false) 26 , fFlushing(false)
30 , fDrawID(0) { 27 , fDrawID(0) {
31 28
32 fDstGpu->ref(); 29 fDstGpu->ref();
33 fCaps.reset(SkRef(fDstGpu->caps())); 30 fCaps.reset(SkRef(fDstGpu->caps()));
(...skipping 178 matching lines...)
212 return fClipProxy.contains(devBounds); 209 return fClipProxy.contains(devBounds);
213 } 210 }
214 SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX), 211 SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
215 SkIntToScalar(this->getClip()->fOrigin.fY)}; 212 SkIntToScalar(this->getClip()->fOrigin.fY)};
216 SkRect clipSpaceBounds = devBounds; 213 SkRect clipSpaceBounds = devBounds;
217 clipSpaceBounds.offset(originOffset); 214 clipSpaceBounds.offset(originOffset);
218 return this->getClip()->fClipStack->quickContains(clipSpaceBounds); 215 return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
219 } 216 }
220 217
221 int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) { 218 int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
222 SkASSERT(!fCmdBuffer.empty());
223 SkASSERT(info.isInstanced()); 219 SkASSERT(info.isInstanced());
224 220
225 const GeometrySrcState& geomSrc = this->getGeomSrc(); 221 const GeometrySrcState& geomSrc = this->getGeomSrc();
226 const GrDrawState& drawState = this->getDrawState(); 222 const GrDrawState& drawState = this->getDrawState();
227 223
228 // we only attempt to concat the case when reserved verts are used with a client-specified index 224 // we only attempt to concat the case when reserved verts are used with a client-specified index
229 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated 225 // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
230 // between draws. 226 // between draws.
231 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc || 227 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
232 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) { 228 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
233 return 0; 229 return 0;
234 } 230 }
235 // Check if there is a draw info that is compatible that uses the same VB from the pool and 231 // Check if there is a draw info that is compatible that uses the same VB from the pool and
236 // the same IB 232 // the same IB
237 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { 233 if (kDraw_Cmd != strip_trace_bit(fCmds.back())) {
238 return 0; 234 return 0;
239 } 235 }
240 236
241 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); 237 Draw* draw = &fDraws.back();
242 GeometryPoolState& poolState = fGeoPoolStateStack.back(); 238 GeometryPoolState& poolState = fGeoPoolStateStack.back();
243 const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer; 239 const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;
244 240
245 if (!draw->fInfo.isInstanced() || 241 if (!draw->isInstanced() ||
246 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || 242 draw->verticesPerInstance() != info.verticesPerInstance() ||
247 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || 243 draw->indicesPerInstance() != info.indicesPerInstance() ||
248 draw->vertexBuffer() != vertexBuffer || 244 draw->vertexBuffer() != vertexBuffer ||
249 draw->indexBuffer() != geomSrc.fIndexBuffer) { 245 draw->indexBuffer() != geomSrc.fIndexBuffer) {
250 return 0; 246 return 0;
251 } 247 }
252 // info does not yet account for the offset from the start of the pool's VB while the previous 248 // info does not yet account for the offset from the start of the pool's VB while the previous
253 // draw record does. 249 // draw record does.
254 int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex(); 250 int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
255 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != adjustedStartVertex) { 251 if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
256 return 0; 252 return 0;
257 } 253 }
258 254
259 SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount()); 255 SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());
260 256
261 // how many instances can be concat'ed onto draw given the size of the index buffer 257 // how many instances can be concat'ed onto draw given the size of the index buffer
262 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance(); 258 int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
263 instancesToConcat -= draw->fInfo.instanceCount(); 259 instancesToConcat -= draw->instanceCount();
264 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount()); 260 instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
265 261
266 // update the amount of reserved vertex data actually referenced in draws 262 // update the amount of reserved vertex data actually referenced in draws
267 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() * 263 size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
268 drawState.getVertexStride(); 264 drawState.getVertexStride();
269 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes); 265 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
270 266
271 draw->fInfo.adjustInstanceCount(instancesToConcat); 267 draw->adjustInstanceCount(instancesToConcat);
272 268
273 // update last fGpuCmdMarkers to include any additional trace markers that have been added 269 // update last fGpuCmdMarkers to include any additional trace markers that have been added
274 if (this->getActiveTraceMarkers().count() > 0) { 270 if (this->getActiveTraceMarkers().count() > 0) {
275 if (cmd_has_trace_marker(draw->fType)) { 271 if (cmd_has_trace_marker(fCmds.back())) {
276 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers()); 272 fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
277 } else { 273 } else {
278 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers()); 274 fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
279 draw->fType = add_trace_bit(draw->fType); 275 fCmds.back() = add_trace_bit(fCmds.back());
280 } 276 }
281 } 277 }
282 278
283 return instancesToConcat; 279 return instancesToConcat;
284 } 280 }
285 281
286 class AutoClipReenable { 282 class AutoClipReenable {
287 public: 283 public:
288 AutoClipReenable() : fDrawState(NULL) {} 284 AutoClipReenable() : fDrawState(NULL) {}
289 ~AutoClipReenable() { 285 ~AutoClipReenable() {
(...skipping 16 matching lines...)
306 GeometryPoolState& poolState = fGeoPoolStateStack.back(); 302 GeometryPoolState& poolState = fGeoPoolStateStack.back();
307 const GrDrawState& drawState = this->getDrawState(); 303 const GrDrawState& drawState = this->getDrawState();
308 AutoClipReenable acr; 304 AutoClipReenable acr;
309 305
310 if (drawState.isClipState() && 306 if (drawState.isClipState() &&
311 info.getDevBounds() && 307 info.getDevBounds() &&
312 this->quickInsideClip(*info.getDevBounds())) { 308 this->quickInsideClip(*info.getDevBounds())) {
313 acr.set(this->drawState()); 309 acr.set(this->drawState());
314 } 310 }
315 311
316 this->recordClipIfNecessary(); 312 if (this->needsNewClip()) {
313 this->recordClip();
314 }
317 this->recordStateIfNecessary(); 315 this->recordStateIfNecessary();
318 316
319 const GrVertexBuffer* vb; 317 const GrVertexBuffer* vb;
320 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { 318 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) {
321 vb = this->getGeomSrc().fVertexBuffer; 319 vb = this->getGeomSrc().fVertexBuffer;
322 } else { 320 } else {
323 vb = poolState.fPoolVertexBuffer; 321 vb = poolState.fPoolVertexBuffer;
324 } 322 }
325 323
326 const GrIndexBuffer* ib = NULL; 324 const GrIndexBuffer* ib = NULL;
327 if (info.isIndexed()) { 325 if (info.isIndexed()) {
328 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) { 326 if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) {
329 ib = this->getGeomSrc().fIndexBuffer; 327 ib = this->getGeomSrc().fIndexBuffer;
330 } else { 328 } else {
331 ib = poolState.fPoolIndexBuffer; 329 ib = poolState.fPoolIndexBuffer;
332 } 330 }
333 } 331 }
334 332
335 Draw* draw; 333 Draw* draw;
336 if (info.isInstanced()) { 334 if (info.isInstanced()) {
337 int instancesConcated = this->concatInstancedDraw(info); 335 int instancesConcated = this->concatInstancedDraw(info);
338 if (info.instanceCount() > instancesConcated) { 336 if (info.instanceCount() > instancesConcated) {
339 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib)); 337 draw = this->recordDraw(info, vb, ib);
340 draw->fInfo.adjustInstanceCount(-instancesConcated); 338 draw->adjustInstanceCount(-instancesConcated);
341 } else { 339 } else {
342 return; 340 return;
343 } 341 }
344 } else { 342 } else {
345 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib)); 343 draw = this->recordDraw(info, vb, ib);
346 } 344 }
347 this->recordTraceMarkersIfNecessary();
348 345
349 // Adjust the starting vertex and index when we are using reserved or array sources to 346 // Adjust the starting vertex and index when we are using reserved or array sources to
350 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool. 347 // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
351 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) { 348 if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
352 size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride(); 349 size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
353 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes); 350 poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
354 draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex); 351 draw->adjustStartVertex(poolState.fPoolStartVertex);
355 } 352 }
356 353
357 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) { 354 if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
358 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); 355 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
359 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); 356 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
360 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); 357 draw->adjustStartIndex(poolState.fPoolStartIndex);
361 } 358 }
362 } 359 }
363 360
364 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) { 361 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
365 this->recordClipIfNecessary(); 362 if (this->needsNewClip()) {
363 this->recordClip();
364 }
366 // Only compare the subset of GrDrawState relevant to path stenciling? 365 // Only compare the subset of GrDrawState relevant to path stenciling?
367 this->recordStateIfNecessary(); 366 this->recordStateIfNecessary();
368 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path)); 367 StencilPath* sp = this->recordStencilPath(path);
369 sp->fFill = fill; 368 sp->fFill = fill;
370 this->recordTraceMarkersIfNecessary();
371 } 369 }
372 370
373 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path, 371 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
374 SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) { 372 SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
375 this->recordClipIfNecessary(); 373 if (this->needsNewClip()) {
374 this->recordClip();
375 }
376 // TODO: Only compare the subset of GrDrawState relevant to path covering? 376 // TODO: Only compare the subset of GrDrawState relevant to path covering?
377 this->recordStateIfNecessary(); 377 this->recordStateIfNecessary();
378 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); 378 DrawPath* cp = this->recordDrawPath(path);
379 dp->fFill = fill; 379 cp->fFill = fill;
380 if (dstCopy) { 380 if (dstCopy) {
381 dp->fDstCopy = *dstCopy; 381 cp->fDstCopy = *dstCopy;
382 } 382 }
383 this->recordTraceMarkersIfNecessary();
384 } 383 }
385 384
386 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange, 385 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
387 const uint32_t indices[], int count, 386 const uint32_t indices[], int count,
388 const float transforms[], PathTransformType transformsType, 387 const float transforms[], PathTransformType transformsType,
389 SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) { 388 SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
390 SkASSERT(pathRange); 389 SkASSERT(pathRange);
391 SkASSERT(indices); 390 SkASSERT(indices);
392 SkASSERT(transforms); 391 SkASSERT(transforms);
393 392
394 this->recordClipIfNecessary(); 393 if (this->needsNewClip()) {
394 this->recordClip();
395 }
395 this->recordStateIfNecessary(); 396 this->recordStateIfNecessary();
397 DrawPaths* dp = this->recordDrawPaths(pathRange);
398 dp->fIndices = SkNEW_ARRAY(uint32_t, count); // TODO: Accomplish this without a malloc
399 memcpy(dp->fIndices, indices, sizeof(uint32_t) * count);
400 dp->fCount = count;
396 401
397 int sizeOfIndices = sizeof(uint32_t) * count; 402 const int transformsLength = GrPathRendering::PathTransformSize(transformsType) * count;
398 int sizeOfTransforms = sizeof(float) * count * 403 dp->fTransforms = SkNEW_ARRAY(float, transformsLength);
399 GrPathRendering::PathTransformSize(transformsType); 404 memcpy(dp->fTransforms, transforms, sizeof(float) * transformsLength);
405 dp->fTransformsType = transformsType;
400 406
401 DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange),
402 sizeOfIndices + sizeOfTransforms);
403 memcpy(dp->indices(), indices, sizeOfIndices);
404 dp->fCount = count;
405 memcpy(dp->transforms(), transforms, sizeOfTransforms);
406 dp->fTransformsType = transformsType;
407 dp->fFill = fill; 407 dp->fFill = fill;
408
408 if (dstCopy) { 409 if (dstCopy) {
409 dp->fDstCopy = *dstCopy; 410 dp->fDstCopy = *dstCopy;
410 } 411 }
411
412 this->recordTraceMarkersIfNecessary();
413 } 412 }
414 413
415 void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color, 414 void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
416 bool canIgnoreRect, GrRenderTarget* renderTarget) { 415 bool canIgnoreRect, GrRenderTarget* renderTarget) {
417 SkIRect r; 416 SkIRect r;
418 if (NULL == renderTarget) { 417 if (NULL == renderTarget) {
419 renderTarget = this->drawState()->getRenderTarget(); 418 renderTarget = this->drawState()->getRenderTarget();
420 SkASSERT(renderTarget); 419 SkASSERT(renderTarget);
421 } 420 }
422 if (NULL == rect) { 421 if (NULL == rect) {
423 // We could do something smart and remove previous draws and clears to 422 // We could do something smart and remove previous draws and clears to
424 // the current render target. If we get that smart we have to make sure 423 // the current render target. If we get that smart we have to make sure
425 // those draws aren't read before this clear (render-to-texture). 424 // those draws aren't read before this clear (render-to-texture).
426 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height()); 425 r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
427 rect = &r; 426 rect = &r;
428 } 427 }
429 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); 428 Clear* clr = this->recordClear(renderTarget);
430 GrColorIsPMAssert(color); 429 GrColorIsPMAssert(color);
431 clr->fColor = color; 430 clr->fColor = color;
432 clr->fRect = *rect; 431 clr->fRect = *rect;
433 clr->fCanIgnoreRect = canIgnoreRect; 432 clr->fCanIgnoreRect = canIgnoreRect;
434 this->recordTraceMarkersIfNecessary();
435 } 433 }
436 434
437 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) { 435 void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
438 if (!this->caps()->discardRenderTargetSupport()) { 436 if (!this->caps()->discardRenderTargetSupport()) {
439 return; 437 return;
440 } 438 }
441 if (NULL == renderTarget) { 439 if (NULL == renderTarget) {
442 renderTarget = this->drawState()->getRenderTarget(); 440 renderTarget = this->drawState()->getRenderTarget();
443 SkASSERT(renderTarget); 441 SkASSERT(renderTarget);
444 } 442 }
445 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); 443 Clear* clr = this->recordClear(renderTarget);
446 clr->fColor = GrColor_ILLEGAL; 444 clr->fColor = GrColor_ILLEGAL;
447 this->recordTraceMarkersIfNecessary();
448 } 445 }
449 446
450 void GrInOrderDrawBuffer::reset() { 447 void GrInOrderDrawBuffer::reset() {
451 SkASSERT(1 == fGeoPoolStateStack.count()); 448 SkASSERT(1 == fGeoPoolStateStack.count());
452 this->resetVertexSource(); 449 this->resetVertexSource();
453 this->resetIndexSource(); 450 this->resetIndexSource();
454 451
455 fCmdBuffer.reset(); 452 fCmds.reset();
456 fLastState = NULL; 453 fDraws.reset();
457 fLastClip = NULL; 454 fStencilPaths.reset();
455 fDrawPath.reset();
456 fDrawPaths.reset();
457 fStates.reset();
458 fClears.reset();
458 fVertexPool.reset(); 459 fVertexPool.reset();
459 fIndexPool.reset(); 460 fIndexPool.reset();
461 fClips.reset();
462 fCopySurfaces.reset();
460 fGpuCmdMarkers.reset(); 463 fGpuCmdMarkers.reset();
461 fClipSet = true; 464 fClipSet = true;
462 } 465 }
463 466
464 void GrInOrderDrawBuffer::flush() { 467 void GrInOrderDrawBuffer::flush() {
465 if (fFlushing) { 468 if (fFlushing) {
466 return; 469 return;
467 } 470 }
468 471
469 this->getContext()->getFontCache()->updateTextures(); 472 this->getContext()->getFontCache()->updateTextures();
470 473
471 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc); 474 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
472 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc); 475 SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
473 476
474 if (fCmdBuffer.empty()) { 477 int numCmds = fCmds.count();
478 if (0 == numCmds) {
475 return; 479 return;
476 } 480 }
477 481
478 GrAutoTRestore<bool> flushRestore(&fFlushing); 482 GrAutoTRestore<bool> flushRestore(&fFlushing);
479 fFlushing = true; 483 fFlushing = true;
480 484
481 fVertexPool.unmap(); 485 fVertexPool.unmap();
482 fIndexPool.unmap(); 486 fIndexPool.unmap();
483 487
484 GrDrawTarget::AutoClipRestore acr(fDstGpu); 488 GrDrawTarget::AutoClipRestore acr(fDstGpu);
485 AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit); 489 AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);
486 490
487 GrDrawState* prevDrawState = SkRef(fDstGpu->drawState()); 491 GrDrawState* prevDrawState = SkRef(fDstGpu->drawState());
488 492
489 CmdBuffer::Iter iter(fCmdBuffer); 493 GrClipData clipData;
490 494
491 int currCmdMarker = 0; 495 StateAllocator::Iter stateIter(&fStates);
496 ClipAllocator::Iter clipIter(&fClips);
497 ClearAllocator::Iter clearIter(&fClears);
498 DrawAllocator::Iter drawIter(&fDraws);
499 StencilPathAllocator::Iter stencilPathIter(&fStencilPaths);
500 DrawPathAllocator::Iter drawPathIter(&fDrawPath);
501 DrawPathsAllocator::Iter drawPathsIter(&fDrawPaths);
502 CopySurfaceAllocator::Iter copySurfaceIter(&fCopySurfaces);
503
504 int currCmdMarker = 0;
505
492 fDstGpu->saveActiveTraceMarkers(); 506 fDstGpu->saveActiveTraceMarkers();
493 507 for (int c = 0; c < numCmds; ++c) {
494 while (iter.next()) {
495 GrGpuTraceMarker newMarker("", -1); 508 GrGpuTraceMarker newMarker("", -1);
496 SkString traceString; 509 SkString traceString;
497 if (cmd_has_trace_marker(iter->fType)) { 510 if (cmd_has_trace_marker(fCmds[c])) {
498 traceString = fGpuCmdMarkers[currCmdMarker].toString(); 511 traceString = fGpuCmdMarkers[currCmdMarker].toString();
499 newMarker.fMarker = traceString.c_str(); 512 newMarker.fMarker = traceString.c_str();
500 fDstGpu->addGpuTraceMarker(&newMarker); 513 fDstGpu->addGpuTraceMarker(&newMarker);
501 ++currCmdMarker; 514 ++currCmdMarker;
502 } 515 }
503 516 switch (strip_trace_bit(fCmds[c])) {
504 SkDEBUGCODE(bool isDraw = kDraw_Cmd == strip_trace_bit(iter->fType) || 517 case kDraw_Cmd: {
505 kStencilPath_Cmd == strip_trace_bit(iter->fType) || 518 SkASSERT(fDstGpu->drawState() != prevDrawState);
506 kDrawPath_Cmd == strip_trace_bit(iter->fType) || 519 SkAssertResult(drawIter.next());
507 kDrawPaths_Cmd == strip_trace_bit(iter->fType)); 520 fDstGpu->setVertexSourceToBuffer(drawIter->vertexBuffer());
508 SkASSERT(!isDraw || fDstGpu->drawState() != prevDrawState); 521 if (drawIter->isIndexed()) {
509 522 fDstGpu->setIndexSourceToBuffer(drawIter->indexBuffer());
510 iter->execute(fDstGpu); 523 }
511 524 fDstGpu->executeDraw(*drawIter);
512 if (cmd_has_trace_marker(iter->fType)) { 525 break;
526 }
527 case kStencilPath_Cmd: {
528 SkASSERT(fDstGpu->drawState() != prevDrawState);
529 SkAssertResult(stencilPathIter.next());
530 fDstGpu->stencilPath(stencilPathIter->path(), stencilPathIter->fFill);
531 break;
532 }
533 case kDrawPath_Cmd: {
534 SkASSERT(fDstGpu->drawState() != prevDrawState);
535 SkAssertResult(drawPathIter.next());
536 fDstGpu->executeDrawPath(drawPathIter->path(), drawPathIter->fFill,
537 drawPathIter->fDstCopy.texture() ?
538 &drawPathIter->fDstCopy :
539 NULL);
540 break;
541 }
542 case kDrawPaths_Cmd: {
543 SkASSERT(fDstGpu->drawState() != prevDrawState);
544 SkAssertResult(drawPathsIter.next());
545 const GrDeviceCoordTexture* dstCopy =
546 drawPathsIter->fDstCopy.texture() ? &drawPathsIter->fDstCopy : NULL;
547 fDstGpu->executeDrawPaths(drawPathsIter->pathRange(),
548 drawPathsIter->fIndices,
549 drawPathsIter->fCount,
550 drawPathsIter->fTransforms,
551 drawPathsIter->fTransformsType,
552 drawPathsIter->fFill,
553 dstCopy);
554 break;
555 }
556 case kSetState_Cmd:
557 SkAssertResult(stateIter.next());
558 fDstGpu->setDrawState(stateIter.get());
559 break;
560 case kSetClip_Cmd:
561 SkAssertResult(clipIter.next());
562 clipData.fClipStack = &clipIter->fStack;
563 clipData.fOrigin = clipIter->fOrigin;
564 fDstGpu->setClip(&clipData);
565 break;
566 case kClear_Cmd:
567 SkAssertResult(clearIter.next());
568 if (GrColor_ILLEGAL == clearIter->fColor) {
569 fDstGpu->discard(clearIter->renderTarget());
570 } else {
571 fDstGpu->clear(&clearIter->fRect,
572 clearIter->fColor,
573 clearIter->fCanIgnoreRect,
574 clearIter->renderTarget());
575 }
576 break;
577 case kCopySurface_Cmd:
578 SkAssertResult(copySurfaceIter.next());
579 fDstGpu->copySurface(copySurfaceIter->dst(),
580 copySurfaceIter->src(),
581 copySurfaceIter->fSrcRect,
582 copySurfaceIter->fDstPoint);
583 break;
584 }
585 if (cmd_has_trace_marker(fCmds[c])) {
513 fDstGpu->removeGpuTraceMarker(&newMarker); 586 fDstGpu->removeGpuTraceMarker(&newMarker);
514 } 587 }
515 } 588 }
589 fDstGpu->restoreActiveTraceMarkers();
590 // we should have consumed all the states, clips, etc.
591 SkASSERT(!stateIter.next());
592 SkASSERT(!clipIter.next());
593 SkASSERT(!clearIter.next());
594 SkASSERT(!drawIter.next());
595 SkASSERT(!copySurfaceIter.next());
596 SkASSERT(!stencilPathIter.next());
597 SkASSERT(!drawPathIter.next());
598 SkASSERT(!drawPathsIter.next());
516 599
517 fDstGpu->restoreActiveTraceMarkers();
518 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); 600 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
519 601
520 fDstGpu->setDrawState(prevDrawState); 602 fDstGpu->setDrawState(prevDrawState);
521 prevDrawState->unref(); 603 prevDrawState->unref();
522 this->reset(); 604 this->reset();
523 ++fDrawID; 605 ++fDrawID;
524 } 606 }
525 607
526 void GrInOrderDrawBuffer::Draw::execute(GrDrawTarget* gpu) {
527 gpu->setVertexSourceToBuffer(this->vertexBuffer());
528 if (fInfo.isIndexed()) {
529 gpu->setIndexSourceToBuffer(this->indexBuffer());
530 }
531 gpu->executeDraw(fInfo);
532 }
533
534 void GrInOrderDrawBuffer::StencilPath::execute(GrDrawTarget* gpu) {
535 gpu->stencilPath(this->path(), fFill);
536 }
537
538 void GrInOrderDrawBuffer::DrawPath::execute(GrDrawTarget* gpu) {
539 gpu->executeDrawPath(this->path(), fFill, fDstCopy.texture() ? &fDstCopy : NULL);
540 }
541
542 void GrInOrderDrawBuffer::DrawPaths::execute(GrDrawTarget* gpu) {
543 gpu->executeDrawPaths(this->pathRange(), this->indices(), fCount, this->transforms(),
544 fTransformsType, fFill, fDstCopy.texture() ? &fDstCopy : NULL);
545 }
546
547 void GrInOrderDrawBuffer::SetState::execute(GrDrawTarget* gpu) {
548 gpu->setDrawState(&fState);
549 }
550
551 void GrInOrderDrawBuffer::SetClip::execute(GrDrawTarget* gpu) {
552 // Our fClipData is referenced directly, so we must remain alive for the entire
553 // duration of the flush (after which the gpu's previous clip is restored).
554 gpu->setClip(&fClipData);
555 }
556
557 void GrInOrderDrawBuffer::Clear::execute(GrDrawTarget* gpu) {
558 if (GrColor_ILLEGAL == fColor) {
559 gpu->discard(this->renderTarget());
560 } else {
561 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
562 }
563 }
564
565 void GrInOrderDrawBuffer::CopySurface::execute(GrDrawTarget* gpu) {
566 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
567 }
568
569 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, 608 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
570 GrSurface* src, 609 GrSurface* src,
571 const SkIRect& srcRect, 610 const SkIRect& srcRect,
572 const SkIPoint& dstPoint) { 611 const SkIPoint& dstPoint) {
573 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) { 612 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
574 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); 613 CopySurface* cs = this->recordCopySurface(dst, src);
575 cs->fSrcRect = srcRect; 614 cs->fSrcRect = srcRect;
576 cs->fDstPoint = dstPoint; 615 cs->fDstPoint = dstPoint;
577 this->recordTraceMarkersIfNecessary();
578 return true; 616 return true;
579 } else { 617 } else {
580 return false; 618 return false;
581 } 619 }
582 } 620 }
583 621
584 bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst, 622 bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
585 GrSurface* src, 623 GrSurface* src,
586 const SkIRect& srcRect, 624 const SkIRect& srcRect,
587 const SkIPoint& dstPoint) { 625 const SkIPoint& dstPoint) {
(...skipping 199 matching lines...)
787 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; 825 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
788 } 826 }
789 if (kReserved_GeometrySrcType == restoredState.fIndexSrc || 827 if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
790 kArray_GeometrySrcType == restoredState.fIndexSrc) { 828 kArray_GeometrySrcType == restoredState.fIndexSrc) {
791 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * 829 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
792 restoredState.fIndexCount; 830 restoredState.fIndexCount;
793 } 831 }
794 } 832 }
795 833
796 void GrInOrderDrawBuffer::recordStateIfNecessary() { 834 void GrInOrderDrawBuffer::recordStateIfNecessary() {
797 if (!fLastState) { 835 if (fStates.empty()) {
798 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState())); 836 this->convertDrawStateToPendingExec(&fStates.push_back(this->getDrawState()));
799 fLastState = &ss->fState; 837 this->addToCmdBuffer(kSetState_Cmd);
800 this->convertDrawStateToPendingExec(fLastState);
801 this->recordTraceMarkersIfNecessary();
802 return; 838 return;
803 } 839 }
804 const GrDrawState& curr = this->getDrawState(); 840 const GrDrawState& curr = this->getDrawState();
805 switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) { 841 GrDrawState& prev = fStates.back();
842 switch (GrDrawState::CombineIfPossible(prev, curr, *this->caps())) {
806 case GrDrawState::kIncompatible_CombinedState: 843 case GrDrawState::kIncompatible_CombinedState:
807 fLastState = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr))->fState; 844 this->convertDrawStateToPendingExec(&fStates.push_back(curr));
808 this->convertDrawStateToPendingExec(fLastState); 845 this->addToCmdBuffer(kSetState_Cmd);
809 this->recordTraceMarkersIfNecessary();
810 break; 846 break;
811 case GrDrawState::kA_CombinedState: 847 case GrDrawState::kA_CombinedState:
812 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA. 848 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA.
813 break; 849 break;
814 case GrDrawState::kB_CombinedState: 850 case GrDrawState::kB_CombinedState:
815 // prev has already been converted to pending execution. That is a one-way ticket. 851 // prev has already been converted to pending execution. That is a one-way ticket.
816 // So here we just destruct the previous state and reinit with a new copy of curr. 852 // So here we just delete prev and push back a new copy of curr. Note that this
817 // Note that this goes away when we move GrIODB over to taking optimized snapshots 853 // goes away when we move GrIODB over to taking optimized snapshots of draw states.
818 // of draw states. 854 fStates.pop_back();
819 fLastState->~GrDrawState(); 855 this->convertDrawStateToPendingExec(&fStates.push_back(curr));
820 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr));
821 this->convertDrawStateToPendingExec(fLastState);
822 break; 856 break;
823 } 857 }
824 } 858 }
825 859
826 void GrInOrderDrawBuffer::recordClipIfNecessary() { 860 bool GrInOrderDrawBuffer::needsNewClip() const {
827 if (this->getDrawState().isClipState() && 861 if (this->getDrawState().isClipState()) {
828 fClipSet && 862 if (fClipSet &&
829 (!fLastClip || *fLastClip != *this->getClip())) { 863 (fClips.empty() ||
830 fLastClip = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetClip, (this->getClip()))->fClipData; 864 fClips.back().fStack != *this->getClip()->fClipStack ||
831 this->recordTraceMarkersIfNecessary(); 865 fClips.back().fOrigin != this->getClip()->fOrigin)) {
832 fClipSet = false; 866 return true;
867 }
868 }
869 return false;
870 }
871
872 void GrInOrderDrawBuffer::addToCmdBuffer(uint8_t cmd) {
873 SkASSERT(!cmd_has_trace_marker(cmd));
874 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
875 if (activeTraceMarkers.count() > 0) {
876 fCmds.push_back(add_trace_bit(cmd));
877 fGpuCmdMarkers.push_back(activeTraceMarkers);
878 } else {
879 fCmds.push_back(cmd);
833 } 880 }
834 } 881 }
835 882
836 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { 883 void GrInOrderDrawBuffer::recordClip() {
837 SkASSERT(!fCmdBuffer.empty()); 884 fClips.push_back().fStack = *this->getClip()->fClipStack;
838 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); 885 fClips.back().fOrigin = this->getClip()->fOrigin;
839 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); 886 fClipSet = false;
840 if (activeTraceMarkers.count() > 0) { 887 this->addToCmdBuffer(kSetClip_Cmd);
841 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); 888 }
842 fGpuCmdMarkers.push_back(activeTraceMarkers); 889
843 } 890 GrInOrderDrawBuffer::Draw* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info,
891 const GrVertexBuffer* vb,
892 const GrIndexBuffer* ib) {
893 this->addToCmdBuffer(kDraw_Cmd);
894 return GrNEW_APPEND_TO_ALLOCATOR(&fDraws, Draw, (info, vb, ib));
895 }
896
897 GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath(const GrPath* path) {
898 this->addToCmdBuffer(kStencilPath_Cmd);
899 return GrNEW_APPEND_TO_ALLOCATOR(&fStencilPaths, StencilPath, (path));
900 }
901
902 GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath(const GrPath* path) {
903 this->addToCmdBuffer(kDrawPath_Cmd);
904 return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPath, DrawPath, (path));
905 }
906
907 GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths(const GrPathRange* pathRange) {
908 this->addToCmdBuffer(kDrawPaths_Cmd);
909 return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPaths, DrawPaths, (pathRange));
910 }
911
912 GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear(GrRenderTarget* rt) {
913 this->addToCmdBuffer(kClear_Cmd);
914 return GrNEW_APPEND_TO_ALLOCATOR(&fClears, Clear, (rt));
915 }
916
917 GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface(GrSurface* dst,
918 GrSurface* src) {
919 this->addToCmdBuffer(kCopySurface_Cmd);
920 return GrNEW_APPEND_TO_ALLOCATOR(&fCopySurfaces, CopySurface, (dst, src));
844 } 921 }
845 922
846 void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) { 923 void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
847 INHERITED::clipWillBeSet(newClipData); 924 INHERITED::clipWillBeSet(newClipData);
848 fClipSet = true; 925 fClipSet = true;
849 fClipProxyState = kUnknown_ClipProxyState; 926 fClipProxyState = kUnknown_ClipProxyState;
850 } 927 }