| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| (...skipping 228 matching lines...) |
| 239 | 239 |
| 240 return instancesToConcat; | 240 return instancesToConcat; |
| 241 } | 241 } |
| 242 | 242 |
| 243 void GrInOrderDrawBuffer::onDraw(const DrawInfo& info, | 243 void GrInOrderDrawBuffer::onDraw(const DrawInfo& info, |
| 244 const GrClipMaskManager::ScissorState& scissorState) { | 244 const GrClipMaskManager::ScissorState& scissorState) { |
| 245 | 245 |
| 246 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | 246 GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
| 247 const GrDrawState& drawState = this->getDrawState(); | 247 const GrDrawState& drawState = this->getDrawState(); |
| 248 | 248 |
| 249 this->recordStateIfNecessary(); | 249 this->recordStateIfNecessary(GrGpu::PrimTypeToDrawType(info.primitiveType()), |
| 250 info.getDstCopy()); |
| 250 | 251 |
| 251 const GrVertexBuffer* vb; | 252 const GrVertexBuffer* vb; |
| 252 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { | 253 if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) { |
| 253 vb = this->getGeomSrc().fVertexBuffer; | 254 vb = this->getGeomSrc().fVertexBuffer; |
| 254 } else { | 255 } else { |
| 255 vb = poolState.fPoolVertexBuffer; | 256 vb = poolState.fPoolVertexBuffer; |
| 256 } | 257 } |
| 257 | 258 |
| 258 const GrIndexBuffer* ib = NULL; | 259 const GrIndexBuffer* ib = NULL; |
| 259 if (info.isIndexed()) { | 260 if (info.isIndexed()) { |
| (...skipping 30 matching lines...) |
| 290 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); | 291 size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t); |
| 291 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); | 292 poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes); |
| 292 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); | 293 draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex); |
| 293 } | 294 } |
| 294 } | 295 } |
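
A note on the pool bookkeeping in onDraw() above: draws that source from the index pool are recorded against a shared buffer, so the recorded DrawInfo is rebased by the pool's start index, and a high-water mark of referenced pool bytes is kept so the unused tail can be reclaimed later. A standalone sketch of that arithmetic (hypothetical values; plain C++ standing in for the Skia types):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    int main() {
        // Hypothetical numbers: the pool satisfied this reservation starting
        // at index 512 of a shared uint16_t index buffer.
        int poolStartIndex = 512;   // poolState.fPoolStartIndex
        int startIndex = 6;         // info.startIndex()
        int indexCount = 36;        // info.indexCount()

        // High-water mark of pool bytes any draw has referenced so far.
        size_t usedPoolIndexBytes = 0;
        size_t bytes = (indexCount + startIndex) * sizeof(uint16_t);
        usedPoolIndexBytes = std::max(usedPoolIndexBytes, bytes);  // SkTMax above

        // adjustStartIndex(): the replayed draw starts at 512 + 6 = 518.
        int replayStartIndex = poolStartIndex + startIndex;
        return replayStartIndex == 518 ? 0 : 1;
    }
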
| 295 | 296 |
| 296 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, | 297 void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, |
| 297 const GrClipMaskManager::ScissorState& scissorState, | 298 const GrClipMaskManager::ScissorState& scissorState, |
| 298 const GrStencilSettings& stencilSettings) { | 299 const GrStencilSettings& stencilSettings) { |
| 299 // Only compare the subset of GrDrawState relevant to path stenciling? | 300 // Only compare the subset of GrDrawState relevant to path stenciling? |
| 300 this->recordStateIfNecessary(); | 301 this->recordStateIfNecessary(GrGpu::kStencilPath_DrawType, NULL); |
| 301 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path)); | 302 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path)); |
| 302 sp->fScissorState = scissorState; | 303 sp->fScissorState = scissorState; |
| 303 sp->fStencilSettings = stencilSettings; | 304 sp->fStencilSettings = stencilSettings; |
| 304 this->recordTraceMarkersIfNecessary(); | 305 this->recordTraceMarkersIfNecessary(); |
| 305 } | 306 } |
| 306 | 307 |
| 307 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path, | 308 void GrInOrderDrawBuffer::onDrawPath(const GrPath* path, |
| 308 const GrClipMaskManager::ScissorState& scissorState, | 309 const GrClipMaskManager::ScissorState& scissorState, |
| 309 const GrStencilSettings& stencilSettings, | 310 const GrStencilSettings& stencilSettings, |
| 310 const GrDeviceCoordTexture* dstCopy) { | 311 const GrDeviceCoordTexture* dstCopy) { |
| 311 // TODO: Only compare the subset of GrDrawState relevant to path covering? | 312 // TODO: Only compare the subset of GrDrawState relevant to path covering? |
| 312 this->recordStateIfNecessary(); | 313 this->recordStateIfNecessary(GrGpu::kDrawPath_DrawType, dstCopy); |
| 313 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); | 314 DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path)); |
| 314 if (dstCopy) { | 315 if (dstCopy) { |
| 315 dp->fDstCopy = *dstCopy; | 316 dp->fDstCopy = *dstCopy; |
| 316 } | 317 } |
| 317 dp->fScissorState = scissorState; | 318 dp->fScissorState = scissorState; |
| 318 dp->fStencilSettings = stencilSettings; | 319 dp->fStencilSettings = stencilSettings; |
| 319 this->recordTraceMarkersIfNecessary(); | 320 this->recordTraceMarkersIfNecessary(); |
| 320 } | 321 } |
| 321 | 322 |
| 322 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange, | 323 void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange, |
| 323 const uint32_t indices[], | 324 const uint32_t indices[], |
| 324 int count, | 325 int count, |
| 325 const float transforms[], | 326 const float transforms[], |
| 326 PathTransformType transformsType, | 327 PathTransformType transformsType, |
| 327 const GrClipMaskManager::ScissorState& scissorState, | 328 const GrClipMaskManager::ScissorState& scissorState, |
| 328 const GrStencilSettings& stencilSettings, | 329 const GrStencilSettings& stencilSettings, |
| 329 const GrDeviceCoordTexture* dstCopy) { | 330 const GrDeviceCoordTexture* dstCopy) { |
| 330 SkASSERT(pathRange); | 331 SkASSERT(pathRange); |
| 331 SkASSERT(indices); | 332 SkASSERT(indices); |
| 332 SkASSERT(transforms); | 333 SkASSERT(transforms); |
| 333 | 334 |
| 334 this->recordStateIfNecessary(); | 335 this->recordStateIfNecessary(GrGpu::kDrawPaths_DrawType, dstCopy); |
| 335 | 336 |
| 336 int sizeOfIndices = sizeof(uint32_t) * count; | 337 int sizeOfIndices = sizeof(uint32_t) * count; |
| 337 int sizeOfTransforms = sizeof(float) * count * | 338 int sizeOfTransforms = sizeof(float) * count * |
| 338 GrPathRendering::PathTransformSize(transformsType); | 339 GrPathRendering::PathTransformSize(transformsType); |
| 339 | 340 |
| 340 DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange), | 341 DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange), |
| 341 sizeOfIndices + sizeOfTransforms); | 342 sizeOfIndices + sizeOfTransforms); |
| 342 memcpy(dp->indices(), indices, sizeOfIndices); | 343 memcpy(dp->indices(), indices, sizeOfIndices); |
| 343 dp->fCount = count; | 344 dp->fCount = count; |
| 344 memcpy(dp->transforms(), transforms, sizeOfTransforms); | 345 memcpy(dp->transforms(), transforms, sizeOfTransforms); |
| (...skipping 77 matching lines...) |
| 422 if (fCmdBuffer.empty()) { | 423 if (fCmdBuffer.empty()) { |
| 423 return; | 424 return; |
| 424 } | 425 } |
| 425 | 426 |
| 426 GrAutoTRestore<bool> flushRestore(&fFlushing); | 427 GrAutoTRestore<bool> flushRestore(&fFlushing); |
| 427 fFlushing = true; | 428 fFlushing = true; |
| 428 | 429 |
| 429 fVertexPool.unmap(); | 430 fVertexPool.unmap(); |
| 430 fIndexPool.unmap(); | 431 fIndexPool.unmap(); |
| 431 | 432 |
| 432 GrDrawState* prevDrawState = SkRef(fDstGpu->drawState()); | |
| 433 | |
| 434 CmdBuffer::Iter iter(fCmdBuffer); | 433 CmdBuffer::Iter iter(fCmdBuffer); |
| 435 | 434 |
| 436 int currCmdMarker = 0; | 435 int currCmdMarker = 0; |
| 437 fDstGpu->saveActiveTraceMarkers(); | 436 fDstGpu->saveActiveTraceMarkers(); |
| 438 | 437 |
| 439 while (iter.next()) { | 438 while (iter.next()) { |
| 440 GrGpuTraceMarker newMarker("", -1); | 439 GrGpuTraceMarker newMarker("", -1); |
| 441 SkString traceString; | 440 SkString traceString; |
| 442 if (cmd_has_trace_marker(iter->fType)) { | 441 if (cmd_has_trace_marker(iter->fType)) { |
| 443 traceString = fGpuCmdMarkers[currCmdMarker].toString(); | 442 traceString = fGpuCmdMarkers[currCmdMarker].toString(); |
| 444 newMarker.fMarker = traceString.c_str(); | 443 newMarker.fMarker = traceString.c_str(); |
| 445 fDstGpu->addGpuTraceMarker(&newMarker); | 444 fDstGpu->addGpuTraceMarker(&newMarker); |
| 446 ++currCmdMarker; | 445 ++currCmdMarker; |
| 447 } | 446 } |
| 448 | 447 |
| 449 SkDEBUGCODE(bool isDraw = kDraw_Cmd == strip_trace_bit(iter->fType) || | 448 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { |
| 450 kStencilPath_Cmd == strip_trace_bit(iter->fType) || | 449 const SetState* ss = reinterpret_cast<const SetState*>(iter.get()); |
| 451 kDrawPath_Cmd == strip_trace_bit(iter->fType) || | 450 fCurrentOptDrawState.reset(GrOptDrawState::Create(ss->fState, |
| 452 kDrawPaths_Cmd == strip_trace_bit(iter->fType)); | 451 fDstGpu, |
| 453 SkASSERT(!isDraw || fDstGpu->drawState() != prevDrawState); | 452 &ss->fDstCopy, |
| 454 | 453 ss->fDrawType)); |
| 455 iter->execute(fDstGpu); | 454 } else { |
| 455 iter->execute(fDstGpu, fCurrentOptDrawState.get()); |
| 456 } |
| 456 | 457 |
| 457 if (cmd_has_trace_marker(iter->fType)) { | 458 if (cmd_has_trace_marker(iter->fType)) { |
| 458 fDstGpu->removeGpuTraceMarker(&newMarker); | 459 fDstGpu->removeGpuTraceMarker(&newMarker); |
| 459 } | 460 } |
| 460 } | 461 } |
| 461 | 462 |
| 462 fDstGpu->restoreActiveTraceMarkers(); | 463 fDstGpu->restoreActiveTraceMarkers(); |
| 463 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); | 464 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); |
| 464 | 465 |
| 465 fDstGpu->setDrawState(prevDrawState); | |
| 466 prevDrawState->unref(); | |
| 467 this->reset(); | 466 this->reset(); |
| 468 ++fDrawID; | 467 ++fDrawID; |
| 469 } | 468 } |
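
The reworked flush() above is the heart of this change: each recorded SetState now produces a GrOptDrawState snapshot via GrOptDrawState::Create(), and every subsequent command executes against that snapshot, replacing the old save/restore of fDstGpu's mutable draw state. The execute() overrides below guard against a NULL snapshot. A standalone sketch of the pattern (hypothetical types, not the Skia API):

    #include <cstdio>
    #include <memory>
    #include <vector>

    // A state command refreshes a cached, optimized snapshot; every other
    // command runs against whatever snapshot is current.
    struct OptState { int id; };

    struct Cmd {
        bool isSetState;
        int stateId;
    };

    int main() {
        std::vector<Cmd> cmds = {{true, 1}, {false, 0}, {false, 0}, {true, 2}, {false, 0}};
        std::unique_ptr<OptState> current;  // plays the role of fCurrentOptDrawState
        for (const Cmd& c : cmds) {
            if (c.isSetState) {
                current.reset(new OptState{c.stateId});  // optimize once per state change
            } else if (current) {
                std::printf("draw with state %d\n", current->id);  // reuse the snapshot
            }
        }
        return 0;
    }
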
| 470 | 469 |
| 471 void GrInOrderDrawBuffer::Draw::execute(GrGpu* gpu) { | 470 void GrInOrderDrawBuffer::Draw::execute(GrGpu* gpu, const GrOptDrawState* optState) { |
| 472 gpu->setVertexSourceToBuffer(this->vertexBuffer()); | 471 if (!optState) { |
| 472 return; |
| 473 } |
| 474 gpu->setVertexSourceToBuffer(this->vertexBuffer(), optState->getVertexStride()); |
| 473 if (fInfo.isIndexed()) { | 475 if (fInfo.isIndexed()) { |
| 474 gpu->setIndexSourceToBuffer(this->indexBuffer()); | 476 gpu->setIndexSourceToBuffer(this->indexBuffer()); |
| 475 } | 477 } |
| 476 gpu->draw(fInfo, fScissorState); | 478 gpu->draw(*optState, fInfo, fScissorState); |
| 477 } | 479 } |
| 478 | 480 |
| 479 void GrInOrderDrawBuffer::StencilPath::execute(GrGpu* gpu) { | 481 void GrInOrderDrawBuffer::StencilPath::execute(GrGpu* gpu, const GrOptDrawState* optState) { |
| 480 gpu->stencilPath(this->path(), fScissorState, fStencilSettings); | 482 if (!optState) { |
| 483 return; |
| 484 } |
| 485 gpu->stencilPath(*optState, this->path(), fScissorState, fStencilSettings); |
| 481 } | 486 } |
| 482 | 487 |
| 483 void GrInOrderDrawBuffer::DrawPath::execute(GrGpu* gpu) { | 488 void GrInOrderDrawBuffer::DrawPath::execute(GrGpu* gpu, const GrOptDrawState* optState) { |
| 484 gpu->drawPath(this->path(), fScissorState, fStencilSettings, | 489 if (!optState) { |
| 485 fDstCopy.texture() ? &fDstCopy : NULL); | 490 return; |
| 491 } |
| 492 gpu->drawPath(*optState, this->path(), fScissorState, fStencilSettings, |
| 493 fDstCopy.texture() ? &fDstCopy : NULL); |
| 486 } | 494 } |
| 487 | 495 |
| 488 void GrInOrderDrawBuffer::DrawPaths::execute(GrGpu* gpu) { | 496 void GrInOrderDrawBuffer::DrawPaths::execute(GrGpu* gpu, const GrOptDrawState* optState) { |
| 489 gpu->drawPaths(this->pathRange(), this->indices(), fCount, this->transforms(), | 497 if (!optState) { |
| 490 fTransformsType, fScissorState, fStencilSettings, | 498 return; |
| 491 fDstCopy.texture() ? &fDstCopy : NULL); | 499 } |
| 500 gpu->drawPaths(*optState, this->pathRange(), this->indices(), fCount, this->transforms(), |
| 501 fTransformsType, fScissorState, fStencilSettings, |
| 502 fDstCopy.texture() ? &fDstCopy : NULL); |
| 492 } | 503 } |
| 493 | 504 |
| 494 void GrInOrderDrawBuffer::SetState::execute(GrGpu* gpu) { | 505 void GrInOrderDrawBuffer::SetState::execute(GrGpu* gpu, const GrOptDrawState*) { |
| 495 gpu->setDrawState(&fState); | |
| 496 } | 506 } |
| 497 | 507 |
| 498 void GrInOrderDrawBuffer::Clear::execute(GrGpu* gpu) { | 508 void GrInOrderDrawBuffer::Clear::execute(GrGpu* gpu, const GrOptDrawState*) { |
| 499 if (GrColor_ILLEGAL == fColor) { | 509 if (GrColor_ILLEGAL == fColor) { |
| 500 gpu->discard(this->renderTarget()); | 510 gpu->discard(this->renderTarget()); |
| 501 } else { | 511 } else { |
| 502 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); | 512 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); |
| 503 } | 513 } |
| 504 } | 514 } |
| 505 | 515 |
| 506 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrGpu* gpu) { | 516 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrGpu* gpu, const GrOptDrawState*) { |
| 507 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); | 517 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); |
| 508 } | 518 } |
| 509 | 519 |
| 510 void GrInOrderDrawBuffer::CopySurface::execute(GrGpu* gpu) { | 520 void GrInOrderDrawBuffer::CopySurface::execute(GrGpu* gpu, const GrOptDrawState*) { |
| 511 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 521 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
| 512 } | 522 } |
| 513 | 523 |
| 514 bool GrInOrderDrawBuffer::copySurface(GrSurface* dst, | 524 bool GrInOrderDrawBuffer::copySurface(GrSurface* dst, |
| 515 GrSurface* src, | 525 GrSurface* src, |
| 516 const SkIRect& srcRect, | 526 const SkIRect& srcRect, |
| 517 const SkIPoint& dstPoint) { | 527 const SkIPoint& dstPoint) { |
| 518 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) { | 528 if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) { |
| 519 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | 529 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); |
| 520 cs->fSrcRect = srcRect; | 530 cs->fSrcRect = srcRect; |
| (...skipping 112 matching lines...) |
| 633 | 643 |
| 634 // If we get a release vertex space call then our current source should either be reserved | 644 // If we get a release vertex space call then our current source should either be reserved |
| 635 // or array (which we copied into reserved space). | 645 // or array (which we copied into reserved space). |
| 636 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc); | 646 SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc); |
| 637 | 647 |
| 638 // When the caller reserved vertex buffer space we gave it back a pointer | 648 // When the caller reserved vertex buffer space we gave it back a pointer |
| 639 // provided by the vertex buffer pool. At each draw we tracked the largest | 649 // provided by the vertex buffer pool. At each draw we tracked the largest |
| 640 // offset into the pool's pointer that was referenced. Now we return to the | 650 // offset into the pool's pointer that was referenced. Now we return to the |
| 641 // pool any portion at the tail of the allocation that no draw referenced. | 651 // pool any portion at the tail of the allocation that no draw referenced. |
| 642 size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount; | 652 size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount; |
| 643 fVertexPool.putBack(reservedVertexBytes - | 653 fVertexPool.putBack(reservedVertexBytes - poolState.fUsedPoolVertexBytes); |
| 644 poolState.fUsedPoolVertexBytes); | |
| 645 poolState.fUsedPoolVertexBytes = 0; | 654 poolState.fUsedPoolVertexBytes = 0; |
| 646 poolState.fPoolVertexBuffer = NULL; | 655 poolState.fPoolVertexBuffer = NULL; |
| 647 poolState.fPoolStartVertex = 0; | 656 poolState.fPoolStartVertex = 0; |
| 648 } | 657 } |
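
Putting numbers to the comment above (hypothetical values): reserving 100 vertices at a 16-byte stride takes 1600 bytes from the pool; if draws only ever referenced the first 40 vertices, the 960-byte tail goes back. A standalone sketch:

    #include <cstddef>

    int main() {
        size_t vertexSize = 16, vertexCount = 100;              // geoSrc fields
        size_t reservedVertexBytes = vertexSize * vertexCount;  // 1600
        size_t usedPoolVertexBytes = vertexSize * 40;           // tracked via SkTMax per draw
        // The unused tail is returned to the pool for the next reservation.
        size_t putBackBytes = reservedVertexBytes - usedPoolVertexBytes;
        return putBackBytes == 960 ? 0 : 1;
    }
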
| 649 | 658 |
| 650 void GrInOrderDrawBuffer::releaseReservedIndexSpace() { | 659 void GrInOrderDrawBuffer::releaseReservedIndexSpace() { |
| 651 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | 660 GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
| 652 const GeometrySrcState& geoSrc = this->getGeomSrc(); | 661 const GeometrySrcState& geoSrc = this->getGeomSrc(); |
| 653 | 662 |
| 654 // If we get a release index space call then our current source should either be reserved | 663 // If we get a release index space call then our current source should either be reserved |
| (...skipping 25 matching lines...) |
| 680 SkASSERT(fGeoPoolStateStack.count() > 1); | 689 SkASSERT(fGeoPoolStateStack.count() > 1); |
| 681 fGeoPoolStateStack.pop_back(); | 690 fGeoPoolStateStack.pop_back(); |
| 682 GeometryPoolState& poolState = fGeoPoolStateStack.back(); | 691 GeometryPoolState& poolState = fGeoPoolStateStack.back(); |
| 683 // we have to assume that any slack we had in our vertex/index data | 692 // we have to assume that any slack we had in our vertex/index data |
| 684 // is now unreleasable because data may have been appended later in the | 693 // is now unreleasable because data may have been appended later in the |
| 685 // pool. | 694 // pool. |
| 686 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) { | 695 if (kReserved_GeometrySrcType == restoredState.fVertexSrc) { |
| 687 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; | 696 poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount; |
| 688 } | 697 } |
| 689 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) { | 698 if (kReserved_GeometrySrcType == restoredState.fIndexSrc) { |
| 690 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * | 699 poolState.fUsedPoolIndexBytes = sizeof(uint16_t) * restoredState.fIndexCount; |
| 691 restoredState.fIndexCount; | |
| 692 } | 700 } |
| 693 } | 701 } |
| 694 | 702 |
| 695 void GrInOrderDrawBuffer::recordStateIfNecessary() { | 703 void GrInOrderDrawBuffer::recordStateIfNecessary(GrGpu::DrawType drawType, |
| 704 const GrDeviceCoordTexture* dstCopy) { |
| 696 if (!fLastState) { | 705 if (!fLastState) { |
| 697 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState())); | 706 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState())); |
| 698 fLastState = &ss->fState; | 707 fLastState = &ss->fState; |
| 708 if (dstCopy) { |
| 709 ss->fDstCopy = *dstCopy; |
| 710 } |
| 711 ss->fDrawType = drawType; |
| 699 this->convertDrawStateToPendingExec(fLastState); | 712 this->convertDrawStateToPendingExec(fLastState); |
| 700 this->recordTraceMarkersIfNecessary(); | 713 this->recordTraceMarkersIfNecessary(); |
| 701 return; | 714 return; |
| 702 } | 715 } |
| 703 const GrDrawState& curr = this->getDrawState(); | 716 const GrDrawState& curr = this->getDrawState(); |
| 704 switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) { | 717 switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) { |
| 705 case GrDrawState::kIncompatible_CombinedState: | 718 case GrDrawState::kIncompatible_CombinedState: { |
| 706 fLastState = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr))->fState; | 719 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr)); |
| 720 fLastState = &ss->fState; |
| 721 if (dstCopy) { |
| 722 ss->fDstCopy = *dstCopy; |
| 723 } |
| 724 ss->fDrawType = drawType; |
| 707 this->convertDrawStateToPendingExec(fLastState); | 725 this->convertDrawStateToPendingExec(fLastState); |
| 708 this->recordTraceMarkersIfNecessary(); | 726 this->recordTraceMarkersIfNecessary(); |
| 709 break; | 727 break; |
| 728 } |
| 710 case GrDrawState::kA_CombinedState: | 729 case GrDrawState::kA_CombinedState: |
| 711 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA. | 730 case GrDrawState::kAOrB_CombinedState: // Treat the same as kA. |
| 712 break; | 731 break; |
| 713 case GrDrawState::kB_CombinedState: | 732 case GrDrawState::kB_CombinedState: |
| 714 // prev has already been converted to pending execution. That is a one-way ticket. | 733 // prev has already been converted to pending execution. That is a one-way ticket. |
| 715 // So here we just destruct the previous state and reinit with a new copy of curr. | 734 // So here we just destruct the previous state and reinit with a new copy of curr. |
| 716 // Note that this goes away when we move GrIODB over to taking optimized snapshots | 735 // Note that this goes away when we move GrIODB over to taking optimized snapshots |
| 717 // of draw states. | 736 // of draw states. |
| 718 fLastState->~GrDrawState(); | 737 fLastState->~GrDrawState(); |
| 719 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr)); | 738 SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr)); |
| 720 this->convertDrawStateToPendingExec(fLastState); | 739 this->convertDrawStateToPendingExec(fLastState); |
| 721 break; | 740 break; |
| 722 } | 741 } |
| 723 } | 742 } |
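
recordStateIfNecessary() above collapses redundant state changes: a new SetState command is appended only when CombineIfPossible() reports the pending state incompatible with the last recorded one. A toy standalone sketch of that dedup (not the Skia API; the kB in-place replacement arm is omitted):

    #include <cstdio>

    enum Combined { kIncompatible, kA, kB, kAOrB };

    struct State { int blend; };

    static Combined combine(const State& a, const State& b) {
        return a.blend == b.blend ? kAOrB : kIncompatible;  // toy rule
    }

    int main() {
        const State draws[] = {{1}, {1}, {2}};
        const State* last = nullptr;
        State recorded[3];
        int numSetStates = 0;
        for (const State& s : draws) {
            if (!last || kIncompatible == combine(*last, s)) {
                recorded[numSetStates] = s;          // record a SetState command
                last = &recorded[numSetStates++];
            }                                        // kA/kAOrB: previous one suffices
        }
        std::printf("3 draws -> %d SetState commands\n", numSetStates);  // prints 2
        return 0;
    }
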
| 724 | 743 |
| 725 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { | 744 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { |
| 726 SkASSERT(!fCmdBuffer.empty()); | 745 SkASSERT(!fCmdBuffer.empty()); |
| 727 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); | 746 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); |
| 728 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); | 747 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); |
| 729 if (activeTraceMarkers.count() > 0) { | 748 if (activeTraceMarkers.count() > 0) { |
| 730 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); | 749 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); |
| 731 fGpuCmdMarkers.push_back(activeTraceMarkers); | 750 fGpuCmdMarkers.push_back(activeTraceMarkers); |
| 732 } | 751 } |
| 733 } | 752 } |
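
add_trace_bit(), strip_trace_bit(), and cmd_has_trace_marker() are defined earlier in this file, outside the visible hunks. A sketch assuming the conventional encoding, where the top bit of a command's fType flags that fGpuCmdMarkers holds markers for it (consumed in lockstep by flush() via currCmdMarker):

    #include <cstdint>

    // Assumed encoding: high bit of the command type marks "has trace markers".
    static const uint8_t kTraceBit = 0x80;

    static uint8_t add_trace_bit(uint8_t cmd)      { return cmd | kTraceBit; }
    static uint8_t strip_trace_bit(uint8_t cmd)    { return cmd & ~kTraceBit; }
    static bool    cmd_has_trace_marker(uint8_t c) { return (c & kTraceBit) != 0; }

    int main() {
        uint8_t kDraw_Cmd = 1;  // hypothetical command id
        uint8_t tagged = add_trace_bit(kDraw_Cmd);
        // Tagging must round-trip: the marker flag is visible, the type intact.
        return (cmd_has_trace_marker(tagged) && strip_trace_bit(tagged) == kDraw_Cmd) ? 0 : 1;
    }
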