| Index: src/gpu/GrInOrderDrawBuffer.cpp
|
| diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
|
| index f504f093176c8c6cb69d00cbcecd6c8e603d2e13..5b8b69b1800424c9d411b52f295104145356dc7b 100644
|
| --- a/src/gpu/GrInOrderDrawBuffer.cpp
|
| +++ b/src/gpu/GrInOrderDrawBuffer.cpp
|
| @@ -20,7 +20,8 @@ GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
|
| : INHERITED(gpu, vertexPool, indexPool)
|
| , fCmdBuffer(kCmdBufferInitialSizeInBytes)
|
| , fPrevState(NULL)
|
| - , fDrawID(0) {
|
| + , fDrawID(0)
|
| + , fBatchBuffer(gpu, vertexPool, indexPool) {
|
|
|
| SkASSERT(vertexPool);
|
| SkASSERT(indexPool);
|
| @@ -266,6 +267,31 @@ void GrInOrderDrawBuffer::onDraw(const GrDrawState& ds,
|
| this->recordTraceMarkersIfNecessary();
|
| }
|
|
|
| +void GrInOrderDrawBuffer::onBatchDraw(GrBatch* batch,
|
| + const GrDrawState& ds,
|
| + GrPrimitiveType type,
|
| + const GrScissorState& scissorState,
|
| + const GrDeviceCoordTexture* dstCopy) {
|
| + if (!this->recordStateAndShouldDraw(batch, ds, GrGpu::PrimTypeToDrawType(type), scissorState,
|
| + dstCopy)) {
|
| + return;
|
| + }
|
| +
|
| + // Check if there is a Batch Draw we can batch with
|
| + if (kBatchDraw != strip_trace_bit(fCmdBuffer.back().fType)) {
|
| + GrNEW_APPEND_TO_RECORDER(fCmdBuffer, BatchDraw, (batch));
|
| + return;
|
| + }
|
| +
|
| + BatchDraw* draw = static_cast<BatchDraw*>(&fCmdBuffer.back());
|
| + if (draw->fBatch->combineIfPossible(batch)) {
|
| + return;
|
| + } else {
|
| + GrNEW_APPEND_TO_RECORDER(fCmdBuffer, BatchDraw, (batch));
|
| + }
|
| + this->recordTraceMarkersIfNecessary();
|
| +}
|
| +
|
| void GrInOrderDrawBuffer::onStencilPath(const GrDrawState& ds,
|
| const GrPathProcessor* pathProc,
|
| const GrPath* path,
|
| @@ -421,6 +447,10 @@ void GrInOrderDrawBuffer::onFlush() {
|
| // stream.
|
| SetState* currentState = NULL;
|
|
|
| + // TODO: to avoid flushing the batch buffer too often, we only flush when wasBatch && !isBatch.
|
| + // In the long term we can delete this and just flush once at the end of all geometry generation
|
| + bool wasBatch = false;
|
| +
|
| while (iter.next()) {
|
| GrGpuTraceMarker newMarker("", -1);
|
| SkString traceString;
|
| @@ -434,11 +464,22 @@ void GrInOrderDrawBuffer::onFlush() {
|
| if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
|
| SetState* ss = reinterpret_cast<SetState*>(iter.get());
|
|
|
| - this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fState,
|
| - ss->fState.descInfo(), ss->fState.drawType(),
|
| - ss->fBatchTracker);
|
| - currentState = ss;
|
| -
|
| + // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we will
|
| + // only have GrBatch and we can delete this
|
| + if (ss->fPrimitiveProcessor) {
|
| + // TODO see note above, this gets deleted once everyone uses batch drawing
|
| + if (wasBatch) {
|
| + wasBatch = false;
|
| + fBatchBuffer.flush();
|
| + }
|
| +
|
| + this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fState,
|
| + ss->fState.descInfo(), ss->fState.drawType(),
|
| + ss->fBatchTracker);
|
| + } else {
|
| + wasBatch = true;
|
| + }
|
| + currentState = ss;
|
| } else {
|
| iter->execute(this, currentState);
|
| }
|
| @@ -448,6 +489,11 @@ void GrInOrderDrawBuffer::onFlush() {
|
| }
|
| }
|
|
|
| + // TODO see note above; one final flush in case the last recorded draw was a batch.
|
| + if (wasBatch) {
|
| + fBatchBuffer.flush();
|
| + }
|
| +
|
| SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
|
| ++fDrawID;
|
| }
|
| @@ -487,6 +533,12 @@ void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const Set
|
| fCount, fStencilSettings);
|
| }
|
|
|
| +void GrInOrderDrawBuffer::BatchDraw::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
|
| + SkASSERT(state);
|
| + fBatch->generateGeometry(buf->getBatchBuffer(), &state->fState);
|
| +
|
| +}
|
| +
|
| void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}
|
|
|
| void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
|
| @@ -535,7 +587,7 @@ bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrDrawState& ds,
|
| ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
|
| ss->fState.getInitBatchTracker());
|
|
|
| - if (fPrevState &&
|
| + if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
|
| fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
|
| *ss->fPrimitiveProcessor,
|
| ss->fBatchTracker) &&
|
| @@ -548,6 +600,34 @@ bool GrInOrderDrawBuffer::recordStateAndShouldDraw(const GrDrawState& ds,
|
| return true;
|
| }
|
|
|
| +bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
|
| + const GrDrawState& ds,
|
| + GrGpu::DrawType drawType,
|
| + const GrScissorState& scissor,
|
| + const GrDeviceCoordTexture* dstCopy) {
|
| + // TODO this gets much simpler when we have batches everywhere.
|
| + // If the previous command is also a set state, then we check to see if it has a Batch. If so,
|
| + // and we can make the two batches equal, and we can combine the states, then we make them equal
|
| + SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
|
| + (batch, ds, *this->getGpu()->caps(), scissor,
|
| + dstCopy, drawType));
|
| + if (ss->fState.mustSkip()) {
|
| + fCmdBuffer.pop_back();
|
| + return false;
|
| + }
|
| +
|
| + batch->initBatchTracker(ss->fState.getInitBatchTracker());
|
| +
|
| + if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
|
| + fPrevState->fState.isEqual(ss->fState)) {
|
| + fCmdBuffer.pop_back();
|
| + } else {
|
| + fPrevState = ss;
|
| + this->recordTraceMarkersIfNecessary();
|
| + }
|
| + return true;
|
| +}
|
| +
|
| void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
|
| SkASSERT(!fCmdBuffer.empty());
|
| SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
|
|
|