Chromium Code Reviews

Index: src/gpu/GrTargetCommands.cpp
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index 24ee32d83218f97479fbed65ea68fabc341fae6d..f129db627af4b86ae6fe124c000559424f325b7e 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -35,17 +35,17 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
     }

     // Check if there is a Batch Draw we can batch with
robertphillips  2015/04/30 15:51:08
Why don't we need a "fCmdBuffer.count() && ..." in ...
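The concern behind this comment seems to be that fCmdBuffer.back() is only meaningful when the buffer is non-empty, and the new check below reads back() unconditionally; the patch presumably relies on an earlier command having already been recorded, but that is an assumption. A minimal standalone sketch of the guarded form the comment asks about, using hypothetical stand-in types (CmdBuffer, Cmd, kDrawBatch, recordDraw below are illustrations, not Skia's actual classes):

// Standalone analogue, not Skia code: every name here is a stand-in used only
// to show why a "count() && ..." guard matters before calling back().
#include <cassert>
#include <cstddef>
#include <vector>

struct Cmd { int fType; };

struct CmdBuffer {
    std::vector<Cmd> fCmds;
    int count() const { return static_cast<int>(fCmds.size()); }
    // back() is only valid when the buffer is non-empty.
    Cmd& back() { assert(!fCmds.empty()); return fCmds.back(); }
    Cmd* append(int type) { fCmds.push_back(Cmd{type}); return &fCmds.back(); }
};

static const int kDrawBatch = 1;

// The guarded form: test count() before touching back().
Cmd* recordDraw(CmdBuffer* buf) {
    if (buf->count() && kDrawBatch == buf->back().fType) {
        // A real implementation would try to combine with the previous draw here.
        return NULL;
    }
    return buf->append(kDrawBatch);
}

int main() {
    CmdBuffer buf;
    recordDraw(&buf);  // buffer empty: the count() check keeps back() from being called
    recordDraw(&buf);  // buffer non-empty: back() is now safe to inspect
    return 0;
}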
-    if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) {
-        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
-        return fDrawBatch;
-    }
-
-    SkASSERT(&fCmdBuffer.back() == fDrawBatch);
-    if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
-        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
+    if (Cmd::kDrawBatch_CmdType == fCmdBuffer.back().type()) {
+        DrawBatch* previous = static_cast<DrawBatch*>(&fCmdBuffer.back());
+        if (!previous->fBatch->combineIfPossible(batch)) {
robertphillips  2015/04/30 15:51:08
Why not return NULL here and just fall through otherwise?
+            return GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
+        }
+        // We batched
+        return NULL;
     }
-    return fDrawBatch;
+    DrawBatch* current = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
+    return current;
 }

 GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
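robertphillips's second comment above asks why the combining branch does not simply return NULL and let the non-combining case fall through, so that GrNEW_APPEND_TO_RECORDER appears in exactly one place. A sketch of that control-flow shape, reusing the hypothetical CmdBuffer/Cmd/kDrawBatch stand-ins from the earlier sketch (canCombine is a made-up placeholder for the combineIfPossible() decision, not a real Skia call):

// Fall-through shape: the append below is the single point where a new command
// is recorded; the combined case returns NULL early.
Cmd* recordDrawFallThrough(CmdBuffer* buf, bool canCombine) {
    if (buf->count() && kDrawBatch == buf->back().fType) {
        if (canCombine) {
            return NULL;  // combined into the previous draw; nothing new recorded
        }
        // Could not combine: fall through to the single append below.
    }
    return buf->append(kDrawBatch);
}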
@@ -190,7 +190,6 @@ GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb
 void GrTargetCommands::reset() {
     fCmdBuffer.reset();
     fPrevState = NULL;
-    fDrawBatch = NULL;
 }

 void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
@@ -204,7 +203,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
     GrGpu* gpu = iodb->getGpu();

-#ifdef USE_BITMAP_TEXTBLOBS
     // Loop over all batches and generate geometry
     CmdBuffer::Iter genIter(fCmdBuffer);
     while (genIter.next()) {
@@ -220,7 +218,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
             currentState = ss;
         }
     }
-#endif

     iodb->getVertexAllocPool()->unmap();
     iodb->getIndexAllocPool()->unmap();
@@ -237,7 +234,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
             gpu->addGpuTraceMarker(&newMarker);
         }

-        // TODO temporary hack
         if (Cmd::kDrawBatch_CmdType == iter->type()) {
             DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
             fBatchTarget.flushNext(db->fBatch->numberOfDraws());
@@ -249,12 +245,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
         }

         if (Cmd::kSetState_CmdType == iter->type()) {
-#ifndef USE_BITMAP_TEXTBLOBS
-            SetState* ss = reinterpret_cast<SetState*>(iter.get());
-
-            ss->execute(gpu, currentState);
-            currentState = ss;
-#else
             // TODO this is just until NVPR is in batch
             SetState* ss = reinterpret_cast<SetState*>(iter.get());
@@ -262,7 +252,6 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
                 ss->execute(gpu, currentState);
             }
             currentState = ss;
-#endif
         } else {
             iter->execute(gpu, currentState);
@@ -273,17 +262,9 @@ void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
         }
     }

-    // TODO see copious notes about hack
     fBatchTarget.postFlush();
 }

-void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
-    SkASSERT(state);
-    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
-                  &state->fBatchTracker);
-    gpu->draw(args, fInfo);
-}
-
 void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
     GrGpu::StencilPathState state;
     state.fRenderTarget = fRenderTarget.get();