Index: src/gpu/GrTargetCommands.cpp
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index 3f9d7c860fab185dc9d731c8e0ce916cc56a294f..c2006d731fcda528a0968c93ed1295caef191258 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -7,8 +7,9 @@
 
 #include "GrTargetCommands.h"
 
-#include "GrBufferedDrawTarget.h"
-
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrPathRendering.h"
 #include "batches/GrDrawBatch.h"
 #include "batches/GrVertexBatch.h"
 
@@ -16,91 +17,85 @@ GrBATCH_SPEW(int32_t GrTargetCommands::Cmd::gUniqueID = 0;)
 
 void GrTargetCommands::reset() {
     fCmdBuffer.reset();
-    fBatchTarget.reset();
 }
 
-void GrTargetCommands::flush(GrBufferedDrawTarget* bufferedDrawTarget) {
+void GrTargetCommands::flush(GrGpu* gpu, GrResourceProvider* resourceProvider) {
     GrBATCH_INFO("Flushing\n");
     if (fCmdBuffer.empty()) {
         return;
     }
-
-    GrGpu* gpu = bufferedDrawTarget->getGpu();
-
+    GrBatchFlushState flushState(gpu, resourceProvider, fLastFlushToken);
     // Loop over all batches and generate geometry
     CmdBuffer::Iter genIter(fCmdBuffer);
     while (genIter.next()) {
         if (Cmd::kDrawBatch_CmdType == genIter->type()) {
             DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
-            fBatchTarget.resetNumberOfDraws();
             // TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
             // remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
             GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(db->batch());
 
-            vertexBatch->generateGeometry(&fBatchTarget);
-            vertexBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
+            vertexBatch->prepareDraws(&flushState);
         }
     }
 
-    fBatchTarget.preFlush();
+    flushState.preIssueDraws();
 
     CmdBuffer::Iter iter(fCmdBuffer);
-
     while (iter.next()) {
-        iter->execute(gpu);
+        iter->execute(&flushState);
    }
-
-    fBatchTarget.postFlush();
+    fLastFlushToken = flushState.lastFlushedToken();
 }
 
-void GrTargetCommands::StencilPath::execute(GrGpu* gpu) {
+void GrTargetCommands::StencilPath::execute(GrBatchFlushState* state) {
     GrPathRendering::StencilPathArgs args(fUseHWAA, fRenderTarget.get(), &fViewMatrix, &fScissor,
                                           &fStencil);
-    gpu->pathRendering()->stencilPath(args, this->path());
+    state->gpu()->pathRendering()->stencilPath(args, this->path());
 }
 
-void GrTargetCommands::DrawPath::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPath::execute(GrBatchFlushState* state) {
     if (!fState->fCompiled) {
-        gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
-                              fState->fBatchTracker);
+        state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+                                       *fState->getPipeline(), fState->fBatchTracker);
         fState->fCompiled = true;
     }
     GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
                                        &fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
-    gpu->pathRendering()->drawPath(args, this->path());
+    state->gpu()->pathRendering()->drawPath(args, this->path());
 }
 
-void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPaths::execute(GrBatchFlushState* state) {
     if (!fState->fCompiled) {
-        gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
-                              fState->fBatchTracker);
+        state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+                                       *fState->getPipeline(), fState->fBatchTracker);
         fState->fCompiled = true;
     }
     GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
                                        &fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
-    gpu->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType, fTransforms,
-                                    fTransformType, fCount);
+    state->gpu()->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType,
+                                             fTransforms, fTransformType, fCount);
 }
 
-void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawBatch::execute(GrBatchFlushState* state) {
     // TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
     // remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
-    const GrVertexBatch* vertexBatch = static_cast<const GrVertexBatch*>(fBatch.get());
-    fBatchTarget->flushNext(vertexBatch->numberOfDraws());
+    GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(fBatch.get());
+    vertexBatch->issueDraws(state);
 }
 
-void GrTargetCommands::Clear::execute(GrGpu* gpu) {
+
+void GrTargetCommands::Clear::execute(GrBatchFlushState* state) {
     if (GrColor_ILLEGAL == fColor) {
-        gpu->discard(this->renderTarget());
+        state->gpu()->discard(this->renderTarget());
     } else {
-        gpu->clear(fRect, fColor, this->renderTarget());
+        state->gpu()->clear(fRect, fColor, this->renderTarget());
     }
 }
 
-void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
-    gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
+void GrTargetCommands::ClearStencilClip::execute(GrBatchFlushState* state) {
+    state->gpu()->clearStencilClip(fRect, fInsideClip, this->renderTarget());
 }
 
-void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
-    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
+void GrTargetCommands::CopySurface::execute(GrBatchFlushState* state) {
+    state->gpu()->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
 }
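
Note on the new flush flow. The patch replaces the GrBufferedDrawTarget/fBatchTarget plumbing with a GrBatchFlushState that is created once per flush, handed to every batch in a prepare pass (prepareDraws), given a preIssueDraws() hook, passed to each buffered command's execute(), and finally asked for lastFlushedToken(), which is remembered for the next flush. The sketch below is a minimal, self-contained illustration of that two-phase prepare/issue pattern, not Skia's real API: FlushState, Batch, CommandBuffer, the uint64_t token type, and the reset()/add() helpers are hypothetical stand-ins, and the GPU and resource provider that the real GrBatchFlushState carries are omitted.

// Hypothetical stand-ins only -- not the real GrBatchFlushState/GrVertexBatch API.
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

class FlushState {                                  // plays the role of GrBatchFlushState
public:
    explicit FlushState(uint64_t lastFlushedToken) : fToken(lastFlushedToken) {}
    uint64_t issueToken() { return ++fToken; }      // a concrete batch would take one per issued draw
    uint64_t lastFlushedToken() const { return fToken; }
private:
    uint64_t fToken;
};

class Batch {                                       // plays the role of GrVertexBatch
public:
    virtual ~Batch() {}
    virtual void prepareDraws(FlushState*) = 0;     // phase 1: record geometry, no GPU work yet
    virtual void issueDraws(FlushState*) = 0;       // phase 2: submit the recorded draws
};

class CommandBuffer {                               // plays the role of GrTargetCommands
public:
    void add(std::unique_ptr<Batch> batch) { fBatches.push_back(std::move(batch)); }
    void reset() { fBatches.clear(); }              // mirrors GrTargetCommands::reset()

    void flush() {
        if (fBatches.empty()) {
            return;
        }
        FlushState state(fLastFlushToken);
        for (auto& batch : fBatches) {              // phase 1 over every buffered batch
            batch->prepareDraws(&state);
        }
        // (the real patch calls flushState.preIssueDraws() between the two phases)
        for (auto& batch : fBatches) {              // phase 2 replays the buffered commands
            batch->issueDraws(&state);
        }
        fLastFlushToken = state.lastFlushedToken(); // carried into the next flush
    }

private:
    std::vector<std::unique_ptr<Batch>> fBatches;
    uint64_t fLastFlushToken = 0;
};

The same split is visible in GrTargetCommands::flush() above: every batch finishes recording before any command is replayed, and the last flushed token is fed into the next GrBatchFlushState, presumably so that token numbering continues across consecutive flushes.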