| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | |
| 11 #include "GrDefaultGeoProcFactory.h" | 10 #include "GrDefaultGeoProcFactory.h" |
| 12 #include "GrDrawTargetCaps.h" | 11 #include "GrDrawTargetCaps.h" |
| 13 #include "GrGpu.h" | 12 #include "GrGpu.h" |
| 14 #include "GrTemplates.h" | 13 #include "GrTemplates.h" |
| 15 #include "GrFontCache.h" | 14 #include "GrFontCache.h" |
| 16 #include "GrTexture.h" | 15 #include "GrTexture.h" |
| 17 | 16 |
| 18 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, | 17 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, |
| 19 GrVertexBufferAllocPool* vertexPool, | 18 GrVertexBufferAllocPool* vertexPool, |
| 20 GrIndexBufferAllocPool* indexPool) | 19 GrIndexBufferAllocPool* indexPool) |
| 21 : INHERITED(gpu, vertexPool, indexPool) | 20 : INHERITED(gpu, vertexPool, indexPool) |
| 22 , fCmdBuffer(kCmdBufferInitialSizeInBytes) | 21 , fCmdBuffer(kCmdBufferInitialSizeInBytes) |
| 23 , fPrevState(NULL) | 22 , fPrevState(NULL) |
| 24 , fDrawID(0) | 23 , fDrawID(0) { |
| 25 , fBatchTarget(gpu, vertexPool, indexPool) | |
| 26 , fFlushBatches(false) { | |
| 27 | 24 |
| 28 SkASSERT(vertexPool); | 25 SkASSERT(vertexPool); |
| 29 SkASSERT(indexPool); | 26 SkASSERT(indexPool); |
| 30 | 27 |
| 31 fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve); | 28 fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve); |
| 32 fPathTransformBuffer.setReserve(kPathXformBufferMinReserve); | 29 fPathTransformBuffer.setReserve(kPathXformBufferMinReserve); |
| 33 } | 30 } |
| 34 | 31 |
| 35 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { | 32 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { |
| 36 this->reset(); | 33 this->reset(); |
| (...skipping 169 matching lines...) | |
| 206 } | 203 } |
| 207 // Check if there is a draw info that is compatible that uses the same VB from the pool and | 204 // Check if there is a draw info that is compatible that uses the same VB from the pool and |
| 208 // the same IB | 205 // the same IB |
| 209 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { | 206 if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { |
| 210 return 0; | 207 return 0; |
| 211 } | 208 } |
| 212 | 209 |
| 213 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); | 210 Draw* draw = static_cast<Draw*>(&fCmdBuffer.back()); |
| 214 | 211 |
| 215 if (!draw->fInfo.isInstanced() || | 212 if (!draw->fInfo.isInstanced() || |
| 216 draw->fInfo.primitiveType() != info.primitiveType() || | |
| 217 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || | 213 draw->fInfo.verticesPerInstance() != info.verticesPerInstance() || |
| 218 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || | 214 draw->fInfo.indicesPerInstance() != info.indicesPerInstance() || |
| 219 draw->fInfo.vertexBuffer() != info.vertexBuffer() || | 215 draw->fInfo.vertexBuffer() != info.vertexBuffer() || |
| 220 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { | 216 draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) { |
| 221 return 0; | 217 return 0; |
| 222 } | 218 } |
| 223 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { | 219 if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) { |
| 224 return 0; | 220 return 0; |
| 225 } | 221 } |
| 226 | 222 |
| (...skipping 35 matching lines...) | |
| 262 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | 258 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); |
| 263 draw->fInfo.adjustInstanceCount(-instancesConcated); | 259 draw->fInfo.adjustInstanceCount(-instancesConcated); |
| 264 } else { | 260 } else { |
| 265 return; | 261 return; |
| 266 } | 262 } |
| 267 } else { | 263 } else { |
| 268 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); | 264 draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info)); |
| 269 } | 265 } |
| 270 this->recordTraceMarkersIfNecessary(); | 266 this->recordTraceMarkersIfNecessary(); |
| 271 } | 267 } |
| 272 | |
| 273 void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch, | |
| 274 const GrPipelineBuilder& pipelineBuilder, | |
| 275 const GrScissorState& scissorState, | |
| 276 const GrDeviceCoordTexture* dstCopy) { | |
| 277 if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) { | |
| 278 return; | |
| 279 } | |
| 280 | |
| 281 // TODO hack until batch is everywhere | |
| 282 fFlushBatches = true; | |
| 283 | |
| 284 // Check if there is a Batch Draw we can batch with | |
| 285 if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) { | |
| 286 GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | |
| 287 return; | |
| 288 } | |
| 289 | |
| 290 DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back()); | |
| 291 if (draw->fBatch->combineIfPossible(batch)) { | |
| 292 return; | |
| 293 } else { | |
| 294 GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | |
| 295 } | |
| 296 this->recordTraceMarkersIfNecessary(); | |
| 297 } | |
| 298 | 268 |
| 299 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder, | 269 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder, |
| 300 const GrPathProcessor* pathProc, | 270 const GrPathProcessor* pathProc, |
| 301 const GrPath* path, | 271 const GrPath* path, |
| 302 const GrScissorState& scissorState, | 272 const GrScissorState& scissorState, |
| 303 const GrStencilSettings& stencilSettings) { | 273 const GrStencilSettings& stencilSettings) { |
| 304 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, | 274 StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, |
| 305 (path, pipelineBuilder.getRenderTarget())); | 275 (path, pipelineBuilder.getRenderTarget())); |
| 306 sp->fScissor = scissorState; | 276 sp->fScissor = scissorState; |
| 307 sp->fUseHWAA = pipelineBuilder.isHWAntialias(); | 277 sp->fUseHWAA = pipelineBuilder.isHWAntialias(); |
| (...skipping 126 matching lines...) | |
| 434 reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve); | 404 reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve); |
| 435 fGpuCmdMarkers.reset(); | 405 fGpuCmdMarkers.reset(); |
| 436 } | 406 } |
| 437 | 407 |
| 438 void GrInOrderDrawBuffer::onFlush() { | 408 void GrInOrderDrawBuffer::onFlush() { |
| 439 if (fCmdBuffer.empty()) { | 409 if (fCmdBuffer.empty()) { |
| 440 return; | 410 return; |
| 441 } | 411 } |
| 442 | 412 |
| 443 | 413 |
| 414 CmdBuffer::Iter iter(fCmdBuffer); |
| 415 |
| 416 int currCmdMarker = 0; |
| 417 |
| 444 // Updated every time we find a set state cmd to reflect the current state in the playback | 418 // Updated every time we find a set state cmd to reflect the current state in the playback |
| 445 // stream. | 419 // stream. |
| 446 SetState* currentState = NULL; | 420 SetState* currentState = NULL; |
| 447 | 421 |
| 448 // TODO we noticed a huge regression on MacMinis with the initial implementation of GrBatch | |
| 449 // Because of vertex buffer mismanagement between batch and non batch. To compensate we | |
| 450 // flush all the batches into one contigous buffer | |
| 451 if (fFlushBatches) { | |
| 452 fFlushBatches = false; | |
| 453 CmdBuffer::Iter preflush(fCmdBuffer); | |
| 454 while(preflush.next()) { | |
| 455 bool isSetState = kSetState_Cmd == strip_trace_bit(preflush->fType); | |
| 456 if (isSetState) { | |
| 457 SetState* ss = reinterpret_cast<SetState*>(preflush.get()); | |
| 458 if (!ss->fPrimitiveProcessor) { | |
| 459 currentState = ss; | |
| 460 } | |
| 461 } else if (kDrawBatch_Cmd == strip_trace_bit(preflush->fType)) { | |
| 462 preflush->execute(this, currentState); | |
| 463 } | |
| 464 } | |
| 465 } | |
| 466 | |
| 467 // TODO this is temporary while batch is being rolled out | |
| 468 this->getVertexAllocPool()->unmap(); | |
| 469 this->getIndexAllocPool()->unmap(); | |
| 470 fBatchTarget.preFlush(); | |
| 471 | |
| 472 currentState = NULL; | |
| 473 CmdBuffer::Iter iter(fCmdBuffer); | |
| 474 | |
| 475 int currCmdMarker = 0; | |
| 476 | |
| 477 while (iter.next()) { | 422 while (iter.next()) { |
| 478 GrGpuTraceMarker newMarker("", -1); | 423 GrGpuTraceMarker newMarker("", -1); |
| 479 SkString traceString; | 424 SkString traceString; |
| 480 if (cmd_has_trace_marker(iter->fType)) { | 425 if (cmd_has_trace_marker(iter->fType)) { |
| 481 traceString = fGpuCmdMarkers[currCmdMarker].toString(); | 426 traceString = fGpuCmdMarkers[currCmdMarker].toString(); |
| 482 newMarker.fMarker = traceString.c_str(); | 427 newMarker.fMarker = traceString.c_str(); |
| 483 this->getGpu()->addGpuTraceMarker(&newMarker); | 428 this->getGpu()->addGpuTraceMarker(&newMarker); |
| 484 ++currCmdMarker; | 429 ++currCmdMarker; |
| 485 } | 430 } |
| 486 | 431 |
| 487 // TODO temporary hack | 432 if (kSetState_Cmd == strip_trace_bit(iter->fType)) { |
| 488 if (kDrawBatch_Cmd == strip_trace_bit(iter->fType)) { | |
| 489 fBatchTarget.flushNext(); | |
| 490 continue; | |
| 491 } | |
| 492 | |
| 493 bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType); | |
| 494 if (isSetState) { | |
| 495 SetState* ss = reinterpret_cast<SetState*>(iter.get()); | 433 SetState* ss = reinterpret_cast<SetState*>(iter.get()); |
| 496 | 434 |
| 497 // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we will | 435 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline, |
| 498 // only have GrBatch and we can delete this | 436 ss->fPipeline.descInfo(), ss->fBatchTracker); |
| 499 if (ss->fPrimitiveProcessor) { | |
| 500 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, | |
| 501 ss->fPipeline, | |
| 502 ss->fPipeline.descInfo(), | |
| 503 ss->fBatchTracker); | |
| 504 } | |
| 505 currentState = ss; | 437 currentState = ss; |
| 438 |
| 506 } else { | 439 } else { |
| 507 iter->execute(this, currentState); | 440 iter->execute(this, currentState); |
| 508 } | 441 } |
| 509 | 442 |
| 510 if (cmd_has_trace_marker(iter->fType)) { | 443 if (cmd_has_trace_marker(iter->fType)) { |
| 511 this->getGpu()->removeGpuTraceMarker(&newMarker); | 444 this->getGpu()->removeGpuTraceMarker(&newMarker); |
| 512 } | 445 } |
| 513 } | 446 } |
| 514 | 447 |
| 515 // TODO see copious notes about hack | |
| 516 fBatchTarget.postFlush(); | |
| 517 | |
| 518 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); | 448 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); |
| 519 ++fDrawID; | 449 ++fDrawID; |
| 520 } | 450 } |
| 521 | 451 |
| 522 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 452 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const SetState* state) { |
| 523 SkASSERT(state); | 453 SkASSERT(state); |
| 524 DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc, | 454 DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc, |
| 525 &state->fBatchTracker); | 455 &state->fBatchTracker); |
| 526 buf->getGpu()->draw(args, fInfo); | 456 buf->getGpu()->draw(args, fInfo); |
| 527 } | 457 } |
| (...skipping 19 matching lines...) | |
| 547 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 477 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const SetState* state) { |
| 548 SkASSERT(state); | 478 SkASSERT(state); |
| 549 DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc, | 479 DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc, |
| 550 &state->fBatchTracker); | 480 &state->fBatchTracker); |
| 551 buf->getGpu()->drawPaths(args, this->pathRange(), | 481 buf->getGpu()->drawPaths(args, this->pathRange(), |
| 552 &buf->fPathIndexBuffer[fIndicesLocation], fIndexType, | 482 &buf->fPathIndexBuffer[fIndicesLocation], fIndexType, |
| 553 &buf->fPathTransformBuffer[fTransformsLocation], fTransformType, | 483 &buf->fPathTransformBuffer[fTransformsLocation], fTransformType, |
| 554 fCount, fStencilSettings); | 484 fCount, fStencilSettings); |
| 555 } | 485 } |
| 556 | 486 |
| 557 void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | |
| 558 SkASSERT(state); | |
| 559 fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline); | |
| 560 } | |
| 561 | |
| 562 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {} | 487 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {} |
| 563 | 488 |
| 564 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) { | 489 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) { |
| 565 if (GrColor_ILLEGAL == fColor) { | 490 if (GrColor_ILLEGAL == fColor) { |
| 566 buf->getGpu()->discard(this->renderTarget()); | 491 buf->getGpu()->discard(this->renderTarget()); |
| 567 } else { | 492 } else { |
| 568 buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); | 493 buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); |
| 569 } | 494 } |
| 570 } | 495 } |
| 571 | 496 |
| (...skipping 27 matching lines...) | |
| 599 (pipelineBuilder, primProc, *this->getGpu()->caps(), | 524 (pipelineBuilder, primProc, *this->getGpu()->caps(), |
| 600 scissor, dstCopy)); | 525 scissor, dstCopy)); |
| 601 if (ss->fPipeline.mustSkip()) { | 526 if (ss->fPipeline.mustSkip()) { |
| 602 fCmdBuffer.pop_back(); | 527 fCmdBuffer.pop_back(); |
| 603 return false; | 528 return false; |
| 604 } | 529 } |
| 605 | 530 |
| 606 ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker, | 531 ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker, |
| 607 ss->fPipeline.getInitBatchTracker()); | 532 ss->fPipeline.getInitBatchTracker()); |
| 608 | 533 |
| 609 if (fPrevState && fPrevState->fPrimitiveProcessor.get() && | 534 if (fPrevState && |
| 610 fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker, | 535 fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker, |
| 611 *ss->fPrimitiveProcessor, | 536 *ss->fPrimitiveProcessor, |
| 612 ss->fBatchTracker) && | 537 ss->fBatchTracker) && |
| 613 fPrevState->fPipeline.isEqual(ss->fPipeline)) { | 538 fPrevState->fPipeline.isEqual(ss->fPipeline)) { |
| 614 fCmdBuffer.pop_back(); | 539 fCmdBuffer.pop_back(); |
| 615 } else { | 540 } else { |
| 616 fPrevState = ss; | 541 fPrevState = ss; |
| 617 this->recordTraceMarkersIfNecessary(); | 542 this->recordTraceMarkersIfNecessary(); |
| 618 } | 543 } |
| 619 return true; | 544 return true; |
| 620 } | 545 } |
| 621 | 546 |
| 622 bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch, | |
| 623 const GrPipelineBuilder& pipelineBuilder, | |
| 624 const GrScissorState& scissor, | |
| 625 const GrDeviceCoordTexture* dstCopy) { | |
| 626 // TODO this gets much simpler when we have batches everywhere. | |
| 627 // If the previous command is also a set state, then we check to see if it has a Batch. If so, | |
| 628 // and we can make the two batches equal, and we can combine the states, then we make them equal | |
| 629 SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, | |
| 630 (batch, pipelineBuilder, *this->getGpu()->caps(), scissor, | |
| 631 dstCopy)); | |
| 632 if (ss->fPipeline.mustSkip()) { | |
| 633 fCmdBuffer.pop_back(); | |
| 634 return false; | |
| 635 } | |
| 636 | |
| 637 batch->initBatchTracker(ss->fPipeline.getInitBatchTracker()); | |
| 638 | |
| 639 if (fPrevState && !fPrevState->fPrimitiveProcessor.get() && | |
| 640 fPrevState->fPipeline.isEqual(ss->fPipeline)) { | |
| 641 fCmdBuffer.pop_back(); | |
| 642 } else { | |
| 643 fPrevState = ss; | |
| 644 this->recordTraceMarkersIfNecessary(); | |
| 645 } | |
| 646 return true; | |
| 647 } | |
| 648 | |
| 649 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { | 547 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() { |
| 650 SkASSERT(!fCmdBuffer.empty()); | 548 SkASSERT(!fCmdBuffer.empty()); |
| 651 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); | 549 SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType)); |
| 652 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); | 550 const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers(); |
| 653 if (activeTraceMarkers.count() > 0) { | 551 if (activeTraceMarkers.count() > 0) { |
| 654 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); | 552 fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType); |
| 655 fGpuCmdMarkers.push_back(activeTraceMarkers); | 553 fGpuCmdMarkers.push_back(activeTraceMarkers); |
| 656 } | 554 } |
| 657 } | 555 } |