OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrInOrderDrawBuffer.h" | 8 #include "GrInOrderDrawBuffer.h" |
9 | 9 |
10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
11 #include "GrDefaultGeoProcFactory.h" | 11 #include "GrDefaultGeoProcFactory.h" |
12 #include "GrDrawTargetCaps.h" | 12 #include "GrDrawTargetCaps.h" |
13 #include "GrGpu.h" | 13 #include "GrGpu.h" |
14 #include "GrTemplates.h" | 14 #include "GrTemplates.h" |
15 #include "GrFontCache.h" | 15 #include "GrFontCache.h" |
16 #include "GrTexture.h" | 16 #include "GrTexture.h" |
17 | 17 |
18 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, | 18 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu, |
19 GrVertexBufferAllocPool* vertexPool, | 19 GrVertexBufferAllocPool* vertexPool, |
20 GrIndexBufferAllocPool* indexPool) | 20 GrIndexBufferAllocPool* indexPool) |
21 : INHERITED(gpu, vertexPool, indexPool) | 21 : INHERITED(gpu, vertexPool, indexPool) |
22 , fCmdBuffer(kCmdBufferInitialSizeInBytes) | 22 , fCmdBuffer(kCmdBufferInitialSizeInBytes) |
23 , fPrevState(NULL) | 23 , fPrevState(NULL) |
24 , fPathIndexBuffer(kPathIdxBufferMinReserve * sizeof(char)/4) | |
25 , fPathTransformBuffer(kPathXformBufferMinReserve * sizeof(float)/4) | |
24 , fDrawID(0) | 26 , fDrawID(0) |
25 , fBatchTarget(gpu, vertexPool, indexPool) | 27 , fBatchTarget(gpu, vertexPool, indexPool) |
26 , fDrawBatch(NULL) { | 28 , fDrawBatch(NULL) { |
27 | 29 |
28 SkASSERT(vertexPool); | 30 SkASSERT(vertexPool); |
29 SkASSERT(indexPool); | 31 SkASSERT(indexPool); |
30 | 32 |
31 fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve); | 33 fPathIndexBuffer.preAlloc(kPathIdxBufferMinReserve * sizeof(char)); |
32 fPathTransformBuffer.setReserve(kPathXformBufferMinReserve); | 34 fPathTransformBuffer.preAlloc(kPathXformBufferMinReserve * sizeof(float)); |
33 } | 35 } |
34 | 36 |
35 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { | 37 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() { |
36 this->reset(); | 38 this->reset(); |
37 } | 39 } |
38 | 40 |
39 //////////////////////////////////////////////////////////////////////////////// | 41 //////////////////////////////////////////////////////////////////////////////// |
40 | 42 |
41 /** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we | 43 /** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we |
42 have explicit local coords and sometimes not. We *could* always provide explicit local coords | 44 have explicit local coords and sometimes not. We *could* always provide explicit local coords |
(...skipping 26 matching lines...) | |
69 if (isWinding) { | 71 if (isWinding) { |
70 // Double check that it is in fact winding. | 72 // Double check that it is in fact winding. |
71 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace)); | 73 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace)); |
72 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace)); | 74 SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace)); |
73 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace)); | 75 SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace)); |
74 SkASSERT(!pathStencilSettings.isTwoSided()); | 76 SkASSERT(!pathStencilSettings.isTwoSided()); |
75 } | 77 } |
76 return isWinding; | 78 return isWinding; |
77 } | 79 } |
78 | 80 |
79 template<typename T> static void reset_data_buffer(SkTDArray<T>* buffer, int minReserve) { | |
80 // Assume the next time this buffer fills up it will use approximately the same amount | |
81 // of space as last time. Only resize if we're using less than a third of the | |
82 // allocated space, and leave enough for 50% growth over last time. | |
83 if (3 * buffer->count() < buffer->reserved() && buffer->reserved() > minReserve) { | |
84 int reserve = SkTMax(minReserve, buffer->count() * 3 / 2); | |
85 buffer->reset(); | |
86 buffer->setReserve(reserve); | |
87 } else { | |
88 buffer->rewind(); | |
89 } | |
90 } | |
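
A worked numeric illustration of the shrink heuristic in reset_data_buffer() above (the concrete values below are invented for the example, not taken from the CL):

    // minReserve = 64, buffer->count() = 100, buffer->reserved() = 400:
    //   3 * 100 = 300 < 400 and 400 > 64, so storage is shrunk to
    //   SkTMax(64, 100 * 3 / 2) = 150 entries -- room for 50% growth.
    // minReserve = 64, buffer->count() = 200, buffer->reserved() = 400:
    //   3 * 200 = 600 >= 400, so the buffer is only rewound and keeps
    //   its current allocation.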
91 | |
92 class RectBatch : public GrBatch { | 81 class RectBatch : public GrBatch { |
93 public: | 82 public: |
94 struct Geometry { | 83 struct Geometry { |
95 GrColor fColor; | 84 GrColor fColor; |
96 SkMatrix fViewMatrix; | 85 SkMatrix fViewMatrix; |
97 SkRect fRect; | 86 SkRect fRect; |
98 bool fHasLocalRect; | 87 bool fHasLocalRect; |
99 bool fHasLocalMatrix; | 88 bool fHasLocalMatrix; |
100 SkRect fLocalRect; | 89 SkRect fLocalRect; |
101 SkMatrix fLocalMatrix; | 90 SkMatrix fLocalMatrix; |
(...skipping 305 matching lines...) | |
407 } | 396 } |
408 | 397 |
409 void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch, | 398 void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch, |
410 const PipelineInfo& pipelineInfo) { | 399 const PipelineInfo& pipelineInfo) { |
411 if (!this->setupPipelineAndShouldDraw(batch, pipelineInfo)) { | 400 if (!this->setupPipelineAndShouldDraw(batch, pipelineInfo)) { |
412 return; | 401 return; |
413 } | 402 } |
414 | 403 |
415 // Check if there is a Batch Draw we can batch with | 404 // Check if there is a Batch Draw we can batch with |
416 if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type()) { | 405 if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type()) { |
417 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | 406 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
418 return; | 407 return; |
419 } | 408 } |
420 | 409 |
421 DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back()); | 410 DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back()); |
422 if (draw->fBatch->combineIfPossible(batch)) { | 411 if (draw->fBatch->combineIfPossible(batch)) { |
423 return; | 412 return; |
424 } else { | 413 } else { |
425 this->closeBatch(); | 414 this->closeBatch(); |
426 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch)); | 415 fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget)); |
427 } | 416 } |
428 this->recordTraceMarkersIfNecessary(); | 417 this->recordTraceMarkersIfNecessary(); |
429 } | 418 } |
430 | 419 |
431 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder, | 420 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder, |
432 const GrPathProcessor* pathProc, | 421 const GrPathProcessor* pathProc, |
433 const GrPath* path, | 422 const GrPath* path, |
434 const GrScissorState& scissorState, | 423 const GrScissorState& scissorState, |
435 const GrStencilSettings& stencilSettings) { | 424 const GrStencilSettings& stencilSettings) { |
436 this->closeBatch(); | 425 this->closeBatch(); |
(...skipping 35 matching lines...) | |
472 SkASSERT(pathRange); | 461 SkASSERT(pathRange); |
473 SkASSERT(indices); | 462 SkASSERT(indices); |
474 SkASSERT(transformValues); | 463 SkASSERT(transformValues); |
475 this->closeBatch(); | 464 this->closeBatch(); |
476 | 465 |
477 if (!this->setupPipelineAndShouldDraw(pathProc, pipelineInfo)) { | 466 if (!this->setupPipelineAndShouldDraw(pathProc, pipelineInfo)) { |
478 return; | 467 return; |
479 } | 468 } |
480 | 469 |
481 int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType); | 470 int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType); |
482 if (int misalign = fPathIndexBuffer.count() % indexBytes) { | 471 char* savedIndices = (char*) fPathIndexBuffer.alloc(count * indexBytes, |
483 // Add padding to the index buffer so the indices are aligned properly. | 472 SkChunkAlloc::kThrow_AllocFailType); |
484 fPathIndexBuffer.append(indexBytes - misalign); | 473 SkASSERT(SkIsAlign4((uintptr_t)savedIndices)); |
474 memcpy(savedIndices, reinterpret_cast<const char*>(indices), count * indexBytes); | |
475 | |
476 const int xformBytes = GrPathRendering::PathTransformSize(transformType) * sizeof(float); | |
477 float* savedTransforms = NULL; | |
478 if (0 != xformBytes) { | |
479 savedTransforms = (float*) fPathTransformBuffer.alloc(count * xformBytes, | |
480 SkChunkAlloc::kThrow_AllocFailType); | |
481 SkASSERT(SkIsAlign4((uintptr_t)savedTransforms)); | |
482 memcpy(savedTransforms, transformValues, count * xformBytes); | |
485 } | 483 } |
joshualitt 2015/02/24 21:42:42
Is there anyway to abstract this somehow? On the
486 | 484 |
487 char* savedIndices = fPathIndexBuffer.append(count * indexBytes, | |
488 reinterpret_cast<const char*>(indices)); | |
489 float* savedTransforms = fPathTransformBuffer.append( | |
490 count * GrPathRendering::PathTransformSize(transformType), | |
491 transformValues); | |
492 | |
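
For context on this hunk: the old code appended into SkTDArrays and the recorded command stored offsets (fIndicesLocation / fTransformsLocation), since a growing array may reallocate and move its storage, while the new code keeps the raw pointers returned by fPathIndexBuffer.alloc() / fPathTransformBuffer.alloc(), which is only safe if the allocator never relocates data it has already handed out. A minimal generic sketch of that pointer-stability property, using hypothetical names rather than the actual SkChunkAlloc API:

    #include <cstddef>
    #include <vector>

    // Toy chunk allocator: growth adds a new block instead of reallocating,
    // so pointers returned by alloc() stay valid until rewind().
    class ToyChunkAlloc {
    public:
        explicit ToyChunkAlloc(size_t blockSize) : fBlockSize(blockSize), fUsed(blockSize) {}

        void* alloc(size_t bytes) {
            if (fBlocks.empty() || fUsed + bytes > fBlockSize) {
                fBlocks.emplace_back(bytes > fBlockSize ? bytes : fBlockSize);
                fUsed = 0;
            }
            void* p = fBlocks.back().data() + fUsed;
            fUsed += bytes;
            return p;   // not invalidated by later alloc() calls
        }

        void rewind() { fBlocks.clear(); fUsed = fBlockSize; }

    private:
        size_t fBlockSize;
        size_t fUsed;
        std::vector<std::vector<char>> fBlocks;  // moving an inner vector keeps its data in place
    };

With stable pointers, the recorded DrawPaths command can simply keep fIndices / fTransforms instead of recomputing addresses from offsets at flush time.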
493 if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) { | 485 if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) { |
494 // The previous command was also DrawPaths. Try to collapse this call into the one | 486 // The previous command was also DrawPaths. Try to collapse this call into the one |
495 // before. Note that stenciling all the paths at once, then covering, may not be | 487 // before. Note that stenciling all the paths at once, then covering, may not be |
496 // equivalent to two separate draw calls if there is overlap. Blending won't work, | 488 // equivalent to two separate draw calls if there is overlap. Blending won't work, |
497 // and the combined calls may also cancel each other's winding numbers in some | 489 // and the combined calls may also cancel each other's winding numbers in some |
498 // places. For now the winding numbers are only an issue if the fill is even/odd, | 490 // places. For now the winding numbers are only an issue if the fill is even/odd, |
499 // because DrawPaths is currently only used for glyphs, and glyphs in the same | 491 // because DrawPaths is currently only used for glyphs, and glyphs in the same |
500 // font tend to all wind in the same direction. | 492 // font tend to all wind in the same direction. |
501 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); | 493 DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back()); |
502 if (pathRange == previous->pathRange() && | 494 if (pathRange == previous->pathRange() && |
503 indexType == previous->fIndexType && | 495 indexType == previous->fIndexType && |
504 transformType == previous->fTransformType && | 496 transformType == previous->fTransformType && |
505 stencilSettings == previous->fStencilSettings && | 497 stencilSettings == previous->fStencilSettings && |
506 path_fill_type_is_winding(stencilSettings) && | 498 path_fill_type_is_winding(stencilSettings) && |
507 !pipelineInfo.willBlendWithDst(pathProc)) { | 499 !pipelineInfo.willBlendWithDst(pathProc)) { |
508 // Fold this DrawPaths call into the one previous. | 500 // Fold this DrawPaths call into the one previous. |
509 previous->fCount += count; | 501 previous->fCount += count; |
510 return; | 502 return; |
511 } | 503 } |
512 } | 504 } |
513 | 505 |
514 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); | 506 DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange)); |
515 dp->fIndicesLocation = SkToU32(savedIndices - fPathIndexBuffer.begin()); | 507 dp->fIndices = savedIndices; |
516 dp->fIndexType = indexType; | 508 dp->fIndexType = indexType; |
517 dp->fTransformsLocation = SkToU32(savedTransforms - fPathTransformBuffer.begin()); | 509 dp->fTransforms = savedTransforms; |
518 dp->fTransformType = transformType; | 510 dp->fTransformType = transformType; |
519 dp->fCount = count; | 511 dp->fCount = count; |
520 dp->fStencilSettings = stencilSettings; | 512 dp->fStencilSettings = stencilSettings; |
521 | 513 |
522 this->recordTraceMarkersIfNecessary(); | 514 this->recordTraceMarkersIfNecessary(); |
523 } | 515 } |
524 | 516 |
525 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color, | 517 void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color, |
526 bool canIgnoreRect, GrRenderTarget* renderTarget) { | 518 bool canIgnoreRect, GrRenderTarget* renderTarget) { |
527 SkASSERT(renderTarget); | 519 SkASSERT(renderTarget); |
(...skipping 35 matching lines...) | |
563 return; | 555 return; |
564 } | 556 } |
565 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); | 557 Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget)); |
566 clr->fColor = GrColor_ILLEGAL; | 558 clr->fColor = GrColor_ILLEGAL; |
567 this->recordTraceMarkersIfNecessary(); | 559 this->recordTraceMarkersIfNecessary(); |
568 } | 560 } |
569 | 561 |
570 void GrInOrderDrawBuffer::onReset() { | 562 void GrInOrderDrawBuffer::onReset() { |
571 fCmdBuffer.reset(); | 563 fCmdBuffer.reset(); |
572 fPrevState = NULL; | 564 fPrevState = NULL; |
573 reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve); | 565 fPathIndexBuffer.rewind(); |
574 reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve); | 566 fPathTransformBuffer.rewind(); |
575 fGpuCmdMarkers.reset(); | 567 fGpuCmdMarkers.reset(); |
576 fDrawBatch = NULL; | 568 fDrawBatch = NULL; |
577 } | 569 } |
578 | 570 |
579 void GrInOrderDrawBuffer::onFlush() { | 571 void GrInOrderDrawBuffer::onFlush() { |
580 if (fCmdBuffer.empty()) { | 572 if (fCmdBuffer.empty()) { |
581 return; | 573 return; |
582 } | 574 } |
583 | 575 |
584 // Updated every time we find a set state cmd to reflect the current state in the playback | 576 // Updated every time we find a set state cmd to reflect the current state in the playback |
(...skipping 35 matching lines...) | |
620 | 612 |
621 // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we will | 613 // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we will |
622 // only have GrBatch and we can delete this | 614 // only have GrBatch and we can delete this |
623 if (ss->fPrimitiveProcessor) { | 615 if (ss->fPrimitiveProcessor) { |
624 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, | 616 this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, |
625 *ss->getPipeline(), | 617 *ss->getPipeline(), |
626 ss->fBatchTracker); | 618 ss->fBatchTracker); |
627 } | 619 } |
628 currentState = ss; | 620 currentState = ss; |
629 } else { | 621 } else { |
630 iter->execute(this, currentState); | 622 iter->execute(this->getGpu(), currentState); |
631 } | 623 } |
632 | 624 |
633 if (iter->isTraced()) { | 625 if (iter->isTraced()) { |
634 this->getGpu()->removeGpuTraceMarker(&newMarker); | 626 this->getGpu()->removeGpuTraceMarker(&newMarker); |
635 } | 627 } |
636 } | 628 } |
637 | 629 |
638 // TODO see copious notes about hack | 630 // TODO see copious notes about hack |
639 fBatchTarget.postFlush(); | 631 fBatchTarget.postFlush(); |
640 | 632 |
641 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); | 633 SkASSERT(fGpuCmdMarkers.count() == currCmdMarker); |
642 ++fDrawID; | 634 ++fDrawID; |
643 } | 635 } |
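
For orientation (not part of the CL itself): the onFlush() loop above replays each recorded command through its execute() method, which this patch retargets from GrInOrderDrawBuffer* to GrGpu*. A stripped-down sketch of that record-then-replay command pattern, with hypothetical types rather than the real Skia classes:

    #include <memory>
    #include <vector>

    struct Gpu { void clear() { /* issue real GPU work here */ } };

    // Each recorded command replays itself against the GPU at flush time,
    // mirroring Cmd::execute(GrGpu*, const SetState*) after this change.
    struct Cmd {
        virtual ~Cmd() {}
        virtual void execute(Gpu* gpu) = 0;
    };

    struct ClearCmd : public Cmd {
        void execute(Gpu* gpu) override { gpu->clear(); }
    };

    class CmdBuffer {
    public:
        template <typename T> T* record() {
            fCmds.push_back(std::unique_ptr<Cmd>(new T()));
            return static_cast<T*>(fCmds.back().get());
        }
        void flush(Gpu* gpu) {                        // analogous to onFlush()
            for (size_t i = 0; i < fCmds.size(); ++i) { fCmds[i]->execute(gpu); }
            fCmds.clear();                            // analogous to onReset()
        }
    private:
        std::vector<std::unique_ptr<Cmd>> fCmds;
    };

Usage would be along the lines of cmdBuffer.record<ClearCmd>(); followed later by cmdBuffer.flush(&gpu);, which is the same defer-then-replay shape the draw buffer uses.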
644 | 636 |
645 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 637 void GrInOrderDrawBuffer::Draw::execute(GrGpu* gpu, const SetState* state) { |
646 SkASSERT(state); | 638 SkASSERT(state); |
647 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, | 639 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, |
648 &state->fBatchTracker); | 640 &state->fBatchTracker); |
649 buf->getGpu()->draw(args, fInfo); | 641 gpu->draw(args, fInfo); |
650 } | 642 } |
651 | 643 |
652 void GrInOrderDrawBuffer::StencilPath::execute(GrInOrderDrawBuffer* buf, const SetState*) { | 644 void GrInOrderDrawBuffer::StencilPath::execute(GrGpu* gpu, const SetState*) { |
653 GrGpu::StencilPathState state; | 645 GrGpu::StencilPathState state; |
654 state.fRenderTarget = fRenderTarget.get(); | 646 state.fRenderTarget = fRenderTarget.get(); |
655 state.fScissor = &fScissor; | 647 state.fScissor = &fScissor; |
656 state.fStencil = &fStencil; | 648 state.fStencil = &fStencil; |
657 state.fUseHWAA = fUseHWAA; | 649 state.fUseHWAA = fUseHWAA; |
658 state.fViewMatrix = &fViewMatrix; | 650 state.fViewMatrix = &fViewMatrix; |
659 | 651 |
660 buf->getGpu()->stencilPath(this->path(), state); | 652 gpu->stencilPath(this->path(), state); |
661 } | 653 } |
662 | 654 |
663 void GrInOrderDrawBuffer::DrawPath::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 655 void GrInOrderDrawBuffer::DrawPath::execute(GrGpu* gpu, const SetState* state) { |
664 SkASSERT(state); | 656 SkASSERT(state); |
665 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, | 657 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, |
666 &state->fBatchTracker); | 658 &state->fBatchTracker); |
667 buf->getGpu()->drawPath(args, this->path(), fStencilSettings); | 659 gpu->drawPath(args, this->path(), fStencilSettings); |
668 } | 660 } |
669 | 661 |
670 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 662 void GrInOrderDrawBuffer::DrawPaths::execute(GrGpu* gpu, const SetState* state) { |
671 SkASSERT(state); | 663 SkASSERT(state); |
672 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, | 664 DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc, |
673 &state->fBatchTracker); | 665 &state->fBatchTracker); |
674 buf->getGpu()->drawPaths(args, this->pathRange(), | 666 gpu->drawPaths(args, this->pathRange(), |
675 &buf->fPathIndexBuffer[fIndicesLocation], fIndexType, | 667 fIndices, fIndexType, |
676 &buf->fPathTransformBuffer[fTransformsLocation], fTransformType, | 668 fTransforms, fTransformType, |
677 fCount, fStencilSettings); | 669 fCount, fStencilSettings); |
678 } | 670 } |
679 | 671 |
680 void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) { | 672 void GrInOrderDrawBuffer::DrawBatch::execute(GrGpu* gpu, const SetState* state) { |
681 SkASSERT(state); | 673 SkASSERT(state); |
682 fBatch->generateGeometry(buf->getBatchTarget(), state->getPipeline()); | 674 fBatch->generateGeometry(fBatchTarget, state->getPipeline()); |
683 } | 675 } |
684 | 676 |
685 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {} | 677 void GrInOrderDrawBuffer::SetState::execute(GrGpu* gpu, const SetState*) {} |
686 | 678 |
687 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) { | 679 void GrInOrderDrawBuffer::Clear::execute(GrGpu* gpu, const SetState*) { |
688 if (GrColor_ILLEGAL == fColor) { | 680 if (GrColor_ILLEGAL == fColor) { |
689 buf->getGpu()->discard(this->renderTarget()); | 681 gpu->discard(this->renderTarget()); |
690 } else { | 682 } else { |
691 buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); | 683 gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget()); |
692 } | 684 } |
693 } | 685 } |
694 | 686 |
695 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrInOrderDrawBuffer* buf, const SetState*) { | 687 void GrInOrderDrawBuffer::ClearStencilClip::execute(GrGpu* gpu, const SetState*) { |
696 buf->getGpu()->clearStencilClip(fRect, fInsideClip, this->renderTarget()); | 688 gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget()); |
697 } | 689 } |
698 | 690 |
699 void GrInOrderDrawBuffer::CopySurface::execute(GrInOrderDrawBuffer* buf, const SetState*) { | 691 void GrInOrderDrawBuffer::CopySurface::execute(GrGpu* gpu, const SetState*) { |
700 buf->getGpu()->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); | 692 gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint); |
701 } | 693 } |
702 | 694 |
703 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, | 695 bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst, |
704 GrSurface* src, | 696 GrSurface* src, |
705 const SkIRect& srcRect, | 697 const SkIRect& srcRect, |
706 const SkIPoint& dstPoint) { | 698 const SkIPoint& dstPoint) { |
707 if (getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { | 699 if (getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) { |
708 this->closeBatch(); | 700 this->closeBatch(); |
709 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); | 701 CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src)); |
710 cs->fSrcRect = srcRect; | 702 cs->fSrcRect = srcRect; |
(...skipping 63 matching lines...) | |
774 } | 766 } |
775 } | 767 } |
776 | 768 |
777 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, | 769 void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount, |
778 size_t vertexStride, | 770 size_t vertexStride, |
779 int indexCount) { | 771 int indexCount) { |
780 this->closeBatch(); | 772 this->closeBatch(); |
781 | 773 |
782 this->INHERITED::willReserveVertexAndIndexSpace(vertexCount, vertexStride, indexCount); | 774 this->INHERITED::willReserveVertexAndIndexSpace(vertexCount, vertexStride, indexCount); |
783 } | 775 } |