Chromium Code Reviews

Side by Side Diff: src/gpu/GrInOrderDrawBuffer.cpp

Issue 845103005: GrBatchPrototype (Closed) Base URL: https://skia.googlesource.com/skia.git@lc2
Patch Set: file move Created 5 years, 11 months ago
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrInOrderDrawBuffer.h"

#include "GrDefaultGeoProcFactory.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrTemplates.h"
#include "GrFontCache.h"
#include "GrTexture.h"

GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : INHERITED(gpu, vertexPool, indexPool)
    , fCmdBuffer(kCmdBufferInitialSizeInBytes)
    , fPrevState(NULL)
    , fDrawID(0)
    , fBatchTarget(gpu, vertexPool, indexPool) {

    SkASSERT(vertexPool);
    SkASSERT(indexPool);

    fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve);
    fPathTransformBuffer.setReserve(kPathXformBufferMinReserve);
}

GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    this->reset();
(...skipping 169 matching lines...)
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
        return 0;
    }

    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());

    if (!draw->fInfo.isInstanced() ||
        draw->fInfo.primitiveType() != info.primitiveType() ||
        draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
        draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
        draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
        draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) {
        return 0;
    }
    if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
        return 0;
    }
(...skipping 36 matching lines...)
            draw->fInfo.adjustInstanceCount(-instancesConcated);
        } else {
            return;
        }
    } else {
        draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
    }
    this->recordTraceMarkersIfNecessary();
}
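The checks above implement draw concatenation: a new instanced draw is folded into the previously recorded Draw command when both read from the same buffers and the new vertices begin exactly where the previous draw's end. A minimal standalone sketch of that append-or-extend pattern, using hypothetical DrawSpec/recordDraw names rather than Skia's real types, and checking only a subset of what the real code compares:

// Sketch of draw concatenation with hypothetical types (not Skia's DrawInfo).
#include <cassert>
#include <vector>

struct DrawSpec {
    int fVertexBuffer;          // stand-in for a vertex buffer handle
    int fVerticesPerInstance;
    int fStartVertex;
    int fInstanceCount;
};

// Fold the new draw into the previous one when the source buffer matches and
// its vertices start exactly where the previous draw's vertices end.
void recordDraw(std::vector<DrawSpec>* cmds, const DrawSpec& info) {
    if (!cmds->empty()) {
        DrawSpec& prev = cmds->back();
        bool sameSource = prev.fVertexBuffer == info.fVertexBuffer &&
                          prev.fVerticesPerInstance == info.fVerticesPerInstance;
        bool contiguous = prev.fStartVertex +
                          prev.fVerticesPerInstance * prev.fInstanceCount == info.fStartVertex;
        if (sameSource && contiguous) {
            prev.fInstanceCount += info.fInstanceCount;   // concatenate, no new command
            return;
        }
    }
    cmds->push_back(info);                                // otherwise record a new command
}

int main() {
    std::vector<DrawSpec> cmds;
    recordDraw(&cmds, {/*vb=*/1, /*vertsPerInst=*/4, /*start=*/0, /*instances=*/2});
    recordDraw(&cmds, {1, 4, 8, 3});   // contiguous with the previous draw, so it merges
    assert(cmds.size() == 1 && cmds.back().fInstanceCount == 5);
    return 0;
}

The real check is stricter (primitive type, indices per instance, index buffer), but the record-or-concatenate decision is the same.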
void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
                                      const GrPipelineBuilder& pipelineBuilder,
                                      const GrScissorState& scissorState,
                                      const GrDeviceCoordTexture* dstCopy) {
    if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
        return;
    }

    // Check if there is a Batch Draw we can batch with
    if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
        GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
        return;
    }

    DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
    if (draw->fBatch->combineIfPossible(batch)) {
        return;
    } else {
        GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
    }
    this->recordTraceMarkersIfNecessary();
}
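onDrawBatch first tries to merge the incoming batch into the most recently recorded DrawBatch via combineIfPossible, and only appends a new command when that fails. A toy standalone version of that recording pattern, with a stand-in Batch type that is not GrBatch's real interface:

// Standalone sketch of "merge with the tail command or append"; Batch here is
// a toy stand-in for GrBatch.
#include <vector>

struct Batch {
    int fColor;       // toy state: batches with the same color can merge
    int fQuadCount;
    bool combineIfPossible(const Batch& that) {
        if (fColor != that.fColor) {
            return false;               // incompatible, caller must record a new command
        }
        fQuadCount += that.fQuadCount;  // fold the new work into this batch
        return true;
    }
};

struct Recorder {
    std::vector<Batch> fCmds;
    void drawBatch(const Batch& batch) {
        // Try to extend the last recorded batch before growing the command list.
        if (!fCmds.empty() && fCmds.back().combineIfPossible(batch)) {
            return;
        }
        fCmds.push_back(batch);
    }
};

int main() {
    Recorder rec;
    rec.drawBatch({/*color=*/0xff, /*quads=*/1});
    rec.drawBatch({0xff, 1});   // merges: still one command, now two quads
    rec.drawBatch({0x00, 1});   // different state: new command
    return rec.fCmds.size() == 2 ? 0 : 1;
}

Merging at record time keeps the command buffer short and lets a single playback draw cover work from many independent draw requests.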
void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
                                        const GrPathProcessor* pathProc,
                                        const GrPath* path,
                                        const GrScissorState& scissorState,
                                        const GrStencilSettings& stencilSettings) {
    StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
                                               (path, pipelineBuilder.getRenderTarget()));
    sp->fScissor = scissorState;
    sp->fUseHWAA = pipelineBuilder.isHWAntialias();
    sp->fViewMatrix = pathProc->viewMatrix();
(...skipping 124 matching lines...)
    reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve);
    reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve);
    fGpuCmdMarkers.reset();
}

void GrInOrderDrawBuffer::onFlush() {
    if (fCmdBuffer.empty()) {
        return;
    }

    CmdBuffer::Iter iter(fCmdBuffer);

    int currCmdMarker = 0;

    // Updated every time we find a set state cmd to reflect the current state in the playback
    // stream.
    SetState* currentState = NULL;

    // TODO to prevent flushing the batch buffer too much, we only flush when wasBatch && !isBatch
    // In the long term we can delete this and just flush once at the end of all geometry generation
    bool wasBatch = false;

    while (iter.next()) {
        GrGpuTraceMarker newMarker("", -1);
        SkString traceString;
        if (cmd_has_trace_marker(iter->fType)) {
            traceString = fGpuCmdMarkers[currCmdMarker].toString();
            newMarker.fMarker = traceString.c_str();
            this->getGpu()->addGpuTraceMarker(&newMarker);
            ++currCmdMarker;
        }

        bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);

        if (!isSetState && kDrawBatch_Cmd != strip_trace_bit(iter->fType)) {
            // TODO see note above, this gets deleted once everyone uses batch drawing
            if (wasBatch) {
                wasBatch = false;
                fBatchTarget.flush();
            }
        }

        if (isSetState) {
            SetState* ss = reinterpret_cast<SetState*>(iter.get());

            // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we
            // will only have GrBatch and we can delete this
            if (ss->fPrimitiveProcessor) {
                this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
                                                 ss->fPipeline,
                                                 ss->fPipeline.descInfo(),
                                                 ss->fBatchTracker);
            } else {
                wasBatch = true;
            }
            currentState = ss;
        } else {
            iter->execute(this, currentState);
        }

        if (cmd_has_trace_marker(iter->fType)) {
            this->getGpu()->removeGpuTraceMarker(&newMarker);
        }
    }

    // TODO see note above, one last catch
    if (wasBatch) {
        fBatchTarget.flush();
    }

    SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
    ++fDrawID;
}
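The wasBatch bookkeeping in onFlush delays flushing fBatchTarget until playback hits a command that neither sets state nor draws a batch, plus one final flush after the loop. A reduced sketch of that control flow with illustrative names (the real code flags the transition via the recorded SetState, as shown above):

// Sketch of the deferred-flush pattern in onFlush: only flush the side buffer
// when leaving a run of batch commands. Names are illustrative, not Skia's.
#include <cstdio>
#include <vector>

enum class CmdType { kSetState, kDraw, kDrawBatch };

void flushSideBuffer() { std::printf("flush batch geometry\n"); }

void playback(const std::vector<CmdType>& cmds) {
    bool wasBatch = false;
    for (CmdType cmd : cmds) {
        bool isSetState = cmd == CmdType::kSetState;
        // Leaving a batch run: push its generated geometry to the GPU first.
        if (!isSetState && cmd != CmdType::kDrawBatch && wasBatch) {
            wasBatch = false;
            flushSideBuffer();
        }
        if (cmd == CmdType::kDrawBatch) {
            wasBatch = true;   // geometry was generated into the side buffer
        }
        // ... execute cmd ...
    }
    if (wasBatch) {
        flushSideBuffer();     // one last catch at the end of playback
    }
}

int main() {
    playback({CmdType::kSetState, CmdType::kDrawBatch, CmdType::kDraw, CmdType::kDrawBatch});
    return 0;
}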
void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc,
                  &state->fBatchTracker);
    buf->getGpu()->draw(args, fInfo);
}
(...skipping 19 matching lines...)
void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc,
                  &state->fBatchTracker);
    buf->getGpu()->drawPaths(args, this->pathRange(),
                             &buf->fPathIndexBuffer[fIndicesLocation], fIndexType,
                             &buf->fPathTransformBuffer[fTransformsLocation], fTransformType,
                             fCount, fStencilSettings);
}

void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
    SkASSERT(state);
    fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
}

void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}

void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
    if (GrColor_ILLEGAL == fColor) {
        buf->getGpu()->discard(this->renderTarget());
    } else {
        buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
    }
}
(...skipping 27 matching lines...)
                                             (pipelineBuilder, primProc, *this->getGpu()->caps(),
                                              scissor, dstCopy));
    if (ss->fPipeline.mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                              ss->fPipeline.getInitBatchTracker());

    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                      *ss->fPrimitiveProcessor,
                                                      ss->fBatchTracker) &&
        fPrevState->fPipeline.isEqual(ss->fPipeline)) {
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        this->recordTraceMarkersIfNecessary();
    }
    return true;
}

bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
                                                   const GrPipelineBuilder& pipelineBuilder,
                                                   const GrScissorState& scissor,
                                                   const GrDeviceCoordTexture* dstCopy) {
    // TODO this gets much simpler when we have batches everywhere.
    // If the previous command is also a set state, then we check to see if it has a Batch. If so,
    // and we can make the two batches equal, and we can combine the states, then we make them equal
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
                                            (batch, pipelineBuilder, *this->getGpu()->caps(),
                                             scissor, dstCopy));
    if (ss->fPipeline.mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());

    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPipeline.isEqual(ss->fPipeline)) {
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        this->recordTraceMarkersIfNecessary();
    }
    return true;
}
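Both recordStateAndShouldDraw overloads append a SetState speculatively and then pop_back when it turns out to be redundant, either because the pipeline must be skipped or because it matches the previously recorded state, so duplicate state changes never reach playback. A compact sketch of that append-then-pop idea with a toy State type that is not Skia's SetState:

// Sketch of speculative state recording: append, then pop if it duplicates the
// previous surviving state.
#include <vector>

struct State {
    int fPipelineKey;    // toy summary of the pipeline configuration
    bool isEqual(const State& that) const { return fPipelineKey == that.fPipelineKey; }
};

struct StateRecorder {
    std::vector<State> fCmds;
    State fPrevState{};
    bool fHavePrevState = false;

    void recordState(const State& s) {
        // Speculatively record the state, then drop it if nothing changed since
        // the last state that survived recording.
        fCmds.push_back(s);
        if (fHavePrevState && fPrevState.isEqual(fCmds.back())) {
            fCmds.pop_back();
        } else {
            fPrevState = fCmds.back();
            fHavePrevState = true;
        }
    }
};

int main() {
    StateRecorder rec;
    rec.recordState({1});
    rec.recordState({1});   // duplicate: popped, command list stays at one entry
    rec.recordState({2});   // different pipeline: kept
    return rec.fCmds.size() == 2 ? 0 : 1;
}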
void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
    SkASSERT(!fCmdBuffer.empty());
    SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
    const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
    if (activeTraceMarkers.count() > 0) {
        fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType);
        fGpuCmdMarkers.push_back(activeTraceMarkers);
    }
}