Chromium Code Reviews

Unified Diff: src/gpu/GrInOrderDrawBuffer.cpp

Issue 862823004: Revert of GrBatchPrototype (Closed)
Base URL: https://skia.googlesource.com/skia.git@lc2
Patch Set: Created 5 years, 11 months ago
 /*
  * Copyright 2011 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "GrInOrderDrawBuffer.h"

 #include "GrDefaultGeoProcFactory.h"
 #include "GrDrawTargetCaps.h"
 #include "GrGpu.h"
 #include "GrTemplates.h"
 #include "GrFontCache.h"
 #include "GrTexture.h"

 GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                          GrVertexBufferAllocPool* vertexPool,
                                          GrIndexBufferAllocPool* indexPool)
     : INHERITED(gpu, vertexPool, indexPool)
     , fCmdBuffer(kCmdBufferInitialSizeInBytes)
     , fPrevState(NULL)
-    , fDrawID(0)
-    , fBatchTarget(gpu, vertexPool, indexPool) {
+    , fDrawID(0) {

     SkASSERT(vertexPool);
     SkASSERT(indexPool);

     fPathIndexBuffer.setReserve(kPathIdxBufferMinReserve);
     fPathTransformBuffer.setReserve(kPathXformBufferMinReserve);
 }

 GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
     this->reset();
(...skipping 169 matching lines...)
     }
     // Check if there is a draw info that is compatible that uses the same VB from the pool and
     // the same IB
     if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
         return 0;
     }

     Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());

     if (!draw->fInfo.isInstanced() ||
-        draw->fInfo.primitiveType() != info.primitiveType() ||
         draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
         draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
         draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
         draw->fInfo.indexBuffer() != geomSrc.fIndexBuffer) {
         return 0;
     }
     if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
         return 0;
     }

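The hunk above is the compatibility check used when concatenating instanced draws: a new instanced draw can only be folded into the previously recorded Draw command if it uses the same per-instance geometry and the same vertex/index buffers, and if its vertices start exactly where the previous draw's vertices end. A minimal standalone sketch of that pattern follows; the DrawInfo struct and concatInstancedDraw function here are simplified, hypothetical stand-ins for the real Skia types, not the actual API.

    // Sketch only: simplified stand-ins for DrawInfo and the recorded Draw command.
    #include <cstdio>

    struct DrawInfo {
        const void* vertexBuffer;
        const void* indexBuffer;
        int verticesPerInstance;
        int indicesPerInstance;
        int startVertex;
        int instanceCount;
    };

    // Returns how many instances of 'info' were folded into 'last' (0 if incompatible).
    static int concatInstancedDraw(DrawInfo* last, const DrawInfo& info) {
        if (last->verticesPerInstance != info.verticesPerInstance ||
            last->indicesPerInstance != info.indicesPerInstance ||
            last->vertexBuffer != info.vertexBuffer ||
            last->indexBuffer != info.indexBuffer) {
            return 0;  // different geometry layout or buffers: cannot concatenate
        }
        // The new draw must continue exactly where the previous one ended.
        if (last->startVertex + last->verticesPerInstance * last->instanceCount != info.startVertex) {
            return 0;
        }
        last->instanceCount += info.instanceCount;  // merge into the existing command
        return info.instanceCount;
    }

    int main() {
        char vb[1], ib[1];
        DrawInfo last = {vb, ib, 4, 6, 0, 2};   // two quads already recorded
        DrawInfo next = {vb, ib, 4, 6, 8, 1};   // one more quad, contiguous vertices
        std::printf("concatenated %d instance(s)\n", concatInstancedDraw(&last, next));
        return 0;
    }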
(...skipping 35 matching lines...)
             draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
             draw->fInfo.adjustInstanceCount(-instancesConcated);
         } else {
             return;
         }
     } else {
         draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
     }
     this->recordTraceMarkersIfNecessary();
 }
-
-void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
-                                      const GrPipelineBuilder& pipelineBuilder,
-                                      const GrScissorState& scissorState,
-                                      const GrDeviceCoordTexture* dstCopy) {
-    if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
-        return;
-    }
-
-    // Check if there is a Batch Draw we can batch with
-    if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
-        GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
-        return;
-    }
-
-    DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
-    if (draw->fBatch->combineIfPossible(batch)) {
-        return;
-    } else {
-        GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
-    }
-    this->recordTraceMarkersIfNecessary();
-}

 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
                                         const GrPathProcessor* pathProc,
                                         const GrPath* path,
                                         const GrScissorState& scissorState,
                                         const GrStencilSettings& stencilSettings) {
     StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
                                                (path, pipelineBuilder.getRenderTarget()));
     sp->fScissor = scissorState;
     sp->fUseHWAA = pipelineBuilder.isHWAntialias();
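The removed onDrawBatch above is the core of the reverted prototype: each incoming GrBatch either merges into the DrawBatch command already at the tail of the command buffer (via combineIfPossible) or is recorded as a new command. A rough standalone sketch of that tail-merge pattern follows; the Batch, DrawBatchCmd, and CommandBuffer types are hypothetical stand-ins for the Skia classes, and the merge criterion shown (a class id) is an assumption for illustration only.

    // Sketch only: hypothetical stand-ins for GrBatch and the recorded command buffer.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Batch {
        uint32_t fClassID;   // batches of different kinds never combine
        int fVertexCount;
        bool combineIfPossible(const Batch& that) {
            if (fClassID != that.fClassID) {
                return false;
            }
            fVertexCount += that.fVertexCount;  // merge the geometry into this batch
            return true;
        }
    };

    struct DrawBatchCmd {
        Batch fBatch;
    };

    class CommandBuffer {
    public:
        void drawBatch(const Batch& batch) {
            // If the last recorded command can absorb the new batch, fold it in.
            if (!fCmds.empty() && fCmds.back().fBatch.combineIfPossible(batch)) {
                return;
            }
            fCmds.push_back(DrawBatchCmd{batch});  // otherwise record a new command
        }
        size_t count() const { return fCmds.size(); }
    private:
        std::vector<DrawBatchCmd> fCmds;
    };

    int main() {
        CommandBuffer buf;
        buf.drawBatch(Batch{1, 6});
        buf.drawBatch(Batch{1, 6});   // same kind of batch: merges into the previous command
        buf.drawBatch(Batch{2, 4});   // different kind: recorded separately
        std::printf("%zu recorded command(s)\n", buf.count());
        return 0;
    }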
(...skipping 125 matching lines...)
     reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve);
     reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve);
     fGpuCmdMarkers.reset();
 }

 void GrInOrderDrawBuffer::onFlush() {
     if (fCmdBuffer.empty()) {
         return;
     }

+
     CmdBuffer::Iter iter(fCmdBuffer);

     int currCmdMarker = 0;

     // Updated every time we find a set state cmd to reflect the current state in the playback
     // stream.
     SetState* currentState = NULL;

-    // TODO to prevent flushing the batch buffer too much, we only flush when wasBatch && !isBatch
-    // In the long term we can delete this and just flush once at the end of all geometry generation
-    bool wasBatch = false;
-
     while (iter.next()) {
         GrGpuTraceMarker newMarker("", -1);
         SkString traceString;
         if (cmd_has_trace_marker(iter->fType)) {
             traceString = fGpuCmdMarkers[currCmdMarker].toString();
             newMarker.fMarker = traceString.c_str();
             this->getGpu()->addGpuTraceMarker(&newMarker);
             ++currCmdMarker;
         }

-        bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);
-
-        if (!isSetState && kDrawBatch_Cmd != strip_trace_bit(iter->fType)) {
-            // TODO see note above, this gets deleted once everyone uses batch drawing
-            if (wasBatch) {
-                wasBatch = false;
-                fBatchTarget.flush();
-            }
-        }
-
-        if (isSetState) {
+        if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
             SetState* ss = reinterpret_cast<SetState*>(iter.get());

-            // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we will
-            // only have GrBatch and we can delete this
-            if (ss->fPrimitiveProcessor) {
-                this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
-                                                 ss->fPipeline,
-                                                 ss->fPipeline.descInfo(),
-                                                 ss->fBatchTracker);
-            } else {
-                wasBatch = true;
-            }
+            this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline,
+                                             ss->fPipeline.descInfo(), ss->fBatchTracker);
             currentState = ss;
+
         } else {
             iter->execute(this, currentState);
         }

         if (cmd_has_trace_marker(iter->fType)) {
             this->getGpu()->removeGpuTraceMarker(&newMarker);
         }
     }

-    // TODO see note above, one last catch
-    if (wasBatch) {
-        fBatchTarget.flush();
-    }
-
     SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
     ++fDrawID;
 }

 void GrInOrderDrawBuffer::Draw::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
     SkASSERT(state);
     DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc,
                   &state->fBatchTracker);
     buf->getGpu()->draw(args, fInfo);
 }
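On the removed side, onFlush had to interleave two kinds of commands: GrBatch draws whose geometry is generated lazily into fBatchTarget, and legacy draws that need that geometry to be on the GPU before they execute. The wasBatch flag defers fBatchTarget.flush() until the playback stream switches from batch commands back to a non-batch command, plus one final flush at the end. A condensed, approximate sketch of that transition-driven flush follows; the Cmd enum and BatchTarget type are hypothetical, and the exact point where wasBatch is set differs slightly from the real code.

    // Sketch only: hypothetical command stream illustrating the wasBatch flush heuristic.
    #include <cstdio>
    #include <vector>

    enum class Cmd { kSetState, kDrawBatch, kDraw };

    struct BatchTarget {
        int fPending = 0;
        void record() { ++fPending; }
        void flush() { std::printf("flushing %d batched draw(s)\n", fPending); fPending = 0; }
    };

    static void playback(const std::vector<Cmd>& cmds, BatchTarget* target) {
        bool wasBatch = false;
        for (Cmd c : cmds) {
            bool isSetState = (c == Cmd::kSetState);
            // Flush deferred batch geometry only when a non-batch, non-state command follows it.
            if (!isSetState && c != Cmd::kDrawBatch && wasBatch) {
                wasBatch = false;
                target->flush();
            }
            if (c == Cmd::kDrawBatch) {
                wasBatch = true;
                target->record();
            }
            // kSetState and kDraw execution elided.
        }
        if (wasBatch) {
            target->flush();  // "one last catch", as in the removed TODO
        }
    }

    int main() {
        BatchTarget target;
        playback({Cmd::kSetState, Cmd::kDrawBatch, Cmd::kDrawBatch, Cmd::kDraw, Cmd::kDrawBatch},
                 &target);
        return 0;
    }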
(...skipping 19 matching lines...)
 void GrInOrderDrawBuffer::DrawPaths::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
     SkASSERT(state);
     DrawArgs args(state->fPrimitiveProcessor.get(), &state->fPipeline, &state->fDesc,
                   &state->fBatchTracker);
     buf->getGpu()->drawPaths(args, this->pathRange(),
                              &buf->fPathIndexBuffer[fIndicesLocation], fIndexType,
                              &buf->fPathTransformBuffer[fTransformsLocation], fTransformType,
                              fCount, fStencilSettings);
 }

-void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
-    SkASSERT(state);
-    fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
-}
-
 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}

 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
     if (GrColor_ILLEGAL == fColor) {
         buf->getGpu()->discard(this->renderTarget());
     } else {
         buf->getGpu()->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
     }
 }

(...skipping 27 matching lines...)
                                              (pipelineBuilder, primProc, *this->getGpu()->caps(),
                                               scissor, dstCopy));
     if (ss->fPipeline.mustSkip()) {
         fCmdBuffer.pop_back();
         return false;
     }

     ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                               ss->fPipeline.getInitBatchTracker());

-    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
+    if (fPrevState &&
         fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                       *ss->fPrimitiveProcessor,
                                                       ss->fBatchTracker) &&
         fPrevState->fPipeline.isEqual(ss->fPipeline)) {
         fCmdBuffer.pop_back();
     } else {
         fPrevState = ss;
         this->recordTraceMarkersIfNecessary();
     }
     return true;
 }

-bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
-                                                   const GrPipelineBuilder& pipelineBuilder,
-                                                   const GrScissorState& scissor,
-                                                   const GrDeviceCoordTexture* dstCopy) {
-    // TODO this gets much simpler when we have batches everywhere.
-    // If the previous command is also a set state, then we check to see if it has a Batch. If so,
-    // and we can make the two batches equal, and we can combine the states, then we make them equal
-    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
-                                            (batch, pipelineBuilder, *this->getGpu()->caps(), scissor,
-                                             dstCopy));
-    if (ss->fPipeline.mustSkip()) {
-        fCmdBuffer.pop_back();
-        return false;
-    }
-
-    batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());
-
-    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
-        fPrevState->fPipeline.isEqual(ss->fPipeline)) {
-        fCmdBuffer.pop_back();
-    } else {
-        fPrevState = ss;
-        this->recordTraceMarkersIfNecessary();
-    }
-    return true;
-}
-
 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
     SkASSERT(!fCmdBuffer.empty());
     SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
     const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
     if (activeTraceMarkers.count() > 0) {
         fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType);
         fGpuCmdMarkers.push_back(activeTraceMarkers);
     }
 }
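Both variants of recordStateAndShouldDraw above follow the same pattern: optimistically append a SetState command, then pop it back off if the pipeline says the draw must be skipped, or if the new state is interchangeable with the previously recorded one (so the earlier SetState keeps serving). A small sketch of that append-then-dedup idea follows; the State and Recorder types are simplified, hypothetical stand-ins, not the real SetState/GrPipeline classes.

    // Sketch only: simplified append-then-dedup recording of pipeline state.
    #include <cstdio>
    #include <vector>

    struct State {
        int fPipelineKey;   // stand-in for the full pipeline/program description
        bool fMustSkip;     // stand-in for GrPipeline::mustSkip()
        bool isEqual(const State& that) const { return fPipelineKey == that.fPipelineKey; }
    };

    class Recorder {
    public:
        // Returns false if the draw that follows should be dropped entirely.
        bool recordStateAndShouldDraw(const State& state) {
            fCmds.push_back(state);                        // optimistic append
            if (fCmds.back().fMustSkip) {
                fCmds.pop_back();                          // the draw would be a no-op
                return false;
            }
            if (fPrevState >= 0 && fCmds[fPrevState].isEqual(fCmds.back())) {
                fCmds.pop_back();                          // redundant: reuse the previous SetState
            } else {
                fPrevState = (int)fCmds.size() - 1;        // this becomes the new "previous" state
            }
            return true;
        }
        size_t count() const { return fCmds.size(); }
    private:
        std::vector<State> fCmds;
        int fPrevState = -1;
    };

    int main() {
        Recorder rec;
        rec.recordStateAndShouldDraw({1, false});
        rec.recordStateAndShouldDraw({1, false});   // deduplicated against the previous state
        rec.recordStateAndShouldDraw({2, false});   // different state, kept
        std::printf("%zu SetState command(s) recorded\n", rec.count());
        return 0;
    }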