| Index: src/gpu/instanced/InstancedRendering.cpp
|
| diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..db96acf43b71254d60b4202350814b2e73fa2d23
|
| --- /dev/null
|
| +++ b/src/gpu/instanced/InstancedRendering.cpp
|
| @@ -0,0 +1,440 @@
|
| +/*
|
| + * Copyright 2016 Google Inc.
|
| + *
|
| + * Use of this source code is governed by a BSD-style license that can be
|
| + * found in the LICENSE file.
|
| + */
|
| +
|
| +#include "InstancedRendering.h"
|
| +
|
| +#include "GrBatchFlushState.h"
|
| +#include "GrPipeline.h"
|
| +#include "GrResourceProvider.h"
|
| +#include "instanced/InstanceProcessor.h"
|
| +
|
| +namespace gr_instanced {
|
| +
|
| +InstancedRendering::InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode)
|
| + : fGpu(SkRef(gpu)),
|
| + fLastSupportedAAMode(lastSupportedAAMode),
|
| + fState(State::kRecordingDraws) {
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
|
| + GrColor color, bool antialias,
|
| + const GrInstancedPipelineInfo& info, bool* useHWAA) {
|
| + return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, info,
|
| + useHWAA);
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
|
| + GrColor color, const SkRect& localRect, bool antialias,
|
| + const GrInstancedPipelineInfo& info, bool* useHWAA) {
|
| + return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, antialias, info,
|
| + useHWAA);
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
|
| + GrColor color, const SkMatrix& localMatrix,
|
| + bool antialias, const GrInstancedPipelineInfo& info,
|
| + bool* useHWAA) {
|
| + if (localMatrix.hasPerspective()) {
|
| + return nullptr; // Perspective is not yet supported in the local matrix.
|
| + }
|
| + if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias,
|
| + info, useHWAA)) {
|
| + fInstances.back().fInfo |= kLocalMatrix_InfoFlag;
|
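| + // The top two rows of the local matrix ride along as two params texels.
|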
| + this->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
|
| + localMatrix.getTranslateX());
|
| + this->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
|
| + localMatrix.getTranslateY());
|
| + batch->fInfo.fHasLocalMatrix = true;
|
| + batch->fInfo.fHasParams = true;
|
| + return batch;
|
| + }
|
| + return nullptr;
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
|
| + GrColor color, bool antialias,
|
| + const GrInstancedPipelineInfo& info, bool* useHWAA) {
|
| + return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, antialias, info,
|
| + useHWAA);
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
|
| + GrColor color, bool antialias,
|
| + const GrInstancedPipelineInfo& info, bool* useHWAA) {
|
| + if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
|
| + rrect.rect(), antialias, info, useHWAA)) {
|
| + this->appendRRectParams(rrect, &batch->fInfo);
|
| + return batch;
|
| + }
|
| + return nullptr;
|
| +}
|
| +
|
| +GrDrawBatch* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
|
| + const SkMatrix& viewMatrix, GrColor color,
|
| + bool antialias, const GrInstancedPipelineInfo& info,
|
| + bool* useHWAA) {
|
| + if (inner.getType() > SkRRect::kSimple_Type) {
|
| + return nullptr; // Complex inner round rects are not yet supported.
|
| + }
|
| + if (SkRRect::kEmpty_Type == inner.getType()) {
|
| + return this->recordRRect(outer, viewMatrix, color, antialias, info, useHWAA);
|
| + }
|
| + if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
|
| + outer.rect(), antialias, info, useHWAA)) {
|
| + this->appendRRectParams(outer, &batch->fInfo);
|
| + ShapeType innerShapeType = GetRRectShapeType(inner);
|
| + batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
|
| + fInstances.back().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
|
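| + // The inner rect and its radii follow as additional params so the inner shape can be
|
| + // carved out of the outer one.
|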
| + this->appendParamsTexel(inner.rect().asScalars(), 4);
|
| + this->appendRRectParams(inner, &batch->fInfo);
|
| + batch->fInfo.fHasParams = true;
|
| + return batch;
|
| + }
|
| + return nullptr;
|
| +}
|
| +
|
| +InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds,
|
| + const SkMatrix& viewMatrix,
|
| + GrColor color, const SkRect& localRect,
|
| + bool antialias,
|
| + const GrInstancedPipelineInfo& info,
|
| + bool* useHWAA) {
|
| + SkASSERT(State::kRecordingDraws == fState);
|
| +
|
| + uint32_t paramsIdx = fParams.count();
|
| + if (paramsIdx > kParamsIdx_InfoMask) {
|
| + return nullptr; // paramsIdx is too large for its allotted space.
|
| + }
|
| +
|
| + AntialiasMode antialiasMode;
|
| + if (!this->selectAntialiasMode(viewMatrix, antialias, info, useHWAA, &antialiasMode)) {
|
| + return nullptr;
|
| + }
|
| +
|
| + Batch* batch = this->createBatch(fInstances.count());
|
| + batch->fInfo.fAntialiasMode = antialiasMode;
|
| + batch->fInfo.fShapeTypes = GetShapeFlag(type);
|
| + batch->fInfo.fCannotDiscard = !info.fCanDiscard;
|
| +
|
| + Instance& instance = fInstances.push_back();
|
| + instance.fInfo = ((int)type << kShapeType_InfoBit) | paramsIdx;
|
| +
|
| + // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
|
| + // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
|
| + float sx = 0.5f * bounds.width();
|
| + float sy = 0.5f * bounds.height();
|
| + float tx = sx + bounds.fLeft;
|
| + float ty = sy + bounds.fTop;
|
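| + // (sx, sy) is half the shape's width/height and (tx, ty) is its center, so scaling
|
| + // [-1, +1] by (sx, sy) and translating by (tx, ty) reproduces "bounds" exactly.
|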
| + if (!viewMatrix.hasPerspective()) {
|
| + float* m = instance.fShapeMatrix2x3;
|
| + m[0] = viewMatrix.getScaleX() * sx;
|
| + m[1] = viewMatrix.getSkewX() * sy;
|
| + m[2] = viewMatrix.getTranslateX() +
|
| + viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
|
| +
|
| + m[3] = viewMatrix.getSkewY() * sx;
|
| + m[4] = viewMatrix.getScaleY() * sy;
|
| + m[5] = viewMatrix.getTranslateY() +
|
| + viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
|
| +
|
| + // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
|
| + // it's quite simple to find the bounding rectangle:
|
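| + // The corners of [-1, +1] are (+/-1, +/-1), so the mapped x can stray at most
|
| + // |m[0]| + |m[1]| from the center m[2] (and likewise y at most |m[3]| + |m[4]| from m[5]).
|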
| + float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
|
| + float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
|
| + batch->fBounds.fLeft = m[2] - devBoundsHalfWidth;
|
| + batch->fBounds.fRight = m[2] + devBoundsHalfWidth;
|
| + batch->fBounds.fTop = m[5] - devBoundsHalfHeight;
|
| + batch->fBounds.fBottom = m[5] + devBoundsHalfHeight;
|
| +
|
| + // TODO: Is this worth the CPU overhead?
|
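| + // The 2x2 part of 'm' maps a square to a square only when it is a uniform scale times a
|
| + // rotation (or reflection), i.e. its rows (m[0], m[1]) and (m[3], m[4]) are orthogonal and
|
| + // of equal length; the checks below test exactly those conditions.
|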
| + batch->fInfo.fNonSquare =
|
| + fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
|
| + fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
|
| + fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
|
| + } else {
|
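| + // With perspective the mapping can't be folded into a 2x3 matrix, so compose the full
|
| + // shape matrix and send its perspective row separately as a params texel.
|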
| + SkMatrix shapeMatrix(viewMatrix);
|
| + shapeMatrix.preTranslate(tx, ty);
|
| + shapeMatrix.preScale(sx, sy);
|
| + instance.fInfo |= kPerspective_InfoFlag;
|
| +
|
| + float* m = instance.fShapeMatrix2x3;
|
| + m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
|
| + m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
|
| + m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
|
| + m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
|
| + m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
|
| + m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
|
| +
|
| + // Send the perspective column as a param.
|
| + this->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
|
| + shapeMatrix[SkMatrix::kMPersp2]);
|
| + batch->fInfo.fHasPerspective = true;
|
| + batch->fInfo.fHasParams = true;
|
| +
|
| + viewMatrix.mapRect(&batch->fBounds, bounds);
|
| +
|
| + batch->fInfo.fNonSquare = true;
|
| + }
|
| +
|
| + instance.fColor = color;
|
| +
|
| + const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
|
| + memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
|
| +
|
| + fBatchList.addToTail(batch);
|
| + return batch;
|
| +}
|
| +
|
| +inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
|
| + const GrInstancedPipelineInfo& info,
|
| + bool* useHWAA, AntialiasMode* antialiasMode) {
|
| + SkASSERT(!info.fColorDisabled || info.fDrawingShapeToStencil);
|
| + SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
|
| +
|
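| + // When the target is single sampled (or MSAA can be disabled), prefer analytic coverage
|
| + // AA for antialiased draws whose view matrix preserves right angles; everything else
|
| + // falls through to the MSAA / mixed-sample paths below.
|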
| + if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
|
| + SkASSERT(fLastSupportedAAMode >= AntialiasMode::kCoverage);
|
| + if (!antialias) {
|
| + if (info.fDrawingShapeToStencil && !info.fCanDiscard) {
|
| + // We can't draw to the stencil buffer without discard (or sample mask if MSAA).
|
| + return false;
|
| + }
|
| + *antialiasMode = AntialiasMode::kNone;
|
| + *useHWAA = false;
|
| + return true;
|
| + }
|
| +
|
| + if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
|
| + *antialiasMode = AntialiasMode::kCoverage;
|
| + *useHWAA = false;
|
| + return true;
|
| + }
|
| + }
|
| +
|
| + if (info.fIsMultisampled && fLastSupportedAAMode >= AntialiasMode::kMSAA) {
|
| + if (!info.fIsMixedSampled || info.fColorDisabled) {
|
| + *antialiasMode = AntialiasMode::kMSAA;
|
| + *useHWAA = true;
|
| + return true;
|
| + }
|
| + if (fLastSupportedAAMode >= AntialiasMode::kMixedSamples) {
|
| + *antialiasMode = AntialiasMode::kMixedSamples;
|
| + *useHWAA = true;
|
| + return true;
|
| + }
|
| + }
|
| +
|
| + return false;
|
| +}
|
| +
|
| +void InstancedRendering::appendRRectParams(const SkRRect& rrect, BatchInfo* batchInfo) {
|
| + switch (rrect.getType()) {
|
| + case SkRRect::kSimple_Type: {
|
| + const SkVector& radii = rrect.getSimpleRadii();
|
| + this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
|
| + batchInfo->fHasParams = true;
|
| + return;
|
| + }
|
| + case SkRRect::kNinePatch_Type: {
|
| + float twoOverW = 2 / rrect.width();
|
| + float twoOverH = 2 / rrect.height();
|
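| + // Scaling a radius by 2/width (or 2/height) expresses it in the [-1, +1] space the
|
| + // instanced geometry is drawn in.
|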
| + const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
|
| + const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
|
| + this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
|
| + radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
|
| + batchInfo->fHasParams = true;
|
| + return;
|
| + }
|
| + case SkRRect::kComplex_Type: {
|
| + /**
|
| + * The x and y radii of each arc are stored in separate vectors,
|
| + * in the following order:
|
| + *
|
| + *        __x1 _ _ _ x3__
|
| + *    y1 |               | y2
|
| + *
|
| + *       |               |
|
| + *
|
| + *    y3 |__   _ _ _   __| y4
|
| + *          x2       x4
|
| + *
|
| + */
|
| + float twoOverW = 2 / rrect.width();
|
| + float twoOverH = 2 / rrect.height();
|
| + const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
|
| + const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
|
| + const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
|
| + const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
|
| + this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
|
| + radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
|
| + this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
|
| + radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
|
| + batchInfo->fHasParams = true;
|
| + return;
|
| + }
|
| + default: return;
|
| + }
|
| +}
|
| +
|
| +void InstancedRendering::appendParamsTexel(const SkScalar* vals, int count) {
|
| + SkASSERT(count <= 4 && count >= 0);
|
| + const float* valsAsFloats = vals; // Ensure SkScalar == float.
|
| + memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
|
| +}
|
| +
|
| +void InstancedRendering::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
|
| + ParamsTexel& texel = fParams.push_back();
|
| + texel.fX = SkScalarToFloat(x);
|
| + texel.fY = SkScalarToFloat(y);
|
| + texel.fZ = SkScalarToFloat(z);
|
| + texel.fW = SkScalarToFloat(w);
|
| +}
|
| +
|
| +void InstancedRendering::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
|
| + ParamsTexel& texel = fParams.push_back();
|
| + texel.fX = SkScalarToFloat(x);
|
| + texel.fY = SkScalarToFloat(y);
|
| + texel.fZ = SkScalarToFloat(z);
|
| +}
|
| +
|
| +InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir, int instanceIdx)
|
| + : INHERITED(classID),
|
| + fInstancedRendering(ir) {
|
| + fDrawCmds.push_back().fInstanceRange = {instanceIdx, 1};
|
| +}
|
| +
|
| +void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
|
| + DrawCmd& cmd = this->getSingleDrawCmd(); // This will assert if we have > 1 command.
|
| + SkASSERT(1 == cmd.fInstanceRange.fCount); // The batch should not have been combined yet.
|
| + SkASSERT(SkIsPow2(fInfo.fShapeTypes));
|
| + SkASSERT(cmd.fGeometry.isEmpty());
|
| +
|
| + if (kRect_ShapeFlag == fInfo.fShapeTypes) {
|
| + cmd.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode);
|
| + } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
|
| + cmd.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds);
|
| + } else {
|
| + cmd.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode);
|
| + }
|
| +
|
| + GrColor overrideColor;
|
| + if (overrides.getOverrideColorIfSet(&overrideColor)) {
|
| + SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
|
| + fInstancedRendering->fInstances[cmd.getSingleInstanceIdx()].fColor = overrideColor;
|
| + }
|
| + fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
|
| + fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();
|
| +}
|
| +
|
| +bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
|
| + Batch* that = static_cast<Batch*>(other);
|
| + SkASSERT(fInstancedRendering == that->fInstancedRendering);
|
| +
|
| + if (!fInfo.canJoin(that->fInfo) ||
|
| + !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
|
| + *that->pipeline(), that->bounds(), caps)) {
|
| + return false;
|
| + }
|
| +
|
| + fBounds.join(that->fBounds);
|
| + fInfo.join(that->fInfo);
|
| +
|
| + // Join the draw commands.
|
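| + // If this batch's last command and that batch's first command share the same geometry
|
| + // and cover contiguous instance ranges, they collapse into a single instanced draw.
|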
| + int idx = 0;
|
| + if (fDrawCmds.back().fGeometry == that->fDrawCmds.front().fGeometry) {
|
| + SkASSERT(!fDrawCmds.back().fGeometry.isEmpty()); // Should have called initBatchTracker.
|
| + InstanceRange& thisRange = fDrawCmds.back().fInstanceRange;
|
| + const InstanceRange& thatRange = that->fDrawCmds.front().fInstanceRange;
|
| + if (thisRange.end() == thatRange.fStart) {
|
| + thisRange.fCount += thatRange.fCount;
|
| + ++idx;
|
| + }
|
| + }
|
| + if (idx < that->fDrawCmds.count()) {
|
| + fDrawCmds.push_back_n(that->fDrawCmds.count() - idx, &that->fDrawCmds[idx]);
|
| + }
|
| +
|
| + return true;
|
| +}
|
| +
|
| +void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
|
| + GrInitInvariantOutput* coverage,
|
| + GrBatchToXPOverrides* overrides) const {
|
| + // We need to be careful about fInfo here and consider how it might change as batches combine.
|
| + // e.g. We can't make an assumption based on fInfo.isSimpleRects() because the batch might
|
| + // later combine with a non-rect.
|
| + color->setUnknownFourComponents();
|
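| + // Under MSAA, or with no AA when discard is available, every emitted pixel has full
|
| + // coverage; only the analytic coverage AA path produces fractional coverage.
|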
| + if (fInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
|
| + coverage->setKnownSingleComponent(255);
|
| + } else if (AntialiasMode::kNone == fInfo.fAntialiasMode && !fInfo.fCannotDiscard) {
|
| + coverage->setKnownSingleComponent(255);
|
| + } else {
|
| + coverage->setUnknownSingleComponent();
|
| + }
|
| +}
|
| +
|
| +void InstancedRendering::beginFlush(GrResourceProvider* rp) {
|
| + SkASSERT(State::kRecordingDraws == fState);
|
| + fState = State::kFlushing;
|
| +
|
| + if (fInstances.empty()) {
|
| + return;
|
| + }
|
| +
|
| + if (!fVertexBuffer) {
|
| + fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu));
|
| + if (!fVertexBuffer) {
|
| + return;
|
| + }
|
| + }
|
| +
|
| + if (!fIndexBuffer) {
|
| + fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
|
| + if (!fIndexBuffer) {
|
| + return;
|
| + }
|
| + }
|
| +
|
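| + // Upload the accumulated params as a texel buffer; each instance locates its params via
|
| + // the paramsIdx packed into its fInfo field.
|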
| + if (!fParams.empty()) {
|
| + fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
|
| + kTexel_GrBufferType, kDynamic_GrAccessPattern,
|
| + GrResourceProvider::kNoPendingIO_Flag,
|
| + fParams.begin()));
|
| + if (!fParamsBuffer) {
|
| + return;
|
| + }
|
| + }
|
| +
|
| + this->onBeginFlush(rp);
|
| +}
|
| +
|
| +void InstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
|
| + SkASSERT(State::kFlushing == fInstancedRendering->fState);
|
| + SkASSERT(state->gpu() == fInstancedRendering->gpu());
|
| +
|
| + InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer);
|
| + fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
|
| +}
|
| +
|
| +void InstancedRendering::endFlush() {
|
| + // Caller is expected to delete all instanced batches before ending the flush.
|
| + SkASSERT(fBatchList.isEmpty());
|
| + fInstances.reset();
|
| + fParams.reset();
|
| + fParamsBuffer.reset();
|
| + this->onEndFlush();
|
| + fState = State::kRecordingDraws;
|
| + // Hold on to the shape coords and index buffers.
|
| +}
|
| +
|
| +void InstancedRendering::resetGpuResources(ResetType resetType) {
|
| + fVertexBuffer.reset();
|
| + fIndexBuffer.reset();
|
| + fParamsBuffer.reset();
|
| + this->onResetGpuResources(resetType);
|
| +}
|
| +
|
| +}  // namespace gr_instanced
|
|
|