Chromium Code Reviews — | Index: src/gpu/GrAAConvexPathRenderer.cpp |
| diff --git a/src/gpu/GrAAConvexPathRenderer.cpp b/src/gpu/GrAAConvexPathRenderer.cpp |
| index b3010767e3bf4d9d433000572b62ef087a90876c..96805c3f3da9cc234a200a7690c5403f3eef33ee 100644 |
| --- a/src/gpu/GrAAConvexPathRenderer.cpp |
| +++ b/src/gpu/GrAAConvexPathRenderer.cpp |
| @@ -8,6 +8,9 @@ |
| #include "GrAAConvexPathRenderer.h" |
| +#include "GrBatch.h" |
| +#include "GrBatchTarget.h" |
| +#include "GrBufferAllocPool.h" |
| #include "GrContext.h" |
| #include "GrDrawTargetCaps.h" |
| #include "GrGeometryProcessor.h" |
| @@ -222,12 +225,10 @@ static inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPath:: |
| } |
| static inline void add_line_to_segment(const SkPoint& pt, |
| - SegmentArray* segments, |
| - SkRect* devBounds) { |
| + SegmentArray* segments) { |
| segments->push_back(); |
| segments->back().fType = Segment::kLine; |
| segments->back().fPts[0] = pt; |
| - devBounds->growToInclude(pt.fX, pt.fY); |
| } |
| #ifdef SK_DEBUG |
| @@ -237,31 +238,27 @@ static inline bool contains_inclusive(const SkRect& rect, const SkPoint& p) { |
| #endif |
| static inline void add_quad_segment(const SkPoint pts[3], |
| - SegmentArray* segments, |
| - SkRect* devBounds) { |
| + SegmentArray* segments) { |
| if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) { |
| if (pts[0] != pts[2]) { |
| - add_line_to_segment(pts[2], segments, devBounds); |
| + add_line_to_segment(pts[2], segments); |
| } |
| } else { |
| segments->push_back(); |
| segments->back().fType = Segment::kQuad; |
| segments->back().fPts[0] = pts[1]; |
| segments->back().fPts[1] = pts[2]; |
| - SkASSERT(contains_inclusive(*devBounds, pts[0])); |
| - devBounds->growToInclude(pts + 1, 2); |
| } |
| } |
| static inline void add_cubic_segments(const SkPoint pts[4], |
| SkPath::Direction dir, |
| - SegmentArray* segments, |
| - SkRect* devBounds) { |
| + SegmentArray* segments) { |
| SkSTArray<15, SkPoint, true> quads; |
| GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads); |
| int count = quads.count(); |
| for (int q = 0; q < count; q += 3) { |
| - add_quad_segment(&quads[q], segments, devBounds); |
| + add_quad_segment(&quads[q], segments); |
| } |
| } |
| @@ -270,8 +267,7 @@ static bool get_segments(const SkPath& path, |
| SegmentArray* segments, |
| SkPoint* fanPt, |
| int* vCount, |
| - int* iCount, |
| - SkRect* devBounds) { |
| + int* iCount) { |
| SkPath::Iter iter(path, true); |
| // This renderer over-emphasizes very thin path regions. We use the distance |
| // to the path from the sample to compute coverage. Every pixel intersected |
| @@ -294,19 +290,18 @@ static bool get_segments(const SkPath& path, |
| case SkPath::kMove_Verb: |
| m.mapPoints(pts, 1); |
| update_degenerate_test(°enerateData, pts[0]); |
| - devBounds->set(pts->fX, pts->fY, pts->fX, pts->fY); |
| break; |
| case SkPath::kLine_Verb: { |
| m.mapPoints(&pts[1], 1); |
| update_degenerate_test(°enerateData, pts[1]); |
| - add_line_to_segment(pts[1], segments, devBounds); |
| + add_line_to_segment(pts[1], segments); |
| break; |
| } |
| case SkPath::kQuad_Verb: |
| m.mapPoints(pts, 3); |
| update_degenerate_test(°enerateData, pts[1]); |
| update_degenerate_test(°enerateData, pts[2]); |
| - add_quad_segment(pts, segments, devBounds); |
| + add_quad_segment(pts, segments); |
| break; |
| case SkPath::kConic_Verb: { |
| m.mapPoints(pts, 3); |
| @@ -316,7 +311,7 @@ static bool get_segments(const SkPath& path, |
| for (int i = 0; i < converter.countQuads(); ++i) { |
| update_degenerate_test(°enerateData, quadPts[2*i + 1]); |
| update_degenerate_test(°enerateData, quadPts[2*i + 2]); |
| - add_quad_segment(quadPts + 2*i, segments, devBounds); |
| + add_quad_segment(quadPts + 2*i, segments); |
| } |
| break; |
| } |
| @@ -325,7 +320,7 @@ static bool get_segments(const SkPath& path, |
| update_degenerate_test(°enerateData, pts[1]); |
| update_degenerate_test(°enerateData, pts[2]); |
| update_degenerate_test(°enerateData, pts[3]); |
| - add_cubic_segments(pts, dir, segments, devBounds); |
| + add_cubic_segments(pts, dir, segments); |
| break; |
| }; |
| case SkPath::kDone_Verb: |
| @@ -703,95 +698,214 @@ bool GrAAConvexPathRenderer::canDrawPath(const GrDrawTarget* target, |
| stroke.isFillStyle() && !path.isInverseFillType() && path.isConvex()); |
| } |
| -bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target, |
| - GrPipelineBuilder* pipelineBuilder, |
| - GrColor color, |
| - const SkMatrix& vm, |
| - const SkPath& origPath, |
| - const SkStrokeRec&, |
| - bool antiAlias) { |
| +class AAConvexPathBatch : public GrBatch { |
| +public: |
| + struct Geometry { |
| + GrColor fColor; |
| + SkMatrix fViewMatrix; |
| + SkPath fPath; |
| + SkDEBUGCODE(SkRect fDevBounds;) |
| + }; |
| - const SkPath* path = &origPath; |
| - if (path->isEmpty()) { |
| - return true; |
| + static GrBatch* Create(const Geometry& geometry) { |
| + return SkNEW_ARGS(AAConvexPathBatch, (geometry)); |
| } |
| - SkMatrix viewMatrix = vm; |
| - SkMatrix invert; |
| - if (!viewMatrix.invert(&invert)) { |
| - return false; |
| - } |
| + const char* name() const SK_OVERRIDE { return "AAConvexBatch"; } |
| - // We use the fact that SkPath::transform path does subdivision based on |
| - // perspective. Otherwise, we apply the view matrix when copying to the |
| - // segment representation. |
| - SkPath tmpPath; |
| - if (viewMatrix.hasPerspective()) { |
| - origPath.transform(viewMatrix, &tmpPath); |
| - path = &tmpPath; |
| - viewMatrix = SkMatrix::I(); |
| + void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE { |
| + // When this is called on a batch, there is only one geometry bundle |
| + out->setKnownFourComponents(fGeoData[0].fColor); |
| + } |
| + void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE { |
| + out->setUnknownSingleComponent(); |
| } |
| - QuadVertex *verts; |
| - uint16_t* idxs; |
| + void initBatchOpt(const GrBatchOpt& batchOpt) { |
| + fBatchOpt = batchOpt; |
| + } |
| - int vCount; |
| - int iCount; |
| - enum { |
| - kPreallocSegmentCnt = 512 / sizeof(Segment), |
| - kPreallocDrawCnt = 4, |
| - }; |
| - SkSTArray<kPreallocSegmentCnt, Segment, true> segments; |
| - SkPoint fanPt; |
| + void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE { |
| + // Handle any color overrides |
| + if (init.fColorIgnored) { |
| + fGeoData[0].fColor = GrColor_ILLEGAL; |
| + } else if (GrColor_ILLEGAL != init.fOverrideColor) { |
| + fGeoData[0].fColor = init.fOverrideColor; |
| + } |
| - // We can't simply use the path bounds because we may degenerate cubics to quads which produces |
| - // new control points outside the original convex hull. |
| - SkRect devBounds; |
| - if (!get_segments(*path, viewMatrix, &segments, &fanPt, &vCount, &iCount, &devBounds)) { |
| - return false; |
| + // setup batch properties |
| + fBatch.fColorIgnored = init.fColorIgnored; |
| + fBatch.fColor = fGeoData[0].fColor; |
| + fBatch.fUsesLocalCoords = init.fUsesLocalCoords; |
| + fBatch.fCoverageIgnored = init.fCoverageIgnored; |
| } |
| - // Our computed verts should all be within one pixel of the segment control points. |
| - devBounds.outset(SK_Scalar1, SK_Scalar1); |
| + void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE { |
| + int instanceCount = fGeoData.count(); |
| + for (int i = 0; i < instanceCount; i++) { |
| + Geometry& args = fGeoData[i]; |
| - SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(color, invert)); |
| + const SkMatrix* viewMatrix = &args.fViewMatrix; |
| + SkMatrix invert; |
| + if (!viewMatrix->invert(&invert)) { |
| + continue; |
| + } |
| - GrDrawTarget::AutoReleaseGeometry arg(target, vCount, quadProcessor->getVertexStride(), iCount); |
| - SkASSERT(quadProcessor->getVertexStride() == sizeof(QuadVertex)); |
| - if (!arg.succeeded()) { |
| - return false; |
| - } |
| - verts = reinterpret_cast<QuadVertex*>(arg.vertices()); |
| - idxs = reinterpret_cast<uint16_t*>(arg.indices()); |
| + // We use the fact that SkPath::transform path does subdivision based on |
| + // perspective. Otherwise, we apply the view matrix when copying to the |
| + // segment representation. |
| + if (viewMatrix->hasPerspective()) { |
| + args.fPath.transform(*viewMatrix); |
| + viewMatrix = &SkMatrix::I(); |
| + } |
| + |
| + int vertexCount; |
| + int indexCount; |
| + enum { |
| + kPreallocSegmentCnt = 512 / sizeof(Segment), |
| + kPreallocDrawCnt = 4, |
| + }; |
| + SkSTArray<kPreallocSegmentCnt, Segment, true> segments; |
| + SkPoint fanPt; |
| + |
| + if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount, |
| + &indexCount)) { |
| + continue; |
| + } |
| - SkSTArray<kPreallocDrawCnt, Draw, true> draws; |
| - create_vertices(segments, fanPt, &draws, verts, idxs); |
| + SkAutoTUnref<GrGeometryProcessor> quadProcessor(QuadEdgeEffect::Create(args.fColor, |
| + invert)); |
| + |
| + batchTarget->initDraw(quadProcessor, pipeline); |
| + fBatchesGenerated++; |
| + |
| + // TODO remove this when batch is everywhere |
| + GrPipelineInfo init; |
| + init.fColorIgnored = fBatch.fColorIgnored; |
| + init.fOverrideColor = GrColor_ILLEGAL; |
| + init.fCoverageIgnored = fBatch.fCoverageIgnored; |
| + init.fUsesLocalCoords = this->usesLocalCoords(); |
| + quadProcessor->initBatchTracker(batchTarget->currentBatchTracker(), init); |
| + |
| + const GrVertexBuffer* vertexBuffer; |
| + int firstVertex; |
| + |
| + size_t vertexStride = quadProcessor->getVertexStride(); |
| + void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride, |
| + vertexCount, |
| + &vertexBuffer, |
| + &firstVertex); |
| + |
| + const GrIndexBuffer* indexBuffer; |
| + int firstIndex; |
| + |
| + void *indices = batchTarget->indexPool()->makeSpace(indexCount, |
| + &indexBuffer, |
| + &firstIndex); |
| + |
| + QuadVertex* verts = reinterpret_cast<QuadVertex*>(vertices); |
| + uint16_t* idxs = reinterpret_cast<uint16_t*>(indices); |
| + |
| + SkSTArray<kPreallocDrawCnt, Draw, true> draws; |
| + create_vertices(segments, fanPt, &draws, verts, idxs); |
| - // Check devBounds |
| #ifdef SK_DEBUG |
| - SkRect tolDevBounds = devBounds; |
| - tolDevBounds.outset(SK_Scalar1 / 10000, SK_Scalar1 / 10000); |
| - SkRect actualBounds; |
| - actualBounds.set(verts[0].fPos, verts[1].fPos); |
| - for (int i = 2; i < vCount; ++i) { |
| - actualBounds.growToInclude(verts[i].fPos.fX, verts[i].fPos.fY); |
| - } |
| - SkASSERT(tolDevBounds.contains(actualBounds)); |
| + // Check devBounds |
| + SkRect tolDevBounds = args.fDevBounds; |
| + tolDevBounds.outset(SK_Scalar1 / 10000, SK_Scalar1 / 10000); |
| + SkRect actualBounds; |
| + actualBounds.set(verts[0].fPos, verts[1].fPos); |
| + for (int i = 2; i < vertexCount; ++i) { |
| + actualBounds.growToInclude(verts[i].fPos.fX, verts[i].fPos.fY); |
| + } |
| + SkASSERT(tolDevBounds.contains(actualBounds)); |
| #endif |
| - int vOffset = 0; |
| - for (int i = 0; i < draws.count(); ++i) { |
| - const Draw& draw = draws[i]; |
| - target->drawIndexed(pipelineBuilder, |
| - quadProcessor, |
| - kTriangles_GrPrimitiveType, |
| - vOffset, // start vertex |
| - 0, // start index |
| - draw.fVertexCnt, |
| - draw.fIndexCnt, |
| - &devBounds); |
| - vOffset += draw.fVertexCnt; |
| + GrDrawTarget::DrawInfo info; |
| + info.setVertexBuffer(vertexBuffer); |
| + info.setIndexBuffer(indexBuffer); |
| + info.setPrimitiveType(kTriangles_GrPrimitiveType); |
| + info.setStartIndex(firstIndex); |
| + |
| + int vOffset = 0; |
| + for (int i = 0; i < draws.count(); ++i) { |
| + const Draw& draw = draws[i]; |
| + info.setStartVertex(vOffset + firstVertex); |
| + info.setVertexCount(draw.fVertexCnt); |
| + info.setIndexCount(draw.fIndexCnt); |
| + batchTarget->draw(info); |
| + vOffset += draw.fVertexCnt; |
| + } |
| + } |
| + } |
| + |
| + SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } |
| + |
| +private: |
| + AAConvexPathBatch(const Geometry& geometry) { |
| + this->initClassID<AAConvexPathBatch>(); |
| + fGeoData.push_back(geometry); |
| + } |
| + |
| + bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE { |
| + AAConvexPathBatch* that = t->cast<AAConvexPathBatch>(); |
| + |
| + // TODO we can 'batch' with color because we draw each convex path separately |
| + if (this->color() != that->color()) { |
| + return false; |
| + } |
| + |
| + SkASSERT(this->usesLocalCoords() == that->usesLocalCoords()); |
| + if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) { |
| + return false; |
| + } |
| + |
| + fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin()); |
| + return true; |
| + } |
| + |
| + GrColor color() const { return fBatch.fColor; } |
| + bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; } |
| + const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; } |
| + |
| + struct BatchTracker { |
| + GrColor fColor; |
| + bool fUsesLocalCoords; |
| + bool fColorIgnored; |
| + bool fCoverageIgnored; |
| + }; |
| + |
| + GrBatchOpt fBatchOpt; |
| + BatchTracker fBatch; |
| + SkSTArray<1, Geometry, true> fGeoData; |
| +}; |
| + |
| +bool GrAAConvexPathRenderer::onDrawPath(GrDrawTarget* target, |
| + GrPipelineBuilder* pipelineBuilder, |
| + GrColor color, |
| + const SkMatrix& vm, |
| + const SkPath& path, |
| + const SkStrokeRec&, |
| + bool antiAlias) { |
| + if (path.isEmpty()) { |
| + return true; |
| } |
| + // This outset was determined experimentally by running skps and gms. It probably could be a |
| + // bit tighter |

[Reviewer comment — bsalomon, 2015/02/03 18:09:41, attached to the lines above:]
yikes!!! That seems really dangerous. I think we n… [comment truncated in page capture]
| + SkRect devRect = path.getBounds(); |
| + devRect.outset(7, 7); |
| + vm.mapRect(&devRect); |
| + |
| + AAConvexPathBatch::Geometry geometry; |
| + geometry.fColor = color; |
| + geometry.fViewMatrix = vm; |
| + geometry.fPath = path; |
| + SkDEBUGCODE(geometry.fDevBounds = devRect;) |
| + |
| + GrBatch* batch = AAConvexPathBatch::Create(geometry); |
| + target->drawBatch(pipelineBuilder, batch, &devRect); |
| + |
| return true; |
| + |
| } |