Index: src/gpu/batches/GrDrawPathBatch.cpp
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index faf2d961fd220231958cc251d3b12ef440c24324..a829378e54f15d9ed82a8b5e0c5af2db8d5b3a82 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -56,18 +56,15 @@ bool GrDrawPathRangeBatch::isWinding() const {
 }
 
 GrDrawPathRangeBatch::GrDrawPathRangeBatch(const SkMatrix& viewMatrix, const SkMatrix& localMatrix,
-                                           GrColor color, GrPathRange* range, GrPathRangeDraw* draw)
+                                           GrColor color, GrPathRange* range, GrPathRangeDraw* draw,
+                                           const SkRect& bounds)
     : INHERITED(ClassID(), viewMatrix, color)
     , fPathRange(range)
     , fLocalMatrix(localMatrix) {
     SkDEBUGCODE(draw->fUsedInBatch = true;)
     fDraws.addToHead(SkRef(draw));
     fTotalPathCount = draw->count();
-    // Don't compute a bounding box. For dst copy texture, we'll opt instead for it to just copy
-    // the entire dst. Realistically this is a moot point, because any context that supports
-    // NV_path_rendering will also support NV_blend_equation_advanced.
-    // For clipping we'll just skip any optimizations based on the bounds.
-    fBounds.setLargest();
+    fBounds = bounds;
 }
 
 bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {