Index: src/gpu/GrBatch.h
diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h
index 92a9c08ef2d736c52d1fd365f93738b19b850c33..7b5c888a7058e9a6eb9132bccf8533601d6a8701 100644
--- a/src/gpu/GrBatch.h
+++ b/src/gpu/GrBatch.h
@@ -38,7 +38,6 @@ struct GrInitInvariantOutput;
  * If there are any possible optimizations which might require knowing more about the full state of
  * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
  * information will be communicated to the GrBatch prior to geometry generation.
- * TODO Batch should own the draw bounds
  */
 class GrBatch : public SkRefCnt {
 public:
@@ -62,13 +61,15 @@ public:
             return false;
         }

-        return onCombineIfPossible(that);
+        return this->onCombineIfPossible(that);
     }

     virtual bool onCombineIfPossible(GrBatch*) = 0;

     virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;

+    const SkRect& bounds() const { return fBounds; }
+
     // TODO this goes away when batches are everywhere
     void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
     int numberOfDraws() const { return fNumberOfDraws; }
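
A minimal sketch, not part of the patch, of how a subclass might use the API this change introduces: the public bounds() accessor above together with the protected setBounds()/joinBounds() helpers added in the next hunk. HypotheticalRectBatch, its fRect field, and the merge check are invented for illustration (the rect is assumed to already be in device space), and initClassID() is assumed from elsewhere in GrBatch.h.

// Illustrative sketch only -- HypotheticalRectBatch is not part of this change.
#include "GrBatch.h"

class HypotheticalRectBatch : public GrBatch {
public:
    HypotheticalRectBatch(const SkRect& rect) : fRect(rect) {
        this->initClassID<HypotheticalRectBatch>();  // assumed helper from GrBatch.h
        // Record conservative bounds for this batch's geometry (never "largest").
        this->setBounds(rect);
    }

    bool onCombineIfPossible(GrBatch* t) override {
        // combineIfPossible() has already rejected mismatched classIDs, so this cast is safe.
        HypotheticalRectBatch* that = static_cast<HypotheticalRectBatch*>(t);
        // ... verify the two batches are actually compatible, absorb that's geometry ...
        // The merged batch must cover both geometries.
        this->joinBounds(that->bounds());
        return true;
    }

    void generateGeometry(GrBatchTarget*, const GrPipeline*) override {
        // ... emit vertices for fRect ...
    }

private:
    SkRect fRect;
};

Per the NOTE added in the next hunk, the bounds are later outset for dst copy textures, which is why a subclass should set real (even if very conservative) bounds rather than calling setLargest().
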
@@ -104,6 +105,16 @@ protected:
     uint32_t fClassID;
+    // NOTE: subclasses must compute some bounds, even if extremely conservative. Do *NOT* call
+    // setLargest on the bounds rect, because we outset it for dst copy textures.
+    void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
+
+    void joinBounds(const SkRect& otherBounds) {
+        fBounds.joinPossiblyEmptyRect(otherBounds);
+    }
+
+    SkRect fBounds;
+
 private:
     static uint32_t GenClassID() {
         // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The