| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  * Copyright 2015 Google Inc. | 2  * Copyright 2015 Google Inc. | 
| 3  * | 3  * | 
| 4  * Use of this source code is governed by a BSD-style license that can be | 4  * Use of this source code is governed by a BSD-style license that can be | 
| 5  * found in the LICENSE file. | 5  * found in the LICENSE file. | 
| 6  */ | 6  */ | 
| 7 | 7 | 
| 8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED | 
| 9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED | 
| 10 | 10 | 
| 11 #include <new> | 11 #include <new> | 
| 12 #include "GrNonAtomicRef.h" | 12 #include "GrNonAtomicRef.h" | 
| 13 | 13 | 
| 14 #include "SkRect.h" | 14 #include "SkRect.h" | 
| 15 #include "SkString.h" | 15 #include "SkString.h" | 
| 16 | 16 | 
| 17 class GrCaps; | 17 class GrCaps; | 
| 18 class GrBatchFlushState; | 18 class GrBatchFlushState; | 
| 19 class GrRenderTarget; |  | 
| 20 | 19 | 
| 21 /** | 20 /** | 
| 22  * GrBatch is the base class for all Ganesh deferred geometry generators.  To facilitate | 21  * GrBatch is the base class for all Ganesh deferred geometry generators.  To facilitate | 
| 23  * reorderable batching, Ganesh does not generate geometry inline with draw calls.  Instead, it | 22  * reorderable batching, Ganesh does not generate geometry inline with draw calls.  Instead, it | 
| 24  * captures the arguments to the draw and then generates the geometry on demand.  This gives GrBatch | 23  * captures the arguments to the draw and then generates the geometry on demand.  This gives GrBatch | 
| 25  * subclasses complete freedom to decide how / what they can batch. | 24  * subclasses complete freedom to decide how / what they can batch. | 
| 26  * | 25  * | 
| 27  * Batches are created when GrContext processes a draw call. Batches of the same subclass may be | 26  * Batches are created when GrContext processes a draw call. Batches of the same subclass may be | 
| 28  * merged using combineIfPossible. When two batches merge, one takes on the union of the data | 27  * merged using combineIfPossible. When two batches merge, one takes on the union of the data | 
| 29  * and the other is left empty. The merged batch becomes responsible for drawing the data from both | 28  * and the other is left empty. The merged batch becomes responsible for drawing the data from both | 
| (...skipping 77 matching lines...) | | 
| 107     /** Issues the batch's commands to GrGpu. */ | 106     /** Issues the batch's commands to GrGpu. */ | 
| 108     void draw(GrBatchFlushState* state) { this->onDraw(state); } | 107     void draw(GrBatchFlushState* state) { this->onDraw(state); } | 
| 109 | 108 | 
| 110     /** Used to block batching across render target changes. Remove this once we store | 109     /** Used to block batching across render target changes. Remove this once we store | 
| 111         GrBatches for different RTs in different targets. */ | 110         GrBatches for different RTs in different targets. */ | 
| 112     virtual uint32_t renderTargetUniqueID() const = 0; | 111     virtual uint32_t renderTargetUniqueID() const = 0; | 
| 113 | 112 | 
| 114     /** Used for spewing information about batches when debugging. */ | 113     /** Used for spewing information about batches when debugging. */ | 
| 115     virtual SkString dumpInfo() const = 0; | 114     virtual SkString dumpInfo() const = 0; | 
| 116 | 115 | 
| 117     /** Can remove this when multi-draw-buffer lands */ |  | 
| 118     virtual GrRenderTarget* renderTarget() const = 0; |  | 
| 119 |  | 
| 120 protected: | 116 protected: | 
| 121     // NOTE, compute some bounds, even if extremely conservative.  Do *NOT* setLargest on the bounds | 117     // NOTE, compute some bounds, even if extremely conservative.  Do *NOT* setLargest on the bounds | 
| 122     // rect because we outset it for dst copy textures | 118     // rect because we outset it for dst copy textures | 
| 123     void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 119     void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 
| 124 | 120 | 
| 125     void joinBounds(const SkRect& otherBounds) { | 121     void joinBounds(const SkRect& otherBounds) { | 
| 126         return fBounds.joinPossiblyEmptyRect(otherBounds); | 122         return fBounds.joinPossiblyEmptyRect(otherBounds); | 
| 127     } | 123     } | 
| 128 | 124 | 
| 129     static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); } | 125     static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); } | 
| (...skipping 26 matching lines...) | | 
| 156 #if GR_BATCH_SPEW | 152 #if GR_BATCH_SPEW | 
| 157     static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); } | 153     static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); } | 
| 158     const uint32_t                      fUniqueID; | 154     const uint32_t                      fUniqueID; | 
| 159     static int32_t                      gCurrBatchUniqueID; | 155     static int32_t                      gCurrBatchUniqueID; | 
| 160 #endif | 156 #endif | 
| 161     static int32_t                      gCurrBatchClassID; | 157     static int32_t                      gCurrBatchClassID; | 
| 162     typedef GrNonAtomicRef INHERITED; | 158     typedef GrNonAtomicRef INHERITED; | 
| 163 }; | 159 }; | 
| 164 | 160 | 
| 165 #endif | 161 #endif | 
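The header comment in this diff describes the batching model: draw arguments are captured, geometry generation is deferred to draw time, and same-subclass batches may be merged via combineIfPossible, with the surviving batch taking the union of the data while the other is left empty. The following is a minimal standalone C++ sketch of that merge-on-combine pattern. It does not use the real Skia/Ganesh API; `Batch`, `RectBatch`, and the simplified `combineIfPossible(Batch*)` signature here are hypothetical stand-ins for illustration only (the real GrBatch also involves GrCaps, class IDs, pipeline compatibility checks, and bounds joining).

```cpp
// Standalone sketch of the "merge on combine" pattern described in the header
// comment above. All names here are hypothetical; the real GrBatch API differs.
#include <cstdio>
#include <vector>

struct Rect { float l, t, r, b; };

class Batch {
public:
    virtual ~Batch() = default;
    // Returns true if 'that' was absorbed into this batch. After a successful
    // merge, 'that' is left empty and this batch draws the union of the data.
    virtual bool combineIfPossible(Batch* that) = 0;
    // Geometry generation is deferred until draw time.
    virtual void draw() = 0;
};

class RectBatch : public Batch {
public:
    explicit RectBatch(const Rect& r) { fRects.push_back(r); }

    bool combineIfPossible(Batch* that) override {
        // Only batches of the same subclass can merge; a real implementation
        // would also compare class IDs, render target, and pipeline state.
        RectBatch* other = dynamic_cast<RectBatch*>(that);
        if (!other) {
            return false;
        }
        // Take the union of the data and leave the other batch empty.
        fRects.insert(fRects.end(), other->fRects.begin(), other->fRects.end());
        other->fRects.clear();
        return true;
    }

    void draw() override {
        // Geometry for all merged rects would be generated here, on demand.
        std::printf("drawing %zu rect(s) in one batch\n", fRects.size());
    }

private:
    std::vector<Rect> fRects;
};

int main() {
    RectBatch a({0, 0, 10, 10});
    RectBatch b({5, 5, 20, 20});
    if (a.combineIfPossible(&b)) {
        a.draw();  // prints: drawing 2 rect(s) in one batch
    }
    return 0;
}
```

The point of deferring geometry generation, as the comment explains, is exactly this: because no geometry exists until draw time, merged batches can emit the geometry for many logical draws in a single pass, which is what makes reorderable batching worthwhile.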