| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright 2016 Google Inc. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license that can be | |
| 5 * found in the LICENSE file. | |
| 6 */ | |
| 7 | |
| 8 #ifndef gr_instanced_InstancedRendering_DEFINED | |
| 9 #define gr_instanced_InstancedRendering_DEFINED | |
| 10 | |
| 11 #include "GrMemoryPool.h" | |
| 12 #include "SkTInternalLList.h" | |
| 13 #include "batches/GrDrawBatch.h" | |
| 14 #include "instanced/InstancedRenderingTypes.h" | |
| 15 #include "../private/GrInstancedPipelineInfo.h" | |
| 16 | |
| 17 class GrResourceProvider; | |
| 18 | |
| 19 namespace gr_instanced { | |
| 20 | |
| 21 class InstanceProcessor; | |
| 22 | |
/**
 * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
 * instanced draws into one location, and creates special batches that pull from this data. The
 * nature of instanced rendering allows these batches to combine well and render efficiently.
 *
 * During a flush, this class assembles the accumulated draw data into a single vertex and texel
 * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
 *
 * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
 * InstanceProcessor.
 */
class InstancedRendering : public SkNoncopyable {
public:
    // All batches handed out by this class must have been flushed and released before
    // destruction: the assert requires us to be back in the recording state.
    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }

    GrGpu* gpu() const { return fGpu; }

    /**
     * These methods make a new record internally for an instanced draw, and return a batch that is
     * effectively just an index to that record. The returned batch is not self-contained, but
     * rather relies on this class to handle the rendering. The client must call beginFlush() on
     * this class before attempting to flush batches returned by it. It is invalid to record new
     * draws between beginFlush() and endFlush().
     *
     * On success, *useHWAA is set to whether hardware MSAA should be enabled for the draw; a
     * null return means the draw could not be recorded as an instanced draw.
     */
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    // Variant that maps an explicit local rect onto the device-space rect.
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkRect& localRect, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    // Variant that derives local coords from the given local matrix.
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
                                                  const SkMatrix& localMatrix, bool antialias,
                                                  const GrInstancedPipelineInfo&, bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
                                                  bool antialias, const GrInstancedPipelineInfo&,
                                                  bool* useHWAA);

    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
                                                   bool antialias, const GrInstancedPipelineInfo&,
                                                   bool* useHWAA);

    // Records the region between two nested round rects (outer minus inner).
    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                                    const SkMatrix&, GrColor, bool antialias,
                                                    const GrInstancedPipelineInfo&, bool* useHWAA);

    /**
     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
     * batches created by this class.
     */
    void beginFlush(GrResourceProvider*);

    /**
     * Called once the batches created previously by this class have all been released. Allows the
     * client to begin recording draws again.
     */
    void endFlush();

    // How resetGpuResources should dispose of GPU objects: free them normally (kDestroy) or
    // drop them without GPU-side cleanup because the context is gone (kAbandon).
    enum class ResetType : bool {
        kDestroy,
        kAbandon
    };

    /**
     * Resets all GPU resources, including those that are held long term. They will be lazily
     * reinitialized if the class begins to be used again.
     */
    void resetGpuResources(ResetType);

protected:
    /**
     * A draw batch that references draw records stored in the owning InstancedRendering.
     * Instances of this class are not self-contained: the InstancedRendering must outlive
     * them and performs the actual rendering via onDraw().
     */
    class Batch : public GrDrawBatch {
    public:
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);

        ~Batch() override;
        const char* name() const override { return "Instanced Batch"; }

        // One recorded instanced draw. Draws form a singly-linked list within a batch
        // (fHeadDraw..fTailDraw below) so batches can be combined cheaply.
        struct Draw {
            Instance   fInstance;   // Per-instance shader data.
            IndexRange fGeometry;   // Range of the shared index buffer this draw uses.
            Draw*      fNext;       // Next draw in the batch's list, or null.
        };

        // Valid only while the batch contains exactly one draw (i.e. before combining).
        Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
        Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }

        // Appends shape parameters to the params texel buffer for the current draw.
        void appendRRectParams(const SkRRect&);
        void appendParamsTexel(const SkScalar* vals, int count);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
        void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);

    protected:
        Batch(uint32_t classID, InstancedRendering* ir);

        void initBatchTracker(const GrXPOverridesForBatch&) override;
        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

        void computePipelineOptimizations(GrInitInvariantOutput* color,
                                          GrInitInvariantOutput* coverage,
                                          GrBatchToXPOverrides*) const override;

        // All GPU buffer preparation happens centrally in beginFlush(), so there is
        // nothing to do per-batch here.
        void onPrepare(GrBatchFlushState*) override {}
        void onDraw(GrBatchFlushState*) override;

        InstancedRendering* const       fInstancedRendering;   // Owning clearinghouse (back-pointer).
        BatchInfo                       fInfo;
        SkSTArray<5, ParamsTexel, true> fParams;               // Per-batch params texel data.
        bool                            fIsTracked;            // True once added to fTrackedBatches.
        int                             fNumDraws;             // Length of the fHeadDraw list.
        int                             fNumChangesInGeometry;
        Draw*                           fHeadDraw;             // First draw in this batch's list.
        Draw*                           fTailDraw;             // Last draw (append point for combining).

        typedef GrDrawBatch INHERITED;

        friend class InstancedRendering;
    };

    typedef SkTInternalLList<Batch> BatchList;

    InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode, bool canRenderToFloat);

    const BatchList& trackedBatches() const { return fTrackedBatches; }
    // Only valid between beginFlush() and endFlush(); the asserts enforce this.
    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }

    // Backend (e.g. GL) hooks, called from the corresponding public methods.
    virtual void onBeginFlush(GrResourceProvider*) = 0;
    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
    virtual void onEndFlush() = 0;
    virtual void onResetGpuResources(ResetType) = 0;

private:
    // Recording/flushing phase guard; checked by the destructor and
    // (presumably, in the .cpp) by the record* and flush methods — confirm there.
    enum class State : bool {
        kRecordingDraws,
        kFlushing
    };

    // Shared implementation behind the public record* methods.
    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
                                             const SkMatrix& viewMatrix, GrColor,
                                             const SkRect& localRect, bool antialias,
                                             const GrInstancedPipelineInfo&, bool* requireHWAA);

    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                             const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);

    // Factory for the backend-specific Batch subclass.
    virtual Batch* createBatch() = 0;

    const SkAutoTUnref<GrGpu>          fGpu;
    const AntialiasMode                fLastSupportedAAMode;
    const bool                         fCanRenderToFloat;
    State                              fState;
    GrMemoryPool                       fDrawPool;        // Pool allocator, presumably for Batch::Draw records — confirm in .cpp.
    SkSTArray<1024, ParamsTexel, true> fParams;          // Accumulated params; uploaded to fParamsBuffer at flush.
    BatchList                          fTrackedBatches;
    SkAutoTUnref<const GrBuffer>       fVertexBuffer;    // Long-lived shape geometry (see vertexBuffer()).
    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
    SkAutoTUnref<GrBuffer>             fParamsBuffer;    // Texel buffer rebuilt each flush.
};
| 183 | |
| 184 } | |
| 185 | |
| 186 #endif | |
| OLD | NEW |