Chromium Code Reviews

Side by Side Diff: src/gpu/GrBatch.h

Issue 845103005: GrBatchPrototype (Closed)
Base URL: https://skia.googlesource.com/skia.git@lc2
Patch Set: removing dstread (created 5 years, 10 months ago)
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED

#include <new>
// TODO remove this header when we move entirely to batch
#include "GrGeometryProcessor.h"
#include "SkRefCnt.h"
#include "SkThread.h"
#include "SkTypes.h"

class GrBatchTarget;
class GrGpu;
class GrIndexBufferAllocPool;
class GrPipeline;
class GrVertexBufferAllocPool;

struct GrInitInvariantOutput;

/*
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 */

struct GrBatchOpt {
    bool fCanTweakAlphaForCoverage;
};

class GrBatch : public SkRefCnt {
public:
    SK_DECLARE_INST_COUNT(GrBatch)
    GrBatch() { SkDEBUGCODE(fUsed = false;) }
    virtual ~GrBatch() {}

    virtual const char* name() const = 0;
    virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;

    /*
     * initBatchOpt is used to communicate possible optimizations to the GrBatch. initBatchTracker
     * is a hook for some additional overrides from the GrXferProcessor. This is a bit
     * confusing but has to be like this until GrBatch is everywhere.
     *
     * TODO combine to a single init call when GrBatch is everywhere.
     */
    virtual void initBatchOpt(const GrBatchOpt&) = 0;
    virtual void initBatchTracker(const GrPipelineInfo& init) = 0;

    bool combineIfPossible(GrBatch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return onCombineIfPossible(that);
    }

    virtual bool onCombineIfPossible(GrBatch*) = 0;

    virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;

    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
    template <typename T> T* cast() { return static_cast<T*>(this); }

    uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }

    // TODO no GrPrimitiveProcessors yet read fragment position
    bool willReadFragmentPosition() const { return false; }

    SkDEBUGCODE(bool isUsed() const { return fUsed; })

protected:
    template <typename PROC_SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenClassID();
        fClassID = kClassID;
    }

    uint32_t fClassID;

private:
    static uint32_t GenClassID() {
        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
        // atomic inc returns the old value, not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    enum {
        kIllegalBatchClassID = 0,
    };
    static int32_t gCurrBatchClassID;

    SkDEBUGCODE(bool fUsed;)

    typedef SkRefCnt INHERITED;
};

#endif
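The merging model described in the class comment (combineIfPossible() rejects batches with a different classID up front, and onCombineIfPossible() then folds one batch's data into the other, leaving it empty) can be sketched in isolation. The RectBatch below is a hypothetical stand-in rather than Skia code; only the control flow mirrors GrBatch.

// Standalone illustration of the combineIfPossible pattern.
// RectBatch and its fields are hypothetical; only the control flow mirrors GrBatch.
#include <cstdint>
#include <vector>

struct Rect { float l, t, r, b; };

class Batch {
public:
    virtual ~Batch() {}

    // Non-virtual front door: batches of different subclasses never merge.
    bool combineIfPossible(Batch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }
        return this->onCombineIfPossible(that);
    }

    virtual bool onCombineIfPossible(Batch*) = 0;

    uint32_t classID() const { return fClassID; }

protected:
    uint32_t fClassID = 0;
};

class RectBatch : public Batch {
public:
    explicit RectBatch(const Rect& r) { fClassID = 1; fRects.push_back(r); }

    bool onCombineIfPossible(Batch* t) override {
        // Safe to down-cast: combineIfPossible already checked the class ID.
        RectBatch* that = static_cast<RectBatch*>(t);
        // Take the union of the data; the other batch is left empty and the
        // merged batch now draws the geometry from both originals.
        fRects.insert(fRects.end(), that->fRects.begin(), that->fRects.end());
        that->fRects.clear();
        return true;
    }

private:
    std::vector<Rect> fRects;
};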
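initClassID() relies on a function-local static so that GenClassID() runs exactly once per subclass type, and sk_atomic_inc() returns the pre-increment value, which is why 1 is added and 0 stays reserved as kIllegalBatchClassID. Below is a minimal standalone sketch of the same scheme, using std::atomic in place of Skia's SkThread primitives; the class names are hypothetical.

// Standalone sketch of the initClassID()/GenClassID() scheme using std::atomic.
#include <atomic>
#include <cassert>
#include <cstdint>

class Batch {
public:
    uint32_t classID() const { assert(fClassID != kIllegalClassID); return fClassID; }

protected:
    // Called once from each subclass constructor. The function-local static
    // ensures GenClassID() runs only once per distinct SUBCLASS type.
    template <typename SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenClassID();
        fClassID = kClassID;
    }

    uint32_t fClassID = kIllegalClassID;

private:
    static uint32_t GenClassID() {
        // fetch_add returns the old value, not the incremented one, so add 1;
        // 0 stays reserved as the "illegal" ID.
        uint32_t id = gCurrClassID.fetch_add(1) + 1;
        assert(id != 0 && "wrapped: should only be generated once per subclass");
        return id;
    }

    enum { kIllegalClassID = 0 };
    static std::atomic<uint32_t> gCurrClassID;
};

std::atomic<uint32_t> Batch::gCurrClassID{0};

class RectBatch : public Batch {
public:
    RectBatch() { this->initClassID<RectBatch>(); }
};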

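The class-scoped operator new/delete declared in the header (and presumably defined in GrBatch.cpp) let every GrBatch instance come from a dedicated allocator instead of the global heap, while the placement forms forward to the global ones so construction into pre-allocated storage still works. Here is a rough standalone sketch of that shape; MemoryPool is a hypothetical stand-in, not the allocator the .cpp file actually uses.

// Standalone sketch of class-scoped operator new/delete routed through a pool.
#include <cstddef>
#include <cstdlib>
#include <new>

struct MemoryPool {
    void* allocate(size_t size) { return std::malloc(size); }  // stand-in for a real pool
    void release(void* p) { std::free(p); }
};

static MemoryPool gPool;

class Batch {
public:
    virtual ~Batch() {}

    // All Batch instances come from the pool rather than the global heap.
    void* operator new(size_t size) { return gPool.allocate(size); }
    void operator delete(void* target) { gPool.release(target); }

    // Placement forms forward to the global ones; the matching placement
    // delete is only invoked if a constructor throws during placement new.
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }
};

class RectBatch : public Batch {};

int main() {
    Batch* b = new RectBatch;   // goes through Batch::operator new -> gPool
    delete b;                   // goes through Batch::operator delete
    return 0;
}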