Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(154)

Side by Side Diff: src/gpu/GrBatch.h

Issue 845103005: GrBatchPrototype (Closed) Base URL: https://skia.googlesource.com/skia.git@lc2
Patch Set: whoops missed the comment Created 5 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/gpu/GrAARectRenderer.cpp ('k') | src/gpu/GrBatch.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef GrBatch_DEFINED
9 #define GrBatch_DEFINED
10
11 #include <new>
12 // TODO remove this header when we move entirely to batch
13 #include "GrGeometryProcessor.h"
14 #include "SkRefCnt.h"
15 #include "SkThread.h"
16 #include "SkTypes.h"
17
18 class GrBatchTarget;
19 class GrGpu;
20 class GrIndexBufferAllocPool;
21 class GrInitInvariantOutput;
22 class GrOptDrawState;
23 class GrVertexBufferAllocPool;
24
/*
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from both
 * of the original batches.
 *
 * If there are any possible optimizations which might require knowing more about the full state of
 * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
 * information will be communicated to the GrBatch prior to geometry generation.
 */
40
// Optimization flags communicated to a GrBatch before it generates geometry (see initBatchOpt).
struct GrBatchOpt {
    // Whether the batch is allowed to tweak its color's alpha to express coverage.
    bool fCanTweakAlphaForCoverage;
};
44
class GrBatch : public SkRefCnt {
public:
    SK_DECLARE_INST_COUNT(GrBatch)
    GrBatch() { SkDEBUGCODE(fUsed = false;) }
    virtual ~GrBatch() {}

    // Human-readable name of the concrete batch subclass (for debugging/tracing).
    virtual const char* name() const = 0;
    virtual void getInvariantOutputColor(GrInitInvariantOutput* out,
                                         const GrBatchOpt&) const = 0;
    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out,
                                            const GrBatchOpt&) const = 0;

    /*
     * initBatchOpt is used to communicate possible optimizations to the GrBatch. initBatchTracker
     * is a hook for some additional overrides from the GrXferProcessor. This is a bit
     * confusing but has to be like this until GrBatch is everywhere.
     *
     * TODO combine to a single init call when GrBatch is everywhere.
     */
    virtual void initBatchOpt(const GrBatchOpt&) = 0;
    virtual void initBatchTracker(const GrGeometryProcessor::InitBT& init) = 0;

    // Attempts to fold 'that' into this batch. Merging is only considered between batches of the
    // same concrete subclass (equal classID); everything else is decided by onCombineIfPossible.
    bool combineIfPossible(GrBatch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return onCombineIfPossible(that);
    }

    virtual bool onCombineIfPossible(GrBatch*) = 0;

    virtual void generateGeometry(GrBatchTarget*, const GrOptDrawState*) = 0;

    // Non-placement new/delete are declared here and defined out of line (see GrBatch.cpp),
    // so batch allocation can be routed through a dedicated allocator.
    void* operator new(size_t size);
    void operator delete(void* target);

    // Placement forms simply forward to the global placement operators.
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
    template <typename T> T* cast() { return static_cast<T*>(this); }

    // Unique per-subclass ID, assigned via initClassID(); used by combineIfPossible to reject
    // cross-class merges. Asserts that initClassID() was called.
    uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }

    // TODO no GrPrimitiveProcessors yet read fragment position
    bool willReadFragmentPosition() const { return false; }

    SkDEBUGCODE(bool isUsed() const { return fUsed; })

protected:
    // Each concrete subclass must call this (the function-local static ensures the ID is
    // generated exactly once per template instantiation, i.e. once per subclass).
    template <typename PROC_SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenClassID();
        fClassID = kClassID;
    }

    uint32_t fClassID;

private:
    static uint32_t GenClassID() {
        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
        // atomic inc returns the old value not the incremented value. So we add
        // 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    enum {
        kIllegalBatchClassID = 0,
    };
    // Monotonically increasing source of class IDs; see GenClassID().
    static int32_t gCurrBatchClassID;

    // Debug-only flag; set by the framework once the batch has been consumed (see isUsed()).
    SkDEBUGCODE(bool fUsed;)

    typedef SkRefCnt INHERITED;
};
132
133 #endif
OLDNEW
« no previous file with comments | « src/gpu/GrAARectRenderer.cpp ('k') | src/gpu/GrBatch.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698