Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(541)

Side by Side Diff: src/gpu/GrInOrderDrawBuffer.h

Issue 973853002: Split command holding object (GrTargetCommands) out of GrInOrderDrawBuffer (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Address compilation complaint Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/gpu/GrDrawTarget.h ('k') | src/gpu/GrInOrderDrawBuffer.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #ifndef GrInOrderDrawBuffer_DEFINED 8 #ifndef GrInOrderDrawBuffer_DEFINED
9 #define GrInOrderDrawBuffer_DEFINED 9 #define GrInOrderDrawBuffer_DEFINED
10 10
11 #include "GrFlushToGpuDrawTarget.h" 11 #include "GrFlushToGpuDrawTarget.h"
12 12
13 #include "GrBatch.h" 13 #include "GrBatch.h"
14 #include "GrBatchTarget.h" 14 #include "GrBatchTarget.h"
15 #include "SkChunkAlloc.h" 15 #include "SkChunkAlloc.h"
16 #include "GrPipeline.h" 16 #include "GrPipeline.h"
17 #include "GrPath.h" 17 #include "GrPath.h"
18 #include "GrTRecorder.h" 18 #include "GrTRecorder.h"
19 19
20 /** 20 class GrInOrderDrawBuffer;
21 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual 21
22 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or 22 class GrTargetCommands : ::SkNoncopyable {
23 * vertex buffers are used as geometry sources it is important to note that the draw buffer only holds 23 struct SetState;
24 * references to the buffers. It is the caller's responsibility to ensure that the data is still 24
25 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's
26 * responsibility to ensure that all referenced textures, buffers, and render-targets are associated
27 * in the GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to
28 * store geometry.
29 */
30 class GrInOrderDrawBuffer : public GrFlushToGpuDrawTarget {
31 public: 25 public:
32 26 GrTargetCommands(GrGpu* gpu,
33 /** 27 GrVertexBufferAllocPool* vertexPool,
34 * Creates a GrInOrderDrawBuffer 28 GrIndexBufferAllocPool* indexPool)
35 * 29 : fCmdBuffer(kCmdBufferInitialSizeInBytes)
36 * @param gpu the gpu object that this draw buffer flushes to. 30 , fPrevState(NULL)
37 * @param vertexPool pool where vertices for queued draws will be saved when 31 , fBatchTarget(gpu, vertexPool, indexPool)
38 * the vertex source is either reserved or array. 32 , fDrawBatch(NULL) {
39 * @param indexPool pool where indices for queued draws will be saved when
40 * the index source is either reserved or array.
41 */
42 GrInOrderDrawBuffer(GrGpu* gpu,
43 GrVertexBufferAllocPool* vertexPool,
44 GrIndexBufferAllocPool* indexPool);
45
46 ~GrInOrderDrawBuffer() SK_OVERRIDE;
47
48 // tracking for draws
49 DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID ); }
50
51 void clearStencilClip(const SkIRect& rect,
52 bool insideClip,
53 GrRenderTarget* renderTarget) SK_OVERRIDE;
54
55 void discard(GrRenderTarget*) SK_OVERRIDE;
56
57 protected:
58 void willReserveVertexAndIndexSpace(int vertexCount,
59 size_t vertexStride,
60 int indexCount);
61
62 void appendIndicesAndTransforms(const void* indexValues, PathIndexType index Type,
63 const float* transformValues, PathTransformT ype transformType,
64 int count, char** indicesLocation, float** x formsLocation) {
65 int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
66 *indicesLocation = (char*) fPathIndexBuffer.alloc(count * indexBytes,
67 SkChunkAlloc::kThrow_A llocFailType);
68 SkASSERT(SkIsAlign4((uintptr_t)*indicesLocation));
69 memcpy(*indicesLocation, reinterpret_cast<const char*>(indexValues), cou nt * indexBytes);
70
71 const int xformBytes = GrPathRendering::PathTransformSize(transformType) * sizeof(float);
72 *xformsLocation = NULL;
73
74 if (0 != xformBytes) {
75 *xformsLocation = (float*) fPathTransformBuffer.alloc(count * xformB ytes,
76 SkChunkAlloc::kTh row_AllocFailType);
77 SkASSERT(SkIsAlign4((uintptr_t)*xformsLocation));
78 memcpy(*xformsLocation, transformValues, count * xformBytes);
79 }
80 } 33 }
81 34
82 bool canConcatToIndexBuffer(const GrIndexBuffer** ib) {
83 const GrDrawTarget::GeometrySrcState& geomSrc = this->getGeomSrc();
84
85 // we only attempt to concat when reserved verts are used with a client-specified
86 // index buffer. To make this work with client-specified VBs we'd need to know if the VB
87 // was updated between draws.
88 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
89 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
90 return false;
91 }
92
93 *ib = geomSrc.fIndexBuffer;
94 return true;
95 }
96
97 private:
98 typedef GrGpu::DrawArgs DrawArgs;
99
100 struct SetState;
101
102 struct Cmd : ::SkNoncopyable { 35 struct Cmd : ::SkNoncopyable {
103 enum { 36 enum {
104 kDraw_Cmd = 1, 37 kDraw_Cmd = 1,
105 kStencilPath_Cmd = 2, 38 kStencilPath_Cmd = 2,
106 kSetState_Cmd = 3, 39 kSetState_Cmd = 3,
107 kClear_Cmd = 4, 40 kClear_Cmd = 4,
108 kCopySurface_Cmd = 5, 41 kCopySurface_Cmd = 5,
109 kDrawPath_Cmd = 6, 42 kDrawPath_Cmd = 6,
110 kDrawPaths_Cmd = 7, 43 kDrawPaths_Cmd = 7,
111 kDrawBatch_Cmd = 8, 44 kDrawBatch_Cmd = 8,
112 }; 45 };
113 46
114 Cmd(uint8_t type) : fType(type) {} 47 Cmd(uint8_t type) : fType(type) {}
115 virtual ~Cmd() {} 48 virtual ~Cmd() {}
116 49
117 virtual void execute(GrGpu*, const SetState*) = 0; 50 virtual void execute(GrGpu*, const SetState*) = 0;
118 51
119 uint8_t type() const { return fType & kCmdMask; } 52 uint8_t type() const { return fType & kCmdMask; }
120 53
121 bool isTraced() const { return SkToBool(fType & kTraceCmdBit); } 54 bool isTraced() const { return SkToBool(fType & kTraceCmdBit); }
122 void makeTraced() { fType |= kTraceCmdBit; } 55 void makeTraced() { fType |= kTraceCmdBit; }
123 56
124 private: 57 private:
125 static const int kCmdMask = 0x7F; 58 static const int kCmdMask = 0x7F;
126 static const int kTraceCmdBit = 0x80; 59 static const int kTraceCmdBit = 0x80;
127 60
128 uint8_t fType; 61 uint8_t fType;
129 }; 62 };
130 63
64 void reset();
65 void flush(GrInOrderDrawBuffer*);
66
67 Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
68 const SkIRect& rect,
69 bool insideClip,
70 GrRenderTarget* renderTarget);
71
72 Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);
73
74 Cmd* recordDraw(GrInOrderDrawBuffer*,
75 const GrGeometryProcessor*,
76 const GrDrawTarget::DrawInfo&,
77 const GrDrawTarget::PipelineInfo&);
78 Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
79 GrBatch*,
80 const GrDrawTarget::PipelineInfo&);
81 void recordDrawRect(GrInOrderDrawBuffer*,
82 GrPipelineBuilder*,
83 GrColor,
84 const SkMatrix& viewMatrix,
85 const SkRect& rect,
86 const SkRect* localRect,
87 const SkMatrix* localMatrix);
88 Cmd* recordStencilPath(GrInOrderDrawBuffer*,
89 const GrPipelineBuilder&,
90 const GrPathProcessor*,
91 const GrPath*,
92 const GrScissorState&,
93 const GrStencilSettings&);
94 Cmd* recordDrawPath(GrInOrderDrawBuffer*,
95 const GrPathProcessor*,
96 const GrPath*,
97 const GrStencilSettings&,
98 const GrDrawTarget::PipelineInfo&);
99 Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
100 const GrPathProcessor*,
101 const GrPathRange*,
102 const void*,
103 GrDrawTarget::PathIndexType,
104 const float transformValues[],
105 GrDrawTarget::PathTransformType ,
106 int,
107 const GrStencilSettings&,
108 const GrDrawTarget::PipelineInfo&);
109 Cmd* recordClear(GrInOrderDrawBuffer*,
110 const SkIRect* rect,
111 GrColor,
112 bool canIgnoreRect,
113 GrRenderTarget*);
114 Cmd* recordCopySurface(GrInOrderDrawBuffer*,
115 GrSurface* dst,
116 GrSurface* src,
117 const SkIRect& srcRect,
118 const SkIPoint& dstPoint);
119
120 protected:
121 void willReserveVertexAndIndexSpace(int vertexCount,
122 size_t vertexStride,
123 int indexCount);
124
125 private:
126 friend class GrInOrderDrawBuffer;
127
128 typedef GrGpu::DrawArgs DrawArgs;
129
130 // Attempts to concat instances from info onto the previous draw. info must represent an
131 // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
132 int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&) ;
133
134 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
135 const GrPrimitiveProce ssor*,
136 const GrDrawTarget::Pi pelineInfo&);
137 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
138 GrBatch*,
139 const GrDrawTarget::Pi pelineInfo&);
140
131 struct Draw : public Cmd { 141 struct Draw : public Cmd {
132 Draw(const DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {} 142 Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) { }
133 143
134 void execute(GrGpu*, const SetState*) SK_OVERRIDE; 144 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
135 145
136 DrawInfo fInfo; 146 GrDrawTarget::DrawInfo fInfo;
137 }; 147 };
138 148
139 struct StencilPath : public Cmd { 149 struct StencilPath : public Cmd {
140 StencilPath(const GrPath* path, GrRenderTarget* rt) 150 StencilPath(const GrPath* path, GrRenderTarget* rt)
141 : Cmd(kStencilPath_Cmd) 151 : Cmd(kStencilPath_Cmd)
142 , fRenderTarget(rt) 152 , fRenderTarget(rt)
143 , fPath(path) {} 153 , fPath(path) {}
144 154
145 const GrPath* path() const { return fPath.get(); } 155 const GrPath* path() const { return fPath.get(); }
146 156
(...skipping 21 matching lines...) Expand all
168 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath; 178 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
169 }; 179 };
170 180
171 struct DrawPaths : public Cmd { 181 struct DrawPaths : public Cmd {
172 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRang e(pathRange) {} 182 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRang e(pathRange) {}
173 183
174 const GrPathRange* pathRange() const { return fPathRange.get(); } 184 const GrPathRange* pathRange() const { return fPathRange.get(); }
175 185
176 void execute(GrGpu*, const SetState*) SK_OVERRIDE; 186 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
177 187
178 char* fIndices; 188 char* fIndices;
179 PathIndexType fIndexType; 189 GrDrawTarget::PathIndexType fIndexType;
180 float* fTransforms; 190 float* fTransforms;
181 PathTransformType fTransformType; 191 GrDrawTarget::PathTransformType fTransformType;
182 int fCount; 192 int fCount;
183 GrStencilSettings fStencilSettings; 193 GrStencilSettings fStencilSettings;
184 194
185 private: 195 private:
186 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange; 196 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
187 }; 197 };
188 198
189 // This is also used to record a discard by setting the color to GrColor_ILLEGAL 199 // This is also used to record a discard by setting the color to GrColor_ILLEGAL
190 struct Clear : public Cmd { 200 struct Clear : public Cmd {
191 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {} 201 Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
192 202
193 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); } 203 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
269 279
270 void execute(GrGpu*, const SetState*) SK_OVERRIDE; 280 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
271 281
272 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer 282 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
273 SkAutoTUnref<GrBatch> fBatch; 283 SkAutoTUnref<GrBatch> fBatch;
274 284
275 private: 285 private:
276 GrBatchTarget* fBatchTarget; 286 GrBatchTarget* fBatchTarget;
277 }; 287 };
278 288
279 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double. 289 static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
280 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer; 290
291 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
292 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
293
294 CmdBuffer fCmdBuffer;
295 SetState* fPrevState;
296 GrBatchTarget fBatchTarget;
297 // TODO hack until batch is everywhere
298 GrTargetCommands::DrawBatch* fDrawBatch;
299
300 // This will go away when everything uses batch. However, in the short term anything which
301 // might be put into the GrInOrderDrawBuffer needs to make sure it closes t he last batch
302 void closeBatch();
303 };
304
305 /**
306 * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual
307 * playback into a GrGpu. In theory one draw buffer could playback into another. When index or
308 * vertex buffers are used as geometry sources it is important to note that the draw buffer only holds
309 * references to the buffers. It is the caller's responsibility to ensure that the data is still
310 * valid when the draw buffer is played back into a GrGpu. Similarly, it is the caller's
311 * responsibility to ensure that all referenced textures, buffers, and render-targets are associated
312 * in the GrGpu object that the buffer is played back into. The buffer requires VB and IB pools to
313 * store geometry.
314 */
315 class GrInOrderDrawBuffer : public GrFlushToGpuDrawTarget {
316 public:
317
318 /**
319 * Creates a GrInOrderDrawBuffer
320 *
321 * @param gpu the gpu object that this draw buffer flushes to.
322 * @param vertexPool pool where vertices for queued draws will be saved when
323 * the vertex source is either reserved or array.
324 * @param indexPool pool where indices for queued draws will be saved when
325 * the index source is either reserved or array.
326 */
327 GrInOrderDrawBuffer(GrGpu* gpu,
328 GrVertexBufferAllocPool* vertexPool,
329 GrIndexBufferAllocPool* indexPool);
330
331 ~GrInOrderDrawBuffer() SK_OVERRIDE;
332
333 // tracking for draws
334 DrawToken getCurrentDrawToken() SK_OVERRIDE { return DrawToken(this, fDrawID ); }
335
336 void clearStencilClip(const SkIRect& rect,
337 bool insideClip,
338 GrRenderTarget* renderTarget) SK_OVERRIDE;
339
340 void discard(GrRenderTarget*) SK_OVERRIDE;
341
342 protected:
343 void willReserveVertexAndIndexSpace(int vertexCount,
344 size_t vertexStride,
345 int indexCount) SK_OVERRIDE;
346
347 void appendIndicesAndTransforms(const void* indexValues, PathIndexType index Type,
348 const float* transformValues, PathTransformT ype transformType,
349 int count, char** indicesLocation, float** x formsLocation) {
350 int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
351 *indicesLocation = (char*) fPathIndexBuffer.alloc(count * indexBytes,
352 SkChunkAlloc::kThrow_A llocFailType);
353 SkASSERT(SkIsAlign4((uintptr_t)*indicesLocation));
354 memcpy(*indicesLocation, reinterpret_cast<const char*>(indexValues), cou nt * indexBytes);
355
356 const int xformBytes = GrPathRendering::PathTransformSize(transformType) * sizeof(float);
357 *xformsLocation = NULL;
358
359 if (0 != xformBytes) {
360 *xformsLocation = (float*) fPathTransformBuffer.alloc(count * xformB ytes,
361 SkChunkAlloc::kTh row_AllocFailType);
362 SkASSERT(SkIsAlign4((uintptr_t)*xformsLocation));
363 memcpy(*xformsLocation, transformValues, count * xformBytes);
364 }
365 }
366
367 bool canConcatToIndexBuffer(const GrIndexBuffer** ib) {
368 const GrDrawTarget::GeometrySrcState& geomSrc = this->getGeomSrc();
369
370 // we only attempt to concat when reserved verts are used with a client-specified
371 // index buffer. To make this work with client-specified VBs we'd need to know if the VB
372 // was updated between draws.
373 if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
374 kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
375 return false;
376 }
377
378 *ib = geomSrc.fIndexBuffer;
379 return true;
380 }
381
382 private:
383 friend class GrTargetCommands;
384
385 typedef GrGpu::DrawArgs DrawArgs;
281 386
282 void onReset() SK_OVERRIDE; 387 void onReset() SK_OVERRIDE;
283 void onFlush() SK_OVERRIDE; 388 void onFlush() SK_OVERRIDE;
284 389
285 // overrides from GrDrawTarget 390 // overrides from GrDrawTarget
286 void onDraw(const GrGeometryProcessor*, const DrawInfo&, const PipelineInfo& ) SK_OVERRIDE; 391 void onDraw(const GrGeometryProcessor*, const DrawInfo&, const PipelineInfo& ) SK_OVERRIDE;
287 void onDrawBatch(GrBatch*, const PipelineInfo&) SK_OVERRIDE; 392 void onDrawBatch(GrBatch*, const PipelineInfo&) SK_OVERRIDE;
288 void onDrawRect(GrPipelineBuilder*, 393 void onDrawRect(GrPipelineBuilder*,
289 GrColor, 394 GrColor,
290 const SkMatrix& viewMatrix, 395 const SkMatrix& viewMatrix,
(...skipping 25 matching lines...) Expand all
316 GrRenderTarget* renderTarget) SK_OVERRIDE; 421 GrRenderTarget* renderTarget) SK_OVERRIDE;
317 bool onCopySurface(GrSurface* dst, 422 bool onCopySurface(GrSurface* dst,
318 GrSurface* src, 423 GrSurface* src,
319 const SkIRect& srcRect, 424 const SkIRect& srcRect,
320 const SkIPoint& dstPoint) SK_OVERRIDE; 425 const SkIPoint& dstPoint) SK_OVERRIDE;
321 426
322 // Attempts to concat instances from info onto the previous draw. info must represent an 427 // Attempts to concat instances from info onto the previous draw. info must represent an
323 // instanced draw. The caller must have already recorded a new draw state an d clip if necessary. 428 // instanced draw. The caller must have already recorded a new draw state an d clip if necessary.
324 int concatInstancedDraw(const DrawInfo&); 429 int concatInstancedDraw(const DrawInfo&);
325 430
326 // Determines whether the current draw operation requires a new GrPipeline a nd if so
327 // records it. If the draw can be skipped false is returned and no new GrPip eline is
328 // recorded.
329 // TODO delete the primproc variant when we have batches everywhere
330 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(const GrPrimitiveProce ssor*,
331 const PipelineInfo&);
332 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrBatch*, const Pipeli neInfo&);
333
334 // We lazily record clip changes in order to skip clips that have no effect. 431 // We lazily record clip changes in order to skip clips that have no effect.
335 void recordClipIfNecessary(); 432 void recordClipIfNecessary();
336 // Records any trace markers for a command 433 // Records any trace markers for a command
337 void recordTraceMarkersIfNecessary(Cmd*); 434 void recordTraceMarkersIfNecessary(GrTargetCommands::Cmd*);
338 SkString getCmdString(int index) const { 435 SkString getCmdString(int index) const {
339 SkASSERT(index < fGpuCmdMarkers.count()); 436 SkASSERT(index < fGpuCmdMarkers.count());
340 return fGpuCmdMarkers[index].toString(); 437 return fGpuCmdMarkers[index].toString();
341 } 438 }
342 bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; } 439 bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; }
343 440
344 GrBatchTarget* getBatchTarget() { return &fBatchTarget; }
345
346 // TODO: Use a single allocator for commands and records 441 // TODO: Use a single allocator for commands and records
347 enum { 442 enum {
348 kCmdBufferInitialSizeInBytes = 8 * 1024,
349 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's 443 kPathIdxBufferMinReserve = 2 * 64, // 64 uint16_t's
350 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms 444 kPathXformBufferMinReserve = 2 * 64, // 64 two-float transforms
351 }; 445 };
352 446
353 CmdBuffer fCmdBuffer; 447 GrTargetCommands fCommands;
354 SetState* fPrevState;
355 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers; 448 SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
356 SkChunkAlloc fPathIndexBuffer; 449 SkChunkAlloc fPathIndexBuffer;
357 SkChunkAlloc fPathTransformBuffer; 450 SkChunkAlloc fPathTransformBuffer;
358 uint32_t fDrawID; 451 uint32_t fDrawID;
359 GrBatchTarget fBatchTarget;
360 // TODO hack until batch is everywhere
361 DrawBatch* fDrawBatch;
362
363 // This will go away when everything uses batch. However, in the short term anything which
364 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch
365 inline void closeBatch();
366 452
367 typedef GrFlushToGpuDrawTarget INHERITED; 453 typedef GrFlushToGpuDrawTarget INHERITED;
368 }; 454 };
369 455
370 #endif 456 #endif
OLDNEW
« no previous file with comments | « src/gpu/GrDrawTarget.h ('k') | src/gpu/GrInOrderDrawBuffer.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698