OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrGLBuffer.h" | 8 #include "GrGLBuffer.h" |
9 #include "GrGLGpu.h" | 9 #include "GrGLGpu.h" |
10 #include "SkTraceMemoryDump.h" | 10 #include "SkTraceMemoryDump.h" |
(...skipping 10 matching lines...) |
21 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) | 21 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
22 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR | 22 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
23 #endif | 23 #endif |
24 | 24 |
25 #ifdef SK_DEBUG | 25 #ifdef SK_DEBUG |
26 #define VALIDATE() this->validate() | 26 #define VALIDATE() this->validate() |
27 #else | 27 #else |
28 #define VALIDATE() do {} while(false) | 28 #define VALIDATE() do {} while(false) |
29 #endif | 29 #endif |
30 | 30 |
31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size, | 31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType, |
32                                GrAccessPattern accessPattern) { | 32                                GrAccessPattern accessPattern, const void* data) { |
33     static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType); | |
34 bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() && | 33 bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() && |
35 kDynamic_GrAccessPattern == accessPattern && | 34 GrBufferTypeIsVertexOrIndex(intendedType) && |
36 ((kIsVertexOrIndex >> type) & 1); | 35 kDynamic_GrAccessPattern == accessPattern; |
37     SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked)); | 36     SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, |
38 if (!cpuBacked && 0 == buffer->fBufferID) { | 37 cpuBacked, data)); |
| 38 if (!cpuBacked && 0 == buffer->bufferID()) { |
39 return nullptr; | 39 return nullptr; |
40 } | 40 } |
41 return buffer.release(); | 41 return buffer.release(); |
42 } | 42 } |
43 | 43 |
44 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer | 44 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
45 // objects are implemented as client-side-arrays on tile-deferred architectures. | 45 // objects are implemented as client-side-arrays on tile-deferred architectures. |
46 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW | 46 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW |
47 | 47 |
48 inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern, | 48 inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType, |
49                                         const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) { | 49                                                GrAccessPattern accessPattern) { |
50 static const GrGLenum nonXferTargets[] = { | 50 static const GrGLenum drawUsages[] = { |
51         GR_GL_ARRAY_BUFFER, | 51         DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium? |
52 GR_GL_ELEMENT_ARRAY_BUFFER | 52 GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern |
| 53 GR_GL_STREAM_DRAW // kStream_GrAccessPattern |
53 }; | 54 }; |
54 GR_STATIC_ASSERT(0 == kVertex_GrBufferType); | |
55 GR_STATIC_ASSERT(1 == kIndex_GrBufferType); | |
56 | 55 |
57 static const GrGLenum drawUsages[] = { | 56 static const GrGLenum readUsages[] = { |
58         DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium? | 57         GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern |
59 GR_GL_STATIC_DRAW, | 58 GR_GL_STATIC_READ, // kStatic_GrAccessPattern |
60 GR_GL_STREAM_DRAW | 59 GR_GL_STREAM_READ // kStream_GrAccessPattern |
61 }; | 60 }; |
62 static const GrGLenum readUsages[] = { | 61 |
63 GR_GL_DYNAMIC_READ, | |
64 GR_GL_STATIC_READ, | |
65 GR_GL_STREAM_READ | |
66 }; | |
67 GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern); | 62 GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern); |
68 GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern); | 63 GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern); |
69 GR_STATIC_ASSERT(2 == kStream_GrAccessPattern); | 64 GR_STATIC_ASSERT(2 == kStream_GrAccessPattern); |
70 GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern); | 65 GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern); |
71 GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern); | 66 GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern); |
72 | 67 |
| 68 static GrGLenum const* const usageTypes[] = { |
| 69 drawUsages, // kVertex_GrBufferType, |
| 70 drawUsages, // kIndex_GrBufferType, |
| 71 drawUsages, // kTexel_GrBufferType, |
| 72 drawUsages, // kDrawIndirect_GrBufferType, |
| 73 drawUsages, // kXferCpuToGpu_GrBufferType, |
| 74 readUsages // kXferGpuToCpu_GrBufferType, |
| 75 }; |
| 76 |
| 77 GR_STATIC_ASSERT(0 == kVertex_GrBufferType); |
| 78 GR_STATIC_ASSERT(1 == kIndex_GrBufferType); |
| 79 GR_STATIC_ASSERT(2 == kTexel_GrBufferType); |
| 80 GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType); |
| 81 GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType); |
| 82 GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType); |
| 83 GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount); |
| 84 |
| 85 SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType); |
73 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); | 86 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); |
74 | 87 |
75 switch (type) { | 88 return usageTypes[bufferType][accessPattern]; |
76 case kVertex_GrBufferType: | |
77 case kIndex_GrBufferType: | |
78 *target = nonXferTargets[type]; | |
79 *usage = drawUsages[accessPattern]; | |
80 break; | |
81 case kXferCpuToGpu_GrBufferType: | |
82             if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) { | |
83 *target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM; | |
84 } else { | |
85                 SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType()); | |
86 *target = GR_GL_PIXEL_UNPACK_BUFFER; | |
87 } | |
88 *usage = drawUsages[accessPattern]; | |
89 break; | |
90 case kXferGpuToCpu_GrBufferType: | |
91             if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) { | |
92 *target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; | |
93 } else { | |
94                 SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType()); | |
95 *target = GR_GL_PIXEL_PACK_BUFFER; | |
96 } | |
97 *usage = readUsages[accessPattern]; | |
98 break; | |
99 default: | |
100 SkFAIL("Unexpected buffer type."); | |
101 break; | |
102 } | |
103 } | 89 } |
104 | 90 |
105 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern, | 91 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType, |
106                        bool cpuBacked) | 92                        GrAccessPattern accessPattern, bool cpuBacked, const void* data) |
107 : INHERITED(gpu, type, size, accessPattern, cpuBacked), | 93 : INHERITED(gpu, size, intendedType, accessPattern, cpuBacked), |
108 fCPUData(nullptr), | 94 fCPUData(nullptr), |
109 fTarget(0), | 95 fIntendedType(intendedType), |
110 fBufferID(0), | 96 fBufferID(0), |
111 fSizeInBytes(size), | 97 fSizeInBytes(size), |
112 fUsage(0), | 98 fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)), |
113 fGLSizeInBytes(0) { | 99 fGLSizeInBytes(0) { |
114 if (cpuBacked) { | 100 if (this->isCPUBacked()) { |
 | 101         // Core profile uses vertex array objects, which disallow client side arrays. |
| 102 SkASSERT(!gpu->glCaps().isCoreProfile()); |
115 if (gpu->caps()->mustClearUploadedBufferData()) { | 103 if (gpu->caps()->mustClearUploadedBufferData()) { |
116 fCPUData = sk_calloc_throw(fSizeInBytes); | 104 fCPUData = sk_calloc_throw(fSizeInBytes); |
117 } else { | 105 } else { |
118 fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW); | 106 fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW); |
119 } | 107 } |
120 SkASSERT(kVertex_GrBufferType == type || kIndex_GrBufferType == type); | 108 if (data) { |
121         fTarget = kVertex_GrBufferType == type ? GR_GL_ARRAY_BUFFER : GR_GL_ELEMENT_ARRAY_BUFFER; | 109             memcpy(fCPUData, data, fSizeInBytes); |
| 110 } |
122 } else { | 111 } else { |
123 GL_CALL(GenBuffers(1, &fBufferID)); | 112 GL_CALL(GenBuffers(1, &fBufferID)); |
124 fSizeInBytes = size; | |
125         get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage); | |
126 if (fBufferID) { | 113 if (fBufferID) { |
127 gpu->bindBuffer(fBufferID, fTarget); | 114 GrGLenum target = gpu->bindBuffer(fIntendedType, this); |
128 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); | 115 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); |
129 // make sure driver can allocate memory for this buffer | 116 // make sure driver can allocate memory for this buffer |
130 GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget, | 117 GL_ALLOC_CALL(gpu->glInterface(), BufferData(target, |
131                                                          (GrGLsizeiptr) fSizeInBytes, | 118                                                          (GrGLsizeiptr) fSizeInBytes, |
132 nullptr, // data ptr | 119 data, |
133 fUsage)); | 120 fUsage)); |
134 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { | 121 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { |
135 gpu->releaseBuffer(fBufferID, fTarget); | 122 GL_CALL(DeleteBuffers(1, &fBufferID)); |
136 fBufferID = 0; | 123 fBufferID = 0; |
137 } else { | 124 } else { |
138 fGLSizeInBytes = fSizeInBytes; | 125 fGLSizeInBytes = fSizeInBytes; |
139 } | 126 } |
140 } | 127 } |
141 } | 128 } |
142 VALIDATE(); | 129 VALIDATE(); |
143 this->registerWithCache(); | 130 this->registerWithCache(); |
144 } | 131 } |
145 | 132 |
146 inline GrGLGpu* GrGLBuffer::glGpu() const { | 133 inline GrGLGpu* GrGLBuffer::glGpu() const { |
147 SkASSERT(!this->wasDestroyed()); | 134 SkASSERT(!this->wasDestroyed()); |
148 return static_cast<GrGLGpu*>(this->getGpu()); | 135 return static_cast<GrGLGpu*>(this->getGpu()); |
149 } | 136 } |
150 | 137 |
151 inline const GrGLCaps& GrGLBuffer::glCaps() const { | 138 inline const GrGLCaps& GrGLBuffer::glCaps() const { |
152 return this->glGpu()->glCaps(); | 139 return this->glGpu()->glCaps(); |
153 } | 140 } |
154 | 141 |
155 void GrGLBuffer::onRelease() { | 142 void GrGLBuffer::onRelease() { |
156 if (!this->wasDestroyed()) { | 143 if (!this->wasDestroyed()) { |
157 VALIDATE(); | 144 VALIDATE(); |
158 // make sure we've not been abandoned or already released | 145 // make sure we've not been abandoned or already released |
159 if (fCPUData) { | 146 if (fCPUData) { |
160 SkASSERT(!fBufferID); | 147 SkASSERT(!fBufferID); |
161 sk_free(fCPUData); | 148 sk_free(fCPUData); |
162 fCPUData = nullptr; | 149 fCPUData = nullptr; |
163 } else if (fBufferID) { | 150 } else if (fBufferID) { |
164 this->glGpu()->releaseBuffer(fBufferID, fTarget); | 151 GL_CALL(DeleteBuffers(1, &fBufferID)); |
165 fBufferID = 0; | 152 fBufferID = 0; |
166 fGLSizeInBytes = 0; | 153 fGLSizeInBytes = 0; |
167 } | 154 } |
168 fMapPtr = nullptr; | 155 fMapPtr = nullptr; |
169 VALIDATE(); | 156 VALIDATE(); |
170 } | 157 } |
171 | 158 |
172 INHERITED::onRelease(); | 159 INHERITED::onRelease(); |
173 } | 160 } |
174 | 161 |
(...skipping 14 matching lines...) |
189 | 176 |
190 VALIDATE(); | 177 VALIDATE(); |
191 SkASSERT(!this->isMapped()); | 178 SkASSERT(!this->isMapped()); |
192 | 179 |
193 if (0 == fBufferID) { | 180 if (0 == fBufferID) { |
194 fMapPtr = fCPUData; | 181 fMapPtr = fCPUData; |
195 VALIDATE(); | 182 VALIDATE(); |
196 return; | 183 return; |
197 } | 184 } |
198 | 185 |
199 bool readOnly = (kXferGpuToCpu_GrBufferType == this->type()); | 186 // TODO: Make this a function parameter. |
| 187 bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType); |
200 | 188 |
201 // Handling dirty context is done in the bindBuffer call | 189 // Handling dirty context is done in the bindBuffer call |
202 switch (this->glCaps().mapBufferType()) { | 190 switch (this->glCaps().mapBufferType()) { |
203 case GrGLCaps::kNone_MapBufferType: | 191 case GrGLCaps::kNone_MapBufferType: |
204 break; | 192 break; |
205 case GrGLCaps::kMapBuffer_MapBufferType: | 193 case GrGLCaps::kMapBuffer_MapBufferType: { |
206 this->glGpu()->bindBuffer(fBufferID, fTarget); | 194 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
207 // Let driver know it can discard the old data | 195 // Let driver know it can discard the old data |
208             if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { | 196             if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { |
209 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); | 197 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); |
210 } | 198 } |
211             GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 199             GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
212 break; | 200 break; |
| 201 } |
213 case GrGLCaps::kMapBufferRange_MapBufferType: { | 202 case GrGLCaps::kMapBufferRange_MapBufferType: { |
214 this->glGpu()->bindBuffer(fBufferID, fTarget); | 203 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
215 // Make sure the GL buffer size agrees with fDesc before mapping. | 204 // Make sure the GL buffer size agrees with fDesc before mapping. |
216 if (fGLSizeInBytes != fSizeInBytes) { | 205 if (fGLSizeInBytes != fSizeInBytes) { |
217 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); | 206 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); |
218 } | 207 } |
219 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; | 208 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; |
220             // TODO: allow the client to specify invalidation in the transfer buffer case. | 209             if (kXferCpuToGpu_GrBufferType != fIntendedType) { |
221 if (kXferCpuToGpu_GrBufferType != this->type()) { | 210 // TODO: Make this a function parameter. |
222 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; | 211 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; |
223 } | 212 } |
224 GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes, | 213 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes, |
225                                                 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); | 214                                                 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); |
226 break; | 215 break; |
227 } | 216 } |
228 case GrGLCaps::kChromium_MapBufferType: | 217 case GrGLCaps::kChromium_MapBufferType: { |
229 this->glGpu()->bindBuffer(fBufferID, fTarget); | 218 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
230 // Make sure the GL buffer size agrees with fDesc before mapping. | 219 // Make sure the GL buffer size agrees with fDesc before mapping. |
231 if (fGLSizeInBytes != fSizeInBytes) { | 220 if (fGLSizeInBytes != fSizeInBytes) { |
232 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); | 221 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); |
233 } | 222 } |
234 GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes, | 223 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes, |
235                                                   readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 224                                                   readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
236 break; | 225 break; |
| 226 } |
237 } | 227 } |
238 fGLSizeInBytes = fSizeInBytes; | 228 fGLSizeInBytes = fSizeInBytes; |
239 VALIDATE(); | 229 VALIDATE(); |
240 } | 230 } |
241 | 231 |
242 void GrGLBuffer::onUnmap() { | 232 void GrGLBuffer::onUnmap() { |
243 if (this->wasDestroyed()) { | 233 if (this->wasDestroyed()) { |
244 return; | 234 return; |
245 } | 235 } |
246 | 236 |
247 VALIDATE(); | 237 VALIDATE(); |
248 SkASSERT(this->isMapped()); | 238 SkASSERT(this->isMapped()); |
249 if (0 == fBufferID) { | 239 if (0 == fBufferID) { |
250 fMapPtr = nullptr; | 240 fMapPtr = nullptr; |
251 return; | 241 return; |
252 } | 242 } |
253 // bind buffer handles the dirty context | 243 // bind buffer handles the dirty context |
254 switch (this->glCaps().mapBufferType()) { | 244 switch (this->glCaps().mapBufferType()) { |
255 case GrGLCaps::kNone_MapBufferType: | 245 case GrGLCaps::kNone_MapBufferType: |
256 SkDEBUGFAIL("Shouldn't get here."); | 246 SkDEBUGFAIL("Shouldn't get here."); |
257 return; | 247 return; |
258 case GrGLCaps::kMapBuffer_MapBufferType: // fall through | 248 case GrGLCaps::kMapBuffer_MapBufferType: // fall through |
259 case GrGLCaps::kMapBufferRange_MapBufferType: | 249 case GrGLCaps::kMapBufferRange_MapBufferType: { |
260 this->glGpu()->bindBuffer(fBufferID, fTarget); | 250 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
261 GL_CALL(UnmapBuffer(fTarget)); | 251 GL_CALL(UnmapBuffer(target)); |
262 break; | 252 break; |
| 253 } |
263 case GrGLCaps::kChromium_MapBufferType: | 254 case GrGLCaps::kChromium_MapBufferType: |
264             this->glGpu()->bindBuffer(fBufferID, fTarget); | 255             this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed? |
265 GL_CALL(UnmapBufferSubData(fMapPtr)); | 256 GL_CALL(UnmapBufferSubData(fMapPtr)); |
266 break; | 257 break; |
267 } | 258 } |
268 fMapPtr = nullptr; | 259 fMapPtr = nullptr; |
269 } | 260 } |
270 | 261 |
271 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { | 262 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { |
272 if (this->wasDestroyed()) { | 263 if (this->wasDestroyed()) { |
273 return false; | 264 return false; |
274 } | 265 } |
275 | 266 |
276 SkASSERT(!this->isMapped()); | 267 SkASSERT(!this->isMapped()); |
277     SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget); | |
278 VALIDATE(); | 268 VALIDATE(); |
279 if (srcSizeInBytes > fSizeInBytes) { | 269 if (srcSizeInBytes > fSizeInBytes) { |
280 return false; | 270 return false; |
281 } | 271 } |
282 if (0 == fBufferID) { | 272 if (0 == fBufferID) { |
283 memcpy(fCPUData, src, srcSizeInBytes); | 273 memcpy(fCPUData, src, srcSizeInBytes); |
284 return true; | 274 return true; |
285 } | 275 } |
286 SkASSERT(srcSizeInBytes <= fSizeInBytes); | 276 SkASSERT(srcSizeInBytes <= fSizeInBytes); |
287 // bindbuffer handles dirty context | 277 // bindbuffer handles dirty context |
288 this->glGpu()->bindBuffer(fBufferID, fTarget); | 278 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
289 | 279 |
290 #if GR_GL_USE_BUFFER_DATA_NULL_HINT | 280 #if GR_GL_USE_BUFFER_DATA_NULL_HINT |
291 if (fSizeInBytes == srcSizeInBytes) { | 281 if (fSizeInBytes == srcSizeInBytes) { |
292         GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); | 282         GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); |
293 } else { | 283 } else { |
294 // Before we call glBufferSubData we give the driver a hint using | 284 // Before we call glBufferSubData we give the driver a hint using |
295 // glBufferData with nullptr. This makes the old buffer contents | 285 // glBufferData with nullptr. This makes the old buffer contents |
296 // inaccessible to future draws. The GPU may still be processing | 286 // inaccessible to future draws. The GPU may still be processing |
297 // draws that reference the old contents. With this hint it can | 287 // draws that reference the old contents. With this hint it can |
298 // assign a different allocation for the new contents to avoid | 288 // assign a different allocation for the new contents to avoid |
299 // flushing the gpu past draws consuming the old contents. | 289 // flushing the gpu past draws consuming the old contents. |
300 // TODO I think we actually want to try calling bufferData here | 290 // TODO I think we actually want to try calling bufferData here |
301 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); | 291 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); |
302 GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src)); | 292 GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); |
303 } | 293 } |
304 fGLSizeInBytes = fSizeInBytes; | 294 fGLSizeInBytes = fSizeInBytes; |
305 #else | 295 #else |
306 // Note that we're cheating on the size here. Currently no methods | 296 // Note that we're cheating on the size here. Currently no methods |
307 // allow a partial update that preserves contents of non-updated | 297 // allow a partial update that preserves contents of non-updated |
308 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) | 298 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) |
309 GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage)); | 299 GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage)); |
310 fGLSizeInBytes = srcSizeInBytes; | 300 fGLSizeInBytes = srcSizeInBytes; |
311 #endif | 301 #endif |
312 VALIDATE(); | 302 VALIDATE(); |
313 return true; | 303 return true; |
314 } | 304 } |
315 | 305 |
316 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, | 306 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, |
317 const SkString& dumpName) const { | 307 const SkString& dumpName) const { |
318 SkString buffer_id; | 308 SkString buffer_id; |
319 buffer_id.appendU32(this->bufferID()); | 309 buffer_id.appendU32(this->bufferID()); |
320 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", | 310 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", |
321 buffer_id.c_str()); | 311 buffer_id.c_str()); |
322 } | 312 } |
323 | 313 |
324 #ifdef SK_DEBUG | 314 #ifdef SK_DEBUG |
325 | 315 |
326 void GrGLBuffer::validate() const { | 316 void GrGLBuffer::validate() const { |
327     SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget || | |
328              GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget || | |
329 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget || | |
330 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget); | |
331 // The following assert isn't valid when the buffer has been abandoned: | 317 // The following assert isn't valid when the buffer has been abandoned: |
332 // SkASSERT((0 == fDesc.fID) == (fCPUData)); | 318 // SkASSERT((0 == fDesc.fID) == (fCPUData)); |
333 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); | 319 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); |
334 SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes); | 320 SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes); |
335 SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr); | 321 SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr); |
336 } | 322 } |
337 | 323 |
338 #endif | 324 #endif |
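
A note on the orphaning hint in onUpdateData above: the comment explains that issuing glBufferData with a null pointer before glBufferSubData lets the driver hand out fresh storage instead of stalling on draws that still read the old contents. A minimal standalone sketch of that pattern in raw OpenGL follows; the function and parameter names are illustrative only, not part of this CL:

    // Orphan-then-update. Assumes `buf` already holds `capacity` bytes of
    // storage and that srcSize <= capacity.
    void uploadWithOrphanHint(GLuint buf, size_t capacity, const void* src, size_t srcSize) {
        glBindBuffer(GL_ARRAY_BUFFER, buf);
        // Orphan the old allocation: in-flight draws keep reading it while the
        // driver provides a new allocation for us to fill.
        glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr) capacity, nullptr, GL_DYNAMIC_DRAW);
        // Write into the fresh allocation without forcing a GPU sync.
        glBufferSubData(GL_ARRAY_BUFFER, 0, (GLsizeiptr) srcSize, src);
    }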
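Likewise, the kMapBufferRange_MapBufferType path in onMap sets GR_GL_MAP_INVALIDATE_BUFFER_BIT for writes to non-transfer buffers so prior contents can be discarded. A rough core-GL (3.0+/ARB_map_buffer_range) equivalent, again a sketch rather than Skia API:

    // Map a whole buffer for writing, discarding its prior contents, as the
    // non-transfer write path above does.
    void* mapForWrite(GLenum target, GLsizeiptr size) {
        GLbitfield access = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT;
        return glMapBufferRange(target, 0, size, access);
        // Caller writes through the returned pointer, then calls glUnmapBuffer(target).
    }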