OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrGLBuffer.h" | 8 #include "GrGLBuffer.h" |
9 #include "GrGLGpu.h" | 9 #include "GrGLGpu.h" |
10 #include "SkTraceMemoryDump.h" | 10 #include "SkTraceMemoryDump.h" |
(...skipping 10 matching lines...) |
21 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) | 21 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
22 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR | 22 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
23 #endif | 23 #endif |
24 | 24 |
25 #ifdef SK_DEBUG | 25 #ifdef SK_DEBUG |
26 #define VALIDATE() this->validate() | 26 #define VALIDATE() this->validate() |
27 #else | 27 #else |
28 #define VALIDATE() do {} while(false) | 28 #define VALIDATE() do {} while(false) |
29 #endif | 29 #endif |
30 | 30 |
31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType, | 31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size, |
32 GrAccessPattern accessPattern, const void* data) { | 32 GrAccessPattern accessPattern) { |
| 33 static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType); |
33 bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() && | 34 bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() && |
34 GrBufferTypeIsVertexOrIndex(intendedType) && | 35 kDynamic_GrAccessPattern == accessPattern && |
35 kDynamic_GrAccessPattern == accessPattern; | 36 ((kIsVertexOrIndex >> type) & 1); |
36 SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, | 37 SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked)); |
37 cpuBacked, data)); | 38 if (!cpuBacked && 0 == buffer->fBufferID) { |
38 if (!cpuBacked && 0 == buffer->bufferID()) { | |
39 return nullptr; | 39 return nullptr; |
40 } | 40 } |
41 return buffer.release(); | 41 return buffer.release(); |
42 } | 42 } |
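
Note on the new membership test: ((kIsVertexOrIndex >> type) & 1) replaces the GrBufferTypeIsVertexOrIndex() helper with a branch-free bitmask lookup. A minimal standalone sketch of the technique, using hypothetical enum names in place of the Gr enums:

    // Sketch only; BufferType mirrors the 0-based layout the code above asserts.
    enum BufferType { kVertex = 0, kIndex = 1, kTexel = 2, kDrawIndirect = 3 };

    // One bit per enum value of interest...
    static const int kIsVertexOrIndex = (1 << kVertex) | (1 << kIndex);

    // ...so membership becomes a shift and mask instead of two comparisons.
    static bool isVertexOrIndex(BufferType type) {
        return ((kIsVertexOrIndex >> type) & 1) != 0;
    }
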
43 | 43 |
44 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer | 44 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
45 // objects are implemented as client-side-arrays on tile-deferred architectures. | 45 // objects are implemented as client-side-arrays on tile-deferred architectures. |
46 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW | 46 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW |
47 | 47 |
48 inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType, | 48 inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern, |
49 GrAccessPattern accessPattern) { | 49 const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) { |
| 50 static const GrGLenum nonXferTargets[] = { |
| 51 GR_GL_ARRAY_BUFFER, |
| 52 GR_GL_ELEMENT_ARRAY_BUFFER |
| 53 }; |
| 54 GR_STATIC_ASSERT(0 == kVertex_GrBufferType); |
| 55 GR_STATIC_ASSERT(1 == kIndex_GrBufferType); |
| 56 |
50 static const GrGLenum drawUsages[] = { | 57 static const GrGLenum drawUsages[] = { |
51 DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium? | 58 DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium? |
52 GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern | 59 GR_GL_STATIC_DRAW, |
53 GR_GL_STREAM_DRAW // kStream_GrAccessPattern | 60 GR_GL_STREAM_DRAW |
54 }; | 61 }; |
55 | |
56 static const GrGLenum readUsages[] = { | 62 static const GrGLenum readUsages[] = { |
57 GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern | 63 GR_GL_DYNAMIC_READ, |
58 GR_GL_STATIC_READ, // kStatic_GrAccessPattern | 64 GR_GL_STATIC_READ, |
59 GR_GL_STREAM_READ // kStream_GrAccessPattern | 65 GR_GL_STREAM_READ |
60 }; | 66 }; |
61 | |
62 GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern); | 67 GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern); |
63 GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern); | 68 GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern); |
64 GR_STATIC_ASSERT(2 == kStream_GrAccessPattern); | 69 GR_STATIC_ASSERT(2 == kStream_GrAccessPattern); |
65 GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern); | 70 GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern); |
66 GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern); | 71 GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern); |
67 | 72 |
68 static GrGLenum const* const usageTypes[] = { | |
69 drawUsages, // kVertex_GrBufferType, | |
70 drawUsages, // kIndex_GrBufferType, | |
71 drawUsages, // kTexel_GrBufferType, | |
72 drawUsages, // kDrawIndirect_GrBufferType, | |
73 drawUsages, // kXferCpuToGpu_GrBufferType, | |
74 readUsages // kXferGpuToCpu_GrBufferType, | |
75 }; | |
76 | |
77 GR_STATIC_ASSERT(0 == kVertex_GrBufferType); | |
78 GR_STATIC_ASSERT(1 == kIndex_GrBufferType); | |
79 GR_STATIC_ASSERT(2 == kTexel_GrBufferType); | |
80 GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType); | |
81 GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType); | |
82 GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType); | |
83 GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount); | |
84 | |
85 SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType); | |
86 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); | 73 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); |
87 | 74 |
88 return usageTypes[bufferType][accessPattern]; | 75 switch (type) { |
| 76 case kVertex_GrBufferType: |
| 77 case kIndex_GrBufferType: |
| 78 *target = nonXferTargets[type]; |
| 79 *usage = drawUsages[accessPattern]; |
| 80 break; |
| 81 case kXferCpuToGpu_GrBufferType: |
| 82 if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) { |
| 83 *target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM; |
| 84 } else { |
| 85 SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType()); |
| 86 *target = GR_GL_PIXEL_UNPACK_BUFFER; |
| 87 } |
| 88 *usage = drawUsages[accessPattern]; |
| 89 break; |
| 90 case kXferGpuToCpu_GrBufferType: |
| 91 if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) { |
| 92 *target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; |
| 93 } else { |
| 94 SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType()); |
| 95 *target = GR_GL_PIXEL_PACK_BUFFER; |
| 96 } |
| 97 *usage = readUsages[accessPattern]; |
| 98 break; |
| 99 default: |
| 100 SkFAIL("Unexpected buffer type."); |
| 101 break; |
| 102 } |
89 } | 103 } |
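
The usage tables in both versions depend on the access-pattern enum being dense and zero-based; the GR_STATIC_ASSERTs pin those assumptions down so a reordered enum breaks the build instead of silently indexing the wrong usage. A hedged sketch of the same lookup-table-plus-compile-time-guard pattern in standalone C++ (static_assert standing in for GR_STATIC_ASSERT, literal GL values standing in for the GR_GL_ constants):

    enum AccessPattern { kDynamic = 0, kStatic = 1, kStream = 2, kLast = kStream };

    // Table order must track enum order; the asserts below enforce that.
    static const unsigned kDrawUsages[] = {
        0x88E8,  // GL_DYNAMIC_DRAW, slot for kDynamic
        0x88E4,  // GL_STATIC_DRAW,  slot for kStatic
        0x88E0,  // GL_STREAM_DRAW,  slot for kStream
    };
    static_assert(0 == kDynamic && 1 == kStatic && 2 == kStream, "enum reordered");
    static_assert(sizeof(kDrawUsages) / sizeof(kDrawUsages[0]) == 1 + kLast,
                  "usage table out of sync with enum");

    static unsigned usage_for(AccessPattern p) { return kDrawUsages[p]; }
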
90 | 104 |
91 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType, | 105 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern, |
92 GrAccessPattern accessPattern, bool cpuBacked, const void* data) | 106 bool cpuBacked) |
93 : INHERITED(gpu, size, intendedType, accessPattern, cpuBacked), | 107 : INHERITED(gpu, type, size, accessPattern, cpuBacked), |
94 fCPUData(nullptr), | 108 fCPUData(nullptr), |
95 fIntendedType(intendedType), | 109 fTarget(0), |
96 fBufferID(0), | 110 fBufferID(0), |
97 fSizeInBytes(size), | 111 fSizeInBytes(size), |
98 fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)), | 112 fUsage(0), |
99 fGLSizeInBytes(0) { | 113 fGLSizeInBytes(0) { |
100 if (this->isCPUBacked()) { | 114 if (cpuBacked) { |
101 // Core profile uses vertex array objects, which disallow client side arrays. | |
102 SkASSERT(!gpu->glCaps().isCoreProfile()); | |
103 if (gpu->caps()->mustClearUploadedBufferData()) { | 115 if (gpu->caps()->mustClearUploadedBufferData()) { |
104 fCPUData = sk_calloc_throw(fSizeInBytes); | 116 fCPUData = sk_calloc_throw(fSizeInBytes); |
105 } else { | 117 } else { |
106 fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW); | 118 fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW); |
107 } | 119 } |
108 if (data) { | 120 SkASSERT(kVertex_GrBufferType == type || kIndex_GrBufferType == type); |
109 memcpy(fCPUData, data, fSizeInBytes); | 121 fTarget = kVertex_GrBufferType == type ? GR_GL_ARRAY_BUFFER : GR_GL_ELEMENT_ARRAY_BUFFER; |
110 } | |
111 } else { | 122 } else { |
112 GL_CALL(GenBuffers(1, &fBufferID)); | 123 GL_CALL(GenBuffers(1, &fBufferID)); |
| 124 fSizeInBytes = size; |
| 125 get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage); |
113 if (fBufferID) { | 126 if (fBufferID) { |
114 GrGLenum target = gpu->bindBuffer(fIntendedType, this); | 127 gpu->bindBuffer(fBufferID, fTarget); |
115 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); | 128 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); |
116 // make sure driver can allocate memory for this buffer | 129 // make sure driver can allocate memory for this buffer |
117 GL_ALLOC_CALL(gpu->glInterface(), BufferData(target, | 130 GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget, |
118 (GrGLsizeiptr) fSizeInBytes, | 131 (GrGLsizeiptr) fSizeInBytes, |
119 data, | 132 nullptr, // data ptr |
120 fUsage)); | 133 fUsage)); |
121 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { | 134 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { |
122 GL_CALL(DeleteBuffers(1, &fBufferID)); | 135 gpu->releaseBuffer(fBufferID, fTarget); |
123 fBufferID = 0; | 136 fBufferID = 0; |
124 } else { | 137 } else { |
125 fGLSizeInBytes = fSizeInBytes; | 138 fGLSizeInBytes = fSizeInBytes; |
126 } | 139 } |
127 } | 140 } |
128 } | 141 } |
129 VALIDATE(); | 142 VALIDATE(); |
130 this->registerWithCache(); | 143 this->registerWithCache(); |
131 } | 144 } |
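
The CLEAR_ERROR_BEFORE_ALLOC / GL_ALLOC_CALL / CHECK_ALLOC_ERROR trio in the constructor is the standard probe for allocation failure around glBufferData: drain any stale error state first, then treat a fresh error (typically GL_OUT_OF_MEMORY) as a failed allocation. A rough sketch against raw GL, assuming a loader that exposes the GL 1.5 buffer entry points:

    // Sketch only: returns false when the driver could not back the buffer.
    static bool try_alloc_buffer(GLenum target, GLsizeiptr size, GLenum usage) {
        while (glGetError() != GL_NO_ERROR) {}      // clear pre-existing errors
        glBufferData(target, size, nullptr, usage); // attempt the allocation
        return glGetError() == GL_NO_ERROR;         // new error => allocation failed
    }

In the configuration visible near the top of the file, CHECK_ALLOC_ERROR collapses to the constant GR_GL_NO_ERROR, so this checking compiles away unless the build opts in.
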
132 | 145 |
133 inline GrGLGpu* GrGLBuffer::glGpu() const { | 146 inline GrGLGpu* GrGLBuffer::glGpu() const { |
134 SkASSERT(!this->wasDestroyed()); | 147 SkASSERT(!this->wasDestroyed()); |
135 return static_cast<GrGLGpu*>(this->getGpu()); | 148 return static_cast<GrGLGpu*>(this->getGpu()); |
136 } | 149 } |
137 | 150 |
138 inline const GrGLCaps& GrGLBuffer::glCaps() const { | 151 inline const GrGLCaps& GrGLBuffer::glCaps() const { |
139 return this->glGpu()->glCaps(); | 152 return this->glGpu()->glCaps(); |
140 } | 153 } |
141 | 154 |
142 void GrGLBuffer::onRelease() { | 155 void GrGLBuffer::onRelease() { |
143 if (!this->wasDestroyed()) { | 156 if (!this->wasDestroyed()) { |
144 VALIDATE(); | 157 VALIDATE(); |
145 // make sure we've not been abandoned or already released | 158 // make sure we've not been abandoned or already released |
146 if (fCPUData) { | 159 if (fCPUData) { |
147 SkASSERT(!fBufferID); | 160 SkASSERT(!fBufferID); |
148 sk_free(fCPUData); | 161 sk_free(fCPUData); |
149 fCPUData = nullptr; | 162 fCPUData = nullptr; |
150 } else if (fBufferID) { | 163 } else if (fBufferID) { |
151 GL_CALL(DeleteBuffers(1, &fBufferID)); | 164 this->glGpu()->releaseBuffer(fBufferID, fTarget); |
152 fBufferID = 0; | 165 fBufferID = 0; |
153 fGLSizeInBytes = 0; | 166 fGLSizeInBytes = 0; |
154 } | 167 } |
155 fMapPtr = nullptr; | 168 fMapPtr = nullptr; |
156 VALIDATE(); | 169 VALIDATE(); |
157 } | 170 } |
158 | 171 |
159 INHERITED::onRelease(); | 172 INHERITED::onRelease(); |
160 } | 173 } |
161 | 174 |
(...skipping 14 matching lines...) |
176 | 189 |
177 VALIDATE(); | 190 VALIDATE(); |
178 SkASSERT(!this->isMapped()); | 191 SkASSERT(!this->isMapped()); |
179 | 192 |
180 if (0 == fBufferID) { | 193 if (0 == fBufferID) { |
181 fMapPtr = fCPUData; | 194 fMapPtr = fCPUData; |
182 VALIDATE(); | 195 VALIDATE(); |
183 return; | 196 return; |
184 } | 197 } |
185 | 198 |
186 // TODO: Make this a function parameter. | 199 bool readOnly = (kXferGpuToCpu_GrBufferType == this->type()); |
187 bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType); | |
188 | 200 |
189 // Handling dirty context is done in the bindBuffer call | 201 // Handling dirty context is done in the bindBuffer call |
190 switch (this->glCaps().mapBufferType()) { | 202 switch (this->glCaps().mapBufferType()) { |
191 case GrGLCaps::kNone_MapBufferType: | 203 case GrGLCaps::kNone_MapBufferType: |
192 break; | 204 break; |
193 case GrGLCaps::kMapBuffer_MapBufferType: { | 205 case GrGLCaps::kMapBuffer_MapBufferType: |
194 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 206 this->glGpu()->bindBuffer(fBufferID, fTarget); |
195 // Let driver know it can discard the old data | 207 // Let driver know it can discard the old data |
196 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { | 208 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { |
197 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 209 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); |
198 } | 210 } |
199 GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 211 GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
200 break; | 212 break; |
201 } | |
202 case GrGLCaps::kMapBufferRange_MapBufferType: { | 213 case GrGLCaps::kMapBufferRange_MapBufferType: { |
203 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 214 this->glGpu()->bindBuffer(fBufferID, fTarget); |
204 // Make sure the GL buffer size agrees with fDesc before mapping. | 215 // Make sure the GL buffer size agrees with fDesc before mapping. |
205 if (fGLSizeInBytes != fSizeInBytes) { | 216 if (fGLSizeInBytes != fSizeInBytes) { |
206 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 217 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); |
207 } | 218 } |
208 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; | 219 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; |
209 if (kXferCpuToGpu_GrBufferType != fIntendedType) { | 220 // TODO: allow the client to specify invalidation in the transfer buffer case. |
210 // TODO: Make this a function parameter. | 221 if (kXferCpuToGpu_GrBufferType != this->type()) { |
211 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; | 222 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; |
212 } | 223 } |
213 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes, | 224 GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes, |
214 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); | 225 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); |
215 break; | 226 break; |
216 } | 227 } |
217 case GrGLCaps::kChromium_MapBufferType: { | 228 case GrGLCaps::kChromium_MapBufferType: |
218 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 229 this->glGpu()->bindBuffer(fBufferID, fTarget); |
219 // Make sure the GL buffer size agrees with fDesc before mapping. | 230 // Make sure the GL buffer size agrees with fDesc before mapping. |
220 if (fGLSizeInBytes != fSizeInBytes) { | 231 if (fGLSizeInBytes != fSizeInBytes) { |
221 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 232 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); |
222 } | 233 } |
223 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes, | 234 GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes, |
224 readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 235 readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
225 break; | 236 break; |
226 } | |
227 } | 237 } |
228 fGLSizeInBytes = fSizeInBytes; | 238 fGLSizeInBytes = fSizeInBytes; |
229 VALIDATE(); | 239 VALIDATE(); |
230 } | 240 } |
231 | 241 |
232 void GrGLBuffer::onUnmap() { | 242 void GrGLBuffer::onUnmap() { |
233 if (this->wasDestroyed()) { | 243 if (this->wasDestroyed()) { |
234 return; | 244 return; |
235 } | 245 } |
236 | 246 |
237 VALIDATE(); | 247 VALIDATE(); |
238 SkASSERT(this->isMapped()); | 248 SkASSERT(this->isMapped()); |
239 if (0 == fBufferID) { | 249 if (0 == fBufferID) { |
240 fMapPtr = nullptr; | 250 fMapPtr = nullptr; |
241 return; | 251 return; |
242 } | 252 } |
243 // bind buffer handles the dirty context | 253 // bind buffer handles the dirty context |
244 switch (this->glCaps().mapBufferType()) { | 254 switch (this->glCaps().mapBufferType()) { |
245 case GrGLCaps::kNone_MapBufferType: | 255 case GrGLCaps::kNone_MapBufferType: |
246 SkDEBUGFAIL("Shouldn't get here."); | 256 SkDEBUGFAIL("Shouldn't get here."); |
247 return; | 257 return; |
248 case GrGLCaps::kMapBuffer_MapBufferType: // fall through | 258 case GrGLCaps::kMapBuffer_MapBufferType: // fall through |
249 case GrGLCaps::kMapBufferRange_MapBufferType: { | 259 case GrGLCaps::kMapBufferRange_MapBufferType: |
250 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 260 this->glGpu()->bindBuffer(fBufferID, fTarget); |
251 GL_CALL(UnmapBuffer(target)); | 261 GL_CALL(UnmapBuffer(fTarget)); |
252 break; | 262 break; |
253 } | |
254 case GrGLCaps::kChromium_MapBufferType: | 263 case GrGLCaps::kChromium_MapBufferType: |
255 this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed? | 264 this->glGpu()->bindBuffer(fBufferID, fTarget); |
256 GL_CALL(UnmapBufferSubData(fMapPtr)); | 265 GL_CALL(UnmapBufferSubData(fMapPtr)); |
257 break; | 266 break; |
258 } | 267 } |
259 fMapPtr = nullptr; | 268 fMapPtr = nullptr; |
260 } | 269 } |
261 | 270 |
262 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { | 271 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { |
263 if (this->wasDestroyed()) { | 272 if (this->wasDestroyed()) { |
264 return false; | 273 return false; |
265 } | 274 } |
266 | 275 |
267 SkASSERT(!this->isMapped()); | 276 SkASSERT(!this->isMapped()); |
| 277 SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget); |
268 VALIDATE(); | 278 VALIDATE(); |
269 if (srcSizeInBytes > fSizeInBytes) { | 279 if (srcSizeInBytes > fSizeInBytes) { |
270 return false; | 280 return false; |
271 } | 281 } |
272 if (0 == fBufferID) { | 282 if (0 == fBufferID) { |
273 memcpy(fCPUData, src, srcSizeInBytes); | 283 memcpy(fCPUData, src, srcSizeInBytes); |
274 return true; | 284 return true; |
275 } | 285 } |
276 SkASSERT(srcSizeInBytes <= fSizeInBytes); | 286 SkASSERT(srcSizeInBytes <= fSizeInBytes); |
277 // bindbuffer handles dirty context | 287 // bindbuffer handles dirty context |
278 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 288 this->glGpu()->bindBuffer(fBufferID, fTarget); |
279 | 289 |
280 #if GR_GL_USE_BUFFER_DATA_NULL_HINT | 290 #if GR_GL_USE_BUFFER_DATA_NULL_HINT |
281 if (fSizeInBytes == srcSizeInBytes) { | 291 if (fSizeInBytes == srcSizeInBytes) { |
282 GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); | 292 GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); |
283 } else { | 293 } else { |
284 // Before we call glBufferSubData we give the driver a hint using | 294 // Before we call glBufferSubData we give the driver a hint using |
285 // glBufferData with nullptr. This makes the old buffer contents | 295 // glBufferData with nullptr. This makes the old buffer contents |
286 // inaccessible to future draws. The GPU may still be processing | 296 // inaccessible to future draws. The GPU may still be processing |
287 // draws that reference the old contents. With this hint it can | 297 // draws that reference the old contents. With this hint it can |
288 // assign a different allocation for the new contents to avoid | 298 // assign a different allocation for the new contents to avoid |
289 // flushing the gpu past draws consuming the old contents. | 299 // flushing the gpu past draws consuming the old contents. |
290 // TODO I think we actually want to try calling bufferData here | 300 // TODO I think we actually want to try calling bufferData here |
291 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 301 GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage)); |
292 GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); | 302 GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src)); |
293 } | 303 } |
294 fGLSizeInBytes = fSizeInBytes; | 304 fGLSizeInBytes = fSizeInBytes; |
295 #else | 305 #else |
296 // Note that we're cheating on the size here. Currently no methods | 306 // Note that we're cheating on the size here. Currently no methods |
297 // allow a partial update that preserves contents of non-updated | 307 // allow a partial update that preserves contents of non-updated |
298 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) | 308 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) |
299 GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage)); | 309 GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage)); |
300 fGLSizeInBytes = srcSizeInBytes; | 310 fGLSizeInBytes = srcSizeInBytes; |
301 #endif | 311 #endif |
302 VALIDATE(); | 312 VALIDATE(); |
303 return true; | 313 return true; |
304 } | 314 } |
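
The comment block inside onUpdateData describes the same orphaning idea applied to partial updates: a glBufferData(..., nullptr, ...) hint first, then glBufferSubData with the real bytes, so the driver can bind fresh storage instead of flushing past draws that still consume the old contents. Roughly, under the same raw-GL assumptions as the earlier sketches:

    // Sketch: full replace in one call, or orphan + partial upload.
    static void update_buffer(GLenum target, GLsizeiptr bufSize,
                              const void* src, GLsizeiptr srcSize, GLenum usage) {
        if (srcSize == bufSize) {
            glBufferData(target, srcSize, src, usage);      // whole-buffer replace
        } else {
            glBufferData(target, bufSize, nullptr, usage);  // orphan old contents
            glBufferSubData(target, 0, srcSize, src);       // upload the new bytes
        }
    }
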
305 | 315 |
306 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, | 316 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, |
307 const SkString& dumpName) const { | 317 const SkString& dumpName) const { |
308 SkString buffer_id; | 318 SkString buffer_id; |
309 buffer_id.appendU32(this->bufferID()); | 319 buffer_id.appendU32(this->bufferID()); |
310 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", | 320 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", |
311 buffer_id.c_str()); | 321 buffer_id.c_str()); |
312 } | 322 } |
313 | 323 |
314 #ifdef SK_DEBUG | 324 #ifdef SK_DEBUG |
315 | 325 |
316 void GrGLBuffer::validate() const { | 326 void GrGLBuffer::validate() const { |
| 327 SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget || |
| 328 GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget || |
| 329 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget || |
| 330 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget); |
317 // The following assert isn't valid when the buffer has been abandoned: | 331 // The following assert isn't valid when the buffer has been abandoned: |
318 // SkASSERT((0 == fDesc.fID) == (fCPUData)); | 332 // SkASSERT((0 == fDesc.fID) == (fCPUData)); |
319 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); | 333 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); |
320 SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes); | 334 SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes); |
321 SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr); | 335 SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr); |
322 } | 336 } |
323 | 337 |
324 #endif | 338 #endif |