| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrGLBuffer.h" | 8 #include "GrGLBuffer.h" |
| 9 #include "GrGLGpu.h" | 9 #include "GrGLGpu.h" |
| 10 #include "SkTraceMemoryDump.h" | 10 #include "SkTraceMemoryDump.h" |
| (...skipping 12 matching lines...) |
| 23 #endif | 23 #endif |
| 24 | 24 |
| 25 #ifdef SK_DEBUG | 25 #ifdef SK_DEBUG |
| 26 #define VALIDATE() this->validate() | 26 #define VALIDATE() this->validate() |
| 27 #else | 27 #else |
| 28 #define VALIDATE() do {} while(false) | 28 #define VALIDATE() do {} while(false) |
| 29 #endif | 29 #endif |
| 30 | 30 |
| 31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType, | 31 GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType, |
| 32 GrAccessPattern accessPattern, const void* data) { | 32 GrAccessPattern accessPattern, const void* data) { |
| 33 bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() && | 33 SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data)); |
| 34 GrBufferTypeIsVertexOrIndex(intendedType) && | 34 if (0 == buffer->bufferID()) { |
| 35 kDynamic_GrAccessPattern == accessPattern; | |
| 36 SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, | |
| 37 cpuBacked, data)); | |
| 38 if (!cpuBacked && 0 == buffer->bufferID()) { | |
| 39 return nullptr; | 35 return nullptr; |
| 40 } | 36 } |
| 41 return buffer.release(); | 37 return buffer.release(); |
| 42 } | 38 } |
| 43 | 39 |
| 44 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer | 40 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
| 45 // objects are implemented as client-side-arrays on tile-deferred architectures. | 41 // objects are implemented as client-side-arrays on tile-deferred architectures. |
| 46 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW | 42 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW |
| 47 | 43 |
| 48 inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType, | 44 inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType, |
| (...skipping 33 matching lines...) |
| 82 GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType); | 78 GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType); |
| 83 GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount); | 79 GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount); |
| 84 | 80 |
| 85 SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType); | 81 SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType); |
| 86 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); | 82 SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern); |
| 87 | 83 |
| 88 return usageTypes[bufferType][accessPattern]; | 84 return usageTypes[bufferType][accessPattern]; |
| 89 } | 85 } |
| 90 | 86 |
| 91 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType, | 87 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType, |
| 92 GrAccessPattern accessPattern, bool cpuBacked, const void* data) | 88 GrAccessPattern accessPattern, const void* data) |
| 93 : INHERITED(gpu, size, intendedType, accessPattern, cpuBacked), | 89 : INHERITED(gpu, size, intendedType, accessPattern), |
| 94 fCPUData(nullptr), | |
| 95 fIntendedType(intendedType), | 90 fIntendedType(intendedType), |
| 96 fBufferID(0), | 91 fBufferID(0), |
| 97 fSizeInBytes(size), | |
| 98 fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)), | 92 fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)), |
| 99 fGLSizeInBytes(0), | 93 fGLSizeInBytes(0), |
| 100 fHasAttachedToTexture(false) { | 94 fHasAttachedToTexture(false) { |
| 101 if (this->isCPUBacked()) { | 95 GL_CALL(GenBuffers(1, &fBufferID)); |
| 102 // Core profile uses vertex array objects, which disallow client side arrays. | 96 if (fBufferID) { |
| 103 SkASSERT(!gpu->glCaps().isCoreProfile()); | 97 GrGLenum target = gpu->bindBuffer(fIntendedType, this); |
| 104 if (gpu->caps()->mustClearUploadedBufferData()) { | 98 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); |
| 105 fCPUData = sk_calloc_throw(fSizeInBytes); | 99 // make sure driver can allocate memory for this buffer |
| 100 GL_ALLOC_CALL(gpu->glInterface(), BufferData(target, |
| 101 (GrGLsizeiptr) size, |
| 102 data, |
| 103 fUsage)); |
| 104 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { |
| 105 GL_CALL(DeleteBuffers(1, &fBufferID)); |
| 106 fBufferID = 0; |
| 106 } else { | 107 } else { |
| 107 fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW); | 108 fGLSizeInBytes = size; |
| 108 } | |
| 109 if (data) { | |
| 110 memcpy(fCPUData, data, fSizeInBytes); | |
| 111 } | |
| 112 } else { | |
| 113 GL_CALL(GenBuffers(1, &fBufferID)); | |
| 114 if (fBufferID) { | |
| 115 GrGLenum target = gpu->bindBuffer(fIntendedType, this); | |
| 116 CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface()); | |
| 117 // make sure driver can allocate memory for this buffer | |
| 118 GL_ALLOC_CALL(gpu->glInterface(), BufferData(target, | |
| 119 (GrGLsizeiptr) fSizeInBytes, | |
| 120 data, | |
| 121 fUsage)); | |
| 122 if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) { | |
| 123 GL_CALL(DeleteBuffers(1, &fBufferID)); | |
| 124 fBufferID = 0; | |
| 125 } else { | |
| 126 fGLSizeInBytes = fSizeInBytes; | |
| 127 } | |
| 128 } | 109 } |
| 129 } | 110 } |
| 130 VALIDATE(); | 111 VALIDATE(); |
| 131 this->registerWithCache(SkBudgeted::kYes); | 112 this->registerWithCache(SkBudgeted::kYes); |
| 132 } | 113 } |
| 133 | 114 |
| 134 inline GrGLGpu* GrGLBuffer::glGpu() const { | 115 inline GrGLGpu* GrGLBuffer::glGpu() const { |
| 135 SkASSERT(!this->wasDestroyed()); | 116 SkASSERT(!this->wasDestroyed()); |
| 136 return static_cast<GrGLGpu*>(this->getGpu()); | 117 return static_cast<GrGLGpu*>(this->getGpu()); |
| 137 } | 118 } |
| 138 | 119 |
| 139 inline const GrGLCaps& GrGLBuffer::glCaps() const { | 120 inline const GrGLCaps& GrGLBuffer::glCaps() const { |
| 140 return this->glGpu()->glCaps(); | 121 return this->glGpu()->glCaps(); |
| 141 } | 122 } |
| 142 | 123 |
| 143 void GrGLBuffer::onRelease() { | 124 void GrGLBuffer::onRelease() { |
| 144 if (!this->wasDestroyed()) { | 125 if (!this->wasDestroyed()) { |
| 145 VALIDATE(); | 126 VALIDATE(); |
| 146 // make sure we've not been abandoned or already released | 127 // make sure we've not been abandoned or already released |
| 147 if (fCPUData) { | 128 if (fBufferID) { |
| 148 SkASSERT(!fBufferID); | |
| 149 sk_free(fCPUData); | |
| 150 fCPUData = nullptr; | |
| 151 } else if (fBufferID) { | |
| 152 GL_CALL(DeleteBuffers(1, &fBufferID)); | 129 GL_CALL(DeleteBuffers(1, &fBufferID)); |
| 153 fBufferID = 0; | 130 fBufferID = 0; |
| 154 fGLSizeInBytes = 0; | 131 fGLSizeInBytes = 0; |
| 155 this->glGpu()->notifyBufferReleased(this); | 132 this->glGpu()->notifyBufferReleased(this); |
| 156 } | 133 } |
| 157 fMapPtr = nullptr; | 134 fMapPtr = nullptr; |
| 158 VALIDATE(); | 135 VALIDATE(); |
| 159 } | 136 } |
| 160 | 137 |
| 161 INHERITED::onRelease(); | 138 INHERITED::onRelease(); |
| 162 } | 139 } |
| 163 | 140 |
| 164 void GrGLBuffer::onAbandon() { | 141 void GrGLBuffer::onAbandon() { |
| 165 fBufferID = 0; | 142 fBufferID = 0; |
| 166 fGLSizeInBytes = 0; | 143 fGLSizeInBytes = 0; |
| 167 fMapPtr = nullptr; | 144 fMapPtr = nullptr; |
| 168 sk_free(fCPUData); | |
| 169 fCPUData = nullptr; | |
| 170 VALIDATE(); | 145 VALIDATE(); |
| 171 INHERITED::onAbandon(); | 146 INHERITED::onAbandon(); |
| 172 } | 147 } |
| 173 | 148 |
| 174 void GrGLBuffer::onMap() { | 149 void GrGLBuffer::onMap() { |
| 175 if (this->wasDestroyed()) { | 150 if (this->wasDestroyed()) { |
| 176 return; | 151 return; |
| 177 } | 152 } |
| 178 | 153 |
| 179 VALIDATE(); | 154 VALIDATE(); |
| 180 SkASSERT(!this->isMapped()); | 155 SkASSERT(!this->isMapped()); |
| 181 | 156 |
| 182 if (0 == fBufferID) { | |
| 183 fMapPtr = fCPUData; | |
| 184 VALIDATE(); | |
| 185 return; | |
| 186 } | |
| 187 | |
| 188 // TODO: Make this a function parameter. | 157 // TODO: Make this a function parameter. |
| 189 bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType); | 158 bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType); |
| 190 | 159 |
| 191 // Handling dirty context is done in the bindBuffer call | 160 // Handling dirty context is done in the bindBuffer call |
| 192 switch (this->glCaps().mapBufferType()) { | 161 switch (this->glCaps().mapBufferType()) { |
| 193 case GrGLCaps::kNone_MapBufferType: | 162 case GrGLCaps::kNone_MapBufferType: |
| 194 break; | 163 break; |
| 195 case GrGLCaps::kMapBuffer_MapBufferType: { | 164 case GrGLCaps::kMapBuffer_MapBufferType: { |
| 196 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 165 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
| 197 // Let driver know it can discard the old data | 166 // Let driver know it can discard the old data |
| 198 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) { | 167 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != this->sizeInBytes()) { |
| 199 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 168 GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); |
| 200 } | 169 } |
| 201 GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 170 GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
| 202 break; | 171 break; |
| 203 } | 172 } |
| 204 case GrGLCaps::kMapBufferRange_MapBufferType: { | 173 case GrGLCaps::kMapBufferRange_MapBufferType: { |
| 205 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 174 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
| 206 // Make sure the GL buffer size agrees with fDesc before mapping. | 175 // Make sure the GL buffer size agrees with fDesc before mapping. |
| 207 if (fGLSizeInBytes != fSizeInBytes) { | 176 if (fGLSizeInBytes != this->sizeInBytes()) { |
| 208 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 177 GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); |
| 209 } | 178 } |
| 210 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; | 179 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; |
| 211 if (kXferCpuToGpu_GrBufferType != fIntendedType) { | 180 if (kXferCpuToGpu_GrBufferType != fIntendedType) { |
| 212 // TODO: Make this a function parameter. | 181 // TODO: Make this a function parameter. |
| 213 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; | 182 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; |
| 214 } | 183 } |
| 215 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes, | 184 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(), |
| 216 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); | 185 readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); |
| 217 break; | 186 break; |
| 218 } | 187 } |
| 219 case GrGLCaps::kChromium_MapBufferType: { | 188 case GrGLCaps::kChromium_MapBufferType: { |
| 220 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 189 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
| 221 // Make sure the GL buffer size agrees with fDesc before mapping. | 190 // Make sure the GL buffer size agrees with fDesc before mapping. |
| 222 if (fGLSizeInBytes != fSizeInBytes) { | 191 if (fGLSizeInBytes != this->sizeInBytes()) { |
| 223 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 192 GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); |
| 224 } | 193 } |
| 225 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes, | 194 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(), |
| 226 readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | 195 readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); |
| 227 break; | 196 break; |
| 228 } | 197 } |
| 229 } | 198 } |
| 230 fGLSizeInBytes = fSizeInBytes; | 199 fGLSizeInBytes = this->sizeInBytes(); |
| 231 VALIDATE(); | 200 VALIDATE(); |
| 232 } | 201 } |
| 233 | 202 |
| 234 void GrGLBuffer::onUnmap() { | 203 void GrGLBuffer::onUnmap() { |
| 235 if (this->wasDestroyed()) { | 204 if (this->wasDestroyed()) { |
| 236 return; | 205 return; |
| 237 } | 206 } |
| 238 | 207 |
| 239 VALIDATE(); | 208 VALIDATE(); |
| 240 SkASSERT(this->isMapped()); | 209 SkASSERT(this->isMapped()); |
| (...skipping 20 matching lines...) |
| 261 fMapPtr = nullptr; | 230 fMapPtr = nullptr; |
| 262 } | 231 } |
| 263 | 232 |
| 264 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { | 233 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { |
| 265 if (this->wasDestroyed()) { | 234 if (this->wasDestroyed()) { |
| 266 return false; | 235 return false; |
| 267 } | 236 } |
| 268 | 237 |
| 269 SkASSERT(!this->isMapped()); | 238 SkASSERT(!this->isMapped()); |
| 270 VALIDATE(); | 239 VALIDATE(); |
| 271 if (srcSizeInBytes > fSizeInBytes) { | 240 if (srcSizeInBytes > this->sizeInBytes()) { |
| 272 return false; | 241 return false; |
| 273 } | 242 } |
| 274 if (0 == fBufferID) { | 243 SkASSERT(srcSizeInBytes <= this->sizeInBytes()); |
| 275 memcpy(fCPUData, src, srcSizeInBytes); | |
| 276 return true; | |
| 277 } | |
| 278 SkASSERT(srcSizeInBytes <= fSizeInBytes); | |
| 279 // bindbuffer handles dirty context | 244 // bindbuffer handles dirty context |
| 280 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); | 245 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); |
| 281 | 246 |
| 282 #if GR_GL_USE_BUFFER_DATA_NULL_HINT | 247 #if GR_GL_USE_BUFFER_DATA_NULL_HINT |
| 283 if (fSizeInBytes == srcSizeInBytes) { | 248 if (this->sizeInBytes() == srcSizeInBytes) { |
| 284 GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); | 249 GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); |
| 285 } else { | 250 } else { |
| 286 // Before we call glBufferSubData we give the driver a hint using | 251 // Before we call glBufferSubData we give the driver a hint using |
| 287 // glBufferData with nullptr. This makes the old buffer contents | 252 // glBufferData with nullptr. This makes the old buffer contents |
| 288 // inaccessible to future draws. The GPU may still be processing | 253 // inaccessible to future draws. The GPU may still be processing |
| 289 // draws that reference the old contents. With this hint it can | 254 // draws that reference the old contents. With this hint it can |
| 290 // assign a different allocation for the new contents to avoid | 255 // assign a different allocation for the new contents to avoid |
| 291 // flushing the gpu past draws consuming the old contents. | 256 // flushing the gpu past draws consuming the old contents. |
| 292 // TODO I think we actually want to try calling bufferData here | 257 // TODO I think we actually want to try calling bufferData here |
| 293 GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage)); | 258 GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); |
| 294 GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); | 259 GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); |
| 295 } | 260 } |
| 296 fGLSizeInBytes = fSizeInBytes; | 261 fGLSizeInBytes = this->sizeInBytes(); |
| 297 #else | 262 #else |
| 298 // Note that we're cheating on the size here. Currently no methods | 263 // Note that we're cheating on the size here. Currently no methods |
| 299 // allow a partial update that preserves contents of non-updated | 264 // allow a partial update that preserves contents of non-updated |
| 300 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) | 265 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) |
| 301 GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage)); | 266 GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage)); |
| 302 fGLSizeInBytes = srcSizeInBytes; | 267 fGLSizeInBytes = srcSizeInBytes; |
| 303 #endif | 268 #endif |
| 304 VALIDATE(); | 269 VALIDATE(); |
| 305 return true; | 270 return true; |
| 306 } | 271 } |
| 307 | 272 |
| 308 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, | 273 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, |
| 309 const SkString& dumpName) const { | 274 const SkString& dumpName) const { |
| 310 SkString buffer_id; | 275 SkString buffer_id; |
| 311 buffer_id.appendU32(this->bufferID()); | 276 buffer_id.appendU32(this->bufferID()); |
| 312 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", | 277 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", |
| 313 buffer_id.c_str()); | 278 buffer_id.c_str()); |
| 314 } | 279 } |
| 315 | 280 |
| 316 #ifdef SK_DEBUG | 281 #ifdef SK_DEBUG |
| 317 | 282 |
| 318 void GrGLBuffer::validate() const { | 283 void GrGLBuffer::validate() const { |
| 319 // The following assert isn't valid when the buffer has been abandoned: | |
| 320 // SkASSERT((0 == fDesc.fID) == (fCPUData)); | |
| 321 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); | 284 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); |
| 322 SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes); | 285 SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes()); |
| 323 SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr); | |
| 324 } | 286 } |
| 325 | 287 |
| 326 #endif | 288 #endif |
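
The comment in onUpdateData() above describes the "orphaning" hint: re-specifying the store with glBufferData(nullptr) before glBufferSubData so the driver can give the new contents a fresh allocation instead of stalling on draws that still read the old one. Below is a minimal sketch of that pattern in plain OpenGL, not Skia code; it assumes a current GL context with function pointers loaded through GLAD, and the function name and parameters are illustrative.

    // Minimal sketch (not Skia code): the buffer-orphaning update pattern from
    // onUpdateData(). Assumes a current OpenGL context and function pointers
    // loaded through GLAD; names below are illustrative.
    #include <glad/glad.h>

    void uploadWithOrphaning(GLuint buffer, GLenum target, GLsizeiptr capacity,
                             const void* src, GLsizeiptr srcSize, GLenum usage) {
        glBindBuffer(target, buffer);
        if (srcSize == capacity) {
            // Full-size update: glBufferData alone re-specifies the whole store.
            glBufferData(target, srcSize, src, usage);
        } else {
            // Orphan the old store first. glBufferData with nullptr tells the
            // driver the previous contents are dead, so it can back the new data
            // with a fresh allocation instead of waiting on in-flight draws that
            // still read the old contents.
            glBufferData(target, capacity, nullptr, usage);
            glBufferSubData(target, 0, srcSize, src);
        }
    }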
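The constructor relies on the idea behind Skia's CLEAR_ERROR_BEFORE_ALLOC / GL_ALLOC_CALL / CHECK_ALLOC_ERROR macros: drain any stale GL errors, attempt the allocation, then poll glGetError to see whether the driver could actually back the buffer, deleting the GL object on failure so bufferID() reports 0. A hedged sketch with plain GL calls standing in for those macros (again assuming a current context and a GLAD-style loader):

    // Minimal sketch (not Skia code) of the error-checked allocation in the
    // GrGLBuffer constructor. Returns 0 when the driver cannot allocate the store.
    #include <glad/glad.h>

    GLuint makeBufferChecked(GLenum target, GLsizeiptr size, const void* data, GLenum usage) {
        GLuint id = 0;
        glGenBuffers(1, &id);
        if (id == 0) {
            return 0;
        }
        glBindBuffer(target, id);
        while (glGetError() != GL_NO_ERROR) {
            // Drain errors left over from earlier calls so the check below is ours.
        }
        glBufferData(target, size, data, usage);   // may fail with GL_OUT_OF_MEMORY
        if (glGetError() != GL_NO_ERROR) {
            glDeleteBuffers(1, &id);               // allocation failed; caller sees 0
            return 0;
        }
        return id;
    }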