/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLBuffer.h"
#include "GrGLGpu.h"
#include "SkTraceMemoryDump.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

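// When GR_GL_CHECK_ALLOC_WITH_GET_ERROR is set, buffer allocations are checked for
// out-of-memory: pending errors are cleared before the alloc call and glGetError is
// queried afterward. Otherwise these macros compile down to plain GL calls.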
#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call)      GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface)        GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call)      GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface)        GR_GL_NO_ERROR
#endif

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

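// Creation either wraps a GL buffer object or, for dynamic vertex/index data on
// drivers that prefer client-side arrays, a plain CPU allocation. GL-backed creation
// returns nullptr if the buffer object could not be allocated.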
GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
                               GrAccessPattern accessPattern) {
    static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType);
    bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() &&
                     kDynamic_GrAccessPattern == accessPattern &&
                     ((kIsVertexOrIndex >> type) & 1);
    SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked));
    if (!cpuBacked && 0 == buffer->fBufferID) {
        return nullptr;
    }
    return buffer.release();
}

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW

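// Maps a (buffer type, access pattern) pair to the GL bind target and usage hint.
// The usage tables are indexed directly by GrAccessPattern, which the static asserts
// below verify.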
inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern,
                                        const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) {
    static const GrGLenum nonXferTargets[] = {
        GR_GL_ARRAY_BUFFER,
        GR_GL_ELEMENT_ARRAY_BUFFER
    };
    GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
    GR_STATIC_ASSERT(1 == kIndex_GrBufferType);

    static const GrGLenum drawUsages[] = {
        DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
        GR_GL_STATIC_DRAW,
        GR_GL_STREAM_DRAW
    };
    static const GrGLenum readUsages[] = {
        GR_GL_DYNAMIC_READ,
        GR_GL_STATIC_READ,
        GR_GL_STREAM_READ
    };
    GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
    GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
    GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);

    SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);

    switch (type) {
        case kVertex_GrBufferType:
        case kIndex_GrBufferType:
            *target = nonXferTargets[type];
            *usage = drawUsages[accessPattern];
            break;
        case kXferCpuToGpu_GrBufferType:
            if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
                *target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
            } else {
                SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
                *target = GR_GL_PIXEL_UNPACK_BUFFER;
            }
            *usage = drawUsages[accessPattern];
            break;
        case kXferGpuToCpu_GrBufferType:
            if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
                *target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
            } else {
                SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
                *target = GR_GL_PIXEL_PACK_BUFFER;
            }
            *usage = readUsages[accessPattern];
            break;
        default:
            SkFAIL("Unexpected buffer type.");
            break;
    }
}

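// For GL-backed buffers the constructor performs a trial BufferData allocation, so
// that creation can fail cleanly (fBufferID is zeroed) if the driver cannot provide
// the requested storage.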
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern,
                       bool cpuBacked)
    : INHERITED(gpu, type, size, accessPattern, cpuBacked),
      fCPUData(nullptr),
      fTarget(0),
      fBufferID(0),
      fSizeInBytes(size),
      fUsage(0),
      fGLSizeInBytes(0) {
    if (cpuBacked) {
        if (gpu->caps()->mustClearUploadedBufferData()) {
            fCPUData = sk_calloc_throw(fSizeInBytes);
        } else {
            fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW);
        }
        // CPU-backed buffers are only created for vertex/index data. Give them their
        // real GL target anyway so that validate() and the target assert in
        // onUpdateData() also hold on the CPU-backed path.
        SkASSERT(kVertex_GrBufferType == type || kIndex_GrBufferType == type);
        fTarget = kVertex_GrBufferType == type ? GR_GL_ARRAY_BUFFER
                                               : GR_GL_ELEMENT_ARRAY_BUFFER;
    } else {
        GL_CALL(GenBuffers(1, &fBufferID));
        get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage);
        if (fBufferID) {
            gpu->bindBuffer(fBufferID, fTarget);
            CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget,
                                                         (GrGLsizeiptr) fSizeInBytes,
                                                         nullptr,  // data ptr
                                                         fUsage));
            if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
                gpu->releaseBuffer(fBufferID, fTarget);
                fBufferID = 0;
            } else {
                fGLSizeInBytes = fSizeInBytes;
            }
        }
    }
    VALIDATE();
    this->registerWithCache();
}

inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}

inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}

void GrGLBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        // make sure we've not been abandoned or already released
        if (fCPUData) {
            SkASSERT(!fBufferID);
            sk_free(fCPUData);
            fCPUData = nullptr;
        } else if (fBufferID) {
            this->glGpu()->releaseBuffer(fBufferID, fTarget);
            fBufferID = 0;
            fGLSizeInBytes = 0;
        }
        fMapPtr = nullptr;
        VALIDATE();
    }

    INHERITED::onRelease();
}

void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    sk_free(fCPUData);
    fCPUData = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

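// How a map is performed depends on the caps: glMapBuffer, glMapBufferRange, or
// Chromium's MapBufferSubData entry points. Before mapping, the GL allocation is
// re-specified with BufferData whenever its size is stale (and, for glMapBuffer,
// to hint that the old contents may be discarded). CPU-backed buffers simply hand
// back their own storage.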
void GrGLBuffer::onMap() {
    if (this->wasDestroyed()) {
        return;
    }

    VALIDATE();
    SkASSERT(!this->isMapped());

    if (0 == fBufferID) {
        fMapPtr = fCPUData;
        VALIDATE();
        return;
    }

    bool readOnly = (kXferGpuToCpu_GrBufferType == this->type());

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;
        case GrGLCaps::kMapBuffer_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Let driver know it can discard the old data
            if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
            // TODO: allow the client to specify invalidation in the transfer buffer case.
            if (kXferCpuToGpu_GrBufferType != this->type()) {
                writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes,
                                                readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes,
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
    }
    fGLSizeInBytes = fSizeInBytes;
    VALIDATE();
}

void GrGLBuffer::onUnmap() {
    if (this->wasDestroyed()) {
        return;
    }

    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            GL_CALL(UnmapBuffer(fTarget));
            break;
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}

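// With GR_GL_USE_BUFFER_DATA_NULL_HINT, a full-size update is a single BufferData
// call, while a partial update orphans the old allocation before BufferSubData;
// without the hint, the buffer is re-specified at the source size. See the comments
// in the function body below.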
bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget);
    VALIDATE();
    if (srcSizeInBytes > fSizeInBytes) {
        return false;
    }
    if (0 == fBufferID) {
        memcpy(fCPUData, src, srcSizeInBytes);
        return true;
    }
    SkASSERT(srcSizeInBytes <= fSizeInBytes);
    // bindBuffer handles the dirty context
    this->glGpu()->bindBuffer(fBufferID, fTarget);

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    if (fSizeInBytes == srcSizeInBytes) {
        GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
    } else {
        // Before we call glBufferSubData we give the driver a hint using
        // glBufferData with nullptr. This makes the old buffer contents
        // inaccessible to future draws. The GPU may still be processing
        // draws that reference the old contents. With this hint it can
        // assign a different allocation for the new contents to avoid
        // flushing the gpu past draws consuming the old contents.
        // TODO I think we actually want to try calling bufferData here
        GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
        GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src));
    }
    fGLSizeInBytes = fSizeInBytes;
#else
    // Note that we're cheating on the size here. Currently no methods
    // allow a partial update that preserves contents of non-updated
    // portions of the buffer (map() does a glBufferData(..size, nullptr..))
    GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage));
    fGLSizeInBytes = srcSizeInBytes;
#endif
    VALIDATE();
    return true;
}

void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                                  const SkString& dumpName) const {
    SkString buffer_id;
    buffer_id.appendU32(this->bufferID());
    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
                                      buffer_id.c_str());
}

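// Debug-only sanity checks. The invariants asserted below: fTarget is always a valid
// bind target, only a live GL buffer ID may own GL-side storage, and a mapped
// CPU-backed buffer maps directly to fCPUData.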
#ifdef SK_DEBUG

void GrGLBuffer::validate() const {
    SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget ||
             GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget ||
             GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget ||
             GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget);
    // The following assert isn't valid when the buffer has been abandoned:
    // SkASSERT((0 == fDesc.fID) == (fCPUData));
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes);
    SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
}

#endif