| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2013 Google Inc. | 2 * Copyright 2013 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrGLBufferImpl.h" | 8 #include "GrGLBufferImpl.h" |
| 9 #include "GrGpuGL.h" | 9 #include "GrGpuGL.h" |
| 10 | 10 |
// Convenience wrapper: issue GL call X through the given gpu's GrGLInterface.
#define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)

// In debug builds every entry point checks the buffer's invariants; in release
// builds VALIDATE() compiles away to nothing.
#ifdef SK_DEBUG
    #define VALIDATE() this->validate()
#else
    #define VALIDATE() do {} while(false)
#endif

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
| 22 | 22 |
| 23 GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferTy
pe) | 23 GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferTy
pe) |
| 24 : fDesc(desc) | 24 : fDesc(desc) |
| 25 , fBufferType(bufferType) | 25 , fBufferType(bufferType) |
| 26 , fMapPtr(NULL) { | 26 , fMapPtr(NULL) { |
| 27 if (0 == desc.fID) { | 27 if (0 == desc.fID) { |
| 28 fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW); | 28 fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW); |
| 29 fGLSizeInBytes = 0; | 29 fGLSizeInBytes = 0; |
| 30 } else { | 30 } else { |
| 31 fCPUData = NULL; | 31 fCPUData = NULL; |
| 32 // We assume that the GL buffer was created at the desc's size initially
. | 32 // We assume that the GL buffer was created at the desc's size initially
. |
| 33 fGLSizeInBytes = fDesc.fSizeInBytes; | 33 fGLSizeInBytes = fDesc.fSizeInBytes; |
| 34 } | 34 } |
| 35 VALIDATE(); | 35 VALIDATE(); |
| 36 } | 36 } |
| 37 | 37 |
| 38 void GrGLBufferImpl::release(GrGpuGL* gpu) { | 38 void GrGLBufferImpl::release(GrGLGpu* gpu) { |
| 39 VALIDATE(); | 39 VALIDATE(); |
| 40 // make sure we've not been abandoned or already released | 40 // make sure we've not been abandoned or already released |
| 41 if (fCPUData) { | 41 if (fCPUData) { |
| 42 sk_free(fCPUData); | 42 sk_free(fCPUData); |
| 43 fCPUData = NULL; | 43 fCPUData = NULL; |
| 44 } else if (fDesc.fID && !fDesc.fIsWrapped) { | 44 } else if (fDesc.fID && !fDesc.fIsWrapped) { |
| 45 GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID)); | 45 GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID)); |
| 46 if (GR_GL_ARRAY_BUFFER == fBufferType) { | 46 if (GR_GL_ARRAY_BUFFER == fBufferType) { |
| 47 gpu->notifyVertexBufferDelete(fDesc.fID); | 47 gpu->notifyVertexBufferDelete(fDesc.fID); |
| 48 } else { | 48 } else { |
| 49 SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType); | 49 SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType); |
| 50 gpu->notifyIndexBufferDelete(fDesc.fID); | 50 gpu->notifyIndexBufferDelete(fDesc.fID); |
| 51 } | 51 } |
| 52 fDesc.fID = 0; | 52 fDesc.fID = 0; |
| 53 fGLSizeInBytes = 0; | 53 fGLSizeInBytes = 0; |
| 54 } | 54 } |
| 55 fMapPtr = NULL; | 55 fMapPtr = NULL; |
| 56 VALIDATE(); | 56 VALIDATE(); |
| 57 } | 57 } |
| 58 | 58 |
| 59 void GrGLBufferImpl::abandon() { | 59 void GrGLBufferImpl::abandon() { |
| 60 fDesc.fID = 0; | 60 fDesc.fID = 0; |
| 61 fGLSizeInBytes = 0; | 61 fGLSizeInBytes = 0; |
| 62 fMapPtr = NULL; | 62 fMapPtr = NULL; |
| 63 sk_free(fCPUData); | 63 sk_free(fCPUData); |
| 64 fCPUData = NULL; | 64 fCPUData = NULL; |
| 65 VALIDATE(); | 65 VALIDATE(); |
| 66 } | 66 } |
| 67 | 67 |
| 68 void GrGLBufferImpl::bind(GrGpuGL* gpu) const { | 68 void GrGLBufferImpl::bind(GrGLGpu* gpu) const { |
| 69 VALIDATE(); | 69 VALIDATE(); |
| 70 if (GR_GL_ARRAY_BUFFER == fBufferType) { | 70 if (GR_GL_ARRAY_BUFFER == fBufferType) { |
| 71 gpu->bindVertexBuffer(fDesc.fID); | 71 gpu->bindVertexBuffer(fDesc.fID); |
| 72 } else { | 72 } else { |
| 73 SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType); | 73 SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType); |
| 74 gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID); | 74 gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID); |
| 75 } | 75 } |
| 76 VALIDATE(); | 76 VALIDATE(); |
| 77 } | 77 } |
| 78 | 78 |
| 79 void* GrGLBufferImpl::map(GrGpuGL* gpu) { | 79 void* GrGLBufferImpl::map(GrGLGpu* gpu) { |
| 80 VALIDATE(); | 80 VALIDATE(); |
| 81 SkASSERT(!this->isMapped()); | 81 SkASSERT(!this->isMapped()); |
| 82 if (0 == fDesc.fID) { | 82 if (0 == fDesc.fID) { |
| 83 fMapPtr = fCPUData; | 83 fMapPtr = fCPUData; |
| 84 } else { | 84 } else { |
| 85 switch (gpu->glCaps().mapBufferType()) { | 85 switch (gpu->glCaps().mapBufferType()) { |
| 86 case GrGLCaps::kNone_MapBufferType: | 86 case GrGLCaps::kNone_MapBufferType: |
| 87 VALIDATE(); | 87 VALIDATE(); |
| 88 return NULL; | 88 return NULL; |
| 89 case GrGLCaps::kMapBuffer_MapBufferType: | 89 case GrGLCaps::kMapBuffer_MapBufferType: |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 126 GR_GL_CALL_RET(gpu->glInterface(), | 126 GR_GL_CALL_RET(gpu->glInterface(), |
| 127 fMapPtr, | 127 fMapPtr, |
| 128 MapBufferSubData(fBufferType, 0, fGLSizeInBytes,
GR_GL_WRITE_ONLY)); | 128 MapBufferSubData(fBufferType, 0, fGLSizeInBytes,
GR_GL_WRITE_ONLY)); |
| 129 break; | 129 break; |
| 130 } | 130 } |
| 131 } | 131 } |
| 132 VALIDATE(); | 132 VALIDATE(); |
| 133 return fMapPtr; | 133 return fMapPtr; |
| 134 } | 134 } |
| 135 | 135 |
| 136 void GrGLBufferImpl::unmap(GrGpuGL* gpu) { | 136 void GrGLBufferImpl::unmap(GrGLGpu* gpu) { |
| 137 VALIDATE(); | 137 VALIDATE(); |
| 138 SkASSERT(this->isMapped()); | 138 SkASSERT(this->isMapped()); |
| 139 if (0 != fDesc.fID) { | 139 if (0 != fDesc.fID) { |
| 140 switch (gpu->glCaps().mapBufferType()) { | 140 switch (gpu->glCaps().mapBufferType()) { |
| 141 case GrGLCaps::kNone_MapBufferType: | 141 case GrGLCaps::kNone_MapBufferType: |
| 142 SkDEBUGFAIL("Shouldn't get here."); | 142 SkDEBUGFAIL("Shouldn't get here."); |
| 143 return; | 143 return; |
| 144 case GrGLCaps::kMapBuffer_MapBufferType: // fall through | 144 case GrGLCaps::kMapBuffer_MapBufferType: // fall through |
| 145 case GrGLCaps::kMapBufferRange_MapBufferType: | 145 case GrGLCaps::kMapBufferRange_MapBufferType: |
| 146 this->bind(gpu); | 146 this->bind(gpu); |
| 147 GL_CALL(gpu, UnmapBuffer(fBufferType)); | 147 GL_CALL(gpu, UnmapBuffer(fBufferType)); |
| 148 break; | 148 break; |
| 149 case GrGLCaps::kChromium_MapBufferType: | 149 case GrGLCaps::kChromium_MapBufferType: |
| 150 this->bind(gpu); | 150 this->bind(gpu); |
| 151 GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr)); | 151 GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr)); |
| 152 break; | 152 break; |
| 153 } | 153 } |
| 154 } | 154 } |
| 155 fMapPtr = NULL; | 155 fMapPtr = NULL; |
| 156 } | 156 } |
| 157 | 157 |
| 158 bool GrGLBufferImpl::isMapped() const { | 158 bool GrGLBufferImpl::isMapped() const { |
| 159 VALIDATE(); | 159 VALIDATE(); |
| 160 return SkToBool(fMapPtr); | 160 return SkToBool(fMapPtr); |
| 161 } | 161 } |
| 162 | 162 |
| 163 bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInB
ytes) { | 163 bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInB
ytes) { |
| 164 SkASSERT(!this->isMapped()); | 164 SkASSERT(!this->isMapped()); |
| 165 VALIDATE(); | 165 VALIDATE(); |
| 166 if (srcSizeInBytes > fDesc.fSizeInBytes) { | 166 if (srcSizeInBytes > fDesc.fSizeInBytes) { |
| 167 return false; | 167 return false; |
| 168 } | 168 } |
| 169 if (0 == fDesc.fID) { | 169 if (0 == fDesc.fID) { |
| 170 memcpy(fCPUData, src, srcSizeInBytes); | 170 memcpy(fCPUData, src, srcSizeInBytes); |
| 171 return true; | 171 return true; |
| 172 } | 172 } |
| 173 this->bind(gpu); | 173 this->bind(gpu); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 217 | 217 |
// Debug-only invariant checks, run by VALIDATE() at every entry point.
void GrGLBufferImpl::validate() const {
    // Only vertex and index buffer targets are supported.
    SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
    // The following assert isn't valid when the buffer has been abandoned:
    // SkASSERT((0 == fDesc.fID) == (fCPUData));
    // A wrapped buffer must still have its GL id.
    SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
    // CPU-emulated buffers never track a GL-side size.
    SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
    // While mapped, a GL-backed buffer's GL allocation matches the desc size.
    SkASSERT(NULL == fMapPtr || fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
    // For CPU-emulated buffers the map pointer, if any, is the CPU allocation.
    SkASSERT(NULL == fCPUData || NULL == fMapPtr || fCPUData == fMapPtr);
}
| OLD | NEW |