| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrGLGpu.h" | 8 #include "GrGLGpu.h" |
| | 9 #include "GrGLBuffer.h" |
| 9 #include "GrGLGLSL.h" | 10 #include "GrGLGLSL.h" |
| 10 #include "GrGLStencilAttachment.h" | 11 #include "GrGLStencilAttachment.h" |
| 11 #include "GrGLTextureRenderTarget.h" | 12 #include "GrGLTextureRenderTarget.h" |
| 12 #include "GrGpuResourcePriv.h" | 13 #include "GrGpuResourcePriv.h" |
| 13 #include "GrMesh.h" | 14 #include "GrMesh.h" |
| 14 #include "GrPipeline.h" | 15 #include "GrPipeline.h" |
| 15 #include "GrPLSGeometryProcessor.h" | 16 #include "GrPLSGeometryProcessor.h" |
| 16 #include "GrRenderTargetPriv.h" | 17 #include "GrRenderTargetPriv.h" |
| 17 #include "GrSurfacePriv.h" | 18 #include "GrSurfacePriv.h" |
| 18 #include "GrTexturePriv.h" | 19 #include "GrTexturePriv.h" |
| (...skipping 819 matching lines...) |
| 838 } else { | 839 } else { |
| 839 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, | 840 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, |
| 840 left, top, width, height, config, texels); | 841 left, top, width, height, config, texels); |
| 841 } | 842 } |
| 842 | 843 |
| 843 return success; | 844 return success; |
| 844 } | 845 } |
| 845 | 846 |
| 846 bool GrGLGpu::onTransferPixels(GrSurface* surface, | 847 bool GrGLGpu::onTransferPixels(GrSurface* surface, |
| 847 int left, int top, int width, int height, | 848 int left, int top, int width, int height, |
| 848 GrPixelConfig config, GrTransferBuffer* buffer, | 849 GrPixelConfig config, GrBuffer* transferBuffer, |
| 849 size_t offset, size_t rowBytes) { | 850 size_t offset, size_t rowBytes) { |
| 850 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); | 851 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
| 851 | 852 |
| 852 if (!check_write_and_transfer_input(glTex, surface, config)) { | 853 if (!check_write_and_transfer_input(glTex, surface, config)) { |
| 853 return false; | 854 return false; |
| 854 } | 855 } |
| 855 | 856 |
| 856 // For the moment, can't transfer compressed data | 857 // For the moment, can't transfer compressed data |
| 857 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { | 858 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { |
| 858 return false; | 859 return false; |
| 859 } | 860 } |
| 860 | 861 |
| 861 this->setScratchTextureUnit(); | 862 this->setScratchTextureUnit(); |
| 862 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); | 863 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); |
| 863 | 864 |
| 864 SkASSERT(!buffer->isMapped()); | 865 SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type()); |
| 865 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer); | 866 SkASSERT(!transferBuffer->isMapped()); |
| 866 // bind the transfer buffer | 867 const GrGLBuffer* glBuffer = reinterpret_cast<const GrGLBuffer*>(transferBuffer); |
| 867 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() || | 868 this->bindBuffer(glBuffer->bufferID(), glBuffer->target()); |
| 868 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType()); | |
| 869 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID())); | |
| 870 | 869 |
| 871 bool success = false; | 870 bool success = false; |
| 872 GrMipLevel mipLevel; | 871 GrMipLevel mipLevel; |
| 873 mipLevel.fPixels = buffer; | 872 mipLevel.fPixels = transferBuffer; |
| 874 mipLevel.fRowBytes = rowBytes; | 873 mipLevel.fRowBytes = rowBytes; |
| 875 SkSTArray<1, GrMipLevel> texels; | 874 SkSTArray<1, GrMipLevel> texels; |
| 876 texels.push_back(mipLevel); | 875 texels.push_back(mipLevel); |
| 877 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, | 876 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, |
| 878 left, top, width, height, config, texels); | 877 left, top, width, height, config, texels); |
| 879 if (success) { | 878 if (success) { |
| 880 glTex->texturePriv().dirtyMipMaps(true); | 879 glTex->texturePriv().dirtyMipMaps(true); |
| 881 return true; | 880 return true; |
| 882 } | 881 } |
| 883 | 882 |
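(Review note: for anyone new to this path, the sketch below shows the raw-GL pixel-unpack-buffer idiom that onTransferPixels() builds on. Illustrative only; Skia goes through its GR_GL wrappers and uploadTexData(), and byteSize/srcPixels are assumed variables.)

    // With a buffer bound to GL_PIXEL_UNPACK_BUFFER, the final argument of
    // glTexSubImage2D is a byte offset into that buffer, not a CPU pointer.
    GLuint pbo = 0;
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER, byteSize, nullptr, GL_STREAM_DRAW);
    void* dst = glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, byteSize,
                                 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
    memcpy(dst, srcPixels, byteSize);   // fill the transfer buffer on the CPU
    glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
    glTexSubImage2D(GL_TEXTURE_2D, 0, left, top, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE, (const GLvoid*)0 /* offset */);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);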
| (...skipping 1044 matching lines...) |
| 1928 format); | 1927 format); |
| 1929 return stencil; | 1928 return stencil; |
| 1930 } | 1929 } |
| 1931 | 1930 |
| 1932 //////////////////////////////////////////////////////////////////////////////// | 1931 //////////////////////////////////////////////////////////////////////////////// |
| 1933 | 1932 |
| 1934 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer | 1933 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
| 1935 // objects are implemented as client-side-arrays on tile-deferred architectures. | 1934 // objects are implemented as client-side-arrays on tile-deferred architectures. |
| 1936 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW | 1935 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW |
| 1937 | 1936 |
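(Review note: concretely, the macro above only changes the usage hint handed to glBufferData; a raw-GL illustration, not Skia code:)

    // The usage hint is advisory, not a functional constraint. Mapping
    // Skia's "dynamic" buffers to GL_STREAM_DRAW rather than GL_DYNAMIC_DRAW
    // lets Chromium's GPU process back them with client-side arrays on
    // tile-deferred architectures.
    glBufferData(GL_ARRAY_BUFFER, size, nullptr,
                 dynamic ? GL_STREAM_DRAW : GL_STATIC_DRAW);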
| 1938 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) { | 1937 GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) { |
| 1939 GrGLVertexBuffer::Desc desc; | 1938 return GrGLBuffer::Create(this, type, size, accessPattern); |
| 1940 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage; | |
| 1941 desc.fSizeInBytes = size; | |
| 1942 | |
| 1943 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) { | |
| 1944 desc.fID = 0; | |
| 1945 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc); | |
| 1946 return vertexBuffer; | |
| 1947 } else { | |
| 1948 desc.fID = 0; | |
| 1949 GL_CALL(GenBuffers(1, &desc.fID)); | |
| 1950 if (desc.fID) { | |
| 1951 fHWGeometryState.setVertexBufferID(this, desc.fID); | |
| 1952 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 1953 // make sure driver can allocate memory for this buffer | |
| 1954 GL_ALLOC_CALL(this->glInterface(), | |
| 1955 BufferData(GR_GL_ARRAY_BUFFER, | |
| 1956 (GrGLsizeiptr) desc.fSizeInBytes, | |
| 1957 nullptr, // data ptr | |
| 1958 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW)); | |
| 1959 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { | |
| 1960 GL_CALL(DeleteBuffers(1, &desc.fID)); | |
| 1961 this->notifyVertexBufferDelete(desc.fID); | |
| 1962 return nullptr; | |
| 1963 } | |
| 1964 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc); | |
| 1965 return vertexBuffer; | |
| 1966 } | |
| 1967 return nullptr; | |
| 1968 } | |
| 1969 } | |
| 1970 | |
| 1971 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) { | |
| 1972 GrGLIndexBuffer::Desc desc; | |
| 1973 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage; | |
| 1974 desc.fSizeInBytes = size; | |
| 1975 | |
| 1976 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) { | |
| 1977 desc.fID = 0; | |
| 1978 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc); | |
| 1979 return indexBuffer; | |
| 1980 } else { | |
| 1981 desc.fID = 0; | |
| 1982 GL_CALL(GenBuffers(1, &desc.fID)); | |
| 1983 if (desc.fID) { | |
| 1984 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID); | |
| 1985 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 1986 // make sure driver can allocate memory for this buffer | |
| 1987 GL_ALLOC_CALL(this->glInterface(), | |
| 1988 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER, | |
| 1989 (GrGLsizeiptr) desc.fSizeInBytes, | |
| 1990 nullptr, // data ptr | |
| 1991 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW)); | |
| 1992 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { | |
| 1993 GL_CALL(DeleteBuffers(1, &desc.fID)); | |
| 1994 this->notifyIndexBufferDelete(desc.fID); | |
| 1995 return nullptr; | |
| 1996 } | |
| 1997 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc); | |
| 1998 return indexBuffer; | |
| 1999 } | |
| 2000 return nullptr; | |
| 2001 } | |
| 2002 } | |
| 2003 | |
| 2004 GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) { | |
| 2005 GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType(); | |
| 2006 if (GrGLCaps::kNone_TransferBufferType == xferBufferType) { | |
| 2007 return nullptr; | |
| 2008 } | |
| 2009 | |
| 2010 GrGLTransferBuffer::Desc desc; | |
| 2011 bool toGpu = (kCpuToGpu_TransferType == xferType); | |
| 2012 desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage; | |
| 2013 | |
| 2014 desc.fSizeInBytes = size; | |
| 2015 desc.fID = 0; | |
| 2016 GL_CALL(GenBuffers(1, &desc.fID)); | |
| 2017 if (desc.fID) { | |
| 2018 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 2019 // make sure driver can allocate memory for this buffer | |
| 2020 GrGLenum target; | |
| 2021 if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) { | |
| 2022 target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM | |
| 2023 : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; | |
| 2024 } else { | |
| 2025 SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType); | |
| 2026 target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER; | |
| 2027 } | |
| 2028 GL_CALL(BindBuffer(target, desc.fID)); | |
| 2029 GL_ALLOC_CALL(this->glInterface(), | |
| 2030 BufferData(target, | |
| 2031 (GrGLsizeiptr) desc.fSizeInBytes, | |
| 2032 nullptr, // data ptr | |
| 2033 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ))); | |
| 2034 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { | |
| 2035 GL_CALL(DeleteBuffers(1, &desc.fID)); | |
| 2036 return nullptr; | |
| 2037 } | |
| 2038 GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target); | |
| 2039 return transferBuffer; | |
| 2040 } | |
| 2041 | |
| 2042 return nullptr; | |
| 2043 } | 1939 } |
| 2044 | 1940 |
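(Review note: the three near-duplicate factories on the left collapse into the single onCreateBuffer() on the right. GrGLBuffer.cpp is not part of this hunk, so the sketch below is hypothetical; only GrGLBuffer::Create's signature comes from this CL, and the helpers and constructor are assumed names.)

    // Hypothetical sketch of a unified factory: the GL target and usage are
    // now derived from (type, accessPattern) in one place.
    GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
                                   GrAccessPattern accessPattern) {
        GrGLenum target = gr_buffer_type_to_gl_target(type);           // assumed helper
        GrGLenum usage = gr_access_pattern_to_gl_usage(accessPattern); // assumed helper
        GrGLuint id = 0;
        GR_GL_CALL(gpu->glInterface(), GenBuffers(1, &id));
        if (!id) {
            return nullptr;
        }
        gpu->bindBuffer(id, target);
        GR_GL_CALL(gpu->glInterface(), BufferData(target, (GrGLsizeiptr)size,
                                                  nullptr, usage));
        return new GrGLBuffer(gpu, id, type, size, accessPattern);     // assumed ctor
    }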
| 2045 void GrGLGpu::flushScissor(const GrScissorState& scissorState, | 1941 void GrGLGpu::flushScissor(const GrScissorState& scissorState, |
| 2046 const GrGLIRect& rtViewport, | 1942 const GrGLIRect& rtViewport, |
| 2047 GrSurfaceOrigin rtOrigin) { | 1943 GrSurfaceOrigin rtOrigin) { |
| 2048 if (scissorState.enabled()) { | 1944 if (scissorState.enabled()) { |
| 2049 GrGLIRect scissor; | 1945 GrGLIRect scissor; |
| 2050 scissor.setRelativeTo(rtViewport, | 1946 scissor.setRelativeTo(rtViewport, |
| 2051 scissorState.rect().fLeft, | 1947 scissorState.rect().fLeft, |
| 2052 scissorState.rect().fTop, | 1948 scissorState.rect().fTop, |
| (...skipping 64 matching lines...) |
| 2117 // This must come after textures are flushed because a texture may need | 2013 // This must come after textures are flushed because a texture may need |
| 2118 // to be msaa-resolved (which will modify bound FBO state). | 2014 // to be msaa-resolved (which will modify bound FBO state). |
| 2119 this->flushRenderTarget(glRT, nullptr); | 2015 this->flushRenderTarget(glRT, nullptr); |
| 2120 | 2016 |
| 2121 return true; | 2017 return true; |
| 2122 } | 2018 } |
| 2123 | 2019 |
| 2124 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc, | 2020 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc, |
| 2125 const GrNonInstancedMesh& mesh, | 2021 const GrNonInstancedMesh& mesh, |
| 2126 size_t* indexOffsetInBytes) { | 2022 size_t* indexOffsetInBytes) { |
| 2127 GrGLVertexBuffer* vbuf; | 2023 const GrGLBuffer* vbuf; |
| 2128 vbuf = (GrGLVertexBuffer*) mesh.vertexBuffer(); | 2024 vbuf = static_cast<const GrGLBuffer*>(mesh.vertexBuffer()); |
| 2129 | 2025 |
| 2130 SkASSERT(vbuf); | 2026 SkASSERT(vbuf); |
| 2131 SkASSERT(!vbuf->isMapped()); | 2027 SkASSERT(!vbuf->isMapped()); |
| 2132 | 2028 |
| 2133 GrGLIndexBuffer* ibuf = nullptr; | 2029 const GrGLBuffer* ibuf = nullptr; |
| 2134 if (mesh.isIndexed()) { | 2030 if (mesh.isIndexed()) { |
| 2135 SkASSERT(indexOffsetInBytes); | 2031 SkASSERT(indexOffsetInBytes); |
| 2136 | 2032 |
| 2137 *indexOffsetInBytes = 0; | 2033 *indexOffsetInBytes = 0; |
| 2138 ibuf = (GrGLIndexBuffer*)mesh.indexBuffer(); | 2034 ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer()); |
| 2139 | 2035 |
| 2140 SkASSERT(ibuf); | 2036 SkASSERT(ibuf); |
| 2141 SkASSERT(!ibuf->isMapped()); | 2037 SkASSERT(!ibuf->isMapped()); |
| 2142 *indexOffsetInBytes += ibuf->baseOffset(); | 2038 *indexOffsetInBytes += ibuf->baseOffset(); |
| 2143 } | 2039 } |
| 2144 GrGLAttribArrayState* attribState = | 2040 GrGLAttribArrayState* attribState = |
| 2145 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf); | 2041 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf); |
| 2146 | 2042 |
| 2147 int vaCount = primProc.numAttribs(); | 2043 int vaCount = primProc.numAttribs(); |
| 2148 if (vaCount > 0) { | 2044 if (vaCount > 0) { |
| (...skipping 69 matching lines...) |
| 2218 } | 2114 } |
| 2219 break; | 2115 break; |
| 2220 case GR_GL_DRAW_INDIRECT_BUFFER: | 2116 case GR_GL_DRAW_INDIRECT_BUFFER: |
| 2221 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) { | 2117 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) { |
| 2222 fHWBoundDrawIndirectBufferID = 0; | 2118 fHWBoundDrawIndirectBufferID = 0; |
| 2223 } | 2119 } |
| 2224 break; | 2120 break; |
| 2225 } | 2121 } |
| 2226 } | 2122 } |
| 2227 | 2123 |
| 2228 static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) { | |
| 2229 static const GrGLenum grToGL[] = { | |
| 2230 GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage | |
| 2231 DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage | |
| 2232 GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage | |
| 2233 GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage | |
| 2234 }; | |
| 2235 static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch"); | |
| 2236 | |
| 2237 return grToGL[usage]; | |
| 2238 } | |
| 2239 | |
| 2240 void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, | |
| 2241 size_t currentSize, size_t requestedSize) { | |
| 2242 void* mapPtr = nullptr; | |
| 2243 GrGLenum glUsage = get_gl_usage(usage); | |
| 2244 bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage); | |
| 2245 | |
| 2246 // Handling dirty context is done in the bindBuffer call | |
| 2247 switch (this->glCaps().mapBufferType()) { | |
| 2248 case GrGLCaps::kNone_MapBufferType: | |
| 2249 break; | |
| 2250 case GrGLCaps::kMapBuffer_MapBufferType: | |
| 2251 this->bindBuffer(id, type); | |
| 2252 // Let driver know it can discard the old data | |
| 2253 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) { | |
| 2254 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); | |
| 2255 } | |
| 2256 GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); | |
| 2257 break; | |
| 2258 case GrGLCaps::kMapBufferRange_MapBufferType: { | |
| 2259 this->bindBuffer(id, type); | |
| 2260 // Make sure the GL buffer size agrees with fDesc before mapping. | |
| 2261 if (currentSize != requestedSize) { | |
| 2262 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); | |
| 2263 } | |
| 2264 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; | |
| 2265 // TODO: allow the client to specify invalidation in the stream draw case | |
| 2266 if (GrGLBufferImpl::kStreamDraw_Usage != usage) { | |
| 2267 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; | |
| 2268 } | |
| 2269 GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ? | |
| 2270 GR_GL_MAP_READ_BIT : | |
| 2271 writeAccess)); | |
| 2272 break; | |
| 2273 } | |
| 2274 case GrGLCaps::kChromium_MapBufferType: | |
| 2275 this->bindBuffer(id, type); | |
| 2276 // Make sure the GL buffer size agrees with fDesc before mapping. | |
| 2277 if (currentSize != requestedSize) { | |
| 2278 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); | |
| 2279 } | |
| 2280 GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ? | |
| 2281 GR_GL_READ_ONLY : | |
| 2282 GR_GL_WRITE_ONLY)); | |
| 2283 break; | |
| 2284 } | |
| 2285 return mapPtr; | |
| 2286 } | |
| 2287 | |
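(Review note: the invalidate-on-map pattern in the kMapBufferRange branch above, as a raw-GL reference; illustrative only, with id/size/src assumed:)

    // GL_MAP_INVALIDATE_BUFFER_BIT tells the driver the old contents are
    // dead, so it can hand back fresh storage immediately instead of
    // stalling on in-flight draws that still read the old data.
    glBindBuffer(GL_ARRAY_BUFFER, id);
    void* ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
                                 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
    memcpy(ptr, src, size);
    glUnmapBuffer(GL_ARRAY_BUFFER);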
| 2288 void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, | |
| 2289 size_t currentSize, const void* src, size_t srcSizeInBytes) { | |
| 2290 SkASSERT(srcSizeInBytes <= currentSize); | |
| 2291 // bindbuffer handles dirty context | |
| 2292 this->bindBuffer(id, type); | |
| 2293 GrGLenum glUsage = get_gl_usage(usage); | |
| 2294 | |
| 2295 #if GR_GL_USE_BUFFER_DATA_NULL_HINT | |
| 2296 if (currentSize == srcSizeInBytes) { | |
| 2297 GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage)); | |
| 2298 } else { | |
| 2299 // Before we call glBufferSubData we give the driver a hint using | |
| 2300 // glBufferData with nullptr. This makes the old buffer contents | |
| 2301 // inaccessible to future draws. The GPU may still be processing | |
| 2302 // draws that reference the old contents. With this hint it can | |
| 2303 // assign a different allocation for the new contents to avoid | |
| 2304 // flushing the gpu past draws consuming the old contents. | |
| 2305 // TODO I think we actually want to try calling bufferData here | |
| 2306 GL_CALL(BufferData(type, currentSize, nullptr, glUsage)); | |
| 2307 GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src)); | |
| 2308 } | |
| 2309 #else | |
| 2310 // Note that we're cheating on the size here. Currently no methods | |
| 2311 // allow a partial update that preserves contents of non-updated | |
| 2312 // portions of the buffer (map() does a glBufferData(..size, nullptr..)) | |
| 2313 GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage)); | |
| 2314 #endif | |
| 2315 } | |
| 2316 | |
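(Review note: the comment in the #if branch above describes classic buffer orphaning; in raw GL the idiom is just:)

    // BufferData with nullptr detaches ("orphans") the old storage, which
    // in-flight draws may still reference, and allocates new storage of the
    // same size, so the SubData below need not sync with the GPU.
    glBindBuffer(GL_ARRAY_BUFFER, id);
    glBufferData(GL_ARRAY_BUFFER, currentSize, nullptr, usage);  // orphan
    glBufferSubData(GL_ARRAY_BUFFER, 0, srcSizeInBytes, src);    // upload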
| 2317 void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) { | |
| 2318 // bind buffer handles the dirty context | |
| 2319 switch (this->glCaps().mapBufferType()) { | |
| 2320 case GrGLCaps::kNone_MapBufferType: | |
| 2321 SkDEBUGFAIL("Shouldn't get here."); | |
| 2322 return; | |
| 2323 case GrGLCaps::kMapBuffer_MapBufferType: // fall through | |
| 2324 case GrGLCaps::kMapBufferRange_MapBufferType: | |
| 2325 this->bindBuffer(id, type); | |
| 2326 GL_CALL(UnmapBuffer(type)); | |
| 2327 break; | |
| 2328 case GrGLCaps::kChromium_MapBufferType: | |
| 2329 this->bindBuffer(id, type); | |
| 2330 GL_CALL(UnmapBufferSubData(mapPtr)); | |
| 2331 break; | |
| 2332 } | |
| 2333 } | |
| 2334 | |
| 2335 void GrGLGpu::disableScissor() { | 2124 void GrGLGpu::disableScissor() { |
| 2336 if (kNo_TriState != fHWScissorSettings.fEnabled) { | 2125 if (kNo_TriState != fHWScissorSettings.fEnabled) { |
| 2337 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); | 2126 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); |
| 2338 fHWScissorSettings.fEnabled = kNo_TriState; | 2127 fHWScissorSettings.fEnabled = kNo_TriState; |
| 2339 return; | 2128 return; |
| 2340 } | 2129 } |
| 2341 } | 2130 } |
| 2342 | 2131 |
| 2343 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { | 2132 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { |
| 2344 // parent class should never let us get here with no RT | 2133 // parent class should never let us get here with no RT |
| (...skipping 2001 matching lines...) |
| 4346 #endif | 4135 #endif |
| 4347 } | 4136 } |
| 4348 | 4137 |
| 4349 void GrGLGpu::resetShaderCacheForTesting() const { | 4138 void GrGLGpu::resetShaderCacheForTesting() const { |
| 4350 fProgramCache->abandon(); | 4139 fProgramCache->abandon(); |
| 4351 } | 4140 } |
| 4352 | 4141 |
| 4353 /////////////////////////////////////////////////////////////////////////////// | 4142 /////////////////////////////////////////////////////////////////////////////// |
| 4354 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw( | 4143 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw( |
| 4355 GrGLGpu* gpu, | 4144 GrGLGpu* gpu, |
| 4356 const GrGLVertexBuffer* vbuffer, | 4145 const GrGLBuffer* vbuffer, |
| 4357 const GrGLIndexBuffer* ibuffer) { | 4146 const GrGLBuffer* ibuffer) { |
| 4358 SkASSERT(vbuffer); | 4147 SkASSERT(vbuffer); |
| 4359 GrGLuint vbufferID = vbuffer->bufferID(); | 4148 GrGLuint vbufferID = vbuffer->bufferID(); |
| 4360 GrGLuint* ibufferIDPtr = nullptr; | 4149 GrGLuint* ibufferIDPtr = nullptr; |
| 4361 GrGLuint ibufferID; | 4150 GrGLuint ibufferID; |
| 4362 if (ibuffer) { | 4151 if (ibuffer) { |
| 4363 ibufferID = ibuffer->bufferID(); | 4152 ibufferID = ibuffer->bufferID(); |
| 4364 ibufferIDPtr = &ibufferID; | 4153 ibufferIDPtr = &ibufferID; |
| 4365 } | 4154 } |
| 4366 return this->internalBind(gpu, vbufferID, ibufferIDPtr); | 4155 return this->internalBind(gpu, vbufferID, ibufferIDPtr); |
| 4367 } | 4156 } |
| (...skipping 49 matching lines...) |
| 4417 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() || | 4206 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() || |
| 4418 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) { | 4207 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) { |
| 4419 copyParams->fFilter = GrTextureParams::kNone_FilterMode; | 4208 copyParams->fFilter = GrTextureParams::kNone_FilterMode; |
| 4420 copyParams->fWidth = texture->width(); | 4209 copyParams->fWidth = texture->width(); |
| 4421 copyParams->fHeight = texture->height(); | 4210 copyParams->fHeight = texture->height(); |
| 4422 return true; | 4211 return true; |
| 4423 } | 4212 } |
| 4424 } | 4213 } |
| 4425 return false; | 4214 return false; |
| 4426 } | 4215 } |