Chromium Code Reviews
Side by Side Diff: src/gpu/gl/GrGLGpu.cpp

Issue 1825393002: Consolidate GPU buffer implementations (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: asserts Created 4 years, 8 months ago
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrGLGpu.h" 8 #include "GrGLGpu.h"
9 #include "GrGLBuffer.h"
9 #include "GrGLGLSL.h" 10 #include "GrGLGLSL.h"
10 #include "GrGLStencilAttachment.h" 11 #include "GrGLStencilAttachment.h"
11 #include "GrGLTextureRenderTarget.h" 12 #include "GrGLTextureRenderTarget.h"
12 #include "GrGpuResourcePriv.h" 13 #include "GrGpuResourcePriv.h"
13 #include "GrMesh.h" 14 #include "GrMesh.h"
14 #include "GrPipeline.h" 15 #include "GrPipeline.h"
15 #include "GrPLSGeometryProcessor.h" 16 #include "GrPLSGeometryProcessor.h"
16 #include "GrRenderTargetPriv.h" 17 #include "GrRenderTargetPriv.h"
17 #include "GrSurfacePriv.h" 18 #include "GrSurfacePriv.h"
18 #include "GrTexturePriv.h" 19 #include "GrTexturePriv.h"
(...skipping 817 matching lines...)
836 } else { 837 } else {
837 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, 838 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
838 left, top, width, height, config, texels); 839 left, top, width, height, config, texels);
839 } 840 }
840 841
841 return success; 842 return success;
842 } 843 }
843 844
844 bool GrGLGpu::onTransferPixels(GrSurface* surface, 845 bool GrGLGpu::onTransferPixels(GrSurface* surface,
845 int left, int top, int width, int height, 846 int left, int top, int width, int height,
846 GrPixelConfig config, GrTransferBuffer* buffer, 847 GrPixelConfig config, GrBuffer* transferBuffer,
847 size_t offset, size_t rowBytes) { 848 size_t offset, size_t rowBytes) {
848 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); 849 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
849 850
850 if (!check_write_and_transfer_input(glTex, surface, config)) { 851 if (!check_write_and_transfer_input(glTex, surface, config)) {
851 return false; 852 return false;
852 } 853 }
853 854
854 // For the moment, can't transfer compressed data 855 // For the moment, can't transfer compressed data
855 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 856 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
856 return false; 857 return false;
857 } 858 }
858 859
859 this->setScratchTextureUnit(); 860 this->setScratchTextureUnit();
860 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); 861 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
861 862
862 SkASSERT(!buffer->isMapped()); 863 SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type());
863 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer); 864 SkASSERT(!transferBuffer->isMapped());
864 // bind the transfer buffer 865 SkASSERT(!transferBuffer->isCPUBacked());
865 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() || 866 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
866 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType()); 867 this->bindBuffer(glBuffer->bufferID(), glBuffer->target());
867 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
868 868
869 bool success = false; 869 bool success = false;
870 GrMipLevel mipLevel; 870 GrMipLevel mipLevel;
871 mipLevel.fPixels = buffer; 871 mipLevel.fPixels = transferBuffer;
872 mipLevel.fRowBytes = rowBytes; 872 mipLevel.fRowBytes = rowBytes;
873 SkSTArray<1, GrMipLevel> texels; 873 SkSTArray<1, GrMipLevel> texels;
874 texels.push_back(mipLevel); 874 texels.push_back(mipLevel);
875 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, 875 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
876 left, top, width, height, config, texels); 876 left, top, width, height, config, texels);
877 if (success) { 877 if (success) {
878 glTex->texturePriv().dirtyMipMaps(true); 878 glTex->texturePriv().dirtyMipMaps(true);
879 return true; 879 return true;
880 } 880 }
881 881
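Note on the transfer path above: once the buffer is bound to the pixel-unpack target, the texture upload reads from the bound buffer at the given offset instead of from a client pointer. This is the standard pixel-buffer-object upload pattern; in raw GL it looks roughly like the sketch below (illustrative only, not code from this CL; the variable names mirror the parameters of onTransferPixels).

    // PBO upload pattern that onTransferPixels implements (illustrative).
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferID);
    glTexSubImage2D(GL_TEXTURE_2D, 0, left, top, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE,
                    reinterpret_cast<const void*>(offset));  // offset into the bound PBO
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);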
(...skipping 1044 matching lines...)
1926 format); 1926 format);
1927 return stencil; 1927 return stencil;
1928 } 1928 }
1929 1929
1930 //////////////////////////////////////////////////////////////////////////////// 1930 ////////////////////////////////////////////////////////////////////////////////
1931 1931
1932 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer 1932 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
1933 // objects are implemented as client-side-arrays on tile-deferred architectures. 1933 // objects are implemented as client-side-arrays on tile-deferred architectures.
1934 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW 1934 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
1935 1935
1936 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) { 1936 GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
1937 GrGLVertexBuffer::Desc desc; 1937 return GrGLBuffer::Create(this, type, size, accessPattern);
1938 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1939 desc.fSizeInBytes = size;
1940
1941 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1942 desc.fID = 0;
1943 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1944 return vertexBuffer;
1945 } else {
1946 desc.fID = 0;
1947 GL_CALL(GenBuffers(1, &desc.fID));
1948 if (desc.fID) {
1949 fHWGeometryState.setVertexBufferID(this, desc.fID);
1950 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1951 // make sure driver can allocate memory for this buffer
1952 GL_ALLOC_CALL(this->glInterface(),
1953 BufferData(GR_GL_ARRAY_BUFFER,
1954 (GrGLsizeiptr) desc.fSizeInBytes,
1955 nullptr, // data ptr
1956 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1957 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1958 GL_CALL(DeleteBuffers(1, &desc.fID));
1959 this->notifyVertexBufferDelete(desc.fID);
1960 return nullptr;
1961 }
1962 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1963 return vertexBuffer;
1964 }
1965 return nullptr;
1966 }
1967 }
1968
1969 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
1970 GrGLIndexBuffer::Desc desc;
1971 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1972 desc.fSizeInBytes = size;
1973
1974 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1975 desc.fID = 0;
1976 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1977 return indexBuffer;
1978 } else {
1979 desc.fID = 0;
1980 GL_CALL(GenBuffers(1, &desc.fID));
1981 if (desc.fID) {
1982 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
1983 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1984 // make sure driver can allocate memory for this buffer
1985 GL_ALLOC_CALL(this->glInterface(),
1986 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
1987 (GrGLsizeiptr) desc.fSizeInBytes,
1988 nullptr, // data ptr
1989 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1990 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1991 GL_CALL(DeleteBuffers(1, &desc.fID));
1992 this->notifyIndexBufferDelete(desc.fID);
1993 return nullptr;
1994 }
1995 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1996 return indexBuffer;
1997 }
1998 return nullptr;
1999 }
2000 }
2001
2002 GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) {
2003 GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType();
2004 if (GrGLCaps::kNone_TransferBufferType == xferBufferType) {
2005 return nullptr;
2006 }
2007
2008 GrGLTransferBuffer::Desc desc;
2009 bool toGpu = (kCpuToGpu_TransferType == xferType);
2010 desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;
2011
2012 desc.fSizeInBytes = size;
2013 desc.fID = 0;
2014 GL_CALL(GenBuffers(1, &desc.fID));
2015 if (desc.fID) {
2016 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
2017 // make sure driver can allocate memory for this buffer
2018 GrGLenum target;
2019 if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
2020 target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
2021 : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
2022 } else {
2023 SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
2024 target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
2025 }
2026 GL_CALL(BindBuffer(target, desc.fID));
2027 GL_ALLOC_CALL(this->glInterface(),
2028 BufferData(target,
2029 (GrGLsizeiptr) desc.fSizeInBytes,
2030 nullptr, // data ptr
2031 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
2032 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
2033 GL_CALL(DeleteBuffers(1, &desc.fID));
2034 return nullptr;
2035 }
2036 GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
2037 return transferBuffer;
2038 }
2039
2040 return nullptr;
2041 } 1938 }
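The three old factories on the left (onCreateVertexBuffer, onCreateIndexBuffer, onCreateTransferBuffer) collapse into the single onCreateBuffer override on the right, with the GenBuffers/BufferData bookkeeping moving into GrGLBuffer::Create. A rough sketch of what buffer requests look like after the consolidation; kVertex_/kIndex_/kXferCpuToGpu_GrBufferType appear elsewhere in this file, while the GrAccessPattern spellings below are assumptions, and real callers presumably go through a public GrGpu wrapper rather than the protected override:

    // Sketch only: buffer requests after the consolidation (not code from this CL).
    GrBuffer* vb   = this->onCreateBuffer(kVertex_GrBufferType, vertexSize,
                                          dynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern);
    GrBuffer* ib   = this->onCreateBuffer(kIndex_GrBufferType, indexSize,
                                          dynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern);
    GrBuffer* xfer = this->onCreateBuffer(kXferCpuToGpu_GrBufferType, xferSize, kStream_GrAccessPattern);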
2042 1939
2043 void GrGLGpu::flushScissor(const GrScissorState& scissorState, 1940 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
2044 const GrGLIRect& rtViewport, 1941 const GrGLIRect& rtViewport,
2045 GrSurfaceOrigin rtOrigin) { 1942 GrSurfaceOrigin rtOrigin) {
2046 if (scissorState.enabled()) { 1943 if (scissorState.enabled()) {
2047 GrGLIRect scissor; 1944 GrGLIRect scissor;
2048 scissor.setRelativeTo(rtViewport, 1945 scissor.setRelativeTo(rtViewport,
2049 scissorState.rect().fLeft, 1946 scissorState.rect().fLeft,
2050 scissorState.rect().fTop, 1947 scissorState.rect().fTop,
(...skipping 64 matching lines...)
2115 // This must come after textures are flushed because a texture may need 2012 // This must come after textures are flushed because a texture may need
2116 // to be msaa-resolved (which will modify bound FBO state). 2013 // to be msaa-resolved (which will modify bound FBO state).
2117 this->flushRenderTarget(glRT, nullptr); 2014 this->flushRenderTarget(glRT, nullptr);
2118 2015
2119 return true; 2016 return true;
2120 } 2017 }
2121 2018
2122 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc, 2019 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
2123 const GrNonInstancedMesh& mesh, 2020 const GrNonInstancedMesh& mesh,
2124 size_t* indexOffsetInBytes) { 2021 size_t* indexOffsetInBytes) {
2125 GrGLVertexBuffer* vbuf; 2022 const GrGLBuffer* vbuf;
2126 vbuf = (GrGLVertexBuffer*) mesh.vertexBuffer(); 2023 vbuf = static_cast<const GrGLBuffer*>(mesh.vertexBuffer());
2127 2024
2128 SkASSERT(vbuf); 2025 SkASSERT(vbuf);
2129 SkASSERT(!vbuf->isMapped()); 2026 SkASSERT(!vbuf->isMapped());
2027 SkASSERT(kVertex_GrBufferType == vbuf->type());
2130 2028
2131 GrGLIndexBuffer* ibuf = nullptr; 2029 const GrGLBuffer* ibuf = nullptr;
2132 if (mesh.isIndexed()) { 2030 if (mesh.isIndexed()) {
2133 SkASSERT(indexOffsetInBytes); 2031 SkASSERT(indexOffsetInBytes);
2134 2032
2135 *indexOffsetInBytes = 0; 2033 *indexOffsetInBytes = 0;
2136 ibuf = (GrGLIndexBuffer*)mesh.indexBuffer(); 2034 ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer());
2137 2035
2138 SkASSERT(ibuf); 2036 SkASSERT(ibuf);
2139 SkASSERT(!ibuf->isMapped()); 2037 SkASSERT(!ibuf->isMapped());
2038 SkASSERT(kIndex_GrBufferType == ibuf->type());
2140 *indexOffsetInBytes += ibuf->baseOffset(); 2039 *indexOffsetInBytes += ibuf->baseOffset();
2141 } 2040 }
2142 GrGLAttribArrayState* attribState = 2041 GrGLAttribArrayState* attribState =
2143 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf); 2042 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf);
2144 2043
2145 int vaCount = primProc.numAttribs(); 2044 int vaCount = primProc.numAttribs();
2146 if (vaCount > 0) { 2045 if (vaCount > 0) {
2147 2046
2148 GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride()); 2047 GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride());
2149 2048
(...skipping 66 matching lines...)
2216 } 2115 }
2217 break; 2116 break;
2218 case GR_GL_DRAW_INDIRECT_BUFFER: 2117 case GR_GL_DRAW_INDIRECT_BUFFER:
2219 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) { 2118 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) {
2220 fHWBoundDrawIndirectBufferID = 0; 2119 fHWBoundDrawIndirectBufferID = 0;
2221 } 2120 }
2222 break; 2121 break;
2223 } 2122 }
2224 } 2123 }
2225 2124
2226 static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
2227 static const GrGLenum grToGL[] = {
2228 GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage
2229 DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
2230 GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage
2231 GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage
2232 };
2233 static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");
2234
2235 return grToGL[usage];
2236 }
2237
2238 void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
2239 size_t currentSize, size_t requestedSize) {
2240 void* mapPtr = nullptr;
2241 GrGLenum glUsage = get_gl_usage(usage);
2242 bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage);
2243
2244 // Handling dirty context is done in the bindBuffer call
2245 switch (this->glCaps().mapBufferType()) {
2246 case GrGLCaps::kNone_MapBufferType:
2247 break;
2248 case GrGLCaps::kMapBuffer_MapBufferType:
2249 this->bindBuffer(id, type);
2250 // Let driver know it can discard the old data
2251 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
2252 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2253 }
2254 GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
2255 break;
2256 case GrGLCaps::kMapBufferRange_MapBufferType: {
2257 this->bindBuffer(id, type);
2258 // Make sure the GL buffer size agrees with fDesc before mapping.
2259 if (currentSize != requestedSize) {
2260 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2261 }
2262 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
2263 // TODO: allow the client to specify invalidation in the stream draw case
2264 if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
2265 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
2266 }
2267 GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
2268 GR_GL_MAP_READ_BIT :
2269 writeAccess));
2270 break;
2271 }
2272 case GrGLCaps::kChromium_MapBufferType:
2273 this->bindBuffer(id, type);
2274 // Make sure the GL buffer size agrees with fDesc before mapping.
2275 if (currentSize != requestedSize) {
2276 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2277 }
2278 GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
2279 GR_GL_READ_ONLY :
2280 GR_GL_WRITE_ONLY));
2281 break;
2282 }
2283 return mapPtr;
2284 }
2285
2286 void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
2287 size_t currentSize, const void* src, size_t srcSizeInBytes) {
2288 SkASSERT(srcSizeInBytes <= currentSize);
2289 // bindbuffer handles dirty context
2290 this->bindBuffer(id, type);
2291 GrGLenum glUsage = get_gl_usage(usage);
2292
2293 #if GR_GL_USE_BUFFER_DATA_NULL_HINT
2294 if (currentSize == srcSizeInBytes) {
2295 GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
2296 } else {
2297 // Before we call glBufferSubData we give the driver a hint using
2298 // glBufferData with nullptr. This makes the old buffer contents
2299 // inaccessible to future draws. The GPU may still be processing
2300 // draws that reference the old contents. With this hint it can
2301 // assign a different allocation for the new contents to avoid
2302 // flushing the gpu past draws consuming the old contents.
2303 // TODO I think we actually want to try calling bufferData here
2304 GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
2305 GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
2306 }
2307 #else
2308 // Note that we're cheating on the size here. Currently no methods
2309 // allow a partial update that preserves contents of non-updated
2310 // portions of the buffer (map() does a glBufferData(..size, nullptr..))
2311 GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage));
2312 #endif
2313 }
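The comment block above describes the standard GL buffer-orphaning idiom: re-specifying the store with a null pointer tells the driver the old contents are dead, so it can hand back fresh storage instead of stalling on draws that still read the previous data. In raw GL the hint boils down to the following (illustrative only, not part of this CL):

    // Orphan the old storage, then upload into the fresh allocation.
    glBufferData(GL_ARRAY_BUFFER, currentSize, nullptr, GL_DYNAMIC_DRAW);  // old contents become inaccessible
    glBufferSubData(GL_ARRAY_BUFFER, 0, srcSizeInBytes, src);              // fills the new allocation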
2314
2315 void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
2316 // bind buffer handles the dirty context
2317 switch (this->glCaps().mapBufferType()) {
2318 case GrGLCaps::kNone_MapBufferType:
2319 SkDEBUGFAIL("Shouldn't get here.");
2320 return;
2321 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
2322 case GrGLCaps::kMapBufferRange_MapBufferType:
2323 this->bindBuffer(id, type);
2324 GL_CALL(UnmapBuffer(type));
2325 break;
2326 case GrGLCaps::kChromium_MapBufferType:
2327 this->bindBuffer(id, type);
2328 GL_CALL(UnmapBufferSubData(mapPtr));
2329 break;
2330 }
2331 }
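These id/target-based map, bufferData, and unmap helpers are deleted here; under the consolidation their per-capability logic presumably lives inside GrGLBuffer itself. For reference, the write path of the kMapBufferRange_MapBufferType case being removed corresponds to this raw-GL sequence (illustrative sketch, not code from this CL):

    // Raw-GL shape of the MapBufferRange write path deleted above.
    glBindBuffer(GL_ARRAY_BUFFER, id);
    glBufferData(GL_ARRAY_BUFFER, requestedSize, nullptr, GL_DYNAMIC_DRAW);   // only when the size changed
    void* mapPtr = glMapBufferRange(GL_ARRAY_BUFFER, 0, requestedSize,
                                    GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
    // ... write vertex data through mapPtr ...
    glUnmapBuffer(GL_ARRAY_BUFFER);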
2332
2333 void GrGLGpu::disableScissor() { 2125 void GrGLGpu::disableScissor() {
2334 if (kNo_TriState != fHWScissorSettings.fEnabled) { 2126 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2335 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); 2127 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2336 fHWScissorSettings.fEnabled = kNo_TriState; 2128 fHWScissorSettings.fEnabled = kNo_TriState;
2337 return; 2129 return;
2338 } 2130 }
2339 } 2131 }
2340 2132
2341 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { 2133 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
2342 // parent class should never let us get here with no RT 2134 // parent class should never let us get here with no RT
(...skipping 2001 matching lines...)
4344 #endif 4136 #endif
4345 } 4137 }
4346 4138
4347 void GrGLGpu::resetShaderCacheForTesting() const { 4139 void GrGLGpu::resetShaderCacheForTesting() const {
4348 fProgramCache->abandon(); 4140 fProgramCache->abandon();
4349 } 4141 }
4350 4142
4351 /////////////////////////////////////////////////////////////////////////////// 4143 ///////////////////////////////////////////////////////////////////////////////
4352 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw( 4144 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
4353 GrGLGpu* gpu, 4145 GrGLGpu* gpu,
4354 const GrGLVertexBuffer* vbuffer, 4146 const GrGLBuffer* vbuffer,
4355 const GrGLIndexBuffer* ibuffer) { 4147 const GrGLBuffer* ibuffer) {
4356 SkASSERT(vbuffer); 4148 SkASSERT(vbuffer);
4357 GrGLuint vbufferID = vbuffer->bufferID(); 4149 GrGLuint vbufferID = vbuffer->bufferID();
4358 GrGLuint* ibufferIDPtr = nullptr; 4150 GrGLuint* ibufferIDPtr = nullptr;
4359 GrGLuint ibufferID; 4151 GrGLuint ibufferID;
4360 if (ibuffer) { 4152 if (ibuffer) {
4361 ibufferID = ibuffer->bufferID(); 4153 ibufferID = ibuffer->bufferID();
4362 ibufferIDPtr = &ibufferID; 4154 ibufferIDPtr = &ibufferID;
4363 } 4155 }
4364 return this->internalBind(gpu, vbufferID, ibufferIDPtr); 4156 return this->internalBind(gpu, vbufferID, ibufferIDPtr);
4365 } 4157 }
(...skipping 49 matching lines...)
4415 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() || 4207 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() ||
4416 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) { 4208 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) {
4417 copyParams->fFilter = GrTextureParams::kNone_FilterMode; 4209 copyParams->fFilter = GrTextureParams::kNone_FilterMode;
4418 copyParams->fWidth = texture->width(); 4210 copyParams->fWidth = texture->width();
4419 copyParams->fHeight = texture->height(); 4211 copyParams->fHeight = texture->height();
4420 return true; 4212 return true;
4421 } 4213 }
4422 } 4214 }
4423 return false; 4215 return false;
4424 } 4216 }