Chromium Code Reviews

Side by Side Diff: src/gpu/gl/GrGLGpu.cpp

Issue 1831133004: Revert of Consolidate GPU buffer implementations (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 4 years, 9 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrGLGpu.h" 8 #include "GrGLGpu.h"
9 #include "GrGLBuffer.h"
10 #include "GrGLGLSL.h" 9 #include "GrGLGLSL.h"
11 #include "GrGLStencilAttachment.h" 10 #include "GrGLStencilAttachment.h"
12 #include "GrGLTextureRenderTarget.h" 11 #include "GrGLTextureRenderTarget.h"
13 #include "GrGpuResourcePriv.h" 12 #include "GrGpuResourcePriv.h"
14 #include "GrMesh.h" 13 #include "GrMesh.h"
15 #include "GrPipeline.h" 14 #include "GrPipeline.h"
16 #include "GrPLSGeometryProcessor.h" 15 #include "GrPLSGeometryProcessor.h"
17 #include "GrRenderTargetPriv.h" 16 #include "GrRenderTargetPriv.h"
18 #include "GrSurfacePriv.h" 17 #include "GrSurfacePriv.h"
19 #include "GrTexturePriv.h" 18 #include "GrTexturePriv.h"
(...skipping 817 matching lines...)
837 } else { 836 } else {
838 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, 837 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
839 left, top, width, height, config, texels); 838 left, top, width, height, config, texels);
840 } 839 }
841 840
842 return success; 841 return success;
843 } 842 }
844 843
845 bool GrGLGpu::onTransferPixels(GrSurface* surface, 844 bool GrGLGpu::onTransferPixels(GrSurface* surface,
846 int left, int top, int width, int height, 845 int left, int top, int width, int height,
847 GrPixelConfig config, GrBuffer* transferBuffer, 846 GrPixelConfig config, GrTransferBuffer* buffer,
848 size_t offset, size_t rowBytes) { 847 size_t offset, size_t rowBytes) {
849 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); 848 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
850 849
851 if (!check_write_and_transfer_input(glTex, surface, config)) { 850 if (!check_write_and_transfer_input(glTex, surface, config)) {
852 return false; 851 return false;
853 } 852 }
854 853
855 // For the moment, can't transfer compressed data 854 // For the moment, can't transfer compressed data
856 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 855 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
857 return false; 856 return false;
858 } 857 }
859 858
860 this->setScratchTextureUnit(); 859 this->setScratchTextureUnit();
861 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); 860 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
862 861
863 SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type()); 862 SkASSERT(!buffer->isMapped());
864 SkASSERT(!transferBuffer->isMapped()); 863 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
865 const GrGLBuffer* glBuffer = reinterpret_cast<const GrGLBuffer*>(transferBuffer); 864 // bind the transfer buffer
866 this->bindBuffer(glBuffer->bufferID(), glBuffer->target()); 865 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
866 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
867 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
867 868
868 bool success = false; 869 bool success = false;
869 GrMipLevel mipLevel; 870 GrMipLevel mipLevel;
870 mipLevel.fPixels = transferBuffer; 871 mipLevel.fPixels = buffer;
871 mipLevel.fRowBytes = rowBytes; 872 mipLevel.fRowBytes = rowBytes;
872 SkSTArray<1, GrMipLevel> texels; 873 SkSTArray<1, GrMipLevel> texels;
873 texels.push_back(mipLevel); 874 texels.push_back(mipLevel);
874 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, 875 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
875 left, top, width, height, config, texels); 876 left, top, width, height, config, texels);
876 if (success) { 877 if (success) {
877 glTex->texturePriv().dirtyMipMaps(true); 878 glTex->texturePriv().dirtyMipMaps(true);
878 return true; 879 return true;
879 } 880 }
880 881
(...skipping 1044 matching lines...)
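
The transfer path above binds the buffer to a pixel-unpack target and then routes the upload through uploadTexData with kTransfer_UploadType, so the pixels are sourced from the bound buffer rather than from client memory. A minimal raw-GL sketch of that pattern, using plain GL calls instead of Skia's GR_GL_*/GL_CALL wrappers and a hypothetical helper name, assuming an RGBA8 2D texture:

// Sketch only: PBO-sourced texture upload (ES 3.0 / desktop GL with PBO support).
#include <GLES3/gl3.h>

void uploadFromTransferBuffer(GLuint tex, GLuint pbo, int width, int height, size_t offset) {
    glBindTexture(GL_TEXTURE_2D, tex);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);              // pixels now come from the buffer
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE,
                    reinterpret_cast<const void*>(offset)); // offset into the bound PBO
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
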
1925 format); 1926 format);
1926 return stencil; 1927 return stencil;
1927 } 1928 }
1928 1929
1929 //////////////////////////////////////////////////////////////////////////////// 1930 ////////////////////////////////////////////////////////////////////////////////
1930 1931
1931 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer 1932 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
1932 // objects are implemented as client-side-arrays on tile-deferred architectures. 1933 // objects are implemented as client-side-arrays on tile-deferred architectures.
1933 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW 1934 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
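
For context: the macro above deliberately maps Skia's "dynamic" buffers to GR_GL_STREAM_DRAW rather than GR_GL_DYNAMIC_DRAW so Chromium's command buffer can back them with client-side arrays on tile-deferred GPUs. A condensed sketch of the resulting hint selection (hypothetical helper, not part of the patch; assumes the GR_GL_* constants from gl/GrGLDefines.h and the macro defined above):

// Sketch only: how the usage hint is chosen when allocating a buffer.
static GrGLenum choose_usage_hint(bool dynamic) {
    // STREAM_DRAW, not DYNAMIC_DRAW: lets Chromium's GPU process substitute
    // client-side arrays for frequently re-specified buffers on tilers.
    return dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;
}
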
1934 1935
1935 GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) { 1936 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
1936 return GrGLBuffer::Create(this, type, size, accessPattern); 1937 GrGLVertexBuffer::Desc desc;
1938 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1939 desc.fSizeInBytes = size;
1940
1941 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1942 desc.fID = 0;
1943 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1944 return vertexBuffer;
1945 } else {
1946 desc.fID = 0;
1947 GL_CALL(GenBuffers(1, &desc.fID));
1948 if (desc.fID) {
1949 fHWGeometryState.setVertexBufferID(this, desc.fID);
1950 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1951 // make sure driver can allocate memory for this buffer
1952 GL_ALLOC_CALL(this->glInterface(),
1953 BufferData(GR_GL_ARRAY_BUFFER,
1954 (GrGLsizeiptr) desc.fSizeInBytes,
1955 nullptr, // data ptr
1956 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1957 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1958 GL_CALL(DeleteBuffers(1, &desc.fID));
1959 this->notifyVertexBufferDelete(desc.fID);
1960 return nullptr;
1961 }
1962 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1963 return vertexBuffer;
1964 }
1965 return nullptr;
1966 }
1967 }
1968
1969 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
1970 GrGLIndexBuffer::Desc desc;
1971 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1972 desc.fSizeInBytes = size;
1973
1974 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1975 desc.fID = 0;
1976 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1977 return indexBuffer;
1978 } else {
1979 desc.fID = 0;
1980 GL_CALL(GenBuffers(1, &desc.fID));
1981 if (desc.fID) {
1982 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
1983 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1984 // make sure driver can allocate memory for this buffer
1985 GL_ALLOC_CALL(this->glInterface(),
1986 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
1987 (GrGLsizeiptr) desc.fSizeInBytes,
1988 nullptr, // data ptr
1989 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1990 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1991 GL_CALL(DeleteBuffers(1, &desc.fID));
1992 this->notifyIndexBufferDelete(desc.fID);
1993 return nullptr;
1994 }
1995 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1996 return indexBuffer;
1997 }
1998 return nullptr;
1999 }
2000 }
2001
2002 GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) {
2003 GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType();
2004 if (GrGLCaps::kNone_TransferBufferType == xferBufferType) {
2005 return nullptr;
2006 }
2007
2008 GrGLTransferBuffer::Desc desc;
2009 bool toGpu = (kCpuToGpu_TransferType == xferType);
2010 desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;
2011
2012 desc.fSizeInBytes = size;
2013 desc.fID = 0;
2014 GL_CALL(GenBuffers(1, &desc.fID));
2015 if (desc.fID) {
2016 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
2017 // make sure driver can allocate memory for this buffer
2018 GrGLenum target;
2019 if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
2020 target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
2021 : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
2022 } else {
2023 SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
2024 target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
2025 }
2026 GL_CALL(BindBuffer(target, desc.fID));
2027 GL_ALLOC_CALL(this->glInterface(),
2028 BufferData(target,
2029 (GrGLsizeiptr) desc.fSizeInBytes,
2030 nullptr, // data ptr
2031 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
2032 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
2033 GL_CALL(DeleteBuffers(1, &desc.fID));
2034 return nullptr;
2035 }
2036 GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
2037 return transferBuffer;
2038 }
2039
2040 return nullptr;
1937 } 2041 }
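
For orientation while reading the revert: the left column's single factory routed every buffer kind through GrGLBuffer::Create, while the right column restores one factory per buffer type. A rough sketch of how call sites differ under the two schemes (pseudocode only, since these are GrGLGpu overrides; kVertex_GrBufferType and kDynamic_GrAccessPattern are shown as representative enum values and are not taken from this file):

// Consolidated API being reverted (left column):
//   GrBuffer* vb = onCreateBuffer(kVertex_GrBufferType, size, kDynamic_GrAccessPattern);
// Restored per-type API (right column):
//   GrVertexBuffer*   vb  = onCreateVertexBuffer(size, /*dynamic=*/true);
//   GrIndexBuffer*    ib  = onCreateIndexBuffer(size, /*dynamic=*/false);
//   GrTransferBuffer* xfb = onCreateTransferBuffer(size, kCpuToGpu_TransferType);
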
1938 2042
1939 void GrGLGpu::flushScissor(const GrScissorState& scissorState, 2043 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
1940 const GrGLIRect& rtViewport, 2044 const GrGLIRect& rtViewport,
1941 GrSurfaceOrigin rtOrigin) { 2045 GrSurfaceOrigin rtOrigin) {
1942 if (scissorState.enabled()) { 2046 if (scissorState.enabled()) {
1943 GrGLIRect scissor; 2047 GrGLIRect scissor;
1944 scissor.setRelativeTo(rtViewport, 2048 scissor.setRelativeTo(rtViewport,
1945 scissorState.rect().fLeft, 2049 scissorState.rect().fLeft,
1946 scissorState.rect().fTop, 2050 scissorState.rect().fTop,
(...skipping 64 matching lines...)
2011 // This must come after textures are flushed because a texture may need 2115 // This must come after textures are flushed because a texture may need
2012 // to be msaa-resolved (which will modify bound FBO state). 2116 // to be msaa-resolved (which will modify bound FBO state).
2013 this->flushRenderTarget(glRT, nullptr); 2117 this->flushRenderTarget(glRT, nullptr);
2014 2118
2015 return true; 2119 return true;
2016 } 2120 }
2017 2121
2018 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc, 2122 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
2019 const GrNonInstancedMesh& mesh, 2123 const GrNonInstancedMesh& mesh,
2020 size_t* indexOffsetInBytes) { 2124 size_t* indexOffsetInBytes) {
2021 const GrGLBuffer* vbuf; 2125 GrGLVertexBuffer* vbuf;
2022 vbuf = static_cast<const GrGLBuffer*>(mesh.vertexBuffer()); 2126 vbuf = (GrGLVertexBuffer*) mesh.vertexBuffer();
2023 2127
2024 SkASSERT(vbuf); 2128 SkASSERT(vbuf);
2025 SkASSERT(!vbuf->isMapped()); 2129 SkASSERT(!vbuf->isMapped());
2026 2130
2027 const GrGLBuffer* ibuf = nullptr; 2131 GrGLIndexBuffer* ibuf = nullptr;
2028 if (mesh.isIndexed()) { 2132 if (mesh.isIndexed()) {
2029 SkASSERT(indexOffsetInBytes); 2133 SkASSERT(indexOffsetInBytes);
2030 2134
2031 *indexOffsetInBytes = 0; 2135 *indexOffsetInBytes = 0;
2032 ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer()); 2136 ibuf = (GrGLIndexBuffer*)mesh.indexBuffer();
2033 2137
2034 SkASSERT(ibuf); 2138 SkASSERT(ibuf);
2035 SkASSERT(!ibuf->isMapped()); 2139 SkASSERT(!ibuf->isMapped());
2036 *indexOffsetInBytes += ibuf->baseOffset(); 2140 *indexOffsetInBytes += ibuf->baseOffset();
2037 } 2141 }
2038 GrGLAttribArrayState* attribState = 2142 GrGLAttribArrayState* attribState =
2039 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf); 2143 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf);
2040 2144
2041 int vaCount = primProc.numAttribs(); 2145 int vaCount = primProc.numAttribs();
2042 if (vaCount > 0) { 2146 if (vaCount > 0) {
(...skipping 68 matching lines...)
2111 fHWBoundTextureBufferID = 0; 2215 fHWBoundTextureBufferID = 0;
2112 } 2216 }
2113 break; 2217 break;
2114 case GR_GL_DRAW_INDIRECT_BUFFER: 2218 case GR_GL_DRAW_INDIRECT_BUFFER:
2115 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) { 2219 if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) {
2116 fHWBoundDrawIndirectBufferID = 0; 2220 fHWBoundDrawIndirectBufferID = 0;
2117 } 2221 }
2118 break; 2222 break;
2119 } 2223 }
2120 } 2224 }
2225
2226 static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
2227 static const GrGLenum grToGL[] = {
2228 GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage
2229 DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
2230 GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage
2231 GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage
2232 };
2233 static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");
2234
2235 return grToGL[usage];
2236 }
2237
2238 void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
2239 size_t currentSize, size_t requestedSize) {
2240 void* mapPtr = nullptr;
2241 GrGLenum glUsage = get_gl_usage(usage);
2242 bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage);
2243
2244 // Handling dirty context is done in the bindBuffer call
2245 switch (this->glCaps().mapBufferType()) {
2246 case GrGLCaps::kNone_MapBufferType:
2247 break;
2248 case GrGLCaps::kMapBuffer_MapBufferType:
2249 this->bindBuffer(id, type);
2250 // Let driver know it can discard the old data
2251 if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
2252 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2253 }
2254 GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
2255 break;
2256 case GrGLCaps::kMapBufferRange_MapBufferType: {
2257 this->bindBuffer(id, type);
2258 // Make sure the GL buffer size agrees with fDesc before mapping.
2259 if (currentSize != requestedSize) {
2260 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2261 }
2262 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
2263 // TODO: allow the client to specify invalidation in the stream draw case
2264 if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
2265 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
2266 }
2267 GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
2268 GR_GL_MAP_READ_BIT :
2269 writeAccess));
2270 break;
2271 }
2272 case GrGLCaps::kChromium_MapBufferType:
2273 this->bindBuffer(id, type);
2274 // Make sure the GL buffer size agrees with fDesc before mapping.
2275 if (currentSize != requestedSize) {
2276 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
2277 }
2278 GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
2279 GR_GL_READ_ONLY :
2280 GR_GL_WRITE_ONLY));
2281 break;
2282 }
2283 return mapPtr;
2284 }
2285
2286 void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
2287 size_t currentSize, const void* src, size_t srcSizeInBytes) {
2288 SkASSERT(srcSizeInBytes <= currentSize);
2289 // bindbuffer handles dirty context
2290 this->bindBuffer(id, type);
2291 GrGLenum glUsage = get_gl_usage(usage);
2292
2293 #if GR_GL_USE_BUFFER_DATA_NULL_HINT
2294 if (currentSize == srcSizeInBytes) {
2295 GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
2296 } else {
2297 // Before we call glBufferSubData we give the driver a hint using
2298 // glBufferData with nullptr. This makes the old buffer contents
2299 // inaccessible to future draws. The GPU may still be processing
2300 // draws that reference the old contents. With this hint it can
2301 // assign a different allocation for the new contents to avoid
2302 // flushing the gpu past draws consuming the old contents.
2303 // TODO I think we actually want to try calling bufferData here
2304 GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
2305 GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
2306 }
2307 #else
2308 // Note that we're cheating on the size here. Currently no methods
2309 // allow a partial update that preserves contents of non-updated
2310 // portions of the buffer (map() does a glBufferData(..size, nullptr..))
2311 GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage));
2312 #endif
2313 }
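
The GR_GL_USE_BUFFER_DATA_NULL_HINT branch above is the standard "buffer orphaning" idiom: re-specifying the store with a null pointer lets the driver detach the old allocation instead of stalling on draws that still reference it. A standalone raw-GL sketch of the same idea (plain GL calls and a hypothetical helper name, not Skia's GL_CALL wrappers):

// Sketch only: orphan the old data store, then upload the new contents.
#include <GLES3/gl3.h>

void orphanAndUpload(GLuint id, GLsizeiptr capacity, const void* src, GLsizeiptr srcSize) {
    glBindBuffer(GL_ARRAY_BUFFER, id);
    glBufferData(GL_ARRAY_BUFFER, capacity, nullptr, GL_DYNAMIC_DRAW); // orphan: old store detached
    glBufferSubData(GL_ARRAY_BUFFER, 0, srcSize, src);                 // write into fresh storage
}
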
2314
2315 void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
2316 // bind buffer handles the dirty context
2317 switch (this->glCaps().mapBufferType()) {
2318 case GrGLCaps::kNone_MapBufferType:
2319 SkDEBUGFAIL("Shouldn't get here.");
2320 return;
2321 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
2322 case GrGLCaps::kMapBufferRange_MapBufferType:
2323 this->bindBuffer(id, type);
2324 GL_CALL(UnmapBuffer(type));
2325 break;
2326 case GrGLCaps::kChromium_MapBufferType:
2327 this->bindBuffer(id, type);
2328 GL_CALL(UnmapBufferSubData(mapPtr));
2329 break;
2330 }
2331 }
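
mapBuffer/unmapBuffer above pick one of three paths from GrGLCaps::mapBufferType(): classic glMapBuffer, glMapBufferRange, or Chromium's MapBufferSubData extension. A minimal raw-GL sketch of the MapBufferRange path (plain GL, hypothetical helper names), assuming the whole buffer is being rewritten so the old contents can be invalidated:

// Sketch only: map a buffer for a full rewrite, then unmap it (ES 3.0 / GL 3.0).
#include <GLES3/gl3.h>

void* mapForWrite(GLuint id, GLsizeiptr size) {
    glBindBuffer(GL_ARRAY_BUFFER, id);
    return glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
                            GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
}

void unmapAfterWrite() {
    glUnmapBuffer(GL_ARRAY_BUFFER); // returns GL_FALSE if the store was lost while mapped
}
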
2121 2332
2122 void GrGLGpu::disableScissor() { 2333 void GrGLGpu::disableScissor() {
2123 if (kNo_TriState != fHWScissorSettings.fEnabled) { 2334 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2124 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); 2335 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2125 fHWScissorSettings.fEnabled = kNo_TriState; 2336 fHWScissorSettings.fEnabled = kNo_TriState;
2126 return; 2337 return;
2127 } 2338 }
2128 } 2339 }
2129 2340
2130 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { 2341 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
(...skipping 2002 matching lines...)
4133 #endif 4344 #endif
4134 } 4345 }
4135 4346
4136 void GrGLGpu::resetShaderCacheForTesting() const { 4347 void GrGLGpu::resetShaderCacheForTesting() const {
4137 fProgramCache->abandon(); 4348 fProgramCache->abandon();
4138 } 4349 }
4139 4350
4140 /////////////////////////////////////////////////////////////////////////////// 4351 ///////////////////////////////////////////////////////////////////////////////
4141 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw( 4352 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
4142 GrGLGpu* gpu, 4353 GrGLGpu* gpu,
4143 const GrGLBuffer* vbuffer, 4354 const GrGLVertexBuffer* vbuffer,
4144 const GrGLBuffer* ibuffer) { 4355 const GrGLIndexBuffer* ibuffer) {
4145 SkASSERT(vbuffer); 4356 SkASSERT(vbuffer);
4146 GrGLuint vbufferID = vbuffer->bufferID(); 4357 GrGLuint vbufferID = vbuffer->bufferID();
4147 GrGLuint* ibufferIDPtr = nullptr; 4358 GrGLuint* ibufferIDPtr = nullptr;
4148 GrGLuint ibufferID; 4359 GrGLuint ibufferID;
4149 if (ibuffer) { 4360 if (ibuffer) {
4150 ibufferID = ibuffer->bufferID(); 4361 ibufferID = ibuffer->bufferID();
4151 ibufferIDPtr = &ibufferID; 4362 ibufferIDPtr = &ibufferID;
4152 } 4363 }
4153 return this->internalBind(gpu, vbufferID, ibufferIDPtr); 4364 return this->internalBind(gpu, vbufferID, ibufferIDPtr);
4154 } 4365 }
(...skipping 49 matching lines...)
4204 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() || 4415 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() ||
4205 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) { 4416 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) {
4206 copyParams->fFilter = GrTextureParams::kNone_FilterMode; 4417 copyParams->fFilter = GrTextureParams::kNone_FilterMode;
4207 copyParams->fWidth = texture->width(); 4418 copyParams->fWidth = texture->width();
4208 copyParams->fHeight = texture->height(); 4419 copyParams->fHeight = texture->height();
4209 return true; 4420 return true;
4210 } 4421 }
4211 } 4422 }
4212 return false; 4423 return false;
4213 } 4424 }