Chromium Code Reviews

Unified Diff: src/gpu/gl/GrGLBufferImpl.cpp

Issue 262963002: Revert of "Add support for glMapBufferRange. Use glMapBufferRange and glMapBufferSubData." (Closed)
Base URL: https://skia.googlecode.com/svn/trunk
Patch Set: Created 6 years, 7 months ago
 /*
  * Copyright 2013 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "GrGLBufferImpl.h"
 #include "GrGpuGL.h"

 #define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)

 #ifdef SK_DEBUG
 #define VALIDATE() this->validate()
 #else
 #define VALIDATE() do {} while(false)
 #endif

 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
 // objects are implemented as client-side-arrays on tile-deferred architectures.
 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW

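For context only (not part of the patch): the macro above simply stands in for the usage hint handed to glBufferData for dynamic buffers, as in the sketch below. The helper and its bool parameter are hypothetical; the real selection happens inline in lock() and updateData() further down this file.

// Sketch only: a dynamic buffer would otherwise presumably use GR_GL_DYNAMIC_DRAW;
// GR_GL_STREAM_DRAW is substituted so Chromium's GPU process can back the buffer
// with client-side arrays on tile-deferred GPUs, per the comment above.
static GrGLenum chooseUsageHint(bool dynamic) {
    return dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;
}
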
 GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferType)
     : fDesc(desc)
     , fBufferType(bufferType)
     , fLockPtr(NULL) {
     if (0 == desc.fID) {
         fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
-        fGLSizeInBytes = 0;
     } else {
         fCPUData = NULL;
-        // We assume that the GL buffer was created at the desc's size initially.
-        fGLSizeInBytes = fDesc.fSizeInBytes;
     }
     VALIDATE();
 }

 void GrGLBufferImpl::release(GrGpuGL* gpu) {
-    VALIDATE();
     // make sure we've not been abandoned or already released
     if (NULL != fCPUData) {
+        VALIDATE();
         sk_free(fCPUData);
         fCPUData = NULL;
     } else if (fDesc.fID && !fDesc.fIsWrapped) {
+        VALIDATE();
         GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID));
         if (GR_GL_ARRAY_BUFFER == fBufferType) {
             gpu->notifyVertexBufferDelete(fDesc.fID);
         } else {
             SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
             gpu->notifyIndexBufferDelete(fDesc.fID);
         }
         fDesc.fID = 0;
-        fGLSizeInBytes = 0;
     }
     fLockPtr = NULL;
-    VALIDATE();
 }

 void GrGLBufferImpl::abandon() {
     fDesc.fID = 0;
-    fGLSizeInBytes = 0;
     fLockPtr = NULL;
     sk_free(fCPUData);
     fCPUData = NULL;
-    VALIDATE();
 }

 void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
     VALIDATE();
     if (GR_GL_ARRAY_BUFFER == fBufferType) {
         gpu->bindVertexBuffer(fDesc.fID);
     } else {
         SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
         gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID);
     }
-    VALIDATE();
 }

 void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
     VALIDATE();
     SkASSERT(!this->isLocked());
     if (0 == fDesc.fID) {
         fLockPtr = fCPUData;
-    } else {
-        switch (gpu->glCaps().mapBufferType()) {
-            case GrGLCaps::kNone_MapBufferType:
-                VALIDATE();
-                return NULL;
-            case GrGLCaps::kMapBuffer_MapBufferType:
-                this->bind(gpu);
-                // Let driver know it can discard the old data
-                if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
-                    fGLSizeInBytes = fDesc.fSizeInBytes;
-                    GL_CALL(gpu,
-                            BufferData(fBufferType, fGLSizeInBytes, NULL,
-                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
-                }
-                GR_GL_CALL_RET(gpu->glInterface(), fLockPtr,
-                               MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
-                break;
-            case GrGLCaps::kMapBufferRange_MapBufferType: {
-                this->bind(gpu);
-                // Make sure the GL buffer size agrees with fDesc before mapping.
-                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
-                    fGLSizeInBytes = fDesc.fSizeInBytes;
-                    GL_CALL(gpu,
-                            BufferData(fBufferType, fGLSizeInBytes, NULL,
-                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
-                }
-                static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
-                                                    GR_GL_MAP_WRITE_BIT;
-                GR_GL_CALL_RET(gpu->glInterface(),
-                               fLockPtr,
-                               MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
-                break;
-            }
-            case GrGLCaps::kChromium_MapBufferType:
-                this->bind(gpu);
-                // Make sure the GL buffer size agrees with fDesc before mapping.
-                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
-                    fGLSizeInBytes = fDesc.fSizeInBytes;
-                    GL_CALL(gpu,
-                            BufferData(fBufferType, fGLSizeInBytes, NULL,
-                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
-                }
-                GR_GL_CALL_RET(gpu->glInterface(),
-                               fLockPtr,
-                               MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
-                break;
-        }
+    } else if (gpu->caps()->bufferLockSupport()) {
+        this->bind(gpu);
+        // Let driver know it can discard the old data
+        GL_CALL(gpu, BufferData(fBufferType,
+                                (GrGLsizeiptr) fDesc.fSizeInBytes,
+                                NULL,
+                                fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
+        GR_GL_CALL_RET(gpu->glInterface(),
+                       fLockPtr,
+                       MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
     }
-    VALIDATE();
     return fLockPtr;
 }

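For context, not part of the patch: the hunk above is the heart of the revert. The removed lock() chose among the mapping paths reported by GrGLCaps (glMapBuffer, glMapBufferRange, and Chromium's command-buffer MapBufferSubData); the restored code always orphans the store and maps with glMapBuffer whenever the caps report bufferLockSupport(). Below is a minimal sketch of the two main strategies in raw GL, stripped of Skia's GR_GL_CALL / GrGLInterface wrappers. It assumes a GL 3.0+ or GL_ARB_map_buffer_range context and a loader (GLEW is named purely so the sketch is self-contained); the helper names are hypothetical.

#include <GL/glew.h>   // illustrative loader choice; any GL loader works

// Strategy the revert keeps: orphan the store with a NULL glBufferData, then map it.
static void* mapByOrphaning(GLenum target, GLsizeiptr size, GLenum usage) {
    glBufferData(target, size, NULL, usage);     // old contents discarded by the driver
    return glMapBuffer(target, GL_WRITE_ONLY);
}

// Strategy the revert removes: let the map call itself invalidate the old contents,
// so no separate glBufferData(NULL) round trip is needed.
static void* mapByInvalidatingRange(GLenum target, GLsizeiptr size) {
    return glMapBufferRange(target, 0, size,
                            GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
}

The removed Chromium-specific path maps through the command buffer's MapBufferSubData entry point and has no plain desktop-GL equivalent, so it is not sketched here.
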
 void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
     VALIDATE();
     SkASSERT(this->isLocked());
     if (0 != fDesc.fID) {
-        switch (gpu->glCaps().mapBufferType()) {
-            case GrGLCaps::kNone_MapBufferType:
-                SkDEBUGFAIL("Shouldn't get here.");
-                return;
-            case GrGLCaps::kMapBuffer_MapBufferType: // fall through
-            case GrGLCaps::kMapBufferRange_MapBufferType:
-                this->bind(gpu);
-                GL_CALL(gpu, UnmapBuffer(fBufferType));
-                break;
-            case GrGLCaps::kChromium_MapBufferType:
-                this->bind(gpu);
-                GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fLockPtr));
-                break;
-        }
+        SkASSERT(gpu->caps()->bufferLockSupport());
+        this->bind(gpu);
+        GL_CALL(gpu, UnmapBuffer(fBufferType));
     }
     fLockPtr = NULL;
 }

 bool GrGLBufferImpl::isLocked() const {
     VALIDATE();
     return NULL != fLockPtr;
 }

 bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
 (... 12 matching lines skipped ...)
 #if GR_GL_USE_BUFFER_DATA_NULL_HINT
     if (fDesc.fSizeInBytes == srcSizeInBytes) {
         GL_CALL(gpu, BufferData(fBufferType, (GrGLsizeiptr) srcSizeInBytes, src, usage));
     } else {
         // Before we call glBufferSubData we give the driver a hint using
         // glBufferData with NULL. This makes the old buffer contents
         // inaccessible to future draws. The GPU may still be processing
         // draws that reference the old contents. With this hint it can
         // assign a different allocation for the new contents to avoid
         // flushing the gpu past draws consuming the old contents.
-        fGLSizeInBytes = fDesc.fSizeInBytes;
-        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL, usage));
+        GL_CALL(gpu, BufferData(fBufferType, (GrGLsizeiptr) fDesc.fSizeInBytes, NULL, usage));
         GL_CALL(gpu, BufferSubData(fBufferType, 0, (GrGLsizeiptr) srcSizeInBytes, src));
     }
 #else
     // Note that we're cheating on the size here. Currently no methods
     // allow a partial update that preserves contents of non-updated
     // portions of the buffer (lock() does a glBufferData(..size, NULL..))
     bool doSubData = false;
 #if GR_GL_MAC_BUFFER_OBJECT_PERFOMANCE_WORKAROUND
     static int N = 0;
     // 128 was chosen experimentally. At 256 a slight hitchiness was noticed
     // when dragging a Chromium window around with a canvas tab backgrounded.
     doSubData = 0 == (N % 128);
     ++N;
 #endif
     if (doSubData) {
         // The workaround is to do a glBufferData followed by glBufferSubData.
         // Chromium's command buffer may turn a glBufferSubData where the size
         // exactly matches the buffer size into a glBufferData. So we tack 1
         // extra byte onto the glBufferData.
-        fGLSizeInBytes = srcSizeInBytes + 1;
-        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL, usage));
+        GL_CALL(gpu, BufferData(fBufferType, srcSizeInBytes + 1, NULL, usage));
         GL_CALL(gpu, BufferSubData(fBufferType, 0, srcSizeInBytes, src));
     } else {
-        fGLSizeInBytes = srcSizeInBytes;
-        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, src, usage));
+        GL_CALL(gpu, BufferData(fBufferType, srcSizeInBytes, src, usage));
     }
 #endif
     return true;
 }

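The comments in the hunk above describe two tricks that are easier to see in isolation: the orphan-then-upload hint (re-specify the store with NULL so the driver can hand back fresh memory instead of stalling on in-flight draws that still read the old contents), and the Mac workaround of padding glBufferData by one byte so that the following full-size glBufferSubData cannot be folded back into a glBufferData by Chromium's command buffer. A raw-GL sketch, outside Skia's wrappers, with hypothetical helper names and the same loader assumption as the earlier sketch:

#include <GL/glew.h>   // illustrative loader choice

// Orphan-then-upload: discard the old contents, then write only the new bytes.
static void uploadWithOrphanHint(GLenum target, GLsizeiptr bufferSize,
                                 const void* src, GLsizeiptr srcSize, GLenum usage) {
    glBufferData(target, bufferSize, NULL, usage);   // old contents become inaccessible
    glBufferSubData(target, 0, srcSize, src);        // upload the fresh data
}

// Mac workaround: over-allocate by one byte so the sub-data upload is never exactly
// the full buffer size and therefore cannot be rewritten as a glBufferData.
static void uploadPadded(GLenum target, const void* src, GLsizeiptr srcSize, GLenum usage) {
    glBufferData(target, srcSize + 1, NULL, usage);
    glBufferSubData(target, 0, srcSize, src);
}
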
 void GrGLBufferImpl::validate() const {
     SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
     // The following assert isn't valid when the buffer has been abandoned:
     // SkASSERT((0 == fDesc.fID) == (NULL != fCPUData));
     SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
-    SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
-    SkASSERT(NULL == fLockPtr || fGLSizeInBytes == fDesc.fSizeInBytes);
     SkASSERT(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
 }