Chromium Code Reviews

Unified Diff: src/gpu/GrBufferAllocPool.cpp

Issue 22850006: Replace uses of GrAssert by SkASSERT. (Closed) Base URL: https://skia.googlecode.com/svn/trunk
Patch Set: rebase Created 7 years, 4 months ago
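Note for reviewers unfamiliar with the macros: this CL is a mechanical rename from Ganesh's local GrAssert to the Skia-wide SkASSERT. A minimal sketch of the pattern, assuming SkASSERT's usual behavior in Skia at this time (provided by SkTypes.h, active only when SK_DEBUG is defined, compiled out of release builds, matching how GrAssert behaved under GR_DEBUG):

    // Minimal sketch, not from this CL. Assumes SkASSERT comes from
    // SkTypes.h and is a no-op unless SK_DEBUG is defined.
    #include "SkTypes.h"

    static void checkedStore(int* dst, int value) {
        // Before this CL: GrAssert(NULL != dst);
        SkASSERT(NULL != dst);  // debug-only check; compiled out in release
        *dst = value;
    }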
 
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrBufferAllocPool.h"
(...skipping 12 matching lines...)
 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
 
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
                                      bool frequentResetHint,
                                      size_t blockSize,
                                      int preallocBufferCnt) :
         fBlocks(GrMax(8, 2*preallocBufferCnt)) {
 
-    GrAssert(NULL != gpu);
+    SkASSERT(NULL != gpu);
     fGpu = gpu;
     fGpu->ref();
     fGpuIsReffed = true;
 
     fBufferType = bufferType;
     fFrequentResetHint = frequentResetHint;
     fBufferPtr = NULL;
     fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
 
     fBytesInUse = 0;
(...skipping 46 matching lines...)
     }
     if (fPreallocBuffers.count()) {
         // must set this after above loop.
         fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                    preallocBuffersInUse) %
                                   fPreallocBuffers.count();
     }
     // we may have created a large cpu mirror of a large VB. Reset the size
     // to match our pre-allocated VBs.
     fCpuData.reset(fMinBlockSize);
-    GrAssert(0 == fPreallocBuffersInUse);
+    SkASSERT(0 == fPreallocBuffersInUse);
     VALIDATE();
 }
 
 void GrBufferAllocPool::unlock() {
     VALIDATE();
 
     if (NULL != fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isLocked()) {
             block.fBuffer->unlock();
         } else {
             size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
             flushCpuData(fBlocks.back().fBuffer, flushSize);
         }
         fBufferPtr = NULL;
     }
     VALIDATE();
 }
 
 #if GR_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
     if (NULL != fBufferPtr) {
-        GrAssert(!fBlocks.empty());
+        SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isLocked()) {
             GrGeometryBuffer* buf = fBlocks.back().fBuffer;
-            GrAssert(buf->lockPtr() == fBufferPtr);
+            SkASSERT(buf->lockPtr() == fBufferPtr);
         } else {
-            GrAssert(fCpuData.get() == fBufferPtr);
+            SkASSERT(fCpuData.get() == fBufferPtr);
         }
     } else {
-        GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
+        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
     }
     size_t bytesInUse = 0;
     for (int i = 0; i < fBlocks.count() - 1; ++i) {
-        GrAssert(!fBlocks[i].fBuffer->isLocked());
+        SkASSERT(!fBlocks[i].fBuffer->isLocked());
     }
     for (int i = 0; i < fBlocks.count(); ++i) {
         size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
         bytesInUse += bytes;
-        GrAssert(bytes || unusedBlockAllowed);
+        SkASSERT(bytes || unusedBlockAllowed);
     }
 
-    GrAssert(bytesInUse == fBytesInUse);
+    SkASSERT(bytesInUse == fBytesInUse);
     if (unusedBlockAllowed) {
-        GrAssert((fBytesInUse && !fBlocks.empty()) ||
-                 (!fBytesInUse && (fBlocks.count() < 2)));
+        SkASSERT((fBytesInUse && !fBlocks.empty()) ||
+                 (!fBytesInUse && (fBlocks.count() < 2)));
     } else {
-        GrAssert((0 == fBytesInUse) == fBlocks.empty());
+        SkASSERT((0 == fBytesInUse) == fBlocks.empty());
     }
 }
 #endif
 
 void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t alignment,
                                    const GrGeometryBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();
 
-    GrAssert(NULL != buffer);
-    GrAssert(NULL != offset);
+    SkASSERT(NULL != buffer);
+    SkASSERT(NULL != offset);
 
     if (NULL != fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes,
                                       alignment);
         if ((size + pad) <= back.fBytesFree) {
             usedBytes += pad;
             *offset = usedBytes;
             *buffer = back.fBuffer;
             back.fBytesFree -= size + pad;
             fBytesInUse += size + pad;
             VALIDATE();
             return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
         }
     }
 
     // We could honor the space request using a partial update of the current
     // VB (if there is room). But we don't currently use draw calls to GL that
     // allow the driver to know that previously issued draws won't read from
     // the part of the buffer we update. Also, the GL buffer implementation
     // may be cheating on the actual buffer size by shrinking the buffer on
     // updateData() if the amount of data passed is less than the full buffer
     // size.
 
     if (!createBlock(size)) {
         return NULL;
     }
-    GrAssert(NULL != fBufferPtr);
+    SkASSERT(NULL != fBufferPtr);
 
     *offset = 0;
     BufferBlock& back = fBlocks.back();
     *buffer = back.fBuffer;
     back.fBytesFree -= size;
     fBytesInUse += size;
     VALIDATE();
     return fBufferPtr;
 }
 
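A note on the suballocation arithmetic in makeSpace() above: GrSizeAlignUpPad computes how many pad bytes must precede the new allocation so that *offset lands on a multiple of alignment. A standalone sketch of that arithmetic (the helper body below is inferred from its use here, not copied from the Ganesh headers, and assumes alignment > 0):

    // Illustrative stand-in for GrSizeAlignUpPad: bytes needed to round
    // n up to the next multiple of alignment (0 if already aligned).
    #include <stddef.h>

    static size_t alignUpPad(size_t n, size_t alignment) {
        return (alignment - n % alignment) % alignment;
    }

    // Example: with 10 bytes used and 4-byte alignment, alignUpPad(10, 4)
    // returns 2, so the next allocation starts at offset 12.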
(...skipping 22 matching lines...)
     VALIDATE();
 
     // if the putBack unwinds all the preallocated buffers then we will
     // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
     // will be decremented. It will reach zero if all blocks using preallocated
     // buffers are released.
     int preallocBuffersInUse = fPreallocBuffersInUse;
 
     while (bytes) {
         // caller shouldn't try to put back more than they've taken
-        GrAssert(!fBlocks.empty());
+        SkASSERT(!fBlocks.empty());
         BufferBlock& block = fBlocks.back();
         size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
         if (bytes >= bytesUsed) {
             bytes -= bytesUsed;
             fBytesInUse -= bytesUsed;
             // if we locked a vb to satisfy the make space and we're releasing
             // beyond it, then unlock it.
             if (block.fBuffer->isLocked()) {
                 block.fBuffer->unlock();
             }
             this->destroyBlock();
         } else {
             block.fBytesFree += bytes;
             fBytesInUse -= bytes;
             bytes = 0;
             break;
         }
     }
     if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
         fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                    preallocBuffersInUse) %
                                   fPreallocBuffers.count();
     }
     VALIDATE();
 }
 
 bool GrBufferAllocPool::createBlock(size_t requestSize) {
 
     size_t size = GrMax(requestSize, fMinBlockSize);
-    GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
+    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
 
     VALIDATE();
 
     BufferBlock& block = fBlocks.push_back();
 
     if (size == fMinBlockSize &&
         fPreallocBuffersInUse < fPreallocBuffers.count()) {
 
         uint32_t nextBuffer = (fPreallocBuffersInUse +
                                fPreallocBufferStartIdx) %
                               fPreallocBuffers.count();
         block.fBuffer = fPreallocBuffers[nextBuffer];
         block.fBuffer->ref();
         ++fPreallocBuffersInUse;
     } else {
         block.fBuffer = this->createBuffer(size);
         if (NULL == block.fBuffer) {
             fBlocks.pop_back();
             return false;
         }
     }
 
     block.fBytesFree = size;
     if (NULL != fBufferPtr) {
-        GrAssert(fBlocks.count() > 1);
+        SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isLocked()) {
             prev.fBuffer->unlock();
         } else {
             flushCpuData(prev.fBuffer,
                          prev.fBuffer->sizeInBytes() - prev.fBytesFree);
         }
         fBufferPtr = NULL;
     }
 
-    GrAssert(NULL == fBufferPtr);
+    SkASSERT(NULL == fBufferPtr);
 
     // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
     // Otherwise when buffer locking is supported:
     //     a) If the frequently reset hint is set we only lock when the requested size meets a
     //        threshold (since we don't expect it is likely that we will see more vertex data)
     //     b) If the hint is not set we lock if the buffer size is greater than the threshold.
     bool attemptLock = block.fBuffer->isCPUBacked();
     if (!attemptLock && fGpu->caps()->bufferLockSupport()) {
         if (fFrequentResetHint) {
             attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
         } else {
             attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
         }
     }
 
     if (attemptLock) {
         fBufferPtr = block.fBuffer->lock();
     }
 
     if (NULL == fBufferPtr) {
         fBufferPtr = fCpuData.reset(size);
     }
 
     VALIDATE(true);
 
     return true;
 }
 
 void GrBufferAllocPool::destroyBlock() {
-    GrAssert(!fBlocks.empty());
+    SkASSERT(!fBlocks.empty());
 
     BufferBlock& block = fBlocks.back();
     if (fPreallocBuffersInUse > 0) {
         uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                        fPreallocBufferStartIdx +
                                        (fPreallocBuffers.count() - 1)) %
                                       fPreallocBuffers.count();
         if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
             --fPreallocBuffersInUse;
         }
     }
-    GrAssert(!block.fBuffer->isLocked());
+    SkASSERT(!block.fBuffer->isLocked());
     block.fBuffer->unref();
     fBlocks.pop_back();
     fBufferPtr = NULL;
 }
 
 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
                                      size_t flushSize) {
-    GrAssert(NULL != buffer);
-    GrAssert(!buffer->isLocked());
-    GrAssert(fCpuData.get() == fBufferPtr);
-    GrAssert(flushSize <= buffer->sizeInBytes());
+    SkASSERT(NULL != buffer);
+    SkASSERT(!buffer->isLocked());
+    SkASSERT(fCpuData.get() == fBufferPtr);
+    SkASSERT(flushSize <= buffer->sizeInBytes());
     VALIDATE(true);
 
     if (fGpu->caps()->bufferLockSupport() &&
         flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
         void* data = buffer->lock();
         if (NULL != data) {
             memcpy(data, fBufferPtr, flushSize);
             buffer->unlock();
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
 
 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
     if (kIndex_BufferType == fBufferType) {
         return fGpu->createIndexBuffer(size, true);
     } else {
-        GrAssert(kVertex_BufferType == fBufferType);
+        SkASSERT(kVertex_BufferType == fBufferType);
         return fGpu->createVertexBuffer(size, true);
     }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                  bool frequentResetHint,
                                                  size_t bufferSize,
                                                  int preallocBufferCnt)
     : GrBufferAllocPool(gpu,
                         kVertex_BufferType,
                         frequentResetHint,
                         bufferSize,
                         preallocBufferCnt) {
 }
 
 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
                                          const GrVertexBuffer** buffer,
                                          int* startVertex) {
 
-    GrAssert(vertexCount >= 0);
-    GrAssert(NULL != buffer);
-    GrAssert(NULL != startVertex);
+    SkASSERT(vertexCount >= 0);
+    SkASSERT(NULL != buffer);
+    SkASSERT(NULL != startVertex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrVertexBuffer*) geomBuffer;
-    GrAssert(0 == offset % vertexSize);
+    SkASSERT(0 == offset % vertexSize);
     *startVertex = offset / vertexSize;
     return ptr;
 }
 
 bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                              int vertexCount,
                                              const void* vertices,
                                              const GrVertexBuffer** buffer,
                                              int* startVertex) {
     void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
(...skipping 25 matching lines...)
                         kIndex_BufferType,
                         frequentResetHint,
                         bufferSize,
                         preallocBufferCnt) {
 }
 
 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         const GrIndexBuffer** buffer,
                                         int* startIndex) {
 
-    GrAssert(indexCount >= 0);
-    GrAssert(NULL != buffer);
-    GrAssert(NULL != startIndex);
+    SkASSERT(indexCount >= 0);
+    SkASSERT(NULL != buffer);
+    SkASSERT(NULL != startIndex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrIndexBuffer*) geomBuffer;
-    GrAssert(0 == offset % sizeof(uint16_t));
+    SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = offset / sizeof(uint16_t);
     return ptr;
 }
 
 bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                            const void* indices,
                                            const GrIndexBuffer** buffer,
                                            int* startIndex) {
     void* space = makeSpace(indexCount, buffer, startIndex);
     if (NULL != space) {
         memcpy(space, indices, sizeof(uint16_t) * indexCount);
         return true;
     } else {
         return false;
     }
 }
 
 int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
     return INHERITED::preallocatedBufferSize() / sizeof(uint16_t);
 }
 
 int GrIndexBufferAllocPool::currentBufferIndices() const {
     return currentBufferItems(sizeof(uint16_t));
 }
