Chromium Code Reviews

Diff: src/gpu/GrBufferAllocPool.cpp (unified view: unchanged context lines, '-' removed, '+' added)

Issue 544233002: "NULL !=" = NULL (Closed) Base URL: https://skia.googlesource.com/skia.git@are
Patch Set: rebase Created 6 years, 3 months ago
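The entire CL is a mechanical style cleanup: explicit "NULL !=" comparisons are dropped wherever the code only needs a pointer's truth value, relying on C++'s implicit pointer-to-bool conversion. Note that in this file "NULL ==" tests (e.g. in createBlock below) and NULL assignments are left untouched by this patch set. A minimal standalone sketch of the before/after pattern (SkASSERT is stubbed here with the standard assert for illustration; this is not Skia's actual macro):

    #include <cassert>
    #include <cstddef>

    #define SkASSERT(cond) assert(cond)  // stub for illustration only

    void before(int* p) {
        SkASSERT(NULL != p);       // old style: explicit comparison against NULL
        if (NULL != p) { *p = 0; }
    }

    void after(int* p) {
        SkASSERT(p);               // new style: implicit pointer-to-bool test
        if (p) { *p = 0; }
    }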
 
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrBufferAllocPool.h"
(...skipping 24 matching lines...)
     (block).fBuffer->unmap();                                          \
 } while (false)
 
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
                                      bool frequentResetHint,
                                      size_t blockSize,
                                      int preallocBufferCnt) :
         fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
 
-    SkASSERT(NULL != gpu);
+    SkASSERT(gpu);
     fGpu = gpu;
     fGpu->ref();
     fGpuIsReffed = true;
 
     fBufferType = bufferType;
     fFrequentResetHint = frequentResetHint;
     fBufferPtr = NULL;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
 
     fBytesInUse = 0;
 
     fPreallocBuffersInUse = 0;
     fPreallocBufferStartIdx = 0;
     for (int i = 0; i < preallocBufferCnt; ++i) {
         GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
-        if (NULL != buffer) {
+        if (buffer) {
             *fPreallocBuffers.append() = buffer;
         }
     }
 }
 
 GrBufferAllocPool::~GrBufferAllocPool() {
     VALIDATE();
     if (fBlocks.count()) {
         GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
(...skipping 37 matching lines...)
     // we may have created a large cpu mirror of a large VB. Reset the size
     // to match our pre-allocated VBs.
     fCpuData.reset(fMinBlockSize);
     SkASSERT(0 == fPreallocBuffersInUse);
     VALIDATE();
 }
 
 void GrBufferAllocPool::unmap() {
     VALIDATE();
 
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isMapped()) {
             UNMAP_BUFFER(block);
         } else {
             size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
             this->flushCpuData(fBlocks.back(), flushSize);
         }
         fBufferPtr = NULL;
     }
     VALIDATE();
 }
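One identity recurs throughout the pool: the bytes pending in the active block are never stored, only derived as fBuffer->gpuMemorySize() - fBytesFree. For example (sizes illustrative), a 32768-byte block with 2048 bytes still free has 32768 - 2048 = 30720 bytes that unmap() must push to the GPU, either via UNMAP_BUFFER or via flushCpuData.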
 
 #ifdef SK_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isMapped()) {
             GrGeometryBuffer* buf = fBlocks.back().fBuffer;
             SkASSERT(buf->mapPtr() == fBufferPtr);
         } else {
             SkASSERT(fCpuData.get() == fBufferPtr);
         }
     } else {
         SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
     }
(...skipping 16 matching lines...)
     }
 }
 #endif
 
 void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t alignment,
                                    const GrGeometryBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();
 
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != offset);
+    SkASSERT(buffer);
+    SkASSERT(offset);
 
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes,
                                       alignment);
         if ((size + pad) <= back.fBytesFree) {
             usedBytes += pad;
             *offset = usedBytes;
             *buffer = back.fBuffer;
             back.fBytesFree -= size + pad;
             fBytesInUse += size + pad;
             VALIDATE();
             return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
         }
     }
 
     // We could honor the space request using a partial update of the current
     // VB (if there is room). But we don't currently use draw calls to GL that
     // allow the driver to know that previously issued draws won't read from
     // the part of the buffer we update. Also, the GL buffer implementation
     // may be cheating on the actual buffer size by shrinking the buffer on
     // updateData() if the amount of data passed is less than the full buffer
     // size.
 
     if (!createBlock(size)) {
         return NULL;
     }
-    SkASSERT(NULL != fBufferPtr);
+    SkASSERT(fBufferPtr);
 
     *offset = 0;
     BufferBlock& back = fBlocks.back();
     *buffer = back.fBuffer;
     back.fBytesFree -= size;
     fBytesInUse += size;
     VALIDATE();
     return fBufferPtr;
 }
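The suballocation above turns on GrSizeAlignUpPad, which yields the padding needed to round the write cursor up to the next multiple of alignment. A minimal standalone sketch of that arithmetic, assuming the conventional definition (this body is an illustration, not Skia's actual implementation):

    #include <cstddef>
    #include <cstdio>

    // Assumed behavior of GrSizeAlignUpPad: bytes of padding that round x up
    // to the next multiple of alignment (0 when x is already aligned).
    static size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }

    int main() {
        size_t usedBytes = 100;                        // bytes already written
        size_t pad = GrSizeAlignUpPad(usedBytes, 16);  // 16-byte alignment -> pad == 12
        printf("next allocation starts at %zu\n", usedBytes + pad);  // prints 112
        return 0;
    }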
 
 int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
     VALIDATE();
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         const BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
         return static_cast<int>((back.fBytesFree - pad) / itemSize);
     } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
         return static_cast<int>(fMinBlockSize / itemSize);
     }
     return 0;
 }
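As a worked example of this item count (numbers illustrative): with a 32768-byte active block of which 110 bytes are used and 20-byte items, bytesFree = 32768 - 110 = 32658, pad = GrSizeAlignUpPad(110, 20) = 10, so (32658 - 10) / 20 = 1632 whole items still fit in the block.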
 
(...skipping 63 matching lines...)
         ++fPreallocBuffersInUse;
     } else {
         block.fBuffer = this->createBuffer(size);
         if (NULL == block.fBuffer) {
             fBlocks.pop_back();
             return false;
         }
     }
 
     block.fBytesFree = size;
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isMapped()) {
             UNMAP_BUFFER(prev);
         } else {
             this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
         fBufferPtr = NULL;
     }
 
(...skipping 40 matching lines...)
         }
     }
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
     fBufferPtr = NULL;
 }
 
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
-    SkASSERT(NULL != buffer);
+    SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData.get() == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
 
     if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
         flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
         void* data = buffer->map();
-        if (NULL != data) {
+        if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
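flushCpuData prefers mapping the GPU buffer and memcpy-ing into it when the backend supports mapping and the flush is large enough to amortize the map/unmap cost; otherwise it falls back to updateData. A standalone sketch of that decision under stand-in types (the StubBuffer plumbing and the 1 << 15 threshold are assumptions for illustration, not Skia's actual constants or API):

    #include <cstddef>
    #include <cstring>

    static const size_t kGeomBufferMapThreshold = 1 << 15;  // assumed value

    struct StubBuffer {
        bool mappable;                   // models caps()->mapBufferFlags()
        char storage[1 << 16];
        void* map() { return mappable ? storage : NULL; }
        void unmap() {}
        void updateData(const void* src, size_t n) { std::memcpy(storage, src, n); }
    };

    // Same policy as flushCpuData above: for a large flush on a backend that
    // supports mapping, map + memcpy + unmap; otherwise hand the whole CPU
    // mirror to updateData.
    void flushCpuData(StubBuffer* buffer, const void* cpuData, size_t flushSize) {
        if (buffer->mappable && flushSize > kGeomBufferMapThreshold) {
            void* data = buffer->map();
            if (data) {
                std::memcpy(data, cpuData, flushSize);
                buffer->unmap();
                return;
            }
        }
        buffer->updateData(cpuData, flushSize);
    }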
 
 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
(...skipping 17 matching lines...)
                         bufferSize,
                         preallocBufferCnt) {
 }
 
 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
                                          const GrVertexBuffer** buffer,
                                          int* startVertex) {
 
     SkASSERT(vertexCount >= 0);
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != startVertex);
+    SkASSERT(buffer);
+    SkASSERT(startVertex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }
 
 bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                              int vertexCount,
                                              const void* vertices,
                                              const GrVertexBuffer** buffer,
                                              int* startVertex) {
     void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
-    if (NULL != space) {
+    if (space) {
         memcpy(space,
                vertices,
                vertexSize * vertexCount);
         return true;
     } else {
         return false;
     }
 }
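appendVertices is makeSpace plus a memcpy: reserve space aligned to the vertex size, copy the caller's data in, and report the start vertex where it landed. Because the request is aligned to vertexSize, the byte offset always converts losslessly to a vertex index, which is what the SkASSERT in makeSpace encodes. A toy arena version of the same reserve-copy-report pattern (every name here is a stand-in, not Skia API):

    #include <cassert>
    #include <cstring>
    #include <vector>

    // Toy stand-in for the pool: grows a byte arena in whole-vertex steps and
    // reports the start vertex of each append, mirroring appendVertices above.
    struct VertexArena {
        std::vector<char> bytes;

        bool appendVertices(size_t vertexSize, int vertexCount,
                            const void* vertices, int* startVertex) {
            size_t offset = bytes.size();
            assert(0 == offset % vertexSize);  // same invariant as the SkASSERT above
            bytes.resize(offset + vertexSize * vertexCount);
            std::memcpy(bytes.data() + offset, vertices, vertexSize * vertexCount);
            *startVertex = static_cast<int>(offset / vertexSize);
            return true;
        }
    };

    int main() {
        struct Vertex { float x, y; } quad[4] = {{0,0}, {1,0}, {1,1}, {0,1}};
        VertexArena arena;
        int startVertex = -1;
        arena.appendVertices(sizeof(Vertex), 4, quad, &startVertex);  // startVertex == 0
        arena.appendVertices(sizeof(Vertex), 4, quad, &startVertex);  // startVertex == 4
        return 0;
    }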
 
 int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const {
(...skipping 15 matching lines...)
                        frequentResetHint,
                        bufferSize,
                        preallocBufferCnt) {
 }
 
 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         const GrIndexBuffer** buffer,
                                         int* startIndex) {
 
     SkASSERT(indexCount >= 0);
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != startIndex);
+    SkASSERT(buffer);
+    SkASSERT(startIndex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
 
 bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                            const void* indices,
                                            const GrIndexBuffer** buffer,
                                            int* startIndex) {
     void* space = makeSpace(indexCount, buffer, startIndex);
-    if (NULL != space) {
+    if (space) {
         memcpy(space, indices, sizeof(uint16_t) * indexCount);
         return true;
     } else {
         return false;
     }
 }
 
 int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
     return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t));
 }
 
 int GrIndexBufferAllocPool::currentBufferIndices() const {
     return currentBufferItems(sizeof(uint16_t));
 }
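Since the index pool hard-codes 16-bit indices, every byte quantity converts to an index count by dividing by sizeof(uint16_t). As a worked example (block size illustrative): a 32768-byte preallocated buffer holds 32768 / 2 = 16384 indices, i.e. 16384 / 6 = 2730 whole indexed quads at two triangles (six indices) apiece.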
