OLD | NEW |
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 | 9 |
10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
(...skipping 20 matching lines...)
31 "GrBufferAllocPool Unmapping Buffer",
\ | 31 "GrBufferAllocPool Unmapping Buffer",
\ |
32 TRACE_EVENT_SCOPE_THREAD,
\ | 32 TRACE_EVENT_SCOPE_THREAD,
\ |
33 "percent_unwritten",
\ | 33 "percent_unwritten",
\ |
34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemor
ySize()); \ | 34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemor
ySize()); \ |
35 (block).fBuffer->unmap();
\ | 35 (block).fBuffer->unmap();
\ |
36 } while (false) | 36 } while (false) |
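The do { ... } while (false) wrapper above makes the multi-line UNMAP_BUFFER macro expand to a single statement, so it composes safely with unbraced if/else. A minimal sketch of the idiom, with illustrative names that are not from this file:

    #include <cstdio>

    struct Buffer {
        void unmap() { std::puts("unmap"); }
        void unref() { std::puts("unref"); }
    };

    // Two statements wrapped into one: the macro nests safely under an
    // unbraced if, and the trailing else still binds where intended.
    #define CLEANUP(obj)    \
        do {                \
            (obj)->unmap(); \
            (obj)->unref(); \
        } while (false)

    void use(Buffer* b, bool failed) {
        if (failed)
            CLEANUP(b);  // expands to a single statement
        else
            std::puts("proceed");
    }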
37 | 37 |
38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, | 38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, |
39 BufferType bufferType, | 39 BufferType bufferType, |
40 size_t blockSize, | 40 size_t blockSize, |
41 int preallocBufferCnt) : | 41 int preallocBufferCnt) |
42 fBlocks(SkTMax(8, 2*preallocBufferCnt)) { | 42 : fBlocks(SkTMax(8, 2*preallocBufferCnt)) { |
43 | 43 |
44 fGpu = SkRef(gpu); | 44 fGpu = SkRef(gpu); |
45 | 45 |
46 fBufferType = bufferType; | 46 fBufferType = bufferType; |
47 fBufferPtr = NULL; | 47 fBufferPtr = NULL; |
48 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); | 48 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); |
49 | 49 |
50 fBytesInUse = 0; | 50 fBytesInUse = 0; |
51 | 51 |
52 fPreallocBuffersInUse = 0; | 52 fPreallocBuffersInUse = 0; |
53 fPreallocBufferStartIdx = 0; | 53 fPreallocBufferStartIdx = 0; |
54 for (int i = 0; i < preallocBufferCnt; ++i) { | 54 for (int i = 0; i < preallocBufferCnt; ++i) { |
55 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize); | 55 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize); |
56 if (buffer) { | 56 if (buffer) { |
57 *fPreallocBuffers.append() = buffer; | 57 *fPreallocBuffers.append() = buffer; |
58 } | 58 } |
59 } | 59 } |
60 } | 60 } |
61 | 61 |
62 GrBufferAllocPool::~GrBufferAllocPool() { | 62 GrBufferAllocPool::~GrBufferAllocPool() { |
63 VALIDATE(); | 63 VALIDATE(); |
64 if (fBlocks.count()) { | 64 if (fBlocks.count()) { |
65 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 65 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
66 if (buffer->isMapped()) { | 66 if (buffer->isMapped()) { |
67 UNMAP_BUFFER(fBlocks.back()); | 67 UNMAP_BUFFER(fBlocks.back()); |
68 } | 68 } |
69 } | 69 } |
70 while (!fBlocks.empty()) { | 70 while (!fBlocks.empty()) { |
71 destroyBlock(); | 71 this->destroyBlock(); |
72 } | 72 } |
73 fPreallocBuffers.unrefAll(); | 73 fPreallocBuffers.unrefAll(); |
74 fGpu->unref(); | 74 fGpu->unref(); |
75 } | 75 } |
76 | 76 |
77 void GrBufferAllocPool::reset() { | 77 void GrBufferAllocPool::reset() { |
78 VALIDATE(); | 78 VALIDATE(); |
79 fBytesInUse = 0; | 79 fBytesInUse = 0; |
80 if (fBlocks.count()) { | 80 if (fBlocks.count()) { |
81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
(...skipping 102 matching lines...)
184 } | 184 } |
185 | 185 |
186 // We could honor the space request using a partial update of the current | 186 // We could honor the space request using a partial update of the current |
187 // VB (if there is room). But we don't currently use draw calls to GL that | 187 // VB (if there is room). But we don't currently use draw calls to GL that |
188 // allow the driver to know that previously issued draws won't read from | 188 // allow the driver to know that previously issued draws won't read from |
189 // the part of the buffer we update. Also, the GL buffer implementation | 189 // the part of the buffer we update. Also, the GL buffer implementation |
190 // may be cheating on the actual buffer size by shrinking the buffer on | 190 // may be cheating on the actual buffer size by shrinking the buffer on |
191 // updateData() if the amount of data passed is less than the full buffer | 191 // updateData() if the amount of data passed is less than the full buffer |
192 // size. | 192 // size. |
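As a rough GL-level illustration of the alternative this comment rules out (assuming a desktop GL 1.5+ header or loader; usedBytes, size, and data are placeholders, not names from this file):

    #include <GL/gl.h>

    // A partial update of the current buffer would look like this; with no
    // way to tell the driver that in-flight draws won't read the updated
    // range, the call may serialize behind those prior draws.
    void partialUpdate(GLintptr usedBytes, GLsizeiptr size, const void* data) {
        glBufferSubData(GL_ARRAY_BUFFER, usedBytes, size, data);
    }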
193 | 193 |
194 if (!createBlock(size)) { | 194 if (!this->createBlock(size)) { |
195 return NULL; | 195 return NULL; |
196 } | 196 } |
197 SkASSERT(fBufferPtr); | 197 SkASSERT(fBufferPtr); |
198 | 198 |
199 *offset = 0; | 199 *offset = 0; |
200 BufferBlock& back = fBlocks.back(); | 200 BufferBlock& back = fBlocks.back(); |
201 *buffer = back.fBuffer; | 201 *buffer = back.fBuffer; |
202 back.fBytesFree -= size; | 202 back.fBytesFree -= size; |
203 fBytesInUse += size; | 203 fBytesInUse += size; |
204 VALIDATE(); | 204 VALIDATE(); |
205 return fBufferPtr; | 205 return fBufferPtr; |
206 } | 206 } |
207 | 207 |
208 int GrBufferAllocPool::currentBufferItems(size_t itemSize) const { | |
209 VALIDATE(); | |
210 if (fBufferPtr) { | |
211 const BufferBlock& back = fBlocks.back(); | |
212 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; | |
213 size_t pad = GrSizeAlignUpPad(usedBytes, itemSize); | |
214 return static_cast<int>((back.fBytesFree - pad) / itemSize); | |
215 } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) { | |
216 return static_cast<int>(fMinBlockSize / itemSize); | |
217 } | |
218 return 0; | |
219 } | |
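The deleted currentBufferItems counts how many whole items still fit once the used bytes are padded up to the item alignment. A sketch of the math, assuming GrSizeAlignUpPad has its usual GrTypes.h definition:

    #include <cstddef>

    // Assumed definition: the padding needed to round x up to the next
    // multiple of alignment.
    static inline size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }
    // Worked example: usedBytes = 100, itemSize = 12 gives
    // pad = (12 - 100 % 12) % 12 = 8, so the next item starts at byte 108
    // and (fBytesFree - 8) / 12 whole items remain in the block.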
220 | |
221 int GrBufferAllocPool::preallocatedBuffersRemaining() const { | |
222 return fPreallocBuffers.count() - fPreallocBuffersInUse; | |
223 } | |
224 | |
225 int GrBufferAllocPool::preallocatedBufferCount() const { | |
226 return fPreallocBuffers.count(); | |
227 } | |
228 | |
229 void GrBufferAllocPool::putBack(size_t bytes) { | 208 void GrBufferAllocPool::putBack(size_t bytes) { |
230 VALIDATE(); | 209 VALIDATE(); |
231 | 210 |
232 // if the putBack unwinds all the preallocated buffers then we will | 211 // if the putBack unwinds all the preallocated buffers then we will |
233 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse | 212 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse |
234 // will be decremented. It will reach zero if all blocks using preallocated | 213 // will be decremented. It will reach zero if all blocks using preallocated |
235 // buffers are released. | 214 // buffers are released. |
236 int preallocBuffersInUse = fPreallocBuffersInUse; | 215 int preallocBuffersInUse = fPreallocBuffersInUse; |
237 | 216 |
238 while (bytes) { | 217 while (bytes) { |
239 // caller shouldnt try to put back more than they've taken | 218 // caller shouldn't try to put back more than they've taken |
240 SkASSERT(!fBlocks.empty()); | 219 SkASSERT(!fBlocks.empty()); |
241 BufferBlock& block = fBlocks.back(); | 220 BufferBlock& block = fBlocks.back(); |
242 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; | 221 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
243 if (bytes >= bytesUsed) { | 222 if (bytes >= bytesUsed) { |
244 bytes -= bytesUsed; | 223 bytes -= bytesUsed; |
245 fBytesInUse -= bytesUsed; | 224 fBytesInUse -= bytesUsed; |
246 // if we locked a vb to satisfy the make space and we're releasing | 225 // if we locked a vb to satisfy the make space and we're releasing |
247 // beyond it, then unmap it. | 226 // beyond it, then unmap it. |
248 if (block.fBuffer->isMapped()) { | 227 if (block.fBuffer->isMapped()) { |
249 UNMAP_BUFFER(block); | 228 UNMAP_BUFFER(block); |
(...skipping 150 matching lines...)
400 vertexSize, | 379 vertexSize, |
401 &geomBuffer, | 380 &geomBuffer, |
402 &offset); | 381 &offset); |
403 | 382 |
404 *buffer = (const GrVertexBuffer*) geomBuffer; | 383 *buffer = (const GrVertexBuffer*) geomBuffer; |
405 SkASSERT(0 == offset % vertexSize); | 384 SkASSERT(0 == offset % vertexSize); |
406 *startVertex = static_cast<int>(offset / vertexSize); | 385 *startVertex = static_cast<int>(offset / vertexSize); |
407 return ptr; | 386 return ptr; |
408 } | 387 } |
409 | 388 |
410 int GrVertexBufferAllocPool::preallocatedBufferVertices(size_t vertexSize) const { | |
411 return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize); | |
412 } | |
413 | |
414 int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const { | |
415 return currentBufferItems(vertexSize); | |
416 } | |
417 | |
418 //////////////////////////////////////////////////////////////////////////////// | 389 //////////////////////////////////////////////////////////////////////////////// |
419 | 390 |
420 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, | 391 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, |
421 size_t bufferSize, | 392 size_t bufferSize, |
422 int preallocBufferCnt) | 393 int preallocBufferCnt) |
423 : GrBufferAllocPool(gpu, | 394 : GrBufferAllocPool(gpu, |
424 kIndex_BufferType, | 395 kIndex_BufferType, |
425 bufferSize, | 396 bufferSize, |
426 preallocBufferCnt) { | 397 preallocBufferCnt) { |
427 } | 398 } |
(...skipping 12 matching lines...)
440 sizeof(uint16_t), | 411 sizeof(uint16_t), |
441 &geomBuffer, | 412 &geomBuffer, |
442 &offset); | 413 &offset); |
443 | 414 |
444 *buffer = (const GrIndexBuffer*) geomBuffer; | 415 *buffer = (const GrIndexBuffer*) geomBuffer; |
445 SkASSERT(0 == offset % sizeof(uint16_t)); | 416 SkASSERT(0 == offset % sizeof(uint16_t)); |
446 *startIndex = static_cast<int>(offset / sizeof(uint16_t)); | 417 *startIndex = static_cast<int>(offset / sizeof(uint16_t)); |
447 return ptr; | 418 return ptr; |
448 } | 419 } |
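A caller-side sketch of the index pool's makeSpace contract; the first parameter (an index count) falls in the skipped lines above and is assumed, and indexPool is a hypothetical GrIndexBufferAllocPool*:

    #include <cstring>
    #include "GrBufferAllocPool.h"

    void appendQuad(GrIndexBufferAllocPool* indexPool) {
        const GrIndexBuffer* buffer;
        int startIndex;
        // Reserve six uint16_t indices (one quad); the pool returns a
        // mapped pointer plus the buffer and starting index for the draw.
        uint16_t* idx = (uint16_t*) indexPool->makeSpace(6, &buffer, &startIndex);
        if (idx) {
            static const uint16_t kQuad[] = { 0, 1, 2, 0, 2, 3 };
            memcpy(idx, kQuad, sizeof(kQuad));
        }
    }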
449 | 420 |
450 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { | |
451 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); | |
452 } | |
453 | 421 |
454 int GrIndexBufferAllocPool::currentBufferIndices() const { | |
455 return currentBufferItems(sizeof(uint16_t)); | |
456 } | |