Index: src/gpu/GrBufferAllocPool.cpp
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index b0e4e2853590dcced5d71418f0edfaf0287562f1..878d537d45833ddeb82e57e7daad9637771e4454 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -11,7 +11,6 @@
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
-#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
@@ -22,9 +21,6 @@
#else
    static void VALIDATE(bool = false) {}
#endif
-
-static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
-static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
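[Reviewer note, not part of the patch] The minimum block size above is (size_t)1 << 12 bytes, i.e. 4096, which is the 4 KiB page size the comment refers to. A trivial standalone check of that arithmetic:

#include <cstddef>
static_assert((static_cast<size_t>(1) << 12) == 4096, "min block size equals one 4 KiB page");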
@@ -41,8 +37,9 @@
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
-                                     size_t blockSize)
-    : fBlocks(8) {
+                                     size_t blockSize,
+                                     int preallocBufferCnt)
+    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
    fGpu = SkRef(gpu);
@@ -51,9 +48,19 @@
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
    fBytesInUse = 0;
-}
-
-void GrBufferAllocPool::deleteBlocks() {
+
+    fPreallocBuffersInUse = 0;
+    fPreallocBufferStartIdx = 0;
+    for (int i = 0; i < preallocBufferCnt; ++i) {
+        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
+        if (buffer) {
+            *fPreallocBuffers.append() = buffer;
+        }
+    }
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
@@ -63,22 +70,34 @@
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
-    SkASSERT(!fBufferPtr);
-}
-
-GrBufferAllocPool::~GrBufferAllocPool() {
-    VALIDATE();
-    this->deleteBlocks();
+    fPreallocBuffers.unrefAll();
    fGpu->unref();
}
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
-    this->deleteBlocks();
+    if (fBlocks.count()) {
+        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+        if (buffer->isMapped()) {
+            UNMAP_BUFFER(fBlocks.back());
+        }
+    }
+    // fPreallocBuffersInUse will be decremented down to zero in the while loop
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+    while (!fBlocks.empty()) {
+        this->destroyBlock();
+    }
+    if (fPreallocBuffers.count()) {
+        // must be set after the loop above.
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                   fPreallocBuffers.count();
+    }
    // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
+    // to match our pre-allocated VBs.
    fCpuData.reset(fMinBlockSize);
+    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}
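[Reviewer note, not part of the patch] reset() above rotates the ring of preallocated buffers: the starting slot is advanced, modulo the prealloc count, by however many prealloc buffers had been handed out before the blocks were destroyed. A minimal standalone sketch of that rotation, using hypothetical names rather than the class members:

#include <cassert>

// Advance the ring's starting slot past the prealloc buffers that were in
// use, wrapping around the number of preallocated buffers.
static int AdvanceStartIdx(int startIdx, int buffersInUse, int preallocCnt) {
    assert(preallocCnt > 0);
    return (startIdx + buffersInUse) % preallocCnt;
}

int main() {
    // With 4 prealloc buffers, a ring starting at slot 2, and 3 buffers
    // handed out, the next block should come from slot (2 + 3) % 4 == 1.
    assert(AdvanceStartIdx(2, 3, 4) == 1);
    return 0;
}

putBack() below performs the same rotation, but only once all blocks backed by preallocated buffers have been released.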
@@ -150,7 +169,8 @@
    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = GrSizeAlignUpPad(usedBytes,
+                                      alignment);
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
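[Reviewer note, not part of the patch] GrSizeAlignUpPad(usedBytes, alignment) yields the number of padding bytes needed to round usedBytes up to the next multiple of alignment, which is why the memset above clears exactly pad bytes before new data is appended. A small standalone sketch of the same arithmetic, with a stand-in helper name:

#include <cassert>
#include <cstddef>

// Bytes of padding needed to round x up to the next multiple of alignment.
static size_t AlignUpPad(size_t x, size_t alignment) {
    assert(alignment > 0);
    return (alignment - x % alignment) % alignment;
}

int main() {
    // 100 bytes already used with a 16-byte alignment: pad by 12 so the
    // next allocation starts at offset 112, a multiple of 16.
    assert(AlignUpPad(100, 16) == 12);
    // Already aligned: no padding needed.
    assert(AlignUpPad(112, 16) == 0);
    return 0;
}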
@@ -187,6 +207,12 @@
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();
+
+    // if the putBack unwinds all the preallocated buffers then we will
+    // advance the starting index. As blocks are destroyed, fPreallocBuffersInUse
+    // will be decremented. It will reach zero if all blocks using preallocated
+    // buffers are released.
+    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (bytes) {
        // caller shouldn't try to put back more than they've taken
@@ -209,7 +235,11 @@
            break;
        }
    }
-
+    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                   fPreallocBuffers.count();
+    }
    VALIDATE();
}
@@ -222,13 +252,24 @@
    BufferBlock& block = fBlocks.push_back();
-    block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
-        fBlocks.pop_back();
-        return false;
-    }
-
-    block.fBytesFree = block.fBuffer->gpuMemorySize();
+    if (size == fMinBlockSize &&
+        fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+        uint32_t nextBuffer = (fPreallocBuffersInUse +
+                               fPreallocBufferStartIdx) %
+                               fPreallocBuffers.count();
+        block.fBuffer = fPreallocBuffers[nextBuffer];
+        block.fBuffer->ref();
+        ++fPreallocBuffersInUse;
+    } else {
+        block.fBuffer = this->createBuffer(size);
+        if (NULL == block.fBuffer) {
+            fBlocks.pop_back();
+            return false;
+        }
+    }
+
+    block.fBytesFree = size;
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
@@ -247,7 +288,7 @@
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
-        attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD;
+        attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
    }
    if (attemptMap) {
@@ -255,7 +296,7 @@
    }
    if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+        fBufferPtr = fCpuData.reset(size);
    }
    VALIDATE(true);
@@ -267,7 +308,15 @@
    SkASSERT(!fBlocks.empty());
    BufferBlock& block = fBlocks.back();
-
+    if (fPreallocBuffersInUse > 0) {
+        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+                                       fPreallocBufferStartIdx +
+                                       (fPreallocBuffers.count() - 1)) %
+                                       fPreallocBuffers.count();
+        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+            --fPreallocBuffersInUse;
+        }
+    }
    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
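[Reviewer note, not part of the patch] In createBlock() above, the next preallocated buffer handed out comes from slot (fPreallocBuffersInUse + fPreallocBufferStartIdx) % fPreallocBuffers.count(); destroyBlock() therefore checks the slot one position earlier, i.e. the prealloc buffer most recently handed out, before decrementing fPreallocBuffersInUse. A standalone sketch of the two index computations, with hypothetical names:

#include <cassert>

// Slot createBlock() would take next.
static int NextSlot(int startIdx, int inUse, int preallocCnt) {
    return (startIdx + inUse) % preallocCnt;
}

// Slot of the most recently handed-out prealloc buffer, i.e. the one
// destroyBlock() may be releasing. Adding (preallocCnt - 1) keeps the
// value non-negative before the modulo.
static int MostRecentSlot(int startIdx, int inUse, int preallocCnt) {
    return (startIdx + inUse + preallocCnt - 1) % preallocCnt;
}

int main() {
    // 4 prealloc buffers, ring starting at slot 3, 2 buffers handed out:
    // they occupy slots 3 and 0, the next would come from slot 1, and the
    // most recently handed-out one is in slot 0.
    assert(NextSlot(3, 2, 4) == 1);
    assert(MostRecentSlot(3, 2, 4) == 0);
    return 0;
}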
@@ -295,22 +344,24 @@
    VALIDATE(true);
}
-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
-
-    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
-
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
-        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createIndexBuffer(size, true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
-        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createVertexBuffer(size, true);
    }
}
////////////////////////////////////////////////////////////////////////////////
-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+                                                 size_t bufferSize,
+                                                 int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kVertex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
@@ -337,8 +388,13 @@
////////////////////////////////////////////////////////////////////////////////
-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
+                                               size_t bufferSize,
+                                               int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kIndex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,