Chromium Code Reviews
| Index: src/gpu/GrBufferAllocPool.cpp |
| diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp |
| index 878d537d45833ddeb82e57e7daad9637771e4454..5bfe77bad43f94dc87e361e23317da95f3673856 100644 |
| --- a/src/gpu/GrBufferAllocPool.cpp |
| +++ b/src/gpu/GrBufferAllocPool.cpp |
| @@ -11,6 +11,7 @@ |
| #include "GrDrawTargetCaps.h" |
| #include "GrGpu.h" |
| #include "GrIndexBuffer.h" |
| +#include "GrResourceProvider.h" |
| #include "GrTypes.h" |
| #include "GrVertexBuffer.h" |
| @@ -22,6 +23,9 @@ |
| static void VALIDATE(bool = false) {} |
| #endif |
| +static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15; |
| +static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12; |
| + |
| // page size |
| #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12) |
| @@ -37,9 +41,8 @@ do { |
| GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, |
| BufferType bufferType, |
| - size_t blockSize, |
| - int preallocBufferCnt) |
| - : fBlocks(SkTMax(8, 2*preallocBufferCnt)) { |
| + size_t blockSize) |
| + : fBlocks(8) { |
| fGpu = SkRef(gpu); |
| @@ -48,19 +51,9 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, |
| fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); |
| fBytesInUse = 0; |
| - |
| - fPreallocBuffersInUse = 0; |
| - fPreallocBufferStartIdx = 0; |
| - for (int i = 0; i < preallocBufferCnt; ++i) { |
| - GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize); |
| - if (buffer) { |
| - *fPreallocBuffers.append() = buffer; |
| - } |
| - } |
| } |
| -GrBufferAllocPool::~GrBufferAllocPool() { |
| - VALIDATE(); |
| +void GrBufferAllocPool::deleteBlocks() { |
| if (fBlocks.count()) { |
| GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| if (buffer->isMapped()) { |
| @@ -70,34 +63,22 @@ GrBufferAllocPool::~GrBufferAllocPool() { |
| while (!fBlocks.empty()) { |
| this->destroyBlock(); |
| } |
| - fPreallocBuffers.unrefAll(); |
| + SkASSERT(!fBufferPtr); |
| +} |
| + |
| +GrBufferAllocPool::~GrBufferAllocPool() { |
| + VALIDATE(); |
| + this->deleteBlocks(); |
| fGpu->unref(); |
| } |
| void GrBufferAllocPool::reset() { |
| VALIDATE(); |
| fBytesInUse = 0; |
| - if (fBlocks.count()) { |
| - GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| - if (buffer->isMapped()) { |
| - UNMAP_BUFFER(fBlocks.back()); |
| - } |
| - } |
| - // fPreallocBuffersInUse will be decremented down to zero in the while loop |
| - int preallocBuffersInUse = fPreallocBuffersInUse; |
| - while (!fBlocks.empty()) { |
| - this->destroyBlock(); |
| - } |
| - if (fPreallocBuffers.count()) { |
| - // must set this after above loop. |
| - fPreallocBufferStartIdx = (fPreallocBufferStartIdx + |
| - preallocBuffersInUse) % |
| - fPreallocBuffers.count(); |
| - } |
| + this->deleteBlocks(); |
| // we may have created a large cpu mirror of a large VB. Reset the size |
| - // to match our pre-allocated VBs. |
| + // to match our minimum. |
| fCpuData.reset(fMinBlockSize); |
| - SkASSERT(0 == fPreallocBuffersInUse); |
| VALIDATE(); |
| } |
| @@ -169,8 +150,7 @@ void* GrBufferAllocPool::makeSpace(size_t size, |
| if (fBufferPtr) { |
| BufferBlock& back = fBlocks.back(); |
| size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; |
| - size_t pad = GrSizeAlignUpPad(usedBytes, |
| - alignment); |
| + size_t pad = GrSizeAlignUpPad(usedBytes, alignment); |
| if ((size + pad) <= back.fBytesFree) { |
| memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad); |
| usedBytes += pad; |
| @@ -208,12 +188,6 @@ void* GrBufferAllocPool::makeSpace(size_t size, |
| void GrBufferAllocPool::putBack(size_t bytes) { |
| VALIDATE(); |
| - // if the putBack unwinds all the preallocated buffers then we will |
| - // advance the starting index. As blocks are destroyed fPreallocBuffersInUse |
| - // will be decremented. I will reach zero if all blocks using preallocated |
| - // buffers are released. |
| - int preallocBuffersInUse = fPreallocBuffersInUse; |
| - |
| while (bytes) { |
| // caller shouldn't try to put back more than they've taken |
| SkASSERT(!fBlocks.empty()); |
| @@ -235,11 +209,7 @@ void GrBufferAllocPool::putBack(size_t bytes) { |
| break; |
| } |
| } |
| - if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { |
| - fPreallocBufferStartIdx = (fPreallocBufferStartIdx + |
| - preallocBuffersInUse) % |
| - fPreallocBuffers.count(); |
| - } |
| + |
| VALIDATE(); |
| } |
| @@ -252,24 +222,13 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) { |
| BufferBlock& block = fBlocks.push_back(); |
| - if (size == fMinBlockSize && |
| - fPreallocBuffersInUse < fPreallocBuffers.count()) { |
| - |
| - uint32_t nextBuffer = (fPreallocBuffersInUse + |
| - fPreallocBufferStartIdx) % |
| - fPreallocBuffers.count(); |
| - block.fBuffer = fPreallocBuffers[nextBuffer]; |
| - block.fBuffer->ref(); |
| - ++fPreallocBuffersInUse; |
| - } else { |
| - block.fBuffer = this->createBuffer(size); |
| - if (NULL == block.fBuffer) { |
| - fBlocks.pop_back(); |
| - return false; |
| - } |
| + block.fBuffer = this->getBuffer(size); |
| + if (NULL == block.fBuffer) { |
| + fBlocks.pop_back(); |
| + return false; |
| } |
| - block.fBytesFree = size; |
| + block.fBytesFree = block.fBuffer->gpuMemorySize(); |
| if (fBufferPtr) { |
| SkASSERT(fBlocks.count() > 1); |
| BufferBlock& prev = fBlocks.fromBack(1); |
| @@ -288,7 +247,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) { |
| // threshold. |
| bool attemptMap = block.fBuffer->isCPUBacked(); |
| if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { |
| - attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD; |
| + attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD; |
| } |
| if (attemptMap) { |
| @@ -296,7 +255,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) { |
| } |
| if (NULL == fBufferPtr) { |
| - fBufferPtr = fCpuData.reset(size); |
| + fBufferPtr = fCpuData.reset(block.fBytesFree); |
| } |
| VALIDATE(true); |
| @@ -308,15 +267,7 @@ void GrBufferAllocPool::destroyBlock() { |
| SkASSERT(!fBlocks.empty()); |
| BufferBlock& block = fBlocks.back(); |
| - if (fPreallocBuffersInUse > 0) { |
| - uint32_t prevPreallocBuffer = (fPreallocBuffersInUse + |
| - fPreallocBufferStartIdx + |
| - (fPreallocBuffers.count() - 1)) % |
| - fPreallocBuffers.count(); |
| - if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) { |
| - --fPreallocBuffersInUse; |
| - } |
| - } |
| + |
| SkASSERT(!block.fBuffer->isMapped()); |
| block.fBuffer->unref(); |
| fBlocks.pop_back(); |
| @@ -344,24 +295,22 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) |
| VALIDATE(true); |
| } |
| -GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { |
| +GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) { |
| + |
| + GrResourceProvider* rp = fGpu->getContext()->resourceProvider(); |
|
bsalomon
2015/05/13 16:24:07
Can we get GrGpu off this guy?
robertphillips
2015/05/13 16:56:14
Unfortunately, GrBufferAllocPool needs access to the GrGpu.
|
| + |
| if (kIndex_BufferType == fBufferType) { |
| - return fGpu->createIndexBuffer(size, true); |
| + return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true); |
| } else { |
| SkASSERT(kVertex_BufferType == fBufferType); |
| - return fGpu->createVertexBuffer(size, true); |
| + return rp->getVertBuffer(size, /* dynamic = */ true, /* duringFlush = */ true); |
| } |
| } |
| //////////////////////////////////////////////////////////////////////////////// |
| -GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, |
| - size_t bufferSize, |
| - int preallocBufferCnt) |
| - : GrBufferAllocPool(gpu, |
| - kVertex_BufferType, |
| - bufferSize, |
| - preallocBufferCnt) { |
| +GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu) |
| + : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) { |
| } |
| void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, |
| @@ -388,13 +337,8 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, |
| //////////////////////////////////////////////////////////////////////////////// |
| -GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, |
| - size_t bufferSize, |
| - int preallocBufferCnt) |
| - : GrBufferAllocPool(gpu, |
| - kIndex_BufferType, |
| - bufferSize, |
| - preallocBufferCnt) { |
| +GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu) |
| + : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) { |
| } |
| void* GrIndexBufferAllocPool::makeSpace(int indexCount, |