Index: src/gpu/GrBufferAllocPool.cpp
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index a197a6719f199a051c76ecc66a6b3f903b8e18bb..226f45196e4afdb002ac989c4940534ad9306317 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -42,7 +42,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      int preallocBufferCnt) :
         fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
 
-    SkASSERT(NULL != gpu);
+    SkASSERT(gpu);
     fGpu = gpu;
     fGpu->ref();
     fGpuIsReffed = true;
@@ -58,7 +58,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
     fPreallocBufferStartIdx = 0;
     for (int i = 0; i < preallocBufferCnt; ++i) {
         GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
-        if (NULL != buffer) {
+        if (buffer) {
             *fPreallocBuffers.append() = buffer;
         }
     }
@@ -116,7 +116,7 @@ void GrBufferAllocPool::reset() {
 void GrBufferAllocPool::unmap() {
     VALIDATE();
 
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isMapped()) {
             UNMAP_BUFFER(block);
@@ -131,7 +131,7 @@ void GrBufferAllocPool::unmap() {
 
 #ifdef SK_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isMapped()) {
             GrGeometryBuffer* buf = fBlocks.back().fBuffer;
@@ -168,10 +168,10 @@ void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t* offset) {
     VALIDATE();
 
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != offset);
+    SkASSERT(buffer);
+    SkASSERT(offset);
 
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes,
@@ -198,7 +198,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
     if (!createBlock(size)) {
         return NULL;
     }
-    SkASSERT(NULL != fBufferPtr);
+    SkASSERT(fBufferPtr);
 
     *offset = 0;
     BufferBlock& back = fBlocks.back();
@@ -211,7 +211,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
 
 int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
     VALIDATE();
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         const BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
@@ -295,7 +295,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
     }
 
     block.fBytesFree = size;
-    if (NULL != fBufferPtr) {
+    if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isMapped()) {
@@ -356,7 +356,7 @@ void GrBufferAllocPool::destroyBlock() {
 
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
-    SkASSERT(NULL != buffer);
+    SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData.get() == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
@@ -365,7 +365,7 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
     if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
         flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
         void* data = buffer->map();
-        if (NULL != data) {
+        if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
@@ -403,8 +403,8 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int* startVertex) {
 
     SkASSERT(vertexCount >= 0);
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != startVertex);
+    SkASSERT(buffer);
+    SkASSERT(startVertex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
@@ -425,7 +425,7 @@ bool GrVertexBufferAllocPool::appendVertices(size_t vertexSize,
                                              const GrVertexBuffer** buffer,
                                              int* startVertex) {
     void* space = makeSpace(vertexSize, vertexCount, buffer, startVertex);
-    if (NULL != space) {
+    if (space) {
         memcpy(space,
                vertices,
                vertexSize * vertexCount);
@@ -461,8 +461,8 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         int* startIndex) {
 
     SkASSERT(indexCount >= 0);
-    SkASSERT(NULL != buffer);
-    SkASSERT(NULL != startIndex);
+    SkASSERT(buffer);
+    SkASSERT(startIndex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
@@ -482,7 +482,7 @@ bool GrIndexBufferAllocPool::appendIndices(int indexCount,
                                            const GrIndexBuffer** buffer,
                                            int* startIndex) {
     void* space = makeSpace(indexCount, buffer, startIndex);
-    if (NULL != space) {
+    if (space) {
         memcpy(space, indices, sizeof(uint16_t) * indexCount);
         return true;
     } else {
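
Note for reviewers skimming the change: the whole patch applies one idiom, dropping explicit "NULL != ptr" comparisons in favor of the pointer's implicit conversion to bool inside SkASSERT and if conditions. Below is a minimal standalone sketch of that before/after style; it is not part of the patch, "Widget" and "makeWidget" are hypothetical names, and assert from <cassert> stands in for Skia's debug-only SkASSERT.

// Sketch only: illustrates the NULL-comparison idiom removed by this patch.
#include <cassert>
#include <cstddef>

struct Widget { int value; };

// Returns a pointer that may be NULL, like createBuffer() in the pool code.
Widget* makeWidget(bool ok) {
    static Widget w = { 0 };
    return ok ? &w : NULL;
}

int main() {
    Widget* widget = makeWidget(true);

    // Old style, as removed by the patch:
    //     assert(NULL != widget);
    //     if (NULL != widget) { ... }
    // New style: let the pointer convert to bool directly.
    assert(widget);
    if (widget) {
        widget->value = 42;
    }
    return 0;
}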