| OLD | NEW |
| 1 | 1 |
| 2 /* | 2 /* |
| 3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
| 4 * | 4 * |
| 5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
| 6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| 11 #include "GrDrawTargetCaps.h" | 11 #include "GrDrawTargetCaps.h" |
| 12 #include "GrGpu.h" | 12 #include "GrGpu.h" |
| 13 #include "GrIndexBuffer.h" | 13 #include "GrIndexBuffer.h" |
| 14 #include "GrTypes.h" | 14 #include "GrTypes.h" |
| 15 #include "GrVertexBuffer.h" | 15 #include "GrVertexBuffer.h" |
| 16 | 16 |
| 17 #include "SkTraceEvent.h" |
| 18 |
| 17 #ifdef SK_DEBUG | 19 #ifdef SK_DEBUG |
| 18 #define VALIDATE validate | 20 #define VALIDATE validate |
| 19 #else | 21 #else |
| 20 static void VALIDATE(bool = false) {} | 22 static void VALIDATE(bool = false) {} |
| 21 #endif | 23 #endif |
| 22 | 24 |
| 23 // page size | 25 // page size |
| 24 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12) | 26 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12) |
| 25 | 27 |
| 28 #define UNMAP_BUFFER(block)                                                         \ |
| 29     do {                                                                            \ |
| 30         TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                 \ |
| 31                              "GrBufferAllocPool Unmapping Buffer",                   \ |
| 32                              TRACE_EVENT_SCOPE_THREAD,                               \ |
| 33                              "percent_unwritten",                                    \ |
| 34                              (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ |
| 35         (block).fBuffer->unmap();                                                   \ |
| 36     } while (false) |
| 37 |
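The new UNMAP_BUFFER macro above is the heart of this patch: every unmap site now goes through it, so an instant trace event in the disabled-by-default "skia.gpu" category records how much of the mapped buffer was never written before being handed back. A minimal standalone sketch of the metric it computes (the Buffer/Block types below are simplified stand-ins, not Skia's):

```cpp
#include <cstddef>
#include <cstdio>

// Simplified stand-ins for the pool's types; hypothetical, for illustration.
struct Buffer {
    size_t fSize;
    size_t gpuMemorySize() const { return fSize; }
};
struct Block {
    Buffer* fBuffer;
    size_t  fBytesFree;  // bytes at the tail of the buffer never written
};

int main() {
    Buffer buf = { (size_t)1 << 12 };  // a 4KB block, the pool's minimum size
    Block block = { &buf, 1024 };      // 1KB still unwritten at unmap time
    // This is the value UNMAP_BUFFER passes to TRACE_EVENT_INSTANT1 as
    // "percent_unwritten" (a fraction in [0,1], despite the name).
    float unwritten = (float)block.fBytesFree / block.fBuffer->gpuMemorySize();
    printf("percent_unwritten = %.2f\n", unwritten);  // prints 0.25
    return 0;
}
```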
| 26 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, | 38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, |
| 27 BufferType bufferType, | 39 BufferType bufferType, |
| 28 bool frequentResetHint, | 40 bool frequentResetHint, |
| 29 size_t blockSize, | 41 size_t blockSize, |
| 30 int preallocBufferCnt) : | 42 int preallocBufferCnt) : |
| 31 fBlocks(SkTMax(8, 2*preallocBufferCnt)) { | 43 fBlocks(SkTMax(8, 2*preallocBufferCnt)) { |
| 32 | 44 |
| 33 SkASSERT(NULL != gpu); | 45 SkASSERT(NULL != gpu); |
| 34 fGpu = gpu; | 46 fGpu = gpu; |
| 35 fGpu->ref(); | 47 fGpu->ref(); |
| (...skipping 14 matching lines...) |
| 50 *fPreallocBuffers.append() = buffer; | 62 *fPreallocBuffers.append() = buffer; |
| 51 } | 63 } |
| 52 } | 64 } |
| 53 } | 65 } |
| 54 | 66 |
| 55 GrBufferAllocPool::~GrBufferAllocPool() { | 67 GrBufferAllocPool::~GrBufferAllocPool() { |
| 56 VALIDATE(); | 68 VALIDATE(); |
| 57 if (fBlocks.count()) { | 69 if (fBlocks.count()) { |
| 58 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 70 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 59 if (buffer->isMapped()) { | 71 if (buffer->isMapped()) { |
| 60 buffer->unmap(); | 72 UNMAP_BUFFER(fBlocks.back()); |
| 61 } | 73 } |
| 62 } | 74 } |
| 63 while (!fBlocks.empty()) { | 75 while (!fBlocks.empty()) { |
| 64 destroyBlock(); | 76 destroyBlock(); |
| 65 } | 77 } |
| 66 fPreallocBuffers.unrefAll(); | 78 fPreallocBuffers.unrefAll(); |
| 67 releaseGpuRef(); | 79 releaseGpuRef(); |
| 68 } | 80 } |
| 69 | 81 |
| 70 void GrBufferAllocPool::releaseGpuRef() { | 82 void GrBufferAllocPool::releaseGpuRef() { |
| 71 if (fGpuIsReffed) { | 83 if (fGpuIsReffed) { |
| 72 fGpu->unref(); | 84 fGpu->unref(); |
| 73 fGpuIsReffed = false; | 85 fGpuIsReffed = false; |
| 74 } | 86 } |
| 75 } | 87 } |
| 76 | 88 |
| 77 void GrBufferAllocPool::reset() { | 89 void GrBufferAllocPool::reset() { |
| 78 VALIDATE(); | 90 VALIDATE(); |
| 79 fBytesInUse = 0; | 91 fBytesInUse = 0; |
| 80 if (fBlocks.count()) { | 92 if (fBlocks.count()) { |
| 81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 93 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 82 if (buffer->isMapped()) { | 94 if (buffer->isMapped()) { |
| 83 buffer->unmap(); | 95 UNMAP_BUFFER(fBlocks.back()); |
| 84 } | 96 } |
| 85 } | 97 } |
| 86 // fPreallocBuffersInUse will be decremented down to zero in the while loop | 98 // fPreallocBuffersInUse will be decremented down to zero in the while loop |
| 87 int preallocBuffersInUse = fPreallocBuffersInUse; | 99 int preallocBuffersInUse = fPreallocBuffersInUse; |
| 88 while (!fBlocks.empty()) { | 100 while (!fBlocks.empty()) { |
| 89 this->destroyBlock(); | 101 this->destroyBlock(); |
| 90 } | 102 } |
| 91 if (fPreallocBuffers.count()) { | 103 if (fPreallocBuffers.count()) { |
| 92 // must set this after above loop. | 104 // must set this after above loop. |
| 93 fPreallocBufferStartIdx = (fPreallocBufferStartIdx + | 105 fPreallocBufferStartIdx = (fPreallocBufferStartIdx + |
| 94 preallocBuffersInUse) % | 106 preallocBuffersInUse) % |
| 95 fPreallocBuffers.count(); | 107 fPreallocBuffers.count(); |
| 96 } | 108 } |
| 97 // we may have created a large cpu mirror of a large VB. Reset the size | 109 // we may have created a large cpu mirror of a large VB. Reset the size |
| 98 // to match our pre-allocated VBs. | 110 // to match our pre-allocated VBs. |
| 99 fCpuData.reset(fMinBlockSize); | 111 fCpuData.reset(fMinBlockSize); |
| 100 SkASSERT(0 == fPreallocBuffersInUse); | 112 SkASSERT(0 == fPreallocBuffersInUse); |
| 101 VALIDATE(); | 113 VALIDATE(); |
| 102 } | 114 } |
| 103 | 115 |
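After destroying the blocks, reset() advances fPreallocBufferStartIdx by the number of preallocated buffers consumed this cycle, so successive cycles walk the prealloc ring instead of always starting at the same buffer (which the GPU may still be reading). A tiny worked example of the modulo arithmetic, with hypothetical numbers:

```cpp
#include <cstdio>

int main() {
    // Hypothetical numbers: 3 preallocated buffers; this cycle handed out 2
    // of them starting at index 1. After reset() the ring advances so the
    // next cycle starts at the buffer after the last one used.
    int count = 3, startIdx = 1, inUse = 2;
    startIdx = (startIdx + inUse) % count;
    printf("next fPreallocBufferStartIdx = %d\n", startIdx);  // prints 0
    return 0;
}
```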
| 104 void GrBufferAllocPool::unmap() { | 116 void GrBufferAllocPool::unmap() { |
| 105 VALIDATE(); | 117 VALIDATE(); |
| 106 | 118 |
| 107 if (NULL != fBufferPtr) { | 119 if (NULL != fBufferPtr) { |
| 108 BufferBlock& block = fBlocks.back(); | 120 BufferBlock& block = fBlocks.back(); |
| 109 if (block.fBuffer->isMapped()) { | 121 if (block.fBuffer->isMapped()) { |
| 110 block.fBuffer->unmap(); | 122 UNMAP_BUFFER(block); |
| 111 } else { | 123 } else { |
| 112 size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; | 124 size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
| 113 this->flushCpuData(fBlocks.back().fBuffer, flushSize); | 125 this->flushCpuData(fBlocks.back(), flushSize); |
| 114 } | 126 } |
| 115 fBufferPtr = NULL; | 127 fBufferPtr = NULL; |
| 116 } | 128 } |
| 117 VALIDATE(); | 129 VALIDATE(); |
| 118 } | 130 } |
| 119 | 131 |
| 120 #ifdef SK_DEBUG | 132 #ifdef SK_DEBUG |
| 121 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { | 133 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { |
| 122 if (NULL != fBufferPtr) { | 134 if (NULL != fBufferPtr) { |
| 123 SkASSERT(!fBlocks.empty()); | 135 SkASSERT(!fBlocks.empty()); |
| (...skipping 107 matching lines...) |
| 231 // caller shouldn't try to put back more than they've taken | 243 // caller shouldn't try to put back more than they've taken |
| 232 SkASSERT(!fBlocks.empty()); | 244 SkASSERT(!fBlocks.empty()); |
| 233 BufferBlock& block = fBlocks.back(); | 245 BufferBlock& block = fBlocks.back(); |
| 234 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; | 246 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
| 235 if (bytes >= bytesUsed) { | 247 if (bytes >= bytesUsed) { |
| 236 bytes -= bytesUsed; | 248 bytes -= bytesUsed; |
| 237 fBytesInUse -= bytesUsed; | 249 fBytesInUse -= bytesUsed; |
| 238 // if we locked a vb to satisfy the make space and we're releasing | 250 // if we locked a vb to satisfy the make space and we're releasing |
| 239 // beyond it, then unmap it. | 251 // beyond it, then unmap it. |
| 240 if (block.fBuffer->isMapped()) { | 252 if (block.fBuffer->isMapped()) { |
| 241 block.fBuffer->unmap(); | 253 UNMAP_BUFFER(block); |
| 242 } | 254 } |
| 243 this->destroyBlock(); | 255 this->destroyBlock(); |
| 244 } else { | 256 } else { |
| 245 block.fBytesFree += bytes; | 257 block.fBytesFree += bytes; |
| 246 fBytesInUse -= bytes; | 258 fBytesInUse -= bytes; |
| 247 bytes = 0; | 259 bytes = 0; |
| 248 break; | 260 break; |
| 249 } | 261 } |
| 250 } | 262 } |
| 251 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { | 263 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { |
| (...skipping 28 matching lines...) |
| 280 fBlocks.pop_back(); | 292 fBlocks.pop_back(); |
| 281 return false; | 293 return false; |
| 282 } | 294 } |
| 283 } | 295 } |
| 284 | 296 |
| 285 block.fBytesFree = size; | 297 block.fBytesFree = size; |
| 286 if (NULL != fBufferPtr) { | 298 if (NULL != fBufferPtr) { |
| 287 SkASSERT(fBlocks.count() > 1); | 299 SkASSERT(fBlocks.count() > 1); |
| 288 BufferBlock& prev = fBlocks.fromBack(1); | 300 BufferBlock& prev = fBlocks.fromBack(1); |
| 289 if (prev.fBuffer->isMapped()) { | 301 if (prev.fBuffer->isMapped()) { |
| 290 prev.fBuffer->unmap(); | 302 UNMAP_BUFFER(prev); |
| 291 } else { | 303 } else { |
| 292 flushCpuData(prev.fBuffer, | 304 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); |
| 293 prev.fBuffer->gpuMemorySize() - prev.fBytesFree); | |
| 294 } | 305 } |
| 295 fBufferPtr = NULL; | 306 fBufferPtr = NULL; |
| 296 } | 307 } |
| 297 | 308 |
| 298 SkASSERT(NULL == fBufferPtr); | 309 SkASSERT(NULL == fBufferPtr); |
| 299 | 310 |
| 300 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. | 311 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. |
| 301 // Otherwise when buffer mapping is supported: | 312 // Otherwise when buffer mapping is supported: |
| 302 //     a) If the frequently reset hint is set we only map when the requested size meets a | 313 //     a) If the frequently reset hint is set we only map when the requested size meets a |
| 303 //     threshold (since we don't expect it is likely that we will see more vertex data) | 314 //     threshold (since we don't expect it is likely that we will see more vertex data) |
| (...skipping 32 matching lines...) |
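The comment above sketches the decision about whether to map a newly created block up front; its case b) falls inside the elided lines. A hedged reconstruction of the visible policy, where the names and the non-hint branch are illustrative guesses rather than Skia's exact code:

```cpp
#include <cstddef>
#include <cstdio>

// Hedged reconstruction of the decision sketched in the comment above; the
// names and the non-hint branch are illustrative, not Skia's exact code.
static bool shouldMapAtCreate(bool cpuBacked,
                              bool mappingSupported,
                              bool frequentResetHint,
                              size_t requestedSize,
                              size_t threshold) {
    if (cpuBacked) {
        return true;   // mapping a CPU-backed buffer is free and saves a copy
    }
    if (!mappingSupported) {
        return false;  // write through the CPU-side staging store (fCpuData)
    }
    if (frequentResetHint) {
        // a) only map when this request alone clears the threshold, since
        // little more vertex data is expected before the next reset
        return requestedSize > threshold;
    }
    // Case b) falls inside the skipped lines of the diff; returning true
    // here is an illustrative guess.
    return true;
}

int main() {
    // A 16KB request against a hypothetical 32KB threshold with the
    // frequent-reset hint set: too small to bother mapping up front.
    printf("%s\n", shouldMapAtCreate(false, true, true, 16u << 10, 32u << 10)
                       ? "map" : "stage in CPU memory");
    return 0;
}
```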
| 336 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) { | 347 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) { |
| 337 --fPreallocBuffersInUse; | 348 --fPreallocBuffersInUse; |
| 338 } | 349 } |
| 339 } | 350 } |
| 340 SkASSERT(!block.fBuffer->isMapped()); | 351 SkASSERT(!block.fBuffer->isMapped()); |
| 341 block.fBuffer->unref(); | 352 block.fBuffer->unref(); |
| 342 fBlocks.pop_back(); | 353 fBlocks.pop_back(); |
| 343 fBufferPtr = NULL; | 354 fBufferPtr = NULL; |
| 344 } | 355 } |
| 345 | 356 |
| 346 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, | 357 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { |
| 347 size_t flushSize) { | 358 GrGeometryBuffer* buffer = block.fBuffer; |
| 348 SkASSERT(NULL != buffer); | 359 SkASSERT(NULL != buffer); |
| 349 SkASSERT(!buffer->isMapped()); | 360 SkASSERT(!buffer->isMapped()); |
| 350 SkASSERT(fCpuData.get() == fBufferPtr); | 361 SkASSERT(fCpuData.get() == fBufferPtr); |
| 351 SkASSERT(flushSize <= buffer->gpuMemorySize()); | 362 SkASSERT(flushSize <= buffer->gpuMemorySize()); |
| 352 VALIDATE(true); | 363 VALIDATE(true); |
| 353 | 364 |
| 354 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && | 365 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && |
| 355 flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) { | 366 flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) { |
| 356 void* data = buffer->map(); | 367 void* data = buffer->map(); |
| 357 if (NULL != data) { | 368 if (NULL != data) { |
| 358 memcpy(data, fBufferPtr, flushSize); | 369 memcpy(data, fBufferPtr, flushSize); |
| 359 buffer->unmap(); | 370 UNMAP_BUFFER(block); |
| 360 return; | 371 return; |
| 361 } | 372 } |
| 362 } | 373 } |
| 363 buffer->updateData(fBufferPtr, flushSize); | 374 buffer->updateData(fBufferPtr, flushSize); |
| 364 VALIDATE(true); | 375 VALIDATE(true); |
| 365 } | 376 } |
| 366 | 377 |
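flushCpuData now takes the whole BufferBlock rather than a bare GrGeometryBuffer purely so UNMAP_BUFFER can read fBytesFree when it fires the trace event; the flush policy itself is unchanged. It maps and memcpys when mapping is supported and the flush exceeds GR_GEOM_BUFFER_MAP_THRESHOLD, otherwise it falls back to updateData(). A self-contained sketch under those assumptions (FakeBuffer is a hypothetical stand-in):

```cpp
#include <cstddef>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for GrGeometryBuffer, for illustration only.
struct FakeBuffer {
    char fStorage[4096];
    void* map() { return fStorage; }  // pretend mapping always succeeds
    void unmap() {}                   // the patch routes this through UNMAP_BUFFER
    void updateData(const void* src, size_t n) { memcpy(fStorage, src, n); }
};

// Sketch of the flush policy visible in the diff: when mapping is supported
// and the flush is large enough to amortize map/unmap overhead, memcpy into
// the mapped range; otherwise let the driver copy via updateData().
static void flushCpuDataSketch(FakeBuffer* buffer, const void* cpuData,
                               size_t flushSize, bool mappingSupported,
                               size_t mapThreshold) {
    if (mappingSupported && flushSize > mapThreshold) {
        if (void* dst = buffer->map()) {
            memcpy(dst, cpuData, flushSize);
            buffer->unmap();
            return;
        }
    }
    buffer->updateData(cpuData, flushSize);
}

int main() {
    FakeBuffer buf;
    const char msg[] = "staged vertex data";
    flushCpuDataSketch(&buf, msg, sizeof(msg), true, 0);  // above threshold: maps
    printf("%s\n", buf.fStorage);
    return 0;
}
```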
| 367 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { | 378 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { |
| 368 if (kIndex_BufferType == fBufferType) { | 379 if (kIndex_BufferType == fBufferType) { |
| 369 return fGpu->createIndexBuffer(size, true); | 380 return fGpu->createIndexBuffer(size, true); |
| (...skipping 109 matching lines...) |
| 479 } | 490 } |
| 480 } | 491 } |
| 481 | 492 |
| 482 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { | 493 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { |
| 483 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); | 494 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); |
| 484 } | 495 } |
| 485 | 496 |
| 486 int GrIndexBufferAllocPool::currentBufferIndices() const { | 497 int GrIndexBufferAllocPool::currentBufferIndices() const { |
| 487 return currentBufferItems(sizeof(uint16_t)); | 498 return currentBufferItems(sizeof(uint16_t)); |
| 488 } | 499 } |