| OLD | NEW |
| 1 | 1 |
| 2 /* | 2 /* |
| 3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
| 4 * | 4 * |
| 5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
| 6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| (...skipping 38 matching lines...) |
| 49 if (NULL != buffer) { | 49 if (NULL != buffer) { |
| 50 *fPreallocBuffers.append() = buffer; | 50 *fPreallocBuffers.append() = buffer; |
| 51 } | 51 } |
| 52 } | 52 } |
| 53 } | 53 } |
| 54 | 54 |
| 55 GrBufferAllocPool::~GrBufferAllocPool() { | 55 GrBufferAllocPool::~GrBufferAllocPool() { |
| 56 VALIDATE(); | 56 VALIDATE(); |
| 57 if (fBlocks.count()) { | 57 if (fBlocks.count()) { |
| 58 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 58 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 59 if (buffer->isLocked()) { | 59 if (buffer->isMapped()) { |
| 60 buffer->unlock(); | 60 buffer->unmap(); |
| 61 } | 61 } |
| 62 } | 62 } |
| 63 while (!fBlocks.empty()) { | 63 while (!fBlocks.empty()) { |
| 64 destroyBlock(); | 64 destroyBlock(); |
| 65 } | 65 } |
| 66 fPreallocBuffers.unrefAll(); | 66 fPreallocBuffers.unrefAll(); |
| 67 releaseGpuRef(); | 67 releaseGpuRef(); |
| 68 } | 68 } |
| 69 | 69 |
| 70 void GrBufferAllocPool::releaseGpuRef() { | 70 void GrBufferAllocPool::releaseGpuRef() { |
| 71 if (fGpuIsReffed) { | 71 if (fGpuIsReffed) { |
| 72 fGpu->unref(); | 72 fGpu->unref(); |
| 73 fGpuIsReffed = false; | 73 fGpuIsReffed = false; |
| 74 } | 74 } |
| 75 } | 75 } |
| 76 | 76 |
| 77 void GrBufferAllocPool::reset() { | 77 void GrBufferAllocPool::reset() { |
| 78 VALIDATE(); | 78 VALIDATE(); |
| 79 fBytesInUse = 0; | 79 fBytesInUse = 0; |
| 80 if (fBlocks.count()) { | 80 if (fBlocks.count()) { |
| 81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 82 if (buffer->isLocked()) { | 82 if (buffer->isMapped()) { |
| 83 buffer->unlock(); | 83 buffer->unmap(); |
| 84 } | 84 } |
| 85 } | 85 } |
| 86 // fPreallocBuffersInUse will be decremented down to zero in the while loop | 86 // fPreallocBuffersInUse will be decremented down to zero in the while loop |
| 87 int preallocBuffersInUse = fPreallocBuffersInUse; | 87 int preallocBuffersInUse = fPreallocBuffersInUse; |
| 88 while (!fBlocks.empty()) { | 88 while (!fBlocks.empty()) { |
| 89 this->destroyBlock(); | 89 this->destroyBlock(); |
| 90 } | 90 } |
| 91 if (fPreallocBuffers.count()) { | 91 if (fPreallocBuffers.count()) { |
| 92 // must set this after above loop. | 92 // must set this after above loop. |
| 93 fPreallocBufferStartIdx = (fPreallocBufferStartIdx + | 93 fPreallocBufferStartIdx = (fPreallocBufferStartIdx + |
| 94 preallocBuffersInUse) % | 94 preallocBuffersInUse) % |
| 95 fPreallocBuffers.count(); | 95 fPreallocBuffers.count(); |
| 96 } | 96 } |
| 97 // we may have created a large cpu mirror of a large VB. Reset the size | 97 // we may have created a large cpu mirror of a large VB. Reset the size |
| 98 // to match our pre-allocated VBs. | 98 // to match our pre-allocated VBs. |
| 99 fCpuData.reset(fMinBlockSize); | 99 fCpuData.reset(fMinBlockSize); |
| 100 SkASSERT(0 == fPreallocBuffersInUse); | 100 SkASSERT(0 == fPreallocBuffersInUse); |
| 101 VALIDATE(); | 101 VALIDATE(); |
| 102 } | 102 } |
| 103 | 103 |
| 104 void GrBufferAllocPool::unlock() { | 104 void GrBufferAllocPool::unmap() { |
| 105 VALIDATE(); | 105 VALIDATE(); |
| 106 | 106 |
| 107 if (NULL != fBufferPtr) { | 107 if (NULL != fBufferPtr) { |
| 108 BufferBlock& block = fBlocks.back(); | 108 BufferBlock& block = fBlocks.back(); |
| 109 if (block.fBuffer->isLocked()) { | 109 if (block.fBuffer->isMapped()) { |
| 110 block.fBuffer->unlock(); | 110 block.fBuffer->unmap(); |
| 111 } else { | 111 } else { |
| 112 size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; | 112 size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
| 113 flushCpuData(fBlocks.back().fBuffer, flushSize); | 113 this->flushCpuData(fBlocks.back().fBuffer, flushSize); |
| 114 } | 114 } |
| 115 fBufferPtr = NULL; | 115 fBufferPtr = NULL; |
| 116 } | 116 } |
| 117 VALIDATE(); | 117 VALIDATE(); |
| 118 } | 118 } |
| 119 | 119 |
| 120 #ifdef SK_DEBUG | 120 #ifdef SK_DEBUG |
| 121 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { | 121 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { |
| 122 if (NULL != fBufferPtr) { | 122 if (NULL != fBufferPtr) { |
| 123 SkASSERT(!fBlocks.empty()); | 123 SkASSERT(!fBlocks.empty()); |
| 124 if (fBlocks.back().fBuffer->isLocked()) { | 124 if (fBlocks.back().fBuffer->isMapped()) { |
| 125 GrGeometryBuffer* buf = fBlocks.back().fBuffer; | 125 GrGeometryBuffer* buf = fBlocks.back().fBuffer; |
| 126 SkASSERT(buf->lockPtr() == fBufferPtr); | 126 SkASSERT(buf->mapPtr() == fBufferPtr); |
| 127 } else { | 127 } else { |
| 128 SkASSERT(fCpuData.get() == fBufferPtr); | 128 SkASSERT(fCpuData.get() == fBufferPtr); |
| 129 } | 129 } |
| 130 } else { | 130 } else { |
| 131 SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked()); | 131 SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped()); |
| 132 } | 132 } |
| 133 size_t bytesInUse = 0; | 133 size_t bytesInUse = 0; |
| 134 for (int i = 0; i < fBlocks.count() - 1; ++i) { | 134 for (int i = 0; i < fBlocks.count() - 1; ++i) { |
| 135 SkASSERT(!fBlocks[i].fBuffer->isLocked()); | 135 SkASSERT(!fBlocks[i].fBuffer->isMapped()); |
| 136 } | 136 } |
| 137 for (int i = 0; i < fBlocks.count(); ++i) { | 137 for (int i = 0; i < fBlocks.count(); ++i) { |
| 138 size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree; | 138 size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree; |
| 139 bytesInUse += bytes; | 139 bytesInUse += bytes; |
| 140 SkASSERT(bytes || unusedBlockAllowed); | 140 SkASSERT(bytes || unusedBlockAllowed); |
| 141 } | 141 } |
| 142 | 142 |
| 143 SkASSERT(bytesInUse == fBytesInUse); | 143 SkASSERT(bytesInUse == fBytesInUse); |
| 144 if (unusedBlockAllowed) { | 144 if (unusedBlockAllowed) { |
| 145 SkASSERT((fBytesInUse && !fBlocks.empty()) || | 145 SkASSERT((fBytesInUse && !fBlocks.empty()) || |
| (...skipping 83 matching lines...) |
| 229 | 229 |
| 230 while (bytes) { | 230 while (bytes) { |
| 231 // caller shouldnt try to put back more than they've taken | 231 // caller shouldnt try to put back more than they've taken |
| 232 SkASSERT(!fBlocks.empty()); | 232 SkASSERT(!fBlocks.empty()); |
| 233 BufferBlock& block = fBlocks.back(); | 233 BufferBlock& block = fBlocks.back(); |
| 234 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; | 234 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
| 235 if (bytes >= bytesUsed) { | 235 if (bytes >= bytesUsed) { |
| 236 bytes -= bytesUsed; | 236 bytes -= bytesUsed; |
| 237 fBytesInUse -= bytesUsed; | 237 fBytesInUse -= bytesUsed; |
| 238 // if we locked a vb to satisfy the make space and we're releasing | 238 // if we locked a vb to satisfy the make space and we're releasing |
| 239 // beyond it, then unlock it. | 239 // beyond it, then unmap it. |
| 240 if (block.fBuffer->isLocked()) { | 240 if (block.fBuffer->isMapped()) { |
| 241 block.fBuffer->unlock(); | 241 block.fBuffer->unmap(); |
| 242 } | 242 } |
| 243 this->destroyBlock(); | 243 this->destroyBlock(); |
| 244 } else { | 244 } else { |
| 245 block.fBytesFree += bytes; | 245 block.fBytesFree += bytes; |
| 246 fBytesInUse -= bytes; | 246 fBytesInUse -= bytes; |
| 247 bytes = 0; | 247 bytes = 0; |
| 248 break; | 248 break; |
| 249 } | 249 } |
| 250 } | 250 } |
| 251 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { | 251 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { |
| (...skipping 27 matching lines...) |
| 279 if (NULL == block.fBuffer) { | 279 if (NULL == block.fBuffer) { |
| 280 fBlocks.pop_back(); | 280 fBlocks.pop_back(); |
| 281 return false; | 281 return false; |
| 282 } | 282 } |
| 283 } | 283 } |
| 284 | 284 |
| 285 block.fBytesFree = size; | 285 block.fBytesFree = size; |
| 286 if (NULL != fBufferPtr) { | 286 if (NULL != fBufferPtr) { |
| 287 SkASSERT(fBlocks.count() > 1); | 287 SkASSERT(fBlocks.count() > 1); |
| 288 BufferBlock& prev = fBlocks.fromBack(1); | 288 BufferBlock& prev = fBlocks.fromBack(1); |
| 289 if (prev.fBuffer->isLocked()) { | 289 if (prev.fBuffer->isMapped()) { |
| 290 prev.fBuffer->unlock(); | 290 prev.fBuffer->unmap(); |
| 291 } else { | 291 } else { |
| 292 flushCpuData(prev.fBuffer, | 292 flushCpuData(prev.fBuffer, |
| 293 prev.fBuffer->gpuMemorySize() - prev.fBytesFree); | 293 prev.fBuffer->gpuMemorySize() - prev.fBytesFree); |
| 294 } | 294 } |
| 295 fBufferPtr = NULL; | 295 fBufferPtr = NULL; |
| 296 } | 296 } |
| 297 | 297 |
| 298 SkASSERT(NULL == fBufferPtr); | 298 SkASSERT(NULL == fBufferPtr); |
| 299 | 299 |
| 300 // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy. | 300 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. |
| 301 // Otherwise when buffer locking is supported: | 301 // Otherwise when buffer mapping is supported: |
| 302 // a) If the frequently reset hint is set we only lock when the requested size meets a | 302 // a) If the frequently reset hint is set we only map when the requested size meets a |
| 303 // threshold (since we don't expect it is likely that we will see more vertex data) | 303 // threshold (since we don't expect it is likely that we will see more vertex data) |
| 304 // b) If the hint is not set we lock if the buffer size is greater than the threshold. | 304 // b) If the hint is not set we map if the buffer size is greater than the threshold. |
| 305 bool attemptLock = block.fBuffer->isCPUBacked(); | 305 bool attemptMap = block.fBuffer->isCPUBacked(); |
| 306 if (!attemptLock && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { | 306 if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { |
| 307 if (fFrequentResetHint) { | 307 if (fFrequentResetHint) { |
| 308 attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD; | 308 attemptMap = requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD; |
| 309 } else { | 309 } else { |
| 310 attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD; | 310 attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD; |
| 311 } | 311 } |
| 312 } | 312 } |
| 313 | 313 |
| 314 if (attemptLock) { | 314 if (attemptMap) { |
| 315 fBufferPtr = block.fBuffer->lock(); | 315 fBufferPtr = block.fBuffer->map(); |
| 316 } | 316 } |
| 317 | 317 |
| 318 if (NULL == fBufferPtr) { | 318 if (NULL == fBufferPtr) { |
| 319 fBufferPtr = fCpuData.reset(size); | 319 fBufferPtr = fCpuData.reset(size); |
| 320 } | 320 } |
| 321 | 321 |
| 322 VALIDATE(true); | 322 VALIDATE(true); |
| 323 | 323 |
| 324 return true; | 324 return true; |
| 325 } | 325 } |
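The block above decides whether createBlock() maps the new GPU buffer directly or falls back to the CPU-side staging buffer. A minimal sketch of that decision follows; the helper name and parameters are hypothetical stand-ins, not part of this CL or of Skia's API.

```cpp
// Hedged sketch of the mapping heuristic in createBlock(); 'shouldMap' and its
// parameters are illustrative only.
static bool shouldMap(bool cpuBacked, bool canMapBuffers, bool frequentResetHint,
                      size_t requestSize, size_t blockSize, size_t mapThreshold) {
    if (cpuBacked) {
        return true;      // mapping a CPU-backed buffer costs nothing
    }
    if (!canMapBuffers) {
        return false;     // driver can't map; stage the data in CPU memory instead
    }
    // With the frequent-reset hint, gate on the current request; otherwise gate on
    // the whole block size (mirrors GR_GEOM_BUFFER_MAP_THRESHOLD above).
    return (frequentResetHint ? requestSize : blockSize) > mapThreshold;
}
```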
| 326 | 326 |
| 327 void GrBufferAllocPool::destroyBlock() { | 327 void GrBufferAllocPool::destroyBlock() { |
| 328 SkASSERT(!fBlocks.empty()); | 328 SkASSERT(!fBlocks.empty()); |
| 329 | 329 |
| 330 BufferBlock& block = fBlocks.back(); | 330 BufferBlock& block = fBlocks.back(); |
| 331 if (fPreallocBuffersInUse > 0) { | 331 if (fPreallocBuffersInUse > 0) { |
| 332 uint32_t prevPreallocBuffer = (fPreallocBuffersInUse + | 332 uint32_t prevPreallocBuffer = (fPreallocBuffersInUse + |
| 333 fPreallocBufferStartIdx + | 333 fPreallocBufferStartIdx + |
| 334 (fPreallocBuffers.count() - 1)) % | 334 (fPreallocBuffers.count() - 1)) % |
| 335 fPreallocBuffers.count(); | 335 fPreallocBuffers.count(); |
| 336 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) { | 336 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) { |
| 337 --fPreallocBuffersInUse; | 337 --fPreallocBuffersInUse; |
| 338 } | 338 } |
| 339 } | 339 } |
| 340 SkASSERT(!block.fBuffer->isLocked()); | 340 SkASSERT(!block.fBuffer->isMapped()); |
| 341 block.fBuffer->unref(); | 341 block.fBuffer->unref(); |
| 342 fBlocks.pop_back(); | 342 fBlocks.pop_back(); |
| 343 fBufferPtr = NULL; | 343 fBufferPtr = NULL; |
| 344 } | 344 } |
| 345 | 345 |
| 346 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, | 346 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, |
| 347 size_t flushSize) { | 347 size_t flushSize) { |
| 348 SkASSERT(NULL != buffer); | 348 SkASSERT(NULL != buffer); |
| 349 SkASSERT(!buffer->isLocked()); | 349 SkASSERT(!buffer->isMapped()); |
| 350 SkASSERT(fCpuData.get() == fBufferPtr); | 350 SkASSERT(fCpuData.get() == fBufferPtr); |
| 351 SkASSERT(flushSize <= buffer->gpuMemorySize()); | 351 SkASSERT(flushSize <= buffer->gpuMemorySize()); |
| 352 VALIDATE(true); | 352 VALIDATE(true); |
| 353 | 353 |
| 354 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && | 354 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && |
| 355 flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) { | 355 flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) { |
| 356 void* data = buffer->lock(); | 356 void* data = buffer->map(); |
| 357 if (NULL != data) { | 357 if (NULL != data) { |
| 358 memcpy(data, fBufferPtr, flushSize); | 358 memcpy(data, fBufferPtr, flushSize); |
| 359 buffer->unlock(); | 359 buffer->unmap(); |
| 360 return; | 360 return; |
| 361 } | 361 } |
| 362 } | 362 } |
| 363 buffer->updateData(fBufferPtr, flushSize); | 363 buffer->updateData(fBufferPtr, flushSize); |
| 364 VALIDATE(true); | 364 VALIDATE(true); |
| 365 } | 365 } |
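flushCpuData() above prefers map() plus memcpy over updateData() for large flushes when the driver supports mapping. A short sketch of that write-back pattern under the renamed API; the helper function is hypothetical, while map(), unmap(), and updateData() are the calls shown in the diff.

```cpp
#include <cstring>  // memcpy

// Illustrative only: write 'bytes' of 'src' into a GrGeometryBuffer the way the
// pool does after the lock -> map rename.
static void writeToBuffer(GrGeometryBuffer* buffer, const void* src, size_t bytes) {
    void* dst = buffer->map();          // was buffer->lock() before this change
    if (NULL != dst) {
        memcpy(dst, src, bytes);
        buffer->unmap();                // was buffer->unlock()
    } else {
        buffer->updateData(src, bytes); // non-mapped fallback, as in flushCpuData()
    }
}
```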
| 366 | 366 |
| 367 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { | 367 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { |
| 368 if (kIndex_BufferType == fBufferType) { | 368 if (kIndex_BufferType == fBufferType) { |
| 369 return fGpu->createIndexBuffer(size, true); | 369 return fGpu->createIndexBuffer(size, true); |
| (...skipping 109 matching lines...) |
| 479 } | 479 } |
| 480 } | 480 } |
| 481 | 481 |
| 482 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { | 482 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { |
| 483 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); | 483 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); |
| 484 } | 484 } |
| 485 | 485 |
| 486 int GrIndexBufferAllocPool::currentBufferIndices() const { | 486 int GrIndexBufferAllocPool::currentBufferIndices() const { |
| 487 return currentBufferItems(sizeof(uint16_t)); | 487 return currentBufferItems(sizeof(uint16_t)); |
| 488 } | 488 } |