| OLD | NEW |
| 1 | 1 |
| 2 /* | 2 /* |
| 3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
| 4 * | 4 * |
| 5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
| 6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 | 9 |
| 10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
| (...skipping 19 matching lines...) |
| 30 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \ | 30 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \ |
| 31 "GrBufferAllocPool Unmapping Buffer", \ | 31 "GrBufferAllocPool Unmapping Buffer", \ |
| 32 TRACE_EVENT_SCOPE_THREAD, \ | 32 TRACE_EVENT_SCOPE_THREAD, \ |
| 33 "percent_unwritten", \ | 33 "percent_unwritten", \ |
| 34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ | 34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ |
| 35 (block).fBuffer->unmap(); \ | 35 (block).fBuffer->unmap(); \ |
| 36 } while (false) | 36 } while (false) |
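
A note on the macro shape this hunk keeps: wrapping UNMAP_BUFFER's body in do { ... } while (false) makes the multi-statement macro expand to a single statement, so it stays safe inside an unbraced if/else. A minimal sketch of the idiom, with hypothetical names:

```cpp
// Hypothetical two-statement macro; the do/while(false) wrapper makes it
// parse as one statement and forces a trailing semicolon at the call site.
#define RESET_AND_LOG(obj)  \
    do {                    \
        (obj).reset();      \
        (obj).log();        \
    } while (false)

// if (dirty)
//     RESET_AND_LOG(pool);   // expands safely even without braces
// else
//     skip();
```
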
| 37 | 37 |
| 38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, | 38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, |
| 39 BufferType bufferType, | 39 BufferType bufferType, |
| 40 bool frequentResetHint, | |
| 41 size_t blockSize, | 40 size_t blockSize, |
| 42 int preallocBufferCnt) : | 41 int preallocBufferCnt) : |
| 43 fBlocks(SkTMax(8, 2*preallocBufferCnt)) { | 42 fBlocks(SkTMax(8, 2*preallocBufferCnt)) { |
| 44 | 43 |
| 45 SkASSERT(gpu); | 44 fGpu = SkRef(gpu); |
| 46 fGpu = gpu; | |
| 47 fGpu->ref(); | |
| 48 fGpuIsReffed = true; | |
| 49 | 45 |
| 50 fBufferType = bufferType; | 46 fBufferType = bufferType; |
| 51 fFrequentResetHint = frequentResetHint; | |
| 52 fBufferPtr = NULL; | 47 fBufferPtr = NULL; |
| 53 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); | 48 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); |
| 54 | 49 |
| 55 fBytesInUse = 0; | 50 fBytesInUse = 0; |
| 56 | 51 |
| 57 fPreallocBuffersInUse = 0; | 52 fPreallocBuffersInUse = 0; |
| 58 fPreallocBufferStartIdx = 0; | 53 fPreallocBufferStartIdx = 0; |
| 59 for (int i = 0; i < preallocBufferCnt; ++i) { | 54 for (int i = 0; i < preallocBufferCnt; ++i) { |
| 60 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize); | 55 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize); |
| 61 if (buffer) { | 56 if (buffer) { |
| 62 *fPreallocBuffers.append() = buffer; | 57 *fPreallocBuffers.append() = buffer; |
| 63 } | 58 } |
| 64 } | 59 } |
| 65 } | 60 } |
| 66 | 61 |
| 67 GrBufferAllocPool::~GrBufferAllocPool() { | 62 GrBufferAllocPool::~GrBufferAllocPool() { |
| 68 VALIDATE(); | 63 VALIDATE(); |
| 69 if (fBlocks.count()) { | 64 if (fBlocks.count()) { |
| 70 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 65 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 71 if (buffer->isMapped()) { | 66 if (buffer->isMapped()) { |
| 72 UNMAP_BUFFER(fBlocks.back()); | 67 UNMAP_BUFFER(fBlocks.back()); |
| 73 } | 68 } |
| 74 } | 69 } |
| 75 while (!fBlocks.empty()) { | 70 while (!fBlocks.empty()) { |
| 76 destroyBlock(); | 71 destroyBlock(); |
| 77 } | 72 } |
| 78 fPreallocBuffers.unrefAll(); | 73 fPreallocBuffers.unrefAll(); |
| 79 releaseGpuRef(); | 74 fGpu->unref(); |
| 80 } | |
| 81 | |
| 82 void GrBufferAllocPool::releaseGpuRef() { | |
| 83 if (fGpuIsReffed) { | |
| 84 fGpu->unref(); | |
| 85 fGpuIsReffed = false; | |
| 86 } | |
| 87 } | 75 } |
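
The manual ref()/unref() bookkeeping, the fGpuIsReffed flag, and the releaseGpuRef() helper collapse into SkRef() at construction plus a single unref() in the destructor. For context, SkRef from SkRefCnt.h is essentially an assert-then-ref helper that returns its argument, which is what lets it absorb the old explicit SkASSERT(gpu) as well; a sketch of that pattern:

```cpp
// Sketch of the SkRef-style helper (as in Skia's SkRefCnt.h): assert the
// pointer is non-NULL, take a reference, and return it so the call can sit
// directly in an assignment or initializer list.
template <typename T> static inline T* SkRef(T* obj) {
    SkASSERT(obj);   // subsumes the constructor's explicit SkASSERT(gpu)
    obj->ref();
    return obj;
}
```
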
| 88 | 76 |
| 89 void GrBufferAllocPool::reset() { | 77 void GrBufferAllocPool::reset() { |
| 90 VALIDATE(); | 78 VALIDATE(); |
| 91 fBytesInUse = 0; | 79 fBytesInUse = 0; |
| 92 if (fBlocks.count()) { | 80 if (fBlocks.count()) { |
| 93 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; | 81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; |
| 94 if (buffer->isMapped()) { | 82 if (buffer->isMapped()) { |
| 95 UNMAP_BUFFER(fBlocks.back()); | 83 UNMAP_BUFFER(fBlocks.back()); |
| 96 } | 84 } |
| (...skipping 213 matching lines...) |
| 310 UNMAP_BUFFER(prev); | 298 UNMAP_BUFFER(prev); |
| 311 } else { | 299 } else { |
| 312 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); | 300 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); |
| 313 } | 301 } |
| 314 fBufferPtr = NULL; | 302 fBufferPtr = NULL; |
| 315 } | 303 } |
| 316 | 304 |
| 317 SkASSERT(NULL == fBufferPtr); | 305 SkASSERT(NULL == fBufferPtr); |
| 318 | 306 |
| 319 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. | 307 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. |
| 320 // Otherwise when buffer mapping is supported: | 308 // Otherwise when buffer mapping is supported we map if the buffer size is greater than the |
| 321 // a) If the frequently reset hint is set we only map when the requested size meets a | 309 // threshold. |
| 322 // threshold (since we don't expect it is likely that we will see more vertex data) | |
| 323 // b) If the hint is not set we map if the buffer size is greater than the threshold. | |
| 324 bool attemptMap = block.fBuffer->isCPUBacked(); | 310 bool attemptMap = block.fBuffer->isCPUBacked(); |
| 325 if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { | 311 if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { |
| 326 if (fFrequentResetHint) { | 312 attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD; |
| 327 attemptMap = requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD; | |
| 328 } else { | |
| 329 attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD; | |
| 330 } | |
| 331 } | 313 } |
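
With the frequent-reset hint gone, the mapping policy reduces to: always map CPU-backed buffers, otherwise map only when the GPU supports mapping and the block size exceeds the threshold. An illustrative restatement of the new predicate (standalone names, not the actual pool code):

```cpp
// cpuBacked, mapSupported, size, and threshold stand in for isCPUBacked(),
// the mapBufferFlags() check, the block size, and GR_GEOM_BUFFER_MAP_THRESHOLD.
static bool should_attempt_map(bool cpuBacked, bool mapSupported,
                               size_t size, size_t threshold) {
    if (cpuBacked) {
        return true;  // mapping is free here and saves a CPU-side copy
    }
    return mapSupported && size > threshold;
}
```
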
| 332 | 314 |
| 333 if (attemptMap) { | 315 if (attemptMap) { |
| 334 fBufferPtr = block.fBuffer->map(); | 316 fBufferPtr = block.fBuffer->map(); |
| 335 } | 317 } |
| 336 | 318 |
| 337 if (NULL == fBufferPtr) { | 319 if (NULL == fBufferPtr) { |
| 338 fBufferPtr = fCpuData.reset(size); | 320 fBufferPtr = fCpuData.reset(size); |
| 339 } | 321 } |
| 340 | 322 |
| (...skipping 47 matching lines...) |
| 388 return fGpu->createIndexBuffer(size, true); | 370 return fGpu->createIndexBuffer(size, true); |
| 389 } else { | 371 } else { |
| 390 SkASSERT(kVertex_BufferType == fBufferType); | 372 SkASSERT(kVertex_BufferType == fBufferType); |
| 391 return fGpu->createVertexBuffer(size, true); | 373 return fGpu->createVertexBuffer(size, true); |
| 392 } | 374 } |
| 393 } | 375 } |
| 394 | 376 |
| 395 //////////////////////////////////////////////////////////////////////////////// | 377 //////////////////////////////////////////////////////////////////////////////// |
| 396 | 378 |
| 397 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, | 379 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, |
| 398 bool frequentResetHint, | |
| 399 size_t bufferSize, | 380 size_t bufferSize, |
| 400 int preallocBufferCnt) | 381 int preallocBufferCnt) |
| 401 : GrBufferAllocPool(gpu, | 382 : GrBufferAllocPool(gpu, |
| 402 kVertex_BufferType, | 383 kVertex_BufferType, |
| 403 frequentResetHint, | |
| 404 bufferSize, | 384 bufferSize, |
| 405 preallocBufferCnt) { | 385 preallocBufferCnt) { |
| 406 } | 386 } |
| 407 | 387 |
| 408 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, | 388 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, |
| 409 int vertexCount, | 389 int vertexCount, |
| 410 const GrVertexBuffer** buffer, | 390 const GrVertexBuffer** buffer, |
| 411 int* startVertex) { | 391 int* startVertex) { |
| 412 | 392 |
| 413 SkASSERT(vertexCount >= 0); | 393 SkASSERT(vertexCount >= 0); |
| (...skipping 17 matching lines...) |
| 431 return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize); | 411 return static_cast<int>(INHERITED::preallocatedBufferSize() / vertexSize); |
| 432 } | 412 } |
| 433 | 413 |
| 434 int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const { | 414 int GrVertexBufferAllocPool::currentBufferVertices(size_t vertexSize) const { |
| 435 return currentBufferItems(vertexSize); | 415 return currentBufferItems(vertexSize); |
| 436 } | 416 } |
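
For reference, the vertex pool's makeSpace() hands back a write pointer and reports which buffer and starting vertex were assigned. A hypothetical call site matching the signature above (the pool variable and vertex layout are illustrative):

```cpp
// Hypothetical usage: reserve space for four point-sized vertices.
const GrVertexBuffer* buffer = NULL;
int startVertex = 0;
void* verts = pool.makeSpace(sizeof(SkPoint), 4, &buffer, &startVertex);
if (verts) {
    // Write 4 * sizeof(SkPoint) bytes of vertex data through verts; a draw
    // would then use 'buffer' with 'startVertex' as its base vertex.
}
```
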
| 437 | 417 |
| 438 //////////////////////////////////////////////////////////////////////////////// | 418 //////////////////////////////////////////////////////////////////////////////// |
| 439 | 419 |
| 440 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, | 420 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, |
| 441 bool frequentResetHint, | |
| 442 size_t bufferSize, | 421 size_t bufferSize, |
| 443 int preallocBufferCnt) | 422 int preallocBufferCnt) |
| 444 : GrBufferAllocPool(gpu, | 423 : GrBufferAllocPool(gpu, |
| 445 kIndex_BufferType, | 424 kIndex_BufferType, |
| 446 frequentResetHint, | |
| 447 bufferSize, | 425 bufferSize, |
| 448 preallocBufferCnt) { | 426 preallocBufferCnt) { |
| 449 } | 427 } |
| 450 | 428 |
| 451 void* GrIndexBufferAllocPool::makeSpace(int indexCount, | 429 void* GrIndexBufferAllocPool::makeSpace(int indexCount, |
| 452 const GrIndexBuffer** buffer, | 430 const GrIndexBuffer** buffer, |
| 453 int* startIndex) { | 431 int* startIndex) { |
| 454 | 432 |
| 455 SkASSERT(indexCount >= 0); | 433 SkASSERT(indexCount >= 0); |
| 456 SkASSERT(buffer); | 434 SkASSERT(buffer); |
| (...skipping 12 matching lines...) |
| 469 return ptr; | 447 return ptr; |
| 470 } | 448 } |
| 471 | 449 |
| 472 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { | 450 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { |
| 473 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); | 451 return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); |
| 474 } | 452 } |
| 475 | 453 |
| 476 int GrIndexBufferAllocPool::currentBufferIndices() const { | 454 int GrIndexBufferAllocPool::currentBufferIndices() const { |
| 477 return currentBufferItems(sizeof(uint16_t)); | 455 return currentBufferItems(sizeof(uint16_t)); |
| 478 } | 456 } |