| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrBatch.h" | 8 #include "GrBatch.h" |
| 9 #include "GrBatchTarget.h" | |
| 10 #include "GrResourceProvider.h" | |
| 11 | 9 |
| 12 #include "GrMemoryPool.h" | 10 #include "GrMemoryPool.h" |
| 13 #include "SkSpinlock.h" | 11 #include "SkSpinlock.h" |
| 14 | 12 |
| 15 // TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, | 13 // TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, |
| 16 // but seems to be mostly consistent. There is a lot in flux right now, but we should really | 14 // but seems to be mostly consistent. There is a lot in flux right now, but we should really |
| 17 // revisit this when batch is everywhere | 15 // revisit this when batch is everywhere |
| 18 | 16 |
| 19 | 17 |
| 20 // We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on | 18 // We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on |
| (...skipping 17 matching lines...) |
| 38 | 36 |
| 39 int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID; | 37 int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID; |
| 40 | 38 |
| 41 void* GrBatch::operator new(size_t size) { | 39 void* GrBatch::operator new(size_t size) { |
| 42 return MemoryPoolAccessor().pool()->allocate(size); | 40 return MemoryPoolAccessor().pool()->allocate(size); |
| 43 } | 41 } |
| 44 | 42 |
| 45 void GrBatch::operator delete(void* target) { | 43 void GrBatch::operator delete(void* target) { |
| 46 return MemoryPoolAccessor().pool()->release(target); | 44 return MemoryPoolAccessor().pool()->release(target); |
| 47 } | 45 } |
| 48 | |
| 49 void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, | |
| 50                                      const GrIndexBuffer* indexBuffer, int verticesPerInstance, | |
| 51                                      int indicesPerInstance, int instancesToDraw) { | |
| 52 SkASSERT(!fInstancesRemaining); | |
| 53 SkASSERT(batchTarget); | |
| 54 if (!indexBuffer) { | |
| 55 return NULL; | |
| 56 } | |
| 57 const GrVertexBuffer* vertexBuffer; | |
| 58 int firstVertex; | |
| 59 int vertexCount = verticesPerInstance * instancesToDraw; | |
| 60 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount, &vertexBuffer, | |
| 61 &firstVertex); | |
| 62 if (!vertices) { | |
| 63 SkDebugf("Vertices could not be allocated for instanced rendering."); | |
| 64 return NULL; | |
| 65 } | |
| 66 SkASSERT(vertexBuffer); | |
| 67 fInstancesRemaining = instancesToDraw; | |
| 68 | |
| 69 fDrawInfo.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, | |
| 70         firstVertex, verticesPerInstance, indicesPerInstance, &fInstancesRemaining, | |
| 71 indexBuffer->maxQuads()); | |
| 72 size_t ibSize = fDrawInfo.indexBuffer()->gpuMemorySize(); | |
| 73 fMaxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance)); | |
| 74 SkASSERT(fMaxInstancesPerDraw > 0); | |
| 75 return vertices; | |
| 76 } | |
| 77 | |
| 78 void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw) { | |
| 79 SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer( | |
| 80 batchTarget->resourceProvider()->refQuadIndexBuffer()); | |
| 81 if (!quadIndexBuffer) { | |
| 82 SkDebugf("Could not get quad index buffer."); | |
| 83 return NULL; | |
| 84 } | |
| 85     return this->INHERITED::init(batchTarget, vertexStride, quadIndexBuffer, kVerticesPerQuad, | |
| 86 kIndicesPerQuad, quadsToDraw); | |
| 87 } | |
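The operator new/delete overrides kept on the NEW side route every GrBatch allocation through a single process-wide GrMemoryPool guarded by an SkSpinlock; the MemoryPoolAccessor temporary seen above holds the lock for the duration of the call (its definition falls in the collapsed lines). A minimal sketch of that pattern, with a placeholder Pool class and a std::mutex standing in for GrMemoryPool and SkSpinlock, might look like this:

```cpp
#include <cstddef>
#include <mutex>
#include <new>

// Minimal stand-in for GrMemoryPool; the real pool recycles blocks rather
// than simply forwarding to the global heap.
class Pool {
public:
    void* allocate(std::size_t size) { return ::operator new(size); }
    void release(void* ptr) { ::operator delete(ptr); }
};

// Sketch of the accessor pattern visible on the NEW side: constructing the
// temporary takes a global lock, destroying it releases the lock, and pool()
// hands out the shared pool in between.  Skia guards its pool with an
// SkSpinlock; a std::mutex is used here only to keep the sketch portable.
class MemoryPoolAccessor {
public:
    MemoryPoolAccessor() { lock().lock(); }
    ~MemoryPoolAccessor() { lock().unlock(); }

    Pool* pool() {
        static Pool gPool;
        return &gPool;
    }

private:
    static std::mutex& lock() {
        static std::mutex gLock;
        return gLock;
    }
};

// Mirrors GrBatch::operator new/delete from the diff: every batch allocation
// and release goes through the shared, lock-protected pool, and the temporary
// accessor keeps the lock held for the whole expression.
class Batch {
public:
    void* operator new(std::size_t size) {
        return MemoryPoolAccessor().pool()->allocate(size);
    }
    void operator delete(void* target) {
        return MemoryPoolAccessor().pool()->release(target);
    }
};
```

Because the pool is shared across the whole process, the lock must be taken on every allocation; the short allocate/release critical sections are why a spinlock is a reasonable choice here.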
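The deleted InstancedHelper::init() sized each instanced draw by dividing the index buffer's byte size by the bytes one instance consumes (sizeof(uint16_t) * indicesPerInstance). A small, self-contained illustration of that arithmetic, using a hypothetical quad index buffer capacity since the real buffer's size is not shown in this diff:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Mirrors the arithmetic from the deleted InstancedHelper::init(): the number
// of instances one draw can cover is capped by how many per-instance runs of
// 16-bit indices fit in the index buffer.
int maxInstancesPerDraw(std::size_t indexBufferBytes, int indicesPerInstance) {
    return static_cast<int>(indexBufferBytes / (sizeof(std::uint16_t) * indicesPerInstance));
}

int main() {
    // Hypothetical numbers for the quad path handled by QuadHelper: 6 indices
    // per quad (two triangles) and an index buffer sized for 2048 quads.
    const int kIndicesPerQuad = 6;
    const std::size_t ibSize = 2048 * kIndicesPerQuad * sizeof(std::uint16_t);
    assert(maxInstancesPerDraw(ibSize, kIndicesPerQuad) == 2048);
    return 0;
}
```

For the quad case this works out to exactly the number of quads the shared index buffer was built for, which is why the original code could also pass indexBuffer->maxQuads() straight into initInstanced().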