| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrBatch.h" | 8 #include "GrBatch.h" |
| 9 #include "GrBatchTarget.h" |
| 10 #include "GrResourceProvider.h" |
| 9 | 11 |
| 10 #include "GrMemoryPool.h" | 12 #include "GrMemoryPool.h" |
| 11 #include "SkSpinlock.h" | 13 #include "SkSpinlock.h" |
| 12 | 14 |
| 13 // TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, | 15 // TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, |
| 14 // but seems to be mostly consistent. There is a lot in flux right now, but we should really | 16 // but seems to be mostly consistent. There is a lot in flux right now, but we should really |
| 15 // revisit this when batch is everywhere | 17 // revisit this when batch is everywhere |
| 16 | 18 |
| 17 | 19 |
| 18 // We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on | 20 // We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on |
| (...skipping 17 matching lines...) |
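The elided block above is where the CL keeps the MemoryPoolAccessor used by the operator new/delete overrides below. As a hedged illustration of the pattern the comment describes (a lazily created global GrMemoryPool guarded by an SkSpinlock), and not the actual elided code, such an accessor could look roughly like this; the pool sizes and the lock method names are assumptions:

    // Illustrative sketch only; names, pool sizes, and SkSpinlock usage are assumptions.
    static SkSpinlock gBatchSpinlock;

    class MemoryPoolAccessor {
    public:
        MemoryPoolAccessor() { gBatchSpinlock.acquire(); }   // hold the lock for the accessor's lifetime
        ~MemoryPoolAccessor() { gBatchSpinlock.release(); }

        GrMemoryPool* pool() const {
            static GrMemoryPool gPool(16384, 16384);          // single process-wide pool for all batches
            return &gPool;
        }
    };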
| 36 | 38 |
| 37 int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID; | 39 int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID; |
| 38 | 40 |
| 39 void* GrBatch::operator new(size_t size) { | 41 void* GrBatch::operator new(size_t size) { |
| 40 return MemoryPoolAccessor().pool()->allocate(size); | 42 return MemoryPoolAccessor().pool()->allocate(size); |
| 41 } | 43 } |
| 42 | 44 |
| 43 void GrBatch::operator delete(void* target) { | 45 void GrBatch::operator delete(void* target) { |
| 44 return MemoryPoolAccessor().pool()->release(target); | 46 return MemoryPoolAccessor().pool()->release(target); |
| 45 } | 47 } |
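Because these overrides live on the GrBatch base class, every subclass allocated with plain new is placed in the shared pool and handed back to it when freed. A minimal sketch of the allocation path (MyRectBatch is a hypothetical subclass, not part of this change, and it assumes GrBatch is ref counted as elsewhere in Ganesh):

    // Hypothetical subclass; only the allocation path matters for this sketch.
    class MyRectBatch : public GrBatch {
        // ... overrides of GrBatch's virtuals go here ...
    };

    GrBatch* batch = new MyRectBatch;   // memory comes from MemoryPoolAccessor().pool()->allocate(size)
    batch->unref();                     // the final unref runs operator delete, releasing back to the pool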
| 48 |
| 49 void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType, |
| 50 size_t vertexStride, const GrIndexBuffer* indexBuffer, |
| 51 int verticesPerInstance, int indicesPerInstance, |
| 52 int instancesToDraw) { |
| 53 SkASSERT(!fInstancesRemaining); |
| 54 SkASSERT(batchTarget); |
| 55 if (!indexBuffer) { |
| 56 return NULL; |
| 57 } |
| 58 const GrVertexBuffer* vertexBuffer; |
| 59 int firstVertex; |
| 60 int vertexCount = verticesPerInstance * instancesToDraw; |
| 61 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount, &vertexBuffer, |
| 62 &firstVertex); |
| 63 if (!vertices) { |
| 64 SkDebugf("Vertices could not be allocated for instanced rendering."); |
| 65 return NULL; |
| 66 } |
| 67 SkASSERT(vertexBuffer); |
| 68 fInstancesRemaining = instancesToDraw; |
| 69 size_t ibSize = indexBuffer->gpuMemorySize(); |
| 70 fMaxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance)); |
| 71 |
| 72 fDrawInfo.initInstanced(primType, vertexBuffer, indexBuffer, |
| 73 firstVertex, verticesPerInstance, indicesPerInstance, &fInstancesRemaining, |
| 74 fMaxInstancesPerDraw); |
| 75 SkASSERT(fMaxInstancesPerDraw > 0); |
| 76 return vertices; |
| 77 } |
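InstancedHelper::init reserves verticesPerInstance * instancesToDraw vertices from the batch target's vertex pool, records the instanced draw parameters in fDrawInfo, and returns a pointer for the caller to fill in. A rough usage sketch from a batch's geometry-generation code follows; the position-only vertex layout and the per-instance counts are made up for illustration:

    // Sketch: four vertices and six indices per instance, position-only vertices.
    InstancedHelper helper;
    size_t vertexStride = sizeof(SkPoint);
    void* verts = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
                              indexBuffer, 4, 6, instanceCount);
    if (!verts) {
        return;                               // vertex pool allocation failed; draw nothing
    }
    SkPoint* positions = reinterpret_cast<SkPoint*>(verts);
    // ... write 4 positions per instance into positions[] ...
    // then issue the draw(s) through the helper's companion call declared in GrBatch.h (not shown in this diff)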
| 78 |
| 79 void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw) { |
| 80 SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer( |
| 81 batchTarget->resourceProvider()->refQuadIndexBuffer()); |
| 82 if (!quadIndexBuffer) { |
| 83 SkDebugf("Could not get quad index buffer."); |
| 84 return NULL; |
| 85 } |
| 86 return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride, |
| 87 quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw); |
| 88 } |
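QuadHelper is a convenience wrapper: it fetches the resource provider's shared quad index buffer, rather than having every quad-based batch build its own, and forwards to InstancedHelper::init with kTriangles_GrPrimitiveType and the fixed kVerticesPerQuad/kIndicesPerQuad counts. Compared with the sketch above, a quad batch then reduces to (again only a hedged sketch with an invented vertex layout):

    QuadHelper helper;
    void* verts = helper.init(batchTarget, sizeof(SkPoint), quadCount);   // storage for quadCount quads
    if (!verts) {
        return;
    }
    // write 4 corner positions per quad; the shared index buffer supplies the 6 indices per quad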