OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright 2015 Google Inc. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license that can be | |
5 * found in the LICENSE file. | |
6 */ | |
7 | |
8 #include "GrBatch.h" | |
9 #include "GrBatchTarget.h" | |
10 #include "GrResourceProvider.h" | |
11 | |
12 #include "GrMemoryPool.h" | |
13 #include "SkSpinlock.h" | |
14 | |
// TODO I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
// but seems to be mostly consistent. There is a lot in flux right now, but we should really
// revisit this when batch is everywhere
18 | |
19 | |
// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
// different threads. The GrContext is not used concurrently on different threads and there is a
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts and those contexts may be in use concurrently on different threads.
24 namespace { | |
25 SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock); | |
26 class MemoryPoolAccessor { | |
27 public: | |
28 MemoryPoolAccessor() { gBatchSpinlock.acquire(); } | |
29 | |
30 ~MemoryPoolAccessor() { gBatchSpinlock.release(); } | |
31 | |
32 GrMemoryPool* pool() const { | |
33 static GrMemoryPool gPool(16384, 16384); | |
34 return &gPool; | |
35 } | |
36 }; | |
37 } | |
38 | |
// Counter backing per-subclass batch IDs; kIllegalBatchID marks "unassigned".
// NOTE(review): presumably incremented each time a new batch class claims an
// ID — confirm against the declaration in GrBatch.h.
int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;

// Debug-only (GrBATCH_SPEW) counter for per-instance unique IDs.
GrBATCH_SPEW(int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;)
42 | |
// All GrBatch instances are carved out of the shared, spinlock-protected
// GrMemoryPool above rather than the global heap.
void* GrBatch::operator new(size_t size) {
    return MemoryPoolAccessor().pool()->allocate(size);
}
46 | |
47 void GrBatch::operator delete(void* target) { | |
48 return MemoryPoolAccessor().pool()->release(target); | |
49 } | |
50 | |
51 void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType
primType, | |
52 size_t vertexStride, const GrIndexBuffer* i
ndexBuffer, | |
53 int verticesPerInstance, int indicesPerInst
ance, | |
54 int instancesToDraw) { | |
55 SkASSERT(batchTarget); | |
56 if (!indexBuffer) { | |
57 return NULL; | |
58 } | |
59 const GrVertexBuffer* vertexBuffer; | |
60 int firstVertex; | |
61 int vertexCount = verticesPerInstance * instancesToDraw; | |
62 void* vertices = batchTarget->makeVertSpace(vertexStride, vertexCount, | |
63 &vertexBuffer, &firstVertex); | |
64 if (!vertices) { | |
65 SkDebugf("Vertices could not be allocated for instanced rendering."); | |
66 return NULL; | |
67 } | |
68 SkASSERT(vertexBuffer); | |
69 size_t ibSize = indexBuffer->gpuMemorySize(); | |
70 int maxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indi
cesPerInstance)); | |
71 | |
72 fVertices.initInstanced(primType, vertexBuffer, indexBuffer, | |
73 firstVertex, verticesPerInstance, indicesPerInstance, instancesToDraw, | |
74 maxInstancesPerDraw); | |
75 return vertices; | |
76 } | |
77 | |
78 void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride,
int quadsToDraw) { | |
79 SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer( | |
80 batchTarget->resourceProvider()->refQuadIndexBuffer()); | |
81 if (!quadIndexBuffer) { | |
82 SkDebugf("Could not get quad index buffer."); | |
83 return NULL; | |
84 } | |
85 return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertex
Stride, | |
86 quadIndexBuffer, kVerticesPerQuad, kIndicesPerQ
uad, quadsToDraw); | |
87 } | |
OLD | NEW |