Chromium Code Reviews

Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1825393002: Consolidate GPU buffer implementations (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: asserts Created 4 years, 9 months ago
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */


 #include "GrBufferAllocPool.h"
+#include "GrBuffer.h"
 #include "GrCaps.h"
 #include "GrContext.h"
 #include "GrGpu.h"
-#include "GrIndexBuffer.h"
 #include "GrResourceProvider.h"
 #include "GrTypes.h"
-#include "GrVertexBuffer.h"

 #include "SkTraceEvent.h"

 #ifdef SK_DEBUG
     #define VALIDATE validate
 #else
     static void VALIDATE(bool = false) {}
 #endif

 static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
 static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;

 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)

 #define UNMAP_BUFFER(block)                                                               \
     do {                                                                                  \
         TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                       \
                              "GrBufferAllocPool Unmapping Buffer",                        \
                              TRACE_EVENT_SCOPE_THREAD,                                    \
                              "percent_unwritten",                                         \
                              (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
         (block).fBuffer->unmap();                                                         \
     } while (false)
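A note on the trace metric in the macro above: percent_unwritten is the fraction of the block that was never written before unmapping, fBytesFree / gpuMemorySize(). For example, unmapping a 32 KiB block (the minimum block size defined above) with 8 KiB still free would report 8192.0f / 32768 = 0.25.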

 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
-                                     BufferType bufferType,
+                                     GrBufferType bufferType,
                                      size_t blockSize)
     : fBlocks(8) {

     fGpu = SkRef(gpu);
     fCpuData = nullptr;
     fBufferType = bufferType;
     fBufferPtr = nullptr;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

     fBytesInUse = 0;

-    fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
+    fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
 }

 void GrBufferAllocPool::deleteBlocks() {
     if (fBlocks.count()) {
-        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+        GrBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
             UNMAP_BUFFER(fBlocks.back());
         }
     }
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
     SkASSERT(!fBufferPtr);
 }

(...skipping 30 matching lines...)
     }
     VALIDATE();
 }

 #ifdef SK_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
     bool wasDestroyed = false;
     if (fBufferPtr) {
         SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isMapped()) {
-            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
+            GrBuffer* buf = fBlocks.back().fBuffer;
             SkASSERT(buf->mapPtr() == fBufferPtr);
         } else {
             SkASSERT(fCpuData == fBufferPtr);
         }
     } else {
         SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
     }
     size_t bytesInUse = 0;
     for (int i = 0; i < fBlocks.count() - 1; ++i) {
         SkASSERT(!fBlocks[i].fBuffer->isMapped());
(...skipping 15 matching lines...)
                      (!fBytesInUse && (fBlocks.count() < 2)));
         } else {
             SkASSERT((0 == fBytesInUse) == fBlocks.empty());
         }
     }
 }
 #endif

 void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t alignment,
-                                   const GrGeometryBuffer** buffer,
+                                   const GrBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();

     SkASSERT(buffer);
     SkASSERT(offset);

     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
(...skipping 86 matching lines...)
         fBufferPtr = nullptr;
     }

     SkASSERT(!fBufferPtr);

     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than
     // the threshold.
     bool attemptMap = block.fBuffer->isCPUBacked();
     if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
-        attemptMap = size > fGeometryBufferMapThreshold;
+        attemptMap = size > fBufferMapThreshold;
     }

     if (attemptMap) {
         fBufferPtr = block.fBuffer->map();
     }

     if (!fBufferPtr) {
         fBufferPtr = this->resetCpuData(block.fBytesFree);
     }

(...skipping 22 matching lines...)
             fCpuData = sk_malloc_throw(newSize);
         }
     } else {
         fCpuData = nullptr;
     }
     return fCpuData;
 }


 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
-    GrGeometryBuffer* buffer = block.fBuffer;
+    GrBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);

     if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
-        flushSize > fGeometryBufferMapThreshold) {
+        flushSize > fBufferMapThreshold) {
         void* data = buffer->map();
         if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
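Taken together, makeSpace's mapping decision and flushCpuData above implement a single upload policy. Restated as a standalone predicate for illustration (a paraphrase of this patch's logic, not a function that exists in the source):

    // Map the GPU buffer directly when it is CPU-backed (mapping is then free
    // and saves a copy), or when mapping is supported and the write is large
    // enough to clear the threshold; otherwise stage through fCpuData and
    // flush with updateData().
    static bool should_map(const GrBuffer* buffer, size_t writeSize,
                           const GrCaps* caps, size_t threshold) {
        if (buffer->isCPUBacked()) {
            return true;
        }
        return GrCaps::kNone_MapFlags != caps->mapBufferFlags() && writeSize > threshold;
    }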

-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {

     GrResourceProvider* rp = fGpu->getContext()->resourceProvider();

-    static const GrResourceProvider::BufferUsage kUsage = GrResourceProvider::kDynamic_BufferUsage;
     // Shouldn't have to use this flag (https://bug.skia.org/4156)
     static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
-    if (kIndex_BufferType == fBufferType) {
-        return rp->createIndexBuffer(size, kUsage, kFlags);
-    } else {
-        SkASSERT(kVertex_BufferType == fBufferType);
-        return rp->createVertexBuffer(size, kUsage, kFlags);
-    }
+    return rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags);
 }
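This hunk is the heart of the consolidation: the two per-type factories and the BufferUsage constant collapse into one createBuffer call, with the buffer's role carried by fBufferType and the dynamic-usage hint by kDynamic_GrAccessPattern. Other access patterns would presumably be selected the same way; a hypothetical sketch, not code from this patch, assuming the GrAccessPattern enum also defines kStatic_GrAccessPattern:

    // Hypothetical: a static (write-once) index buffer through the same factory.
    GrBuffer* staticIndexBuffer =
            rp->createBuffer(kIndex_GrBufferType, size, kStatic_GrAccessPattern, kFlags);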

 ////////////////////////////////////////////////////////////////////////////////

 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+    : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
 }

 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
-                                         const GrVertexBuffer** buffer,
+                                         const GrBuffer** buffer,
                                          int* startVertex) {

     SkASSERT(vertexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startVertex);

     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
-                                     &geomBuffer,
+                                     buffer,
                                      &offset);

-    *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }

 ////////////////////////////////////////////////////////////////////////////////

 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+    : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
 }

 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
-                                        const GrIndexBuffer** buffer,
+                                        const GrBuffer** buffer,
                                         int* startIndex) {

     SkASSERT(indexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startIndex);

     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
-                                     &geomBuffer,
+                                     buffer,
                                      &offset);

-    *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
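One consequence of the new signatures, visible in both makeSpace overloads above: the GrGeometryBuffer* temporary and the downcast to GrVertexBuffer/GrIndexBuffer are gone, and callers receive the pooled GrBuffer directly. A minimal caller sketch (variable names hypothetical):

    const GrBuffer* vertexBuffer = nullptr;  // set by the pool
    int startVertex = 0;                     // first vertex within the returned buffer
    void* verts = vertexPool.makeSpace(vertexStride, vertexCount,
                                       &vertexBuffer, &startVertex);
    if (verts) {
        // Write vertexCount * vertexStride bytes into verts, then issue the
        // draw using vertexBuffer and startVertex.
    }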