Chromium Code Reviews

Unified Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1831133004: Revert of Consolidate GPU buffer implementations (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 4 years, 9 months ago
 
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrBufferAllocPool.h"
-#include "GrBuffer.h"
 #include "GrCaps.h"
 #include "GrContext.h"
 #include "GrGpu.h"
+#include "GrIndexBuffer.h"
 #include "GrResourceProvider.h"
 #include "GrTypes.h"
+#include "GrVertexBuffer.h"
 
 #include "SkTraceEvent.h"
 
 #ifdef SK_DEBUG
     #define VALIDATE validate
 #else
     static void VALIDATE(bool = false) {}
 #endif
 
 static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
 static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
 
 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
 
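For scale, the constants above work out to a 32 KiB floor for vertex buffers (and for pool blocks, via GrBufferAllocPool_MIN_BLOCK_SIZE) and a 4 KiB floor for index buffers. A quick arithmetic check, not code from this CL:

    // 1 << 15 == 32768 bytes == 32 KiB;  1 << 12 == 4096 bytes == 4 KiB.
    static_assert((1 << 15) == 32 * 1024, "min vertex buffer / min block size");
    static_assert((1 << 12) == 4 * 1024, "min index buffer size");
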
 #define UNMAP_BUFFER(block)                                                  \
     do {                                                                     \
         TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),          \
                              "GrBufferAllocPool Unmapping Buffer",           \
                              TRACE_EVENT_SCOPE_THREAD,                       \
                              "percent_unwritten",                            \
                              (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
         (block).fBuffer->unmap();                                            \
     } while (false)
 
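UNMAP_BUFFER follows the do { ... } while (false) idiom, which makes a multi-statement macro expand to exactly one statement. A minimal sketch of why that matters; the macros and release functions below are illustrative, not from this file:

    void releaseA();  // hypothetical
    void releaseB();  // hypothetical

    #define UNSAFE_CLEANUP() releaseA(); releaseB()
    #define SAFE_CLEANUP()   do { releaseA(); releaseB(); } while (false)

    void demo(bool error) {
        if (error)
            SAFE_CLEANUP();  // one statement: the 'if' guards both calls, and the
                             // trailing ';' still parses before a following 'else'
        // With UNSAFE_CLEANUP() here, only releaseA() would be guarded;
        // releaseB() would run unconditionally.
    }
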
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
-                                     GrBufferType bufferType,
+                                     BufferType bufferType,
                                      size_t blockSize)
     : fBlocks(8) {
 
     fGpu = SkRef(gpu);
     fCpuData = nullptr;
     fBufferType = bufferType;
     fBufferPtr = nullptr;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
 
     fBytesInUse = 0;
 
-    fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
+    fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
 }
 
 void GrBufferAllocPool::deleteBlocks() {
     if (fBlocks.count()) {
-        GrBuffer* buffer = fBlocks.back().fBuffer;
+        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
             UNMAP_BUFFER(fBlocks.back());
         }
     }
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
     SkASSERT(!fBufferPtr);
 }
 
(...skipping 30 matching lines...)
     }
     VALIDATE();
 }
 
 #ifdef SK_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
     bool wasDestroyed = false;
     if (fBufferPtr) {
         SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isMapped()) {
-            GrBuffer* buf = fBlocks.back().fBuffer;
+            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
             SkASSERT(buf->mapPtr() == fBufferPtr);
         } else {
             SkASSERT(fCpuData == fBufferPtr);
         }
     } else {
         SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
     }
     size_t bytesInUse = 0;
     for (int i = 0; i < fBlocks.count() - 1; ++i) {
         SkASSERT(!fBlocks[i].fBuffer->isMapped());
(...skipping 15 matching lines...)
                      (!fBytesInUse && (fBlocks.count() < 2)));
         } else {
             SkASSERT((0 == fBytesInUse) == fBlocks.empty());
         }
     }
 }
 #endif
 
 void* GrBufferAllocPool::makeSpace(size_t size,
                                    size_t alignment,
-                                   const GrBuffer** buffer,
+                                   const GrGeometryBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();
 
     SkASSERT(buffer);
     SkASSERT(offset);
 
     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
(...skipping 86 matching lines...)
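The last visible line of the hunk above pads the current write position up to the caller's alignment before carving out new space. Assuming GrSizeAlignUpPad uses the standard align-up formula, the arithmetic looks like this (the helper below is a sketch under that assumption, not Skia's definition):

    #include <cstddef>

    // Padding that advances 'size' to the next multiple of 'alignment'
    // (0 if already aligned).
    static size_t alignUpPad(size_t size, size_t alignment) {
        return (alignment - size % alignment) % alignment;
    }

    // Example: usedBytes = 100 with a 16-byte vertex stride gives
    //   pad = (16 - 100 % 16) % 16 = 12,
    // so the next allocation starts at offset 112, a multiple of 16.
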
         fBufferPtr = nullptr;
     }
 
     SkASSERT(!fBufferPtr);
 
     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
     // threshold.
     bool attemptMap = block.fBuffer->isCPUBacked();
     if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
-        attemptMap = size > fBufferMapThreshold;
+        attemptMap = size > fGeometryBufferMapThreshold;
     }
 
     if (attemptMap) {
         fBufferPtr = block.fBuffer->map();
     }
 
     if (!fBufferPtr) {
         fBufferPtr = this->resetCpuData(block.fBytesFree);
     }
 
(...skipping 22 matching lines...)
             fCpuData = sk_malloc_throw(newSize);
         }
     } else {
         fCpuData = nullptr;
     }
     return fCpuData;
 }
 
 
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
-    GrBuffer* buffer = block.fBuffer;
+    GrGeometryBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
 
     if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
-        flushSize > fBufferMapThreshold) {
+        flushSize > fGeometryBufferMapThreshold) {
         void* data = buffer->map();
         if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
 
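Both sides of this hunk implement the same upload policy: when mapping is supported and the flush is larger than the caps-reported threshold, map the GPU buffer and memcpy into it; otherwise, or if the map fails, fall back to updateData(). A self-contained schematic of that decision, with a hypothetical Buffer type standing in for the Skia buffer classes:

    #include <cstddef>
    #include <cstring>

    struct Buffer {                                  // hypothetical stand-in
        void* map();                                 // returns nullptr on failure
        void  unmap();
        void  updateData(const void* src, size_t n);
    };

    void flush(Buffer* buf, const void* cpuData, size_t flushSize,
               bool mappingSupported, size_t mapThreshold) {
        if (mappingSupported && flushSize > mapThreshold) {
            if (void* dst = buf->map()) {            // map + memcpy for big flushes
                std::memcpy(dst, cpuData, flushSize);
                buf->unmap();
                return;
            }
        }
        buf->updateData(cpuData, flushSize);         // small flush or map failure
    }
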
-GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
 
     GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
 
+    static const GrResourceProvider::BufferUsage kUsage = GrResourceProvider::kDynamic_BufferUsage;
     // Shouldn't have to use this flag (https://bug.skia.org/4156)
     static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
-    return rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags);
+    if (kIndex_BufferType == fBufferType) {
+        return rp->createIndexBuffer(size, kUsage, kFlags);
+    } else {
+        SkASSERT(kVertex_BufferType == fBufferType);
+        return rp->createVertexBuffer(size, kUsage, kFlags);
+    }
 }
 
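This hunk is the heart of the revert: the consolidated rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags) call is split back into type-specific createIndexBuffer()/createVertexBuffer() calls, with the dynamic usage hint expressed as a GrResourceProvider::BufferUsage. In a GL backend, a dynamic-usage vertex buffer of either vintage typically amounts to something like the raw allocation below; this is an analogy only, not Skia's backend code, and the header needed for glGenBuffers varies by platform:

    #include <GL/gl.h>  // platform-dependent; glGenBuffers requires GL 1.5+

    GLuint makeDynamicVertexBuffer(GLsizeiptr size) {
        GLuint id = 0;
        glGenBuffers(1, &id);
        glBindBuffer(GL_ARRAY_BUFFER, id);
        // GL_DYNAMIC_DRAW: contents respecified repeatedly, used for drawing.
        glBufferData(GL_ARRAY_BUFFER, size, nullptr, GL_DYNAMIC_DRAW);
        return id;
    }
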
 ////////////////////////////////////////////////////////////////////////////////
 
 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
+    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
 }
 
 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
-                                         const GrBuffer** buffer,
+                                         const GrVertexBuffer** buffer,
                                          int* startVertex) {
 
     SkASSERT(vertexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startVertex);
 
     size_t offset = 0; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
-                                     buffer,
+                                     &geomBuffer,
                                      &offset);
 
+    *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }
 
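For context on how this function is consumed on either side of the diff: a caller requests vertexCount vertices of a given stride, writes through the returned pointer, and later draws from (buffer, startVertex). A hedged sketch of a typical call site; uploadQuad, its pool parameter, and the Vertex layout are invented for illustration:

    #include "GrBufferAllocPool.h"

    struct Vertex { float x, y; };            // illustrative 8-byte vertex

    void uploadQuad(GrVertexBufferAllocPool* pool) {
        const GrVertexBuffer* vb = nullptr;   // pre-revert: const GrBuffer*
        int startVertex = 0;
        void* space = pool->makeSpace(sizeof(Vertex), 4, &vb, &startVertex);
        if (space) {
            Vertex* v = static_cast<Vertex*>(space);
            v[0] = {0, 0}; v[1] = {1, 0}; v[2] = {0, 1}; v[3] = {1, 1};
            // Draw using (vb, startVertex, 4); the draw call lives outside this file.
        }
    }
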
 ////////////////////////////////////////////////////////////////////////////////
 
 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
+    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
 }
 
 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
-                                        const GrBuffer** buffer,
+                                        const GrIndexBuffer** buffer,
                                         int* startIndex) {
 
     SkASSERT(indexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startIndex);
 
     size_t offset = 0; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
-                                     buffer,
+                                     &geomBuffer,
                                      &offset);
 
+    *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
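The closing lines of both makeSpace overloads convert the pool's byte offset into an element index by plain division, guarded by the alignment assert. A worked example for the 16-bit index case:

    #include <cstdint>
    #include <cstddef>

    // A byte offset of 1024 in a 16-bit index allocation means the caller's
    // indices start at element 1024 / sizeof(uint16_t) = 512.
    constexpr size_t kOffset = 1024;
    constexpr int kStartIndex = static_cast<int>(kOffset / sizeof(uint16_t));
    static_assert(kStartIndex == 512, "byte offset 1024 -> start index 512");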