Chromium Code Reviews

Unified Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1300123002: Use calloc to allocate data that will be uploaded to vertex/index buffers in Chrome (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Address comment (created 5 years, 4 months ago)
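Editorial note for readers skimming the review: the patch swaps the pool's RAII-managed fCpuData (note the fCpuData.get() / fCpuData.reset() calls on the old side of the diff) for a raw pointer plus a resetCpuData() helper, so the CPU-side staging block can be zero-initialized with sk_calloc whenever GrCaps::mustClearUploadedBufferData() is set. Zeroing matters because the pool can flush more bytes than callers actually wrote: flushSize is computed as the block size minus bytes free, and alignment padding between makeSpace() allocations may never be written, so a plain malloc could leak stale heap contents into a vertex/index buffer upload. A minimal sketch of that policy, using hypothetical stand-ins for Skia's allocators:

    #include <cstdlib>

    // Hypothetical stand-in for the sk_calloc / sk_malloc_throw choice made in
    // GrBufferAllocPool::resetCpuData() below; 'mustClear' plays the role of
    // GrCaps::mustClearUploadedBufferData(). Not Skia API.
    static void* allocUploadMem(std::size_t size, bool mustClear) {
        if (size == 0) {
            return nullptr;  // mirrors the newSize == 0 branch in the patch
        }
        // Zero the block when the backend must not see uninitialized memory,
        // e.g. unwritten alignment padding that gets flushed along with real data.
        return mustClear ? std::calloc(1, size) : std::malloc(size);
    }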
 
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrBufferAllocPool.h"
(...skipping 28 matching lines...)
                      (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
         (block).fBuffer->unmap();                                                      \
     } while (false)
 
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
                                      size_t blockSize)
     : fBlocks(8) {
 
     fGpu = SkRef(gpu);
-
+    fCpuData = nullptr;
     fBufferType = bufferType;
-    fBufferPtr = NULL;
+    fBufferPtr = nullptr;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
 
     fBytesInUse = 0;
 
     fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
 }
 
 void GrBufferAllocPool::deleteBlocks() {
     if (fBlocks.count()) {
         GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
             UNMAP_BUFFER(fBlocks.back());
         }
     }
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
     SkASSERT(!fBufferPtr);
 }
 
 GrBufferAllocPool::~GrBufferAllocPool() {
     VALIDATE();
     this->deleteBlocks();
+    sk_free(fCpuData);
     fGpu->unref();
 }
 
 void GrBufferAllocPool::reset() {
     VALIDATE();
     fBytesInUse = 0;
     this->deleteBlocks();
-    // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
-    fCpuData.reset(fMinBlockSize);
+
+    // we may have created a large cpu mirror of a large VB. Reset the size to match our minimum.
+    this->resetCpuData(fMinBlockSize);
+
     VALIDATE();
 }
 
 void GrBufferAllocPool::unmap() {
     VALIDATE();
 
     if (fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isMapped()) {
             UNMAP_BUFFER(block);
         } else {
             size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
             this->flushCpuData(fBlocks.back(), flushSize);
         }
-        fBufferPtr = NULL;
+        fBufferPtr = nullptr;
     }
     VALIDATE();
 }
 
 #ifdef SK_DEBUG
 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
     bool wasDestroyed = false;
     if (fBufferPtr) {
         SkASSERT(!fBlocks.empty());
         if (fBlocks.back().fBuffer->isMapped()) {
             GrGeometryBuffer* buf = fBlocks.back().fBuffer;
             SkASSERT(buf->mapPtr() == fBufferPtr);
         } else {
-            SkASSERT(fCpuData.get() == fBufferPtr);
+            SkASSERT(fCpuData == fBufferPtr);
         }
     } else {
         SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
     }
     size_t bytesInUse = 0;
     for (int i = 0; i < fBlocks.count() - 1; ++i) {
         SkASSERT(!fBlocks[i].fBuffer->isMapped());
     }
     for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
         if (fBlocks[i].fBuffer->wasDestroyed()) {
(...skipping 44 matching lines...)
 
     // We could honor the space request using a partial update of the current
     // VB (if there is room). But we don't currently use draw calls to GL that
     // allow the driver to know that previously issued draws won't read from
     // the part of the buffer we update. Also, the GL buffer implementation
     // may be cheating on the actual buffer size by shrinking the buffer on
     // updateData() if the amount of data passed is less than the full buffer
     // size.
 
     if (!this->createBlock(size)) {
-        return NULL;
+        return nullptr;
     }
     SkASSERT(fBufferPtr);
 
     *offset = 0;
     BufferBlock& back = fBlocks.back();
     *buffer = back.fBuffer;
     back.fBytesFree -= size;
     fBytesInUse += size;
     VALIDATE();
     return fBufferPtr;
(...skipping 30 matching lines...)
 bool GrBufferAllocPool::createBlock(size_t requestSize) {
 
     size_t size = SkTMax(requestSize, fMinBlockSize);
     SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
 
     VALIDATE();
 
     BufferBlock& block = fBlocks.push_back();
 
     block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
+    if (!block.fBuffer) {
         fBlocks.pop_back();
         return false;
     }
 
     block.fBytesFree = block.fBuffer->gpuMemorySize();
     if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isMapped()) {
             UNMAP_BUFFER(prev);
         } else {
             this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
-        fBufferPtr = NULL;
+        fBufferPtr = nullptr;
     }
 
-    SkASSERT(NULL == fBufferPtr);
+    SkASSERT(!fBufferPtr);
 
     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
     // threshold.
     bool attemptMap = block.fBuffer->isCPUBacked();
     if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
         attemptMap = size > fGeometryBufferMapThreshold;
     }
 
     if (attemptMap) {
         fBufferPtr = block.fBuffer->map();
     }
 
-    if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+    if (!fBufferPtr) {
+        fBufferPtr = this->resetCpuData(block.fBytesFree);
     }
 
     VALIDATE(true);
 
     return true;
 }
 
 void GrBufferAllocPool::destroyBlock() {
     SkASSERT(!fBlocks.empty());
 
     BufferBlock& block = fBlocks.back();
 
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
-    fBufferPtr = NULL;
+    fBufferPtr = nullptr;
 }
 
+void* GrBufferAllocPool::resetCpuData(size_t newSize) {
+    sk_free(fCpuData);
+    if (newSize) {
+        if (fGpu->caps()->mustClearUploadedBufferData()) {
+            fCpuData = sk_calloc(newSize);
+        } else {
+            fCpuData = sk_malloc_throw(newSize);
+        }
+    } else {
+        fCpuData = nullptr;
+    }
+    return fCpuData;
+}
+
+
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
-    SkASSERT(fCpuData.get() == fBufferPtr);
+    SkASSERT(fCpuData == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
 
     if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
         flushSize > fGeometryBufferMapThreshold) {
         void* data = buffer->map();
         if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
(...skipping 27 matching lines...)
 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
                                          const GrVertexBuffer** buffer,
                                          int* startVertex) {
 
     SkASSERT(vertexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startVertex);
 
     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
     : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
 }
 
 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         const GrIndexBuffer** buffer,
                                         int* startIndex) {
 
     SkASSERT(indexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startIndex);
 
     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
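Design note (editorial): createBlock() above tries to map the new GPU buffer and only falls back to the CPU mirror allocated by resetCpuData(). Per the comment in the diff, CPU-backed buffers are always mapped because doing so is free and saves a copy; otherwise a map is attempted only when the backend reports map support and the block exceeds fGeometryBufferMapThreshold. A condensed sketch of that decision, where the helper itself is hypothetical but the inputs mirror the diff's caps queries:

    #include <cstddef>

    // Hypothetical condensation of the mapping decision in
    // GrBufferAllocPool::createBlock(); not Skia API.
    static bool shouldAttemptMap(bool isCpuBacked, bool canMapBuffers,
                                 std::size_t blockSize, std::size_t mapThreshold) {
        if (isCpuBacked) {
            return true;  // mapping a CPU-backed buffer is free and saves a copy
        }
        // For a GPU-backed buffer, map only when the driver supports mapping and
        // the block is large enough that mapping beats streaming the data in.
        return canMapBuffers && blockSize > mapThreshold;
    }

flushCpuData() applies the same threshold on the way out: flushes larger than the threshold go through map() + memcpy + unmap, while smaller ones take the buffer-update path in the elided lines (the comment in makeSpace() suggests that path is updateData()).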