Chromium Code Reviews

Side by Side Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1139753002: Refactor GrBufferAllocPools to use resource cache (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: fix merge issue | Created 5 years, 6 months ago
OLD | NEW
1 1
2 /* 2 /*
3 * Copyright 2010 Google Inc. 3 * Copyright 2010 Google Inc.
4 * 4 *
5 * Use of this source code is governed by a BSD-style license that can be 5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file. 6 * found in the LICENSE file.
7 */ 7 */
8 8
9 9
10 #include "GrBufferAllocPool.h" 10 #include "GrBufferAllocPool.h"
11 #include "GrCaps.h" 11 #include "GrCaps.h"
12 #include "GrContext.h"
12 #include "GrGpu.h" 13 #include "GrGpu.h"
13 #include "GrIndexBuffer.h" 14 #include "GrIndexBuffer.h"
15 #include "GrResourceProvider.h"
14 #include "GrTypes.h" 16 #include "GrTypes.h"
15 #include "GrVertexBuffer.h" 17 #include "GrVertexBuffer.h"
16 18
17 #include "SkTraceEvent.h" 19 #include "SkTraceEvent.h"
18 20
19 #ifdef SK_DEBUG 21 #ifdef SK_DEBUG
20 #define VALIDATE validate 22 #define VALIDATE validate
21 #else 23 #else
22 static void VALIDATE(bool = false) {} 24 static void VALIDATE(bool = false) {}
23 #endif 25 #endif
24 26
27 static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
28 static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
29
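For reference, the two new minimums above work out to 1 << 15 = 32 KiB for vertex pools and 1 << 12 = 4 KiB for index pools; they replace the caller-supplied bufferSize and preallocBufferCnt constructor arguments removed further down in this patch.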
25 // page size 30 // page size
26 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15) 31 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
27 32
28 #define UNMAP_BUFFER(block) \ 33 #define UNMAP_BUFFER(block) \
29 do { \ 34 do { \
30 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \ 35 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \
31 "GrBufferAllocPool Unmapping Buffer", \ 36 "GrBufferAllocPool Unmapping Buffer", \
32 TRACE_EVENT_SCOPE_THREAD, \ 37 TRACE_EVENT_SCOPE_THREAD, \
33 "percent_unwritten", \ 38 "percent_unwritten", \
34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ 39 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
35 (block).fBuffer->unmap(); \ 40 (block).fBuffer->unmap(); \
36 } while (false) 41 } while (false)
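
A note on the do { ... } while (false) wrapper above: it makes the multi-statement macro expand to a single statement, so it stays safe under an unbraced if. A minimal standalone sketch of the pitfall it avoids (macro names invented for illustration):

#include <cstdio>

// Two statements with no guard: broken under an unbraced if.
#define UNSAFE_PAIR() std::puts("first"); std::puts("second")
// Same body wrapped the way UNMAP_BUFFER is: behaves as one statement.
#define SAFE_PAIR() do { std::puts("first"); std::puts("second"); } while (false)

int main() {
    bool cond = false;
    if (cond) UNSAFE_PAIR();  // prints "second" anyway; only "first" was guarded
    if (cond) SAFE_PAIR();    // prints nothing, as intended
    return 0;
}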
37 42
38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, 43 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
39 BufferType bufferType, 44 BufferType bufferType,
40 size_t blockSize, 45 size_t blockSize)
41 int preallocBufferCnt) 46 : fBlocks(8) {
42 : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
43 47
44 fGpu = SkRef(gpu); 48 fGpu = SkRef(gpu);
45 49
46 fBufferType = bufferType; 50 fBufferType = bufferType;
47 fBufferPtr = NULL; 51 fBufferPtr = NULL;
48 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); 52 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
49 53
50 fBytesInUse = 0; 54 fBytesInUse = 0;
51 55
52 fPreallocBuffersInUse = 0;
53 fPreallocBufferStartIdx = 0;
54 for (int i = 0; i < preallocBufferCnt; ++i) {
55 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
56 if (buffer) {
57 *fPreallocBuffers.append() = buffer;
58 }
59 }
60 fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold(); 56 fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
61 } 57 }
62 58
63 GrBufferAllocPool::~GrBufferAllocPool() { 59 void GrBufferAllocPool::deleteBlocks() {
64 VALIDATE();
65 if (fBlocks.count()) { 60 if (fBlocks.count()) {
66 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; 61 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
67 if (buffer->isMapped()) { 62 if (buffer->isMapped()) {
68 UNMAP_BUFFER(fBlocks.back()); 63 UNMAP_BUFFER(fBlocks.back());
69 } 64 }
70 } 65 }
71 while (!fBlocks.empty()) { 66 while (!fBlocks.empty()) {
72 this->destroyBlock(); 67 this->destroyBlock();
73 } 68 }
74 fPreallocBuffers.unrefAll(); 69 SkASSERT(!fBufferPtr);
70 }
71
72 GrBufferAllocPool::~GrBufferAllocPool() {
73 VALIDATE();
74 this->deleteBlocks();
75 fGpu->unref(); 75 fGpu->unref();
76 } 76 }
77 77
78 void GrBufferAllocPool::reset() { 78 void GrBufferAllocPool::reset() {
79 VALIDATE(); 79 VALIDATE();
80 fBytesInUse = 0; 80 fBytesInUse = 0;
81 if (fBlocks.count()) { 81 this->deleteBlocks();
82 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
83 if (buffer->isMapped()) {
84 UNMAP_BUFFER(fBlocks.back());
85 }
86 }
87 // fPreallocBuffersInUse will be decremented down to zero in the while loop
88 int preallocBuffersInUse = fPreallocBuffersInUse;
89 while (!fBlocks.empty()) {
90 this->destroyBlock();
91 }
92 if (fPreallocBuffers.count()) {
93 // must set this after above loop.
94 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
95 preallocBuffersInUse) %
96 fPreallocBuffers.count();
97 }
98 // we may have created a large cpu mirror of a large VB. Reset the size 82 // we may have created a large cpu mirror of a large VB. Reset the size
99 // to match our pre-allocated VBs. 83 // to match our minimum.
100 fCpuData.reset(fMinBlockSize); 84 fCpuData.reset(fMinBlockSize);
101 SkASSERT(0 == fPreallocBuffersInUse);
102 VALIDATE(); 85 VALIDATE();
103 } 86 }
104 87
105 void GrBufferAllocPool::unmap() { 88 void GrBufferAllocPool::unmap() {
106 VALIDATE(); 89 VALIDATE();
107 90
108 if (fBufferPtr) { 91 if (fBufferPtr) {
109 BufferBlock& block = fBlocks.back(); 92 BufferBlock& block = fBlocks.back();
110 if (block.fBuffer->isMapped()) { 93 if (block.fBuffer->isMapped()) {
111 UNMAP_BUFFER(block); 94 UNMAP_BUFFER(block);
(...skipping 51 matching lines...)
163 const GrGeometryBuffer** buffer, 146 const GrGeometryBuffer** buffer,
164 size_t* offset) { 147 size_t* offset) {
165 VALIDATE(); 148 VALIDATE();
166 149
167 SkASSERT(buffer); 150 SkASSERT(buffer);
168 SkASSERT(offset); 151 SkASSERT(offset);
169 152
170 if (fBufferPtr) { 153 if (fBufferPtr) {
171 BufferBlock& back = fBlocks.back(); 154 BufferBlock& back = fBlocks.back();
172 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; 155 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
173 size_t pad = GrSizeAlignUpPad(usedBytes, 156 size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
174 alignment);
175 if ((size + pad) <= back.fBytesFree) { 157 if ((size + pad) <= back.fBytesFree) {
176 memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad); 158 memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
177 usedBytes += pad; 159 usedBytes += pad;
178 *offset = usedBytes; 160 *offset = usedBytes;
179 *buffer = back.fBuffer; 161 *buffer = back.fBuffer;
180 back.fBytesFree -= size + pad; 162 back.fBytesFree -= size + pad;
181 fBytesInUse += size + pad; 163 fBytesInUse += size + pad;
182 VALIDATE(); 164 VALIDATE();
183 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes); 165 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
184 } 166 }
(...skipping 17 matching lines...)
202 *buffer = back.fBuffer; 184 *buffer = back.fBuffer;
203 back.fBytesFree -= size; 185 back.fBytesFree -= size;
204 fBytesInUse += size; 186 fBytesInUse += size;
205 VALIDATE(); 187 VALIDATE();
206 return fBufferPtr; 188 return fBufferPtr;
207 } 189 }
208 190
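The padding logic in makeSpace() above depends on GrSizeAlignUpPad() returning the number of bytes needed to round usedBytes up to the next multiple of alignment. A self-contained sketch of that arithmetic, with alignUpPad as a hypothetical stand-in for the real helper:

#include <cassert>
#include <cstddef>

// Bytes needed to reach the next multiple of alignment (0 if already aligned).
static size_t alignUpPad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

int main() {
    // 100 bytes already used, 12-byte vertices: pad 8 bytes so the next
    // allocation starts at offset 108, a multiple of 12.
    assert(alignUpPad(100, 12) == 8);
    assert(alignUpPad(96, 12) == 0);  // already aligned, no pad
    return 0;
}
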
209 void GrBufferAllocPool::putBack(size_t bytes) { 191 void GrBufferAllocPool::putBack(size_t bytes) {
210 VALIDATE(); 192 VALIDATE();
211 193
212 // if the putBack unwinds all the preallocated buffers then we will
213 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
214 // will be decremented. I will reach zero if all blocks using preallocated
215 // buffers are released.
216 int preallocBuffersInUse = fPreallocBuffersInUse;
217
218 while (bytes) { 194 while (bytes) {
219 // caller shouldn't try to put back more than they've taken 195 // caller shouldn't try to put back more than they've taken
220 SkASSERT(!fBlocks.empty()); 196 SkASSERT(!fBlocks.empty());
221 BufferBlock& block = fBlocks.back(); 197 BufferBlock& block = fBlocks.back();
222 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; 198 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
223 if (bytes >= bytesUsed) { 199 if (bytes >= bytesUsed) {
224 bytes -= bytesUsed; 200 bytes -= bytesUsed;
225 fBytesInUse -= bytesUsed; 201 fBytesInUse -= bytesUsed;
226 // if we locked a vb to satisfy the make space and we're releasing 202 // if we locked a vb to satisfy the make space and we're releasing
227 // beyond it, then unmap it. 203 // beyond it, then unmap it.
228 if (block.fBuffer->isMapped()) { 204 if (block.fBuffer->isMapped()) {
229 UNMAP_BUFFER(block); 205 UNMAP_BUFFER(block);
230 } 206 }
231 this->destroyBlock(); 207 this->destroyBlock();
232 } else { 208 } else {
233 block.fBytesFree += bytes; 209 block.fBytesFree += bytes;
234 fBytesInUse -= bytes; 210 fBytesInUse -= bytes;
235 bytes = 0; 211 bytes = 0;
236 break; 212 break;
237 } 213 }
238 } 214 }
239 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { 215
240 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
241 preallocBuffersInUse) %
242 fPreallocBuffers.count();
243 }
244 VALIDATE(); 216 VALIDATE();
245 } 217 }
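
To make the unwind in putBack() concrete, here is a toy model of the loop with invented sizes (the real code also unmaps a mapped block before destroying it, and asserts that callers never put back more than they took):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Block { size_t capacity, bytesFree; };

static void putBackToy(std::vector<Block>& blocks, size_t bytes) {
    while (bytes) {
        Block& b = blocks.back();
        size_t used = b.capacity - b.bytesFree;
        if (bytes >= used) {      // whole block unwound: destroy it
            bytes -= used;
            blocks.pop_back();
        } else {                  // partial unwind: grow the free region
            b.bytesFree += bytes;
            bytes = 0;
        }
    }
}

int main() {
    std::vector<Block> blocks = {{4096, 1024}, {2048, 1024}};
    putBackToy(blocks, 1536);     // destroys the back block, returns 512 more
    std::printf("%zu block(s), free=%zu\n", blocks.size(), blocks[0].bytesFree);
    // prints: 1 block(s), free=1536
    return 0;
}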
246 218
247 bool GrBufferAllocPool::createBlock(size_t requestSize) { 219 bool GrBufferAllocPool::createBlock(size_t requestSize) {
248 220
249 size_t size = SkTMax(requestSize, fMinBlockSize); 221 size_t size = SkTMax(requestSize, fMinBlockSize);
250 SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE); 222 SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
251 223
252 VALIDATE(); 224 VALIDATE();
253 225
254 BufferBlock& block = fBlocks.push_back(); 226 BufferBlock& block = fBlocks.push_back();
255 227
256 if (size == fMinBlockSize && 228 block.fBuffer = this->getBuffer(size);
257 fPreallocBuffersInUse < fPreallocBuffers.count()) { 229 if (NULL == block.fBuffer) {
258 230 fBlocks.pop_back();
259 uint32_t nextBuffer = (fPreallocBuffersInUse + 231 return false;
260 fPreallocBufferStartIdx) %
261 fPreallocBuffers.count();
262 block.fBuffer = fPreallocBuffers[nextBuffer];
263 block.fBuffer->ref();
264 ++fPreallocBuffersInUse;
265 } else {
266 block.fBuffer = this->createBuffer(size);
267 if (NULL == block.fBuffer) {
268 fBlocks.pop_back();
269 return false;
270 }
271 } 232 }
272 233
273 block.fBytesFree = size; 234 block.fBytesFree = block.fBuffer->gpuMemorySize();
274 if (fBufferPtr) { 235 if (fBufferPtr) {
275 SkASSERT(fBlocks.count() > 1); 236 SkASSERT(fBlocks.count() > 1);
276 BufferBlock& prev = fBlocks.fromBack(1); 237 BufferBlock& prev = fBlocks.fromBack(1);
277 if (prev.fBuffer->isMapped()) { 238 if (prev.fBuffer->isMapped()) {
278 UNMAP_BUFFER(prev); 239 UNMAP_BUFFER(prev);
279 } else { 240 } else {
280 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); 241 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
281 } 242 }
282 fBufferPtr = NULL; 243 fBufferPtr = NULL;
283 } 244 }
284 245
285 SkASSERT(NULL == fBufferPtr); 246 SkASSERT(NULL == fBufferPtr);
286 247
287 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. 248 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
288 // Otherwise when buffer mapping is supported we map if the buffer size is greater than the 249 // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
289 // threshold. 250 // threshold.
290 bool attemptMap = block.fBuffer->isCPUBacked(); 251 bool attemptMap = block.fBuffer->isCPUBacked();
291 if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { 252 if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
292 attemptMap = size > fGeometryBufferMapThreshold; 253 attemptMap = size > fGeometryBufferMapThreshold;
293 } 254 }
294 255
295 if (attemptMap) { 256 if (attemptMap) {
296 fBufferPtr = block.fBuffer->map(); 257 fBufferPtr = block.fBuffer->map();
297 } 258 }
298 259
299 if (NULL == fBufferPtr) { 260 if (NULL == fBufferPtr) {
300 fBufferPtr = fCpuData.reset(size); 261 fBufferPtr = fCpuData.reset(block.fBytesFree);
301 } 262 }
302 263
303 VALIDATE(true); 264 VALIDATE(true);
304 265
305 return true; 266 return true;
306 } 267 }
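
The mapping decision at the end of createBlock() has three outcomes: CPU-backed buffers are always mapped (it is free and saves a copy), GPU buffers are mapped only when mapping is supported and the block exceeds the map threshold, and everything else falls back to the fCpuData mirror that flushCpuData() later uploads. A small sketch of that predicate (names invented; the real threshold comes from GrCaps):

#include <cstddef>
#include <cstdio>

static bool shouldMap(bool cpuBacked, bool mappingSupported,
                      size_t size, size_t mapThreshold) {
    if (cpuBacked) return true;                    // free, avoids a copy
    return mappingSupported && size > mapThreshold;
}

int main() {
    std::printf("%d\n", shouldMap(true,  false, 1 << 10, 1 << 15));  // 1: map
    std::printf("%d\n", shouldMap(false, true,  1 << 16, 1 << 15));  // 1: map
    std::printf("%d\n", shouldMap(false, true,  1 << 12, 1 << 15));  // 0: CPU mirror
    return 0;
}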
307 268
308 void GrBufferAllocPool::destroyBlock() { 269 void GrBufferAllocPool::destroyBlock() {
309 SkASSERT(!fBlocks.empty()); 270 SkASSERT(!fBlocks.empty());
310 271
311 BufferBlock& block = fBlocks.back(); 272 BufferBlock& block = fBlocks.back();
312 if (fPreallocBuffersInUse > 0) { 273
313 uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
314 fPreallocBufferStartIdx +
315 (fPreallocBuffers.count() - 1)) %
316 fPreallocBuffers.count();
317 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
318 --fPreallocBuffersInUse;
319 }
320 }
321 SkASSERT(!block.fBuffer->isMapped()); 274 SkASSERT(!block.fBuffer->isMapped());
322 block.fBuffer->unref(); 275 block.fBuffer->unref();
323 fBlocks.pop_back(); 276 fBlocks.pop_back();
324 fBufferPtr = NULL; 277 fBufferPtr = NULL;
325 } 278 }
326 279
327 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { 280 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
328 GrGeometryBuffer* buffer = block.fBuffer; 281 GrGeometryBuffer* buffer = block.fBuffer;
329 SkASSERT(buffer); 282 SkASSERT(buffer);
330 SkASSERT(!buffer->isMapped()); 283 SkASSERT(!buffer->isMapped());
331 SkASSERT(fCpuData.get() == fBufferPtr); 284 SkASSERT(fCpuData.get() == fBufferPtr);
332 SkASSERT(flushSize <= buffer->gpuMemorySize()); 285 SkASSERT(flushSize <= buffer->gpuMemorySize());
333 VALIDATE(true); 286 VALIDATE(true);
334 287
335 if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && 288 if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
336 flushSize > fGeometryBufferMapThreshold) { 289 flushSize > fGeometryBufferMapThreshold) {
337 void* data = buffer->map(); 290 void* data = buffer->map();
338 if (data) { 291 if (data) {
339 memcpy(data, fBufferPtr, flushSize); 292 memcpy(data, fBufferPtr, flushSize);
340 UNMAP_BUFFER(block); 293 UNMAP_BUFFER(block);
341 return; 294 return;
342 } 295 }
343 } 296 }
344 buffer->updateData(fBufferPtr, flushSize); 297 buffer->updateData(fBufferPtr, flushSize);
345 VALIDATE(true); 298 VALIDATE(true);
346 } 299 }
347 300
348 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { 301 GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
302
303 GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
304
349 if (kIndex_BufferType == fBufferType) { 305 if (kIndex_BufferType == fBufferType) {
350 return fGpu->createIndexBuffer(size, true); 306 return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
351 } else { 307 } else {
352 SkASSERT(kVertex_BufferType == fBufferType); 308 SkASSERT(kVertex_BufferType == fBufferType);
353 return fGpu->createVertexBuffer(size, true); 309 return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
354 } 310 }
355 } 311 }
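
This hunk is the heart of the CL: blocks are no longer created directly on the GrGpu but requested from the GrResourceProvider (hence the new GrContext.h and GrResourceProvider.h includes at the top of the file), which lets the resource cache recycle previously allocated buffers. The /* dynamic */ and /* duringFlush */ arguments are forwarded exactly as shown; their precise semantics are presumably documented alongside getVertexBuffer()/getIndexBuffer() in GrResourceProvider.h.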
356 312
357 //////////////////////////////////////////////////////////////////////////////// 313 ////////////////////////////////////////////////////////////////////////////////
358 314
359 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, 315 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
360 size_t bufferSize, 316 : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
361 int preallocBufferCnt)
362 : GrBufferAllocPool(gpu,
363 kVertex_BufferType,
364 bufferSize,
365 preallocBufferCnt) {
366 } 317 }
367 318
368 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, 319 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
369 int vertexCount, 320 int vertexCount,
370 const GrVertexBuffer** buffer, 321 const GrVertexBuffer** buffer,
371 int* startVertex) { 322 int* startVertex) {
372 323
373 SkASSERT(vertexCount >= 0); 324 SkASSERT(vertexCount >= 0);
374 SkASSERT(buffer); 325 SkASSERT(buffer);
375 SkASSERT(startVertex); 326 SkASSERT(startVertex);
376 327
377 size_t offset = 0; // assign to suppress warning 328 size_t offset = 0; // assign to suppress warning
378 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning 329 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
379 void* ptr = INHERITED::makeSpace(vertexSize * vertexCount, 330 void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
380 vertexSize, 331 vertexSize,
381 &geomBuffer, 332 &geomBuffer,
382 &offset); 333 &offset);
383 334
384 *buffer = (const GrVertexBuffer*) geomBuffer; 335 *buffer = (const GrVertexBuffer*) geomBuffer;
385 SkASSERT(0 == offset % vertexSize); 336 SkASSERT(0 == offset % vertexSize);
386 *startVertex = static_cast<int>(offset / vertexSize); 337 *startVertex = static_cast<int>(offset / vertexSize);
387 return ptr; 338 return ptr;
388 } 339 }
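
The offset-to-index conversion at the end of GrVertexBufferAllocPool::makeSpace() relies on the base class aligning every sub-allocation to the vertex stride. A short worked example of that arithmetic (stride and offset invented):

#include <cassert>
#include <cstddef>

int main() {
    const size_t vertexSize = 12;        // e.g. 3 floats of position
    const size_t offset     = 1440;      // byte offset handed back by makeSpace
    assert(offset % vertexSize == 0);    // guaranteed by the alignment pad
    int startVertex = static_cast<int>(offset / vertexSize);
    assert(startVertex == 120);          // the draw starts at vertex 120
    return 0;
}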
389 340
390 //////////////////////////////////////////////////////////////////////////////// 341 ////////////////////////////////////////////////////////////////////////////////
391 342
392 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, 343 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
393 size_t bufferSize, 344 : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
394 int preallocBufferCnt)
395 : GrBufferAllocPool(gpu,
396 kIndex_BufferType,
397 bufferSize,
398 preallocBufferCnt) {
399 } 345 }
400 346
401 void* GrIndexBufferAllocPool::makeSpace(int indexCount, 347 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
402 const GrIndexBuffer** buffer, 348 const GrIndexBuffer** buffer,
403 int* startIndex) { 349 int* startIndex) {
404 350
405 SkASSERT(indexCount >= 0); 351 SkASSERT(indexCount >= 0);
406 SkASSERT(buffer); 352 SkASSERT(buffer);
407 SkASSERT(startIndex); 353 SkASSERT(startIndex);
408 354
409 size_t offset = 0; // assign to suppress warning 355 size_t offset = 0; // assign to suppress warning
410 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning 356 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
411 void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t), 357 void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
412 sizeof(uint16_t), 358 sizeof(uint16_t),
413 &geomBuffer, 359 &geomBuffer,
414 &offset); 360 &offset);
415 361
416 *buffer = (const GrIndexBuffer*) geomBuffer; 362 *buffer = (const GrIndexBuffer*) geomBuffer;
417 SkASSERT(0 == offset % sizeof(uint16_t)); 363 SkASSERT(0 == offset % sizeof(uint16_t));
418 *startIndex = static_cast<int>(offset / sizeof(uint16_t)); 364 *startIndex = static_cast<int>(offset / sizeof(uint16_t));
419 return ptr; 365 return ptr;
420 } 366 }
421 367
422 368