Chromium Code Reviews

Unified Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1129863008: Revert of Refactor GrBufferAllocPools to use resource cache (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 7 months ago
--- src/gpu/GrBufferAllocPool.cpp (old)
+++ src/gpu/GrBufferAllocPool.cpp (new)

 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */


 #include "GrBufferAllocPool.h"
 #include "GrDrawTargetCaps.h"
 #include "GrGpu.h"
 #include "GrIndexBuffer.h"
-#include "GrResourceProvider.h"
 #include "GrTypes.h"
 #include "GrVertexBuffer.h"

 #include "SkTraceEvent.h"

 #ifdef SK_DEBUG
     #define VALIDATE validate
 #else
     static void VALIDATE(bool = false) {}
 #endif

-static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
-static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
-
 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

 #define UNMAP_BUFFER(block)                                                        \
     do {                                                                           \
         TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                \
                              "GrBufferAllocPool Unmapping Buffer",                 \
                              TRACE_EVENT_SCOPE_THREAD,                             \
                              "percent_unwritten",                                  \
                              (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
         (block).fBuffer->unmap();                                                  \
     } while (false)
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
-                                     size_t blockSize)
-    : fBlocks(8) {
+                                     size_t blockSize,
+                                     int preallocBufferCnt)
+    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {

     fGpu = SkRef(gpu);

     fBufferType = bufferType;
     fBufferPtr = NULL;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

     fBytesInUse = 0;
+
+    fPreallocBuffersInUse = 0;
+    fPreallocBufferStartIdx = 0;
+    for (int i = 0; i < preallocBufferCnt; ++i) {
+        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
+        if (buffer) {
+            *fPreallocBuffers.append() = buffer;
+        }
+    }
 }
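This revert reintroduces up-front buffer creation: the pool makes preallocBufferCnt buffers of fMinBlockSize immediately and recycles them before creating anything on demand. A standalone model of just the fBlocks sizing rule above, with an illustrative count:

    #include <algorithm>
    #include <cstdio>

    int main() {
        // Illustrative preallocation count; fBlocks reserves room for twice as
        // many block records, but never fewer than 8 (the SkTMax above).
        int preallocBufferCnt = 6;
        int blockStorage = std::max(8, 2 * preallocBufferCnt);
        printf("initial block storage: %d\n", blockStorage);  // prints 12
        return 0;
    }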

-void GrBufferAllocPool::deleteBlocks() {
+GrBufferAllocPool::~GrBufferAllocPool() {
+    VALIDATE();
     if (fBlocks.count()) {
         GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
             UNMAP_BUFFER(fBlocks.back());
         }
     }
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
-    SkASSERT(!fBufferPtr);
-}
-
-GrBufferAllocPool::~GrBufferAllocPool() {
-    VALIDATE();
-    this->deleteBlocks();
+    fPreallocBuffers.unrefAll();
     fGpu->unref();
 }

 void GrBufferAllocPool::reset() {
     VALIDATE();
     fBytesInUse = 0;
-    this->deleteBlocks();
+    if (fBlocks.count()) {
+        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+        if (buffer->isMapped()) {
+            UNMAP_BUFFER(fBlocks.back());
+        }
+    }
+    // fPreallocBuffersInUse will be decremented down to zero in the while loop
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+    while (!fBlocks.empty()) {
+        this->destroyBlock();
+    }
+    if (fPreallocBuffers.count()) {
+        // must be set after the loop above.
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
+    // to match our pre-allocated VBs.
     fCpuData.reset(fMinBlockSize);
+    SkASSERT(0 == fPreallocBuffersInUse);
     VALIDATE();
 }

 void GrBufferAllocPool::unmap() {
     VALIDATE();

     if (fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isMapped()) {
             UNMAP_BUFFER(block);
(... 51 matching lines skipped ...)
                                    const GrGeometryBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();

     SkASSERT(buffer);
     SkASSERT(offset);

     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = GrSizeAlignUpPad(usedBytes,
+                                      alignment);
         if ((size + pad) <= back.fBytesFree) {
             memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
             usedBytes += pad;
             *offset = usedBytes;
             *buffer = back.fBuffer;
             back.fBytesFree -= size + pad;
             fBytesInUse += size + pad;
             VALIDATE();
             return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
         }
(... 17 matching lines skipped ...)
     *buffer = back.fBuffer;
     back.fBytesFree -= size;
     fBytesInUse += size;
     VALIDATE();
     return fBufferPtr;
 }
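makeSpace aligns each suballocation by zero-filling a pad so the returned offset is a multiple of `alignment`. A standalone model of that arithmetic; `alignUpPad` here mirrors what GrSizeAlignUpPad is expected to compute (the byte values are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Pad needed to round x up to the next multiple of alignment.
    static size_t alignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }

    int main() {
        size_t usedBytes = 100;  // bytes already handed out from the block
        size_t alignment = 12;   // e.g. a 12-byte vertex stride
        size_t pad = alignUpPad(usedBytes, alignment);
        assert((usedBytes + pad) % alignment == 0);
        printf("pad = %zu, aligned offset = %zu\n", pad, usedBytes + pad);  // 8, 108
        return 0;
    }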

 void GrBufferAllocPool::putBack(size_t bytes) {
     VALIDATE();

+    // if the putBack unwinds all the preallocated buffers then we will
+    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
+    // will be decremented. It will reach zero if all blocks using preallocated
+    // buffers are released.
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+
     while (bytes) {
         // caller shouldn't try to put back more than they've taken
         SkASSERT(!fBlocks.empty());
         BufferBlock& block = fBlocks.back();
         size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
         if (bytes >= bytesUsed) {
             bytes -= bytesUsed;
             fBytesInUse -= bytesUsed;
             // if we mapped a vb to satisfy the makeSpace and we're releasing
             // beyond it, then unmap it.
             if (block.fBuffer->isMapped()) {
                 UNMAP_BUFFER(block);
             }
             this->destroyBlock();
         } else {
             block.fBytesFree += bytes;
             fBytesInUse -= bytes;
             bytes = 0;
             break;
         }
     }
+    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     VALIDATE();
 }
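The index bookkeeping restored here treats fPreallocBuffers as a ring: once every preallocated buffer has been released, the starting slot advances past the ones just used so successive frames cycle through different buffers. A standalone model of the rotation, with illustrative counts:

    #include <cstdio>

    int main() {
        int preallocBufferCnt      = 4;  // size of the fPreallocBuffers ring
        int preallocBufferStartIdx = 1;  // slot this frame started handing out from
        int preallocBuffersInUse   = 3;  // buffers handed out before the unwind

        // Same arithmetic as putBack() and reset(): advance the start past the
        // buffers that were used, wrapping around the ring.
        preallocBufferStartIdx = (preallocBufferStartIdx + preallocBuffersInUse) %
                                 preallocBufferCnt;
        printf("next start slot: %d\n", preallocBufferStartIdx);  // prints 0
        return 0;
    }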

 bool GrBufferAllocPool::createBlock(size_t requestSize) {

     size_t size = SkTMax(requestSize, fMinBlockSize);
     SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

     VALIDATE();

     BufferBlock& block = fBlocks.push_back();

-    block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
-        fBlocks.pop_back();
-        return false;
+    if (size == fMinBlockSize &&
+        fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+        uint32_t nextBuffer = (fPreallocBuffersInUse +
+                               fPreallocBufferStartIdx) %
+                              fPreallocBuffers.count();
+        block.fBuffer = fPreallocBuffers[nextBuffer];
+        block.fBuffer->ref();
+        ++fPreallocBuffersInUse;
+    } else {
+        block.fBuffer = this->createBuffer(size);
+        if (NULL == block.fBuffer) {
+            fBlocks.pop_back();
+            return false;
+        }
     }

-    block.fBytesFree = block.fBuffer->gpuMemorySize();
+    block.fBytesFree = size;
     if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isMapped()) {
             UNMAP_BUFFER(prev);
         } else {
             this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
         fBufferPtr = NULL;
     }

     SkASSERT(NULL == fBufferPtr);

     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than
     // the threshold.
     bool attemptMap = block.fBuffer->isCPUBacked();
     if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
-        attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD;
+        attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
     }

     if (attemptMap) {
         fBufferPtr = block.fBuffer->map();
     }

     if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+        fBufferPtr = fCpuData.reset(size);
     }

     VALIDATE(true);

     return true;
 }
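When the requested block is exactly fMinBlockSize and unused preallocated buffers remain, the branch above walks the ring instead of allocating. A standalone model showing which slots successive blocks would receive, with illustrative counts:

    #include <cstdio>

    int main() {
        int preallocBufferCnt      = 4;
        int preallocBufferStartIdx = 2;  // where this frame's ring walk begins

        // Mirror createBlock(): the i-th preallocated block takes slot
        // (i + startIdx) mod count.
        for (int inUse = 0; inUse < preallocBufferCnt; ++inUse) {
            int nextBuffer = (inUse + preallocBufferStartIdx) % preallocBufferCnt;
            printf("block %d -> prealloc slot %d\n", inUse, nextBuffer);
        }
        // Prints slots 2, 3, 0, 1.
        return 0;
    }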

 void GrBufferAllocPool::destroyBlock() {
     SkASSERT(!fBlocks.empty());

     BufferBlock& block = fBlocks.back();
+    if (fPreallocBuffersInUse > 0) {
+        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+                                       fPreallocBufferStartIdx +
+                                       (fPreallocBuffers.count() - 1)) %
+                                      fPreallocBuffers.count();
+        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+            --fPreallocBuffersInUse;
+        }
+    }
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
     fBufferPtr = NULL;
 }
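The index expression above recovers the slot of the most recently handed-out preallocated buffer: adding count - 1 and taking the remainder subtracts one without ever going negative. A standalone model with illustrative counts:

    #include <cstdio>

    int main() {
        int preallocBufferCnt      = 4;
        int preallocBufferStartIdx = 2;
        int preallocBuffersInUse   = 3;  // slots 2, 3, 0 are currently out

        // destroyBlock()'s arithmetic: equivalent to
        // (preallocBuffersInUse - 1 + preallocBufferStartIdx) mod count.
        int prevPreallocBuffer = (preallocBuffersInUse + preallocBufferStartIdx +
                                  (preallocBufferCnt - 1)) % preallocBufferCnt;
        printf("most recent slot: %d\n", prevPreallocBuffer);  // prints 0
        return 0;
    }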

 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData.get() == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);

     if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
         flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
         void* data = buffer->map();
         if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
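flushCpuData prefers map-plus-memcpy only when mapping is supported and the flush is large enough to beat updateData. A standalone model of that decision; the threshold value and capability flag are illustrative stand-ins for GR_GEOM_BUFFER_MAP_THRESHOLD and the caps query:

    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t kMapThreshold = 1 << 15;  // stand-in for GR_GEOM_BUFFER_MAP_THRESHOLD
        bool mapSupported = true;              // stand-in for caps()->mapBufferFlags()

        size_t flushSize = 48 * 1024;
        bool useMap = mapSupported && flushSize > kMapThreshold;
        printf("flush via %s\n", useMap ? "map + memcpy" : "updateData");  // map + memcpy
        return 0;
    }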

-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
-
-    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
-
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
     if (kIndex_BufferType == fBufferType) {
-        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createIndexBuffer(size, true);
     } else {
         SkASSERT(kVertex_BufferType == fBufferType);
-        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createVertexBuffer(size, true);
     }
 }

 ////////////////////////////////////////////////////////////////////////////////

-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+                                                 size_t bufferSize,
+                                                 int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kVertex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }

 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
                                          const GrVertexBuffer** buffer,
                                          int* startVertex) {

     SkASSERT(vertexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startVertex);

     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
                                      &offset);

     *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }
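Because the base-class makeSpace aligned the suballocation to vertexSize, the byte offset always divides evenly into a vertex index. A standalone model of that conversion, with illustrative numbers:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    int main() {
        // Hypothetical request: vertices of 12 bytes each, placed at byte
        // offset 480 within the backing vertex buffer.
        size_t vertexSize = 12;
        size_t offset     = 480;

        // Same conversion as GrVertexBufferAllocPool::makeSpace().
        assert(0 == offset % vertexSize);
        int startVertex = static_cast<int>(offset / vertexSize);
        printf("startVertex = %d\n", startVertex);  // prints 40
        return 0;
    }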

 ////////////////////////////////////////////////////////////////////////////////

-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
+                                               size_t bufferSize,
+                                               int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kIndex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }

 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         const GrIndexBuffer** buffer,
                                         int* startIndex) {

     SkASSERT(indexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startIndex);

     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
                                      &offset);

     *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
