Chromium Code Reviews

Unified Diff: src/gpu/GrBufferAllocPool.cpp
('-' lines show the resource-cache refactor being reverted; '+' lines restore the preallocated-buffer code.)

Issue 1204773003: Revert of Refactor GrBufferAllocPools to use resource cache (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 6 months ago
 
 /*
  * Copyright 2010 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 
 #include "GrBufferAllocPool.h"
 #include "GrCaps.h"
-#include "GrContext.h"
 #include "GrGpu.h"
 #include "GrIndexBuffer.h"
-#include "GrResourceProvider.h"
 #include "GrTypes.h"
 #include "GrVertexBuffer.h"
 
 #include "SkTraceEvent.h"
 
 #ifdef SK_DEBUG
     #define VALIDATE validate
 #else
     static void VALIDATE(bool = false) {}
 #endif
 
-static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
-static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
-
 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
 
 #define UNMAP_BUFFER(block)                                                               \
     do {                                                                                  \
         TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                       \
                              "GrBufferAllocPool Unmapping Buffer",                        \
                              TRACE_EVENT_SCOPE_THREAD,                                    \
                              "percent_unwritten",                                         \
                              (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
         (block).fBuffer->unmap();                                                         \
     } while (false)
 
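Note: UNMAP_BUFFER is wrapped in do { ... } while (false) so the multi-statement macro expands to a single statement and stays safe inside an unbraced if/else. A minimal self-contained illustration of the idiom (the LOG_AND_CLEAR macro below is hypothetical, not from Skia):

    #include <cstdio>

    // Without the do/while(false) wrapper the macro would expand to two
    // statements, and the else below would bind to the wrong if.
    #define LOG_AND_CLEAR(counter)                       \
        do {                                             \
            std::printf("clearing %d\n", (counter));     \
            (counter) = 0;                               \
        } while (false)

    int main() {
        int pending = 3;
        if (pending)
            LOG_AND_CLEAR(pending);  // expands to exactly one statement
        else
            std::printf("nothing pending\n");
        return 0;
    }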
 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
-                                     size_t blockSize)
-    : fBlocks(8) {
+                                     size_t blockSize,
+                                     int preallocBufferCnt)
+    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
 
     fGpu = SkRef(gpu);
 
     fBufferType = bufferType;
     fBufferPtr = NULL;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
 
     fBytesInUse = 0;
 
+    fPreallocBuffersInUse = 0;
+    fPreallocBufferStartIdx = 0;
+    for (int i = 0; i < preallocBufferCnt; ++i) {
+        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
+        if (buffer) {
+            *fPreallocBuffers.append() = buffer;
+        }
+    }
     fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
 }
 
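Note: the restored fPreallocBuffers array acts as a small ring. createBlock() hands out buffer (fPreallocBuffersInUse + fPreallocBufferStartIdx) % count, and reset()/putBack() advance the start index by however many buffers the cycle consumed, presumably so the next cycle does not immediately reuse buffers the GPU may still be reading. A standalone sketch of just that index arithmetic (simplified model, not the actual class):

    #include <cassert>

    // Simplified model of the pool's preallocated-buffer ring.
    struct PreallocRing {
        int count;     // fPreallocBuffers.count()
        int startIdx;  // fPreallocBufferStartIdx
        int inUse;     // fPreallocBuffersInUse

        // Mirrors createBlock(): index of the next buffer to hand out.
        int acquire() {
            assert(inUse < count);
            return (inUse++ + startIdx) % count;
        }

        // Mirrors reset()/putBack(): rotate the start past what was used.
        void releaseAll() {
            startIdx = (startIdx + inUse) % count;
            inUse = 0;
        }
    };

    int main() {
        PreallocRing ring{3, 0, 0};
        assert(ring.acquire() == 0);
        assert(ring.acquire() == 1);
        ring.releaseAll();            // startIdx becomes 2
        assert(ring.acquire() == 2);  // next cycle resumes after the last
        assert(ring.acquire() == 0);  // and wraps around
        return 0;
    }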
-void GrBufferAllocPool::deleteBlocks() {
+GrBufferAllocPool::~GrBufferAllocPool() {
+    VALIDATE();
     if (fBlocks.count()) {
         GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
             UNMAP_BUFFER(fBlocks.back());
         }
     }
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
-    SkASSERT(!fBufferPtr);
-}
-
-GrBufferAllocPool::~GrBufferAllocPool() {
-    VALIDATE();
-    this->deleteBlocks();
+    fPreallocBuffers.unrefAll();
     fGpu->unref();
 }
 
 void GrBufferAllocPool::reset() {
     VALIDATE();
     fBytesInUse = 0;
-    this->deleteBlocks();
+    if (fBlocks.count()) {
+        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+        if (buffer->isMapped()) {
+            UNMAP_BUFFER(fBlocks.back());
+        }
+    }
+    // fPreallocBuffersInUse will be decremented down to zero in the while loop
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+    while (!fBlocks.empty()) {
+        this->destroyBlock();
+    }
+    if (fPreallocBuffers.count()) {
+        // must set this after above loop.
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
+    // to match our pre-allocated VBs.
     fCpuData.reset(fMinBlockSize);
+    SkASSERT(0 == fPreallocBuffersInUse);
     VALIDATE();
 }
 
 void GrBufferAllocPool::unmap() {
     VALIDATE();
 
     if (fBufferPtr) {
         BufferBlock& block = fBlocks.back();
         if (block.fBuffer->isMapped()) {
             UNMAP_BUFFER(block);
 
(... 51 matching lines skipped ...)
                                    const GrGeometryBuffer** buffer,
                                    size_t* offset) {
     VALIDATE();
 
     SkASSERT(buffer);
     SkASSERT(offset);
 
     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = GrSizeAlignUpPad(usedBytes,
+                                      alignment);
         if ((size + pad) <= back.fBytesFree) {
             memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
             usedBytes += pad;
             *offset = usedBytes;
             *buffer = back.fBuffer;
             back.fBytesFree -= size + pad;
             fBytesInUse += size + pad;
             VALIDATE();
             return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
         }
 
(... 17 matching lines skipped ...)
 
     *buffer = back.fBuffer;
     back.fBytesFree -= size;
     fBytesInUse += size;
     VALIDATE();
     return fBufferPtr;
 }
 
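Note: GrSizeAlignUpPad(usedBytes, alignment) returns how many padding bytes bring usedBytes up to the next multiple of alignment (zero if already aligned); makeSpace() then zero-fills that pad so the new allocation starts on an aligned offset. A quick check of the arithmetic, with the helper re-implemented locally for illustration:

    #include <cassert>
    #include <cstddef>

    // Local re-implementation of the padding helper for illustration:
    // bytes needed to round x up to the next multiple of alignment.
    static size_t alignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }

    int main() {
        // 100 bytes already used, vertex stride of 12 bytes:
        assert(alignUpPad(100, 12) == 8);  // pad to 108
        assert(alignUpPad(108, 12) == 0);  // already aligned
        size_t usedBytes = 100;
        usedBytes += alignUpPad(usedBytes, 12);
        assert(usedBytes % 12 == 0);       // new allocation starts aligned
        return 0;
    }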
 void GrBufferAllocPool::putBack(size_t bytes) {
     VALIDATE();
 
+    // if the putBack unwinds all the preallocated buffers then we will
+    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
+    // will be decremented. It will reach zero if all blocks using preallocated
+    // buffers are released.
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+
     while (bytes) {
         // caller shouldn't try to put back more than they've taken
         SkASSERT(!fBlocks.empty());
         BufferBlock& block = fBlocks.back();
         size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
         if (bytes >= bytesUsed) {
             bytes -= bytesUsed;
             fBytesInUse -= bytesUsed;
             // if we locked a vb to satisfy the make space and we're releasing
             // beyond it, then unmap it.
             if (block.fBuffer->isMapped()) {
                 UNMAP_BUFFER(block);
             }
             this->destroyBlock();
         } else {
             block.fBytesFree += bytes;
             fBytesInUse -= bytes;
             bytes = 0;
             break;
         }
     }
-
+    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     VALIDATE();
 }
 
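Note: putBack() exists so a caller can reserve worst-case space up front and return the unused tail. A hypothetical caller illustrating that contract (Pool is a stand-in tracking only the byte accounting, not GrBufferAllocPool's real interface):

    #include <cassert>
    #include <cstddef>

    // Stand-in pool that models only the take/put-back accounting.
    struct Pool {
        size_t taken = 0;
        void reserve(size_t bytes) { taken += bytes; }
        void putBack(size_t bytes) {
            assert(bytes <= taken);  // can't put back more than was taken
            taken -= bytes;
        }
    };

    int main() {
        Pool pool;
        const size_t stride = 12, maxVerts = 100, actualVerts = 37;
        pool.reserve(maxVerts * stride);                  // worst case
        // ... write actualVerts * stride bytes ...
        pool.putBack((maxVerts - actualVerts) * stride);  // unwind the rest
        assert(pool.taken == actualVerts * stride);
        return 0;
    }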
 bool GrBufferAllocPool::createBlock(size_t requestSize) {
 
     size_t size = SkTMax(requestSize, fMinBlockSize);
     SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
 
     VALIDATE();
 
     BufferBlock& block = fBlocks.push_back();
 
-    block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
-        fBlocks.pop_back();
-        return false;
+    if (size == fMinBlockSize &&
+        fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+        uint32_t nextBuffer = (fPreallocBuffersInUse +
+                               fPreallocBufferStartIdx) %
+                              fPreallocBuffers.count();
+        block.fBuffer = fPreallocBuffers[nextBuffer];
+        block.fBuffer->ref();
+        ++fPreallocBuffersInUse;
+    } else {
+        block.fBuffer = this->createBuffer(size);
+        if (NULL == block.fBuffer) {
+            fBlocks.pop_back();
+            return false;
+        }
     }
 
-    block.fBytesFree = block.fBuffer->gpuMemorySize();
+    block.fBytesFree = size;
     if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
         if (prev.fBuffer->isMapped()) {
             UNMAP_BUFFER(prev);
         } else {
             this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
         fBufferPtr = NULL;
     }
 
     SkASSERT(NULL == fBufferPtr);
 
     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
     // threshold.
     bool attemptMap = block.fBuffer->isCPUBacked();
     if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
         attemptMap = size > fGeometryBufferMapThreshold;
     }
 
     if (attemptMap) {
         fBufferPtr = block.fBuffer->map();
     }
 
     if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+        fBufferPtr = fCpuData.reset(size);
     }
 
     VALIDATE(true);
 
     return true;
 }
 
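Note: the mapping heuristic in createBlock() reads as a small predicate: always map CPU-backed buffers (mapping them is free and saves a copy), otherwise map only when the backend supports mapping and the block exceeds the caps threshold. Restated as standalone code (names simplified):

    #include <cassert>
    #include <cstddef>

    // Simplified restatement of createBlock()'s decision to call map().
    // threshold corresponds to gpu->caps()->geometryBufferMapThreshold().
    static bool shouldAttemptMap(bool cpuBacked, bool canMap,
                                 size_t size, size_t threshold) {
        if (cpuBacked) {
            return true;  // CPU-backed: map() is free and saves a copy
        }
        return canMap && size > threshold;  // large blocks amortize map cost
    }

    int main() {
        const size_t kThreshold = 1 << 15;
        assert(shouldAttemptMap(true, false, 0, kThreshold));         // always map
        assert(!shouldAttemptMap(false, true, 1 << 10, kThreshold));  // stage on CPU
        assert(shouldAttemptMap(false, true, 1 << 16, kThreshold));   // map
        return 0;
    }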
 void GrBufferAllocPool::destroyBlock() {
     SkASSERT(!fBlocks.empty());
 
     BufferBlock& block = fBlocks.back();
-
+    if (fPreallocBuffersInUse > 0) {
+        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+                                       fPreallocBufferStartIdx +
+                                       (fPreallocBuffers.count() - 1)) %
+                                      fPreallocBuffers.count();
+        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+            --fPreallocBuffersInUse;
+        }
+    }
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
     fBufferPtr = NULL;
 }
 
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
     SkASSERT(fCpuData.get() == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
 
     if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
         flushSize > fGeometryBufferMapThreshold) {
         void* data = buffer->map();
         if (data) {
             memcpy(data, fBufferPtr, flushSize);
             UNMAP_BUFFER(block);
             return;
         }
     }
     buffer->updateData(fBufferPtr, flushSize);
     VALIDATE(true);
 }
 
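Note: when a block could not be mapped, writes land in the fCpuData staging buffer, and flushCpuData() pushes them to the GPU by one of two paths: map-and-memcpy for large flushes when mapping is supported, or a single updateData() call otherwise and as the fallback when map() fails. A toy model of the two paths (Buffer is a stand-in whose "GPU" storage is just a byte vector):

    #include <cassert>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Stand-in for GrGeometryBuffer with trivially mappable storage.
    struct Buffer {
        std::vector<unsigned char> storage;
        explicit Buffer(size_t n) : storage(n) {}
        void* map() { return storage.data(); }
        void unmap() {}
        void updateData(const void* src, size_t n) { std::memcpy(storage.data(), src, n); }
    };

    // Mirrors flushCpuData(): map+memcpy above the threshold, else updateData().
    static void flush(Buffer& buf, const void* cpuData, size_t flushSize,
                      bool canMap, size_t threshold) {
        if (canMap && flushSize > threshold) {
            if (void* dst = buf.map()) {
                std::memcpy(dst, cpuData, flushSize);
                buf.unmap();
                return;
            }
        }
        buf.updateData(cpuData, flushSize);  // small flush or map() failed
    }

    int main() {
        unsigned char staged[64] = {1, 2, 3};
        Buffer buf(sizeof(staged));
        flush(buf, staged, sizeof(staged), /*canMap=*/true, /*threshold=*/16);
        assert(buf.storage[2] == 3);
        return 0;
    }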
-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
-
-    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
-
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
     if (kIndex_BufferType == fBufferType) {
-        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createIndexBuffer(size, true);
     } else {
         SkASSERT(kVertex_BufferType == fBufferType);
-        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createVertexBuffer(size, true);
     }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+                                                 size_t bufferSize,
+                                                 int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kVertex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }
 
 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                          int vertexCount,
                                          const GrVertexBuffer** buffer,
                                          int* startVertex) {
 
     SkASSERT(vertexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startVertex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrVertexBuffer*) geomBuffer;
     SkASSERT(0 == offset % vertexSize);
     *startVertex = static_cast<int>(offset / vertexSize);
     return ptr;
 }
 
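Note: a hypothetical call site (not part of this CL) showing how the vertex pool is consumed: the caller receives a CPU write pointer plus the buffer and start vertex that the eventual draw must reference. Point is sketched locally to keep the example self-contained:

    // Hypothetical caller of GrVertexBufferAllocPool::makeSpace().
    struct Point { float fX, fY; };

    static void writeQuad(GrVertexBufferAllocPool* pool) {
        static const int kVertexCount = 4;
        const GrVertexBuffer* vertexBuffer = NULL;
        int startVertex = 0;
        Point* positions = static_cast<Point*>(
            pool->makeSpace(sizeof(Point), kVertexCount,
                            &vertexBuffer, &startVertex));
        if (NULL == positions) {
            return;  // pool could not allocate; skip the draw
        }
        positions[0].fX = 0; positions[0].fY = 0;
        positions[1].fX = 1; positions[1].fY = 0;
        positions[2].fX = 1; positions[2].fY = 1;
        positions[3].fX = 0; positions[3].fY = 1;
        // record the draw against (vertexBuffer, startVertex, kVertexCount)
    }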
 ////////////////////////////////////////////////////////////////////////////////
 
-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
+                                               size_t bufferSize,
+                                               int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kIndex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }
 
 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                         const GrIndexBuffer** buffer,
                                         int* startIndex) {
 
     SkASSERT(indexCount >= 0);
     SkASSERT(buffer);
     SkASSERT(startIndex);
 
     size_t offset = 0; // assign to suppress warning
     const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
                                      &offset);
 
     *buffer = (const GrIndexBuffer*) geomBuffer;
     SkASSERT(0 == offset % sizeof(uint16_t));
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }
 