Chromium Code Reviews

Side by Side Diff: src/gpu/GrBufferAllocPool.cpp

Issue 1139753002: Refactor GrBufferAllocPools to use resource cache (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: clean up Created 5 years, 7 months ago
1 1
2 /* 2 /*
3 * Copyright 2010 Google Inc. 3 * Copyright 2010 Google Inc.
4 * 4 *
5 * Use of this source code is governed by a BSD-style license that can be 5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file. 6 * found in the LICENSE file.
7 */ 7 */
8 8
9 9
10 #include "GrBufferAllocPool.h" 10 #include "GrBufferAllocPool.h"
11 #include "GrDrawTargetCaps.h" 11 #include "GrDrawTargetCaps.h"
12 #include "GrGpu.h" 12 #include "GrGpu.h"
13 #include "GrIndexBuffer.h" 13 #include "GrIndexBuffer.h"
14 #include "GrResourceProvider.h"
14 #include "GrTypes.h" 15 #include "GrTypes.h"
15 #include "GrVertexBuffer.h" 16 #include "GrVertexBuffer.h"
16 17
17 #include "SkTraceEvent.h" 18 #include "SkTraceEvent.h"
18 19
19 #ifdef SK_DEBUG 20 #ifdef SK_DEBUG
20 #define VALIDATE validate 21 #define VALIDATE validate
21 #else 22 #else
22 static void VALIDATE(bool = false) {} 23 static void VALIDATE(bool = false) {}
23 #endif 24 #endif
24 25
26 static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
27 static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
28
25 // page size 29 // page size
26 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12) 30 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
27 31
28 #define UNMAP_BUFFER(block) \ 32 #define UNMAP_BUFFER(block) \
29 do { \ 33 do { \
30 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \ 34 TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), \
31 "GrBufferAllocPool Unmapping Buffer", \ 35 "GrBufferAllocPool Unmapping Buffer", \
32 TRACE_EVENT_SCOPE_THREAD, \ 36 TRACE_EVENT_SCOPE_THREAD, \
33 "percent_unwritten", \ 37 "percent_unwritten", \
34 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ 38 (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
35 (block).fBuffer->unmap(); \ 39 (block).fBuffer->unmap(); \
36 } while (false) 40 } while (false)
37 41
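A note on the UNMAP_BUFFER macro above: wrapping the body in do { ... } while (false) makes the multi-statement macro expand to a single statement, so it composes correctly with if/else and requires the usual trailing semicolon. A minimal standalone sketch of the same idiom, using a hypothetical LOG_AND_CLEAR macro that is not part of this CL:

    #include <cstdio>

    // Hypothetical macro showing why the do { ... } while (false) wrapper
    // matters: the whole body behaves as one statement at the call site.
    #define LOG_AND_CLEAR(x)              \
        do {                              \
            printf("clearing %d\n", (x)); \
            (x) = 0;                      \
        } while (false)

    int main() {
        int v = 7;
        if (v > 0)
            LOG_AND_CLEAR(v);  // expands to a single statement; the
        else                   // else still pairs with the right if
            printf("nothing to clear\n");
        return v;  // 0
    }

Without the wrapper, the two statements would escape the if, and the else above would fail to compile.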
38 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, 42 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
39 BufferType bufferType, 43 BufferType bufferType,
40 size_t blockSize, 44 size_t blockSize)
41 int preallocBufferCnt) 45 : fBlocks(8) {
42 : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
43 46
44 fGpu = SkRef(gpu); 47 fGpu = SkRef(gpu);
45 48
46 fBufferType = bufferType; 49 fBufferType = bufferType;
47 fBufferPtr = NULL; 50 fBufferPtr = NULL;
48 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize); 51 fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
49 52
50 fBytesInUse = 0; 53 fBytesInUse = 0;
51
52 fPreallocBuffersInUse = 0;
53 fPreallocBufferStartIdx = 0;
54 for (int i = 0; i < preallocBufferCnt; ++i) {
55 GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
56 if (buffer) {
57 *fPreallocBuffers.append() = buffer;
58 }
59 }
60 } 54 }
61 55
62 GrBufferAllocPool::~GrBufferAllocPool() { 56 void GrBufferAllocPool::deleteBlocks() {
63 VALIDATE();
64 if (fBlocks.count()) { 57 if (fBlocks.count()) {
65 GrGeometryBuffer* buffer = fBlocks.back().fBuffer; 58 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
66 if (buffer->isMapped()) { 59 if (buffer->isMapped()) {
67 UNMAP_BUFFER(fBlocks.back()); 60 UNMAP_BUFFER(fBlocks.back());
68 } 61 }
69 } 62 }
70 while (!fBlocks.empty()) { 63 while (!fBlocks.empty()) {
71 this->destroyBlock(); 64 this->destroyBlock();
72 } 65 }
73 fPreallocBuffers.unrefAll(); 66 SkASSERT(!fBufferPtr);
67 }
68
69 GrBufferAllocPool::~GrBufferAllocPool() {
70 VALIDATE();
71 this->deleteBlocks();
74 fGpu->unref(); 72 fGpu->unref();
75 } 73 }
76 74
77 void GrBufferAllocPool::reset() { 75 void GrBufferAllocPool::reset() {
78 VALIDATE(); 76 VALIDATE();
79 fBytesInUse = 0; 77 fBytesInUse = 0;
80 if (fBlocks.count()) { 78 this->deleteBlocks();
81 GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
82 if (buffer->isMapped()) {
83 UNMAP_BUFFER(fBlocks.back());
84 }
85 }
86 // fPreallocBuffersInUse will be decremented down to zero in the while loop
87 int preallocBuffersInUse = fPreallocBuffersInUse;
88 while (!fBlocks.empty()) {
89 this->destroyBlock();
90 }
91 if (fPreallocBuffers.count()) {
92 // must set this after above loop.
93 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
94 preallocBuffersInUse) %
95 fPreallocBuffers.count();
96 }
97 // we may have created a large cpu mirror of a large VB. Reset the size 79 // we may have created a large cpu mirror of a large VB. Reset the size
98 // to match our pre-allocated VBs. 80 // to match our minimum.
99 fCpuData.reset(fMinBlockSize); 81 fCpuData.reset(fMinBlockSize);
100 SkASSERT(0 == fPreallocBuffersInUse);
101 VALIDATE(); 82 VALIDATE();
102 } 83 }
103 84
104 void GrBufferAllocPool::unmap() { 85 void GrBufferAllocPool::unmap() {
105 VALIDATE(); 86 VALIDATE();
106 87
107 if (fBufferPtr) { 88 if (fBufferPtr) {
108 BufferBlock& block = fBlocks.back(); 89 BufferBlock& block = fBlocks.back();
109 if (block.fBuffer->isMapped()) { 90 if (block.fBuffer->isMapped()) {
110 UNMAP_BUFFER(block); 91 UNMAP_BUFFER(block);
(...skipping 51 matching lines...)
162 const GrGeometryBuffer** buffer, 143 const GrGeometryBuffer** buffer,
163 size_t* offset) { 144 size_t* offset) {
164 VALIDATE(); 145 VALIDATE();
165 146
166 SkASSERT(buffer); 147 SkASSERT(buffer);
167 SkASSERT(offset); 148 SkASSERT(offset);
168 149
169 if (fBufferPtr) { 150 if (fBufferPtr) {
170 BufferBlock& back = fBlocks.back(); 151 BufferBlock& back = fBlocks.back();
171 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; 152 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
172 size_t pad = GrSizeAlignUpPad(usedBytes, 153 size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
173 alignment);
174 if ((size + pad) <= back.fBytesFree) { 154 if ((size + pad) <= back.fBytesFree) {
175 memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad); 155 memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
176 usedBytes += pad; 156 usedBytes += pad;
177 *offset = usedBytes; 157 *offset = usedBytes;
178 *buffer = back.fBuffer; 158 *buffer = back.fBuffer;
179 back.fBytesFree -= size + pad; 159 back.fBytesFree -= size + pad;
180 fBytesInUse += size + pad; 160 fBytesInUse += size + pad;
181 VALIDATE(); 161 VALIDATE();
182 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes); 162 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
183 } 163 }
(...skipping 17 matching lines...)
201 *buffer = back.fBuffer; 181 *buffer = back.fBuffer;
202 back.fBytesFree -= size; 182 back.fBytesFree -= size;
203 fBytesInUse += size; 183 fBytesInUse += size;
204 VALIDATE(); 184 VALIDATE();
205 return fBufferPtr; 185 return fBufferPtr;
206 } 186 }
207 187
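For readers following the bookkeeping in makeSpace() above: GrSizeAlignUpPad(usedBytes, alignment) returns the number of padding bytes needed so the next write starts on an alignment boundary; the pad is zeroed and counted as used. A sketch of the arithmetic, assuming the canonical align-up-pad definition (the real helper lives in GrTypes.h):

    #include <cstddef>

    // Padding needed to round x up to the next multiple of alignment.
    // Assumed equivalent of GrSizeAlignUpPad. For example,
    // alignUpPad(20, 16) == 12: 20 used bytes are padded to 32 before
    // the next 16-byte-aligned allocation is written.
    static size_t alignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }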
208 void GrBufferAllocPool::putBack(size_t bytes) { 188 void GrBufferAllocPool::putBack(size_t bytes) {
209 VALIDATE(); 189 VALIDATE();
210 190
211 // if the putBack unwinds all the preallocated buffers then we will
212 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
213 // will be decremented. It will reach zero if all blocks using preallocated
214 // buffers are released.
215 int preallocBuffersInUse = fPreallocBuffersInUse;
216
217 while (bytes) { 191 while (bytes) {
218 // caller shouldn't try to put back more than they've taken 192 // caller shouldn't try to put back more than they've taken
219 SkASSERT(!fBlocks.empty()); 193 SkASSERT(!fBlocks.empty());
220 BufferBlock& block = fBlocks.back(); 194 BufferBlock& block = fBlocks.back();
221 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; 195 size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
222 if (bytes >= bytesUsed) { 196 if (bytes >= bytesUsed) {
223 bytes -= bytesUsed; 197 bytes -= bytesUsed;
224 fBytesInUse -= bytesUsed; 198 fBytesInUse -= bytesUsed;
225 // if we mapped a vb to satisfy the makeSpace call and we're releasing 199 // if we mapped a vb to satisfy the makeSpace call and we're releasing
226 // beyond it, then unmap it. 200 // beyond it, then unmap it.
227 if (block.fBuffer->isMapped()) { 201 if (block.fBuffer->isMapped()) {
228 UNMAP_BUFFER(block); 202 UNMAP_BUFFER(block);
229 } 203 }
230 this->destroyBlock(); 204 this->destroyBlock();
231 } else { 205 } else {
232 block.fBytesFree += bytes; 206 block.fBytesFree += bytes;
233 fBytesInUse -= bytes; 207 fBytesInUse -= bytes;
234 bytes = 0; 208 bytes = 0;
235 break; 209 break;
236 } 210 }
237 } 211 }
238 if (!fPreallocBuffersInUse && fPreallocBuffers.count()) { 212
239 fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
240 preallocBuffersInUse) %
241 fPreallocBuffers.count();
242 }
243 VALIDATE(); 213 VALIDATE();
244 } 214 }
245 215
246 bool GrBufferAllocPool::createBlock(size_t requestSize) { 216 bool GrBufferAllocPool::createBlock(size_t requestSize) {
247 217
248 size_t size = SkTMax(requestSize, fMinBlockSize); 218 size_t size = SkTMax(requestSize, fMinBlockSize);
249 SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE); 219 SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);
250 220
251 VALIDATE(); 221 VALIDATE();
252 222
253 BufferBlock& block = fBlocks.push_back(); 223 BufferBlock& block = fBlocks.push_back();
254 224
255 if (size == fMinBlockSize && 225 block.fBuffer = this->getBuffer(size);
256 fPreallocBuffersInUse < fPreallocBuffers.count()) { 226 if (NULL == block.fBuffer) {
257 227 fBlocks.pop_back();
258 uint32_t nextBuffer = (fPreallocBuffersInUse + 228 return false;
259 fPreallocBufferStartIdx) %
260 fPreallocBuffers.count();
261 block.fBuffer = fPreallocBuffers[nextBuffer];
262 block.fBuffer->ref();
263 ++fPreallocBuffersInUse;
264 } else {
265 block.fBuffer = this->createBuffer(size);
266 if (NULL == block.fBuffer) {
267 fBlocks.pop_back();
268 return false;
269 }
270 } 229 }
271 230
272 block.fBytesFree = size; 231 block.fBytesFree = block.fBuffer->gpuMemorySize();
273 if (fBufferPtr) { 232 if (fBufferPtr) {
274 SkASSERT(fBlocks.count() > 1); 233 SkASSERT(fBlocks.count() > 1);
275 BufferBlock& prev = fBlocks.fromBack(1); 234 BufferBlock& prev = fBlocks.fromBack(1);
276 if (prev.fBuffer->isMapped()) { 235 if (prev.fBuffer->isMapped()) {
277 UNMAP_BUFFER(prev); 236 UNMAP_BUFFER(prev);
278 } else { 237 } else {
279 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); 238 this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
280 } 239 }
281 fBufferPtr = NULL; 240 fBufferPtr = NULL;
282 } 241 }
283 242
284 SkASSERT(NULL == fBufferPtr); 243 SkASSERT(NULL == fBufferPtr);
285 244
286 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. 245 // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
287 // Otherwise when buffer mapping is supported we map if the buffer size is greater than the 246 // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
288 // threshold. 247 // threshold.
289 bool attemptMap = block.fBuffer->isCPUBacked(); 248 bool attemptMap = block.fBuffer->isCPUBacked();
290 if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { 249 if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
291 attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD; 250 attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD;
292 } 251 }
293 252
294 if (attemptMap) { 253 if (attemptMap) {
295 fBufferPtr = block.fBuffer->map(); 254 fBufferPtr = block.fBuffer->map();
296 } 255 }
297 256
298 if (NULL == fBufferPtr) { 257 if (NULL == fBufferPtr) {
299 fBufferPtr = fCpuData.reset(size); 258 fBufferPtr = fCpuData.reset(block.fBytesFree);
300 } 259 }
301 260
302 VALIDATE(true); 261 VALIDATE(true);
303 262
304 return true; 263 return true;
305 } 264 }
306 265
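The mapping decision in createBlock() above, isolated as a predicate for clarity. This is a sketch with simplified parameters; the real code queries GrDrawTargetCaps::mapBufferFlags() and compares against GR_GEOM_BUFFER_MAP_THRESHOLD:

    // Sketch of createBlock()'s map-or-mirror policy. CPU-backed buffers
    // are always mapped (it is free and saves a copy); GPU buffers are
    // mapped only when the backend supports mapping and the buffer is
    // large enough that a map beats a staged updateData() upload.
    static bool shouldMapBuffer(bool cpuBacked, bool mappingSupported,
                                size_t bufferSize, size_t mapThreshold) {
        if (cpuBacked) {
            return true;
        }
        return mappingSupported && bufferSize > mapThreshold;
    }

When this policy says no (or the map() call fails), the pool falls back to the fCpuData mirror, which flushCpuData() later uploads to the buffer.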
307 void GrBufferAllocPool::destroyBlock() { 266 void GrBufferAllocPool::destroyBlock() {
308 SkASSERT(!fBlocks.empty()); 267 SkASSERT(!fBlocks.empty());
309 268
310 BufferBlock& block = fBlocks.back(); 269 BufferBlock& block = fBlocks.back();
311 if (fPreallocBuffersInUse > 0) { 270
312 uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
313 fPreallocBufferStartIdx +
314 (fPreallocBuffers.count() - 1)) %
315 fPreallocBuffers.count();
316 if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
317 --fPreallocBuffersInUse;
318 }
319 }
320 SkASSERT(!block.fBuffer->isMapped()); 271 SkASSERT(!block.fBuffer->isMapped());
321 block.fBuffer->unref(); 272 block.fBuffer->unref();
322 fBlocks.pop_back(); 273 fBlocks.pop_back();
323 fBufferPtr = NULL; 274 fBufferPtr = NULL;
324 } 275 }
325 276
326 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { 277 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
327 GrGeometryBuffer* buffer = block.fBuffer; 278 GrGeometryBuffer* buffer = block.fBuffer;
328 SkASSERT(buffer); 279 SkASSERT(buffer);
329 SkASSERT(!buffer->isMapped()); 280 SkASSERT(!buffer->isMapped());
330 SkASSERT(fCpuData.get() == fBufferPtr); 281 SkASSERT(fCpuData.get() == fBufferPtr);
331 SkASSERT(flushSize <= buffer->gpuMemorySize()); 282 SkASSERT(flushSize <= buffer->gpuMemorySize());
332 VALIDATE(true); 283 VALIDATE(true);
333 284
334 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && 285 if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
335 flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) { 286 flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
336 void* data = buffer->map(); 287 void* data = buffer->map();
337 if (data) { 288 if (data) {
338 memcpy(data, fBufferPtr, flushSize); 289 memcpy(data, fBufferPtr, flushSize);
339 UNMAP_BUFFER(block); 290 UNMAP_BUFFER(block);
340 return; 291 return;
341 } 292 }
342 } 293 }
343 buffer->updateData(fBufferPtr, flushSize); 294 buffer->updateData(fBufferPtr, flushSize);
344 VALIDATE(true); 295 VALIDATE(true);
345 } 296 }
346 297
347 GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) { 298 GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
299
300 GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
bsalomon 2015/05/13 16:24:07 Can we get GrGpu off this guy?
robertphillips 2015/05/13 16:56:14 Unfortunately, GrBufferAllocPool needs access to t
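Context for the thread above: after this CL the pool no longer creates buffers through GrGpu directly, yet it still holds fGpu both for caps queries (mapBufferFlags() in createBlock()) and to reach the resource provider here. A simplified sketch of that access chain, with stand-in class bodies (the real Skia classes are far larger); only the navigation path is the point:

    class GrResourceProvider { /* hands out cached geometry buffers */ };

    class GrContext {
    public:
        GrResourceProvider* resourceProvider() { return fResourceProvider; }
    private:
        GrResourceProvider* fResourceProvider;
    };

    class GrGpu {
    public:
        GrContext* getContext() { return fContext; }
    private:
        GrContext* fContext;
    };

    // getBuffer() below walks this chain:
    //     GrResourceProvider* rp = fGpu->getContext()->resourceProvider();

So dropping the GrGpu pointer would presumably require handing the pool a provider (and caps) through some other route.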
301
348 if (kIndex_BufferType == fBufferType) { 302 if (kIndex_BufferType == fBufferType) {
349 return fGpu->createIndexBuffer(size, true); 303 return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
350 } else { 304 } else {
351 SkASSERT(kVertex_BufferType == fBufferType); 305 SkASSERT(kVertex_BufferType == fBufferType);
352 return fGpu->createVertexBuffer(size, true); 306 return rp->getVertBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
353 } 307 }
354 } 308 }
355 309
356 //////////////////////////////////////////////////////////////////////////////// 310 ////////////////////////////////////////////////////////////////////////////////
357 311
358 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, 312 GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
359 size_t bufferSize, 313 : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
360 int preallocBufferCnt)
361 : GrBufferAllocPool(gpu,
362 kVertex_BufferType,
363 bufferSize,
364 preallocBufferCnt) {
365 } 314 }
366 315
367 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize, 316 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
368 int vertexCount, 317 int vertexCount,
369 const GrVertexBuffer** buffer, 318 const GrVertexBuffer** buffer,
370 int* startVertex) { 319 int* startVertex) {
371 320
372 SkASSERT(vertexCount >= 0); 321 SkASSERT(vertexCount >= 0);
373 SkASSERT(buffer); 322 SkASSERT(buffer);
374 SkASSERT(startVertex); 323 SkASSERT(startVertex);
375 324
376 size_t offset = 0; // assign to suppress warning 325 size_t offset = 0; // assign to suppress warning
377 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning 326 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
378 void* ptr = INHERITED::makeSpace(vertexSize * vertexCount, 327 void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
379 vertexSize, 328 vertexSize,
380 &geomBuffer, 329 &geomBuffer,
381 &offset); 330 &offset);
382 331
383 *buffer = (const GrVertexBuffer*) geomBuffer; 332 *buffer = (const GrVertexBuffer*) geomBuffer;
384 SkASSERT(0 == offset % vertexSize); 333 SkASSERT(0 == offset % vertexSize);
385 *startVertex = static_cast<int>(offset / vertexSize); 334 *startVertex = static_cast<int>(offset / vertexSize);
386 return ptr; 335 return ptr;
387 } 336 }
388 337
389 //////////////////////////////////////////////////////////////////////////////// 338 ////////////////////////////////////////////////////////////////////////////////
390 339
391 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, 340 GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
392 size_t bufferSize, 341 : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
393 int preallocBufferCnt)
394 : GrBufferAllocPool(gpu,
395 kIndex_BufferType,
396 bufferSize,
397 preallocBufferCnt) {
398 } 342 }
399 343
400 void* GrIndexBufferAllocPool::makeSpace(int indexCount, 344 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
401 const GrIndexBuffer** buffer, 345 const GrIndexBuffer** buffer,
402 int* startIndex) { 346 int* startIndex) {
403 347
404 SkASSERT(indexCount >= 0); 348 SkASSERT(indexCount >= 0);
405 SkASSERT(buffer); 349 SkASSERT(buffer);
406 SkASSERT(startIndex); 350 SkASSERT(startIndex);
407 351
408 size_t offset = 0; // assign to suppress warning 352 size_t offset = 0; // assign to suppress warning
409 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning 353 const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
410 void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t), 354 void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
411 sizeof(uint16_t), 355 sizeof(uint16_t),
412 &geomBuffer, 356 &geomBuffer,
413 &offset); 357 &offset);
414 358
415 *buffer = (const GrIndexBuffer*) geomBuffer; 359 *buffer = (const GrIndexBuffer*) geomBuffer;
416 SkASSERT(0 == offset % sizeof(uint16_t)); 360 SkASSERT(0 == offset % sizeof(uint16_t));
417 *startIndex = static_cast<int>(offset / sizeof(uint16_t)); 361 *startIndex = static_cast<int>(offset / sizeof(uint16_t));
418 return ptr; 362 return ptr;
419 } 363 }
420 364
421 365
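As a closing illustration of the API this file exposes: a hedged usage sketch, not taken from this CL, of how a caller might reserve vertex space from the pool during a flush. The Vertex struct and the draw step are illustrative assumptions; the makeSpace() signature matches the one in the diff above.

    struct Vertex {
        float fX;
        float fY;
    };

    // Reserve room for one triangle. The pool returns a write pointer plus
    // the buffer and start vertex needed to record the draw.
    static void appendTriangle(GrVertexBufferAllocPool* pool) {
        const GrVertexBuffer* buffer = NULL;
        int startVertex = 0;
        Vertex* verts = static_cast<Vertex*>(
                pool->makeSpace(sizeof(Vertex), 3, &buffer, &startVertex));
        if (NULL == verts) {
            return;  // the pool could not create or map a block
        }
        verts[0].fX = 0.f; verts[0].fY = 0.f;
        verts[1].fX = 1.f; verts[1].fY = 0.f;
        verts[2].fX = 0.f; verts[2].fY = 1.f;
        // ... record a draw reading 3 vertices from 'buffer' at 'startVertex'
    }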