OLD | NEW |
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 | 9 |
10 #include "GrBufferAllocPool.h" | 10 #include "GrBufferAllocPool.h" |
(...skipping 91 matching lines...)
102 } | 102 } |
103 | 103 |
104 void GrBufferAllocPool::unlock() { | 104 void GrBufferAllocPool::unlock() { |
105 VALIDATE(); | 105 VALIDATE(); |
106 | 106 |
107 if (NULL != fBufferPtr) { | 107 if (NULL != fBufferPtr) { |
108 BufferBlock& block = fBlocks.back(); | 108 BufferBlock& block = fBlocks.back(); |
109 if (block.fBuffer->isLocked()) { | 109 if (block.fBuffer->isLocked()) { |
110 block.fBuffer->unlock(); | 110 block.fBuffer->unlock(); |
111 } else { | 111 } else { |
112             size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree; | 112             size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
113 flushCpuData(fBlocks.back().fBuffer, flushSize); | 113 flushCpuData(fBlocks.back().fBuffer, flushSize); |
114 } | 114 } |
115 fBufferPtr = NULL; | 115 fBufferPtr = NULL; |
116 } | 116 } |
117 VALIDATE(); | 117 VALIDATE(); |
118 } | 118 } |
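
The else-branch above covers the CPU-staging case: writes went to fCpuData rather than a locked buffer, so only the bytes actually handed out get copied back to the GPU. A minimal sketch of that arithmetic, with made-up numbers for illustration:

    // Illustrative numbers only: a block whose buffer reports 32768 bytes
    // with 12288 bytes still free has handed out 20480 bytes, so unlock()
    // flushes just that prefix.
    size_t bufferSize = 32768;                   // block.fBuffer->gpuMemorySize()
    size_t bytesFree  = 12288;                   // block.fBytesFree
    size_t flushSize  = bufferSize - bytesFree;  // 20480 bytes to copy
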
119 | 119 |
120 #ifdef SK_DEBUG | 120 #ifdef SK_DEBUG |
121 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { | 121 void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { |
122 if (NULL != fBufferPtr) { | 122 if (NULL != fBufferPtr) { |
123 SkASSERT(!fBlocks.empty()); | 123 SkASSERT(!fBlocks.empty()); |
124 if (fBlocks.back().fBuffer->isLocked()) { | 124 if (fBlocks.back().fBuffer->isLocked()) { |
125 GrGeometryBuffer* buf = fBlocks.back().fBuffer; | 125 GrGeometryBuffer* buf = fBlocks.back().fBuffer; |
126 SkASSERT(buf->lockPtr() == fBufferPtr); | 126 SkASSERT(buf->lockPtr() == fBufferPtr); |
127 } else { | 127 } else { |
128 SkASSERT(fCpuData.get() == fBufferPtr); | 128 SkASSERT(fCpuData.get() == fBufferPtr); |
129 } | 129 } |
130 } else { | 130 } else { |
131 SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked()); | 131 SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked()); |
132 } | 132 } |
133 size_t bytesInUse = 0; | 133 size_t bytesInUse = 0; |
134 for (int i = 0; i < fBlocks.count() - 1; ++i) { | 134 for (int i = 0; i < fBlocks.count() - 1; ++i) { |
135 SkASSERT(!fBlocks[i].fBuffer->isLocked()); | 135 SkASSERT(!fBlocks[i].fBuffer->isLocked()); |
136 } | 136 } |
137 for (int i = 0; i < fBlocks.count(); ++i) { | 137 for (int i = 0; i < fBlocks.count(); ++i) { |
138         size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree; | 138         size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree; |
139 bytesInUse += bytes; | 139 bytesInUse += bytes; |
140 SkASSERT(bytes || unusedBlockAllowed); | 140 SkASSERT(bytes || unusedBlockAllowed); |
141 } | 141 } |
142 | 142 |
143 SkASSERT(bytesInUse == fBytesInUse); | 143 SkASSERT(bytesInUse == fBytesInUse); |
144 if (unusedBlockAllowed) { | 144 if (unusedBlockAllowed) { |
145 SkASSERT((fBytesInUse && !fBlocks.empty()) || | 145 SkASSERT((fBytesInUse && !fBlocks.empty()) || |
146 (!fBytesInUse && (fBlocks.count() < 2))); | 146 (!fBytesInUse && (fBlocks.count() < 2))); |
147 } else { | 147 } else { |
148 SkASSERT((0 == fBytesInUse) == fBlocks.empty()); | 148 SkASSERT((0 == fBytesInUse) == fBlocks.empty()); |
149 } | 149 } |
150 } | 150 } |
151 #endif | 151 #endif |
152 | 152 |
153 void* GrBufferAllocPool::makeSpace(size_t size, | 153 void* GrBufferAllocPool::makeSpace(size_t size, |
154 size_t alignment, | 154 size_t alignment, |
155 const GrGeometryBuffer** buffer, | 155 const GrGeometryBuffer** buffer, |
156 size_t* offset) { | 156 size_t* offset) { |
157 VALIDATE(); | 157 VALIDATE(); |
158 | 158 |
159 SkASSERT(NULL != buffer); | 159 SkASSERT(NULL != buffer); |
160 SkASSERT(NULL != offset); | 160 SkASSERT(NULL != offset); |
161 | 161 |
162 if (NULL != fBufferPtr) { | 162 if (NULL != fBufferPtr) { |
163 BufferBlock& back = fBlocks.back(); | 163 BufferBlock& back = fBlocks.back(); |
164 size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree; | 164 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; |
165 size_t pad = GrSizeAlignUpPad(usedBytes, | 165 size_t pad = GrSizeAlignUpPad(usedBytes, |
166 alignment); | 166 alignment); |
167 if ((size + pad) <= back.fBytesFree) { | 167 if ((size + pad) <= back.fBytesFree) { |
168 usedBytes += pad; | 168 usedBytes += pad; |
169 *offset = usedBytes; | 169 *offset = usedBytes; |
170 *buffer = back.fBuffer; | 170 *buffer = back.fBuffer; |
171 back.fBytesFree -= size + pad; | 171 back.fBytesFree -= size + pad; |
172 fBytesInUse += size + pad; | 172 fBytesInUse += size + pad; |
173 VALIDATE(); | 173 VALIDATE(); |
174 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes); | 174 return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes); |
(...skipping 19 matching lines...)
194 back.fBytesFree -= size; | 194 back.fBytesFree -= size; |
195 fBytesInUse += size; | 195 fBytesInUse += size; |
196 VALIDATE(); | 196 VALIDATE(); |
197 return fBufferPtr; | 197 return fBufferPtr; |
198 } | 198 } |
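
A caller-side sketch of the makeSpace contract shown above: the pool returns a CPU-writable pointer and reports which GPU buffer and offset the bytes will land in. The pool pointer and vertex layout below are hypothetical, for illustration only.

    struct MyVertex { float fPos[2]; };           // made-up layout
    const GrGeometryBuffer* buffer = NULL;
    size_t offset = 0;
    void* space = pool->makeSpace(4 * sizeof(MyVertex),  // size
                                  sizeof(MyVertex),      // alignment
                                  &buffer,
                                  &offset);
    if (NULL != space) {
        MyVertex* verts = reinterpret_cast<MyVertex*>(space);
        // ... write verts[0..3]; draw later from (buffer, offset) ...
    }
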
199 | 199 |
200 int GrBufferAllocPool::currentBufferItems(size_t itemSize) const { | 200 int GrBufferAllocPool::currentBufferItems(size_t itemSize) const { |
201 VALIDATE(); | 201 VALIDATE(); |
202 if (NULL != fBufferPtr) { | 202 if (NULL != fBufferPtr) { |
203 const BufferBlock& back = fBlocks.back(); | 203 const BufferBlock& back = fBlocks.back(); |
204 size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree; | 204 size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; |
205 size_t pad = GrSizeAlignUpPad(usedBytes, itemSize); | 205 size_t pad = GrSizeAlignUpPad(usedBytes, itemSize); |
206 return static_cast<int>((back.fBytesFree - pad) / itemSize); | 206 return static_cast<int>((back.fBytesFree - pad) / itemSize); |
207 } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) { | 207 } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) { |
208 return static_cast<int>(fMinBlockSize / itemSize); | 208 return static_cast<int>(fMinBlockSize / itemSize); |
209 } | 209 } |
210 return 0; | 210 return 0; |
211 } | 211 } |
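
currentBufferItems first pads the used byte count up to the item size before dividing the remaining space. GrSizeAlignUpPad is defined elsewhere in Skia; its assumed behavior (a sketch, not the actual definition) is the usual round-up-pad helper:

    // Assumed behavior: bytes of padding needed to round x up to the next
    // multiple of alignment, e.g. x = 10, alignment = 4 -> pad = 2.
    static size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }
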
212 | 212 |
213 int GrBufferAllocPool::preallocatedBuffersRemaining() const { | 213 int GrBufferAllocPool::preallocatedBuffersRemaining() const { |
214 return fPreallocBuffers.count() - fPreallocBuffersInUse; | 214 return fPreallocBuffers.count() - fPreallocBuffersInUse; |
215 } | 215 } |
216 | 216 |
217 int GrBufferAllocPool::preallocatedBufferCount() const { | 217 int GrBufferAllocPool::preallocatedBufferCount() const { |
218 return fPreallocBuffers.count(); | 218 return fPreallocBuffers.count(); |
219 } | 219 } |
220 | 220 |
221 void GrBufferAllocPool::putBack(size_t bytes) { | 221 void GrBufferAllocPool::putBack(size_t bytes) { |
222 VALIDATE(); | 222 VALIDATE(); |
223 | 223 |
224 // if the putBack unwinds all the preallocated buffers then we will | 224 // if the putBack unwinds all the preallocated buffers then we will |
225 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse | 225 // advance the starting index. As blocks are destroyed fPreallocBuffersInUse |
226     // will be decremented. It will reach zero if all blocks using preallocated | 226     // will be decremented. It will reach zero if all blocks using preallocated |
227 // buffers are released. | 227 // buffers are released. |
228 int preallocBuffersInUse = fPreallocBuffersInUse; | 228 int preallocBuffersInUse = fPreallocBuffersInUse; |
229 | 229 |
230 while (bytes) { | 230 while (bytes) { |
231         // caller shouldn't try to put back more than they've taken | 231         // caller shouldn't try to put back more than they've taken |
232 SkASSERT(!fBlocks.empty()); | 232 SkASSERT(!fBlocks.empty()); |
233 BufferBlock& block = fBlocks.back(); | 233 BufferBlock& block = fBlocks.back(); |
234         size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree; | 234         size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; |
235 if (bytes >= bytesUsed) { | 235 if (bytes >= bytesUsed) { |
236 bytes -= bytesUsed; | 236 bytes -= bytesUsed; |
237 fBytesInUse -= bytesUsed; | 237 fBytesInUse -= bytesUsed; |
238 // if we locked a vb to satisfy the make space and we're releasing | 238 // if we locked a vb to satisfy the make space and we're releasing |
239 // beyond it, then unlock it. | 239 // beyond it, then unlock it. |
240 if (block.fBuffer->isLocked()) { | 240 if (block.fBuffer->isLocked()) { |
241 block.fBuffer->unlock(); | 241 block.fBuffer->unlock(); |
242 } | 242 } |
243 this->destroyBlock(); | 243 this->destroyBlock(); |
244 } else { | 244 } else { |
(...skipping 38 matching lines...)
283 } | 283 } |
284 | 284 |
285 block.fBytesFree = size; | 285 block.fBytesFree = size; |
286 if (NULL != fBufferPtr) { | 286 if (NULL != fBufferPtr) { |
287 SkASSERT(fBlocks.count() > 1); | 287 SkASSERT(fBlocks.count() > 1); |
288 BufferBlock& prev = fBlocks.fromBack(1); | 288 BufferBlock& prev = fBlocks.fromBack(1); |
289 if (prev.fBuffer->isLocked()) { | 289 if (prev.fBuffer->isLocked()) { |
290 prev.fBuffer->unlock(); | 290 prev.fBuffer->unlock(); |
291 } else { | 291 } else { |
292 flushCpuData(prev.fBuffer, | 292 flushCpuData(prev.fBuffer, |
293 prev.fBuffer->sizeInBytes() - prev.fBytesFree); | 293 prev.fBuffer->gpuMemorySize() - prev.fBytesFree); |
294 } | 294 } |
295 fBufferPtr = NULL; | 295 fBufferPtr = NULL; |
296 } | 296 } |
297 | 297 |
298 SkASSERT(NULL == fBufferPtr); | 298 SkASSERT(NULL == fBufferPtr); |
299 | 299 |
300     // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy. | 300     // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy. |
301 // Otherwise when buffer locking is supported: | 301 // Otherwise when buffer locking is supported: |
302     //      a) If the frequently reset hint is set we only lock when the requested size meets a | 302     //      a) If the frequently reset hint is set we only lock when the requested size meets a |
303     //      threshold (since we don't expect it is likely that we will see more vertex data) | 303     //      threshold (since we don't expect it is likely that we will see more vertex data) |
(...skipping 37 matching lines...)
341 block.fBuffer->unref(); | 341 block.fBuffer->unref(); |
342 fBlocks.pop_back(); | 342 fBlocks.pop_back(); |
343 fBufferPtr = NULL; | 343 fBufferPtr = NULL; |
344 } | 344 } |
345 | 345 |
346 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, | 346 void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, |
347 size_t flushSize) { | 347 size_t flushSize) { |
348 SkASSERT(NULL != buffer); | 348 SkASSERT(NULL != buffer); |
349 SkASSERT(!buffer->isLocked()); | 349 SkASSERT(!buffer->isLocked()); |
350 SkASSERT(fCpuData.get() == fBufferPtr); | 350 SkASSERT(fCpuData.get() == fBufferPtr); |
351 SkASSERT(flushSize <= buffer->sizeInBytes()); | 351 SkASSERT(flushSize <= buffer->gpuMemorySize()); |
352 VALIDATE(true); | 352 VALIDATE(true); |
353 | 353 |
354 if (fGpu->caps()->bufferLockSupport() && | 354 if (fGpu->caps()->bufferLockSupport() && |
355 flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) { | 355 flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) { |
356 void* data = buffer->lock(); | 356 void* data = buffer->lock(); |
357 if (NULL != data) { | 357 if (NULL != data) { |
358 memcpy(data, fBufferPtr, flushSize); | 358 memcpy(data, fBufferPtr, flushSize); |
359 buffer->unlock(); | 359 buffer->unlock(); |
360 return; | 360 return; |
361 } | 361 } |
(...skipping 117 matching lines...)
479 } | 479 } |
480 } | 480 } |
481 | 481 |
482 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { | 482 int GrIndexBufferAllocPool::preallocatedBufferIndices() const { |
483     return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); | 483     return static_cast<int>(INHERITED::preallocatedBufferSize() / sizeof(uint16_t)); |
484 } | 484 } |
485 | 485 |
486 int GrIndexBufferAllocPool::currentBufferIndices() const { | 486 int GrIndexBufferAllocPool::currentBufferIndices() const { |
487 return currentBufferItems(sizeof(uint16_t)); | 487 return currentBufferItems(sizeof(uint16_t)); |
488 } | 488 } |
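
Both index-pool helpers divide a byte count by sizeof(uint16_t), since the pool hands out 16-bit indices. A worked example with an illustrative number:

    // Illustrative only: 4096 free, already 2-byte-aligned bytes in the
    // current block yield 4096 / sizeof(uint16_t) == 2048 indices.
    size_t bytesFree = 4096;
    int indices = static_cast<int>(bytesFree / sizeof(uint16_t));  // 2048
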