Chromium Code Reviews

Side by Side Diff: src/gpu/vk/GrVkMemory.cpp

Issue 2029763002: Create free list heap for suballocation (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Address comments; clean up debug code (created 4 years, 6 months ago)
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
(...skipping 11 matching lines...)
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

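For reference, the lookup above follows the standard Vulkan pattern of scanning VkPhysicalDeviceMemoryProperties for a type that is both allowed by the resource's memoryTypeBits and supports the requested property flags. A minimal standalone sketch of that pattern (not the Skia helper itself, whose opening lines are elided above) might look like:

// Sketch only: conventional Vulkan memory-type selection, assuming <vulkan/vulkan.h>.
#include <vulkan/vulkan.h>

static bool find_memory_type(const VkPhysicalDeviceMemoryProperties& props,
                             uint32_t memoryTypeBits,
                             VkMemoryPropertyFlags requested,
                             uint32_t* typeIndex) {
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        // the type must be allowed for this resource and support every requested flag
        if ((memoryTypeBits & (1u << i)) &&
            (props.memoryTypes[i].propertyFlags & requested) == requested) {
            *typeIndex = i;
            return true;
        }
    }
    return false;
}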
Old:
static bool alloc_device_memory(const GrVkGpu* gpu,
                                VkMemoryRequirements* memReqs,
                                const VkMemoryPropertyFlags flags,
                                VkDeviceMemory* memory) {
    uint32_t typeIndex;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs->memoryTypeBits,
                                     flags,
                                     &typeIndex)) {
        return false;
    }

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,    // sType
        NULL,                                      // pNext
        memReqs->size,                             // allocationSize
        typeIndex,                                 // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 memory));
    if (err) {
        return false;
    }
    return true;
}

New:
static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) {
    const GrVkGpu::Heap kBufferToHeap[]{
        GrVkGpu::kVertexBuffer_Heap,
        GrVkGpu::kIndexBuffer_Heap,
        GrVkGpu::kUniformBuffer_Heap,
        GrVkGpu::kCopyReadBuffer_Heap,
        GrVkGpu::kCopyWriteBuffer_Heap,
    };
    GR_STATIC_ASSERT(0 == GrVkBuffer::kVertex_Type);
    GR_STATIC_ASSERT(1 == GrVkBuffer::kIndex_Type);
    GR_STATIC_ASSERT(2 == GrVkBuffer::kUniform_Type);
    GR_STATIC_ASSERT(3 == GrVkBuffer::kCopyRead_Type);
    GR_STATIC_ASSERT(4 == GrVkBuffer::kCopyWrite_Type);

    return kBufferToHeap[type];
}

Old:
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          const VkMemoryPropertyFlags flags,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, &alloc->fMemory)) {
        return false;
    }
    // for now, offset is always 0
    alloc->fOffset = 0;

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        GR_VK_CALL(iface, FreeMemory(device, alloc->fMemory, nullptr));
        return false;
    }
    return true;
}

New:
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                            VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                            VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    uint32_t typeIndex;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}

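With the new signature, callers pass the buffer's GrVkBuffer::Type rather than raw memory property flags, and this function maps the type to a heap. A hypothetical call site (not part of this patch; it assumes the Skia Vulkan backend headers GrVkGpu.h, GrVkBuffer.h, and GrVkMemory.h and a VkBuffer created for vertex data) would look roughly like the sketch below, with the matching FreeBufferMemory shown next in the diff:

// Hypothetical usage sketch only; error handling reduced to a bool.
static bool bind_vertex_buffer_memory(const GrVkGpu* gpu, VkBuffer buffer, GrVkAlloc* alloc) {
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type, alloc)) {
        // suballocation or vkBindBufferMemory failed; the caller still owns 'buffer'
        return false;
    }
    return true;
}

// ... later, with the same type so the allocation returns to the same heap:
//     GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);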
Old:
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
}

New:
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}

// for debugging
static uint64_t gTotalImageMemory = 0;
static uint64_t gTotalImageMemoryFullPage = 0;

const VkDeviceSize kMaxSmallImageSize = 16 * 1024;
const VkDeviceSize kMinVulkanPageSize = 16 * 1024;

static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

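align_size rounds up to a multiple of a power-of-two alignment using the usual mask trick (Vulkan guarantees memReqs.alignment is a power of two). A few worked values in a standalone sketch (not part of the patch; helper name is illustrative):

#include <cassert>
#include <cstdint>

// Same expression as align_size() above, free-standing for illustration.
static uint64_t align_up(uint64_t size, uint64_t alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
    assert(align_up(1, 16) == 16);
    assert(align_up(16, 16) == 16);
    assert(align_up(17, 16) == 32);
    // page rounding as used for gTotalImageMemoryFullPage: 20KB rounds up to 32KB with 16KB pages
    assert(align_up(20 * 1024, 16 * 1024) == 32 * 1024);
    return 0;
}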
Old:
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         const VkMemoryPropertyFlags flags,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, &alloc->fMemory)) {
        return false;
    }
    // for now, offset is always 0
    alloc->fOffset = 0;

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        GR_VK_CALL(iface, FreeMemory(device, alloc->fMemory, nullptr));
        return false;
    }
    return true;
}

New:
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}

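For optimal-tiling images the heap is chosen purely by reported size: requirements up to kMaxSmallImageSize (16KB) go to the small-image heap, everything larger to the regular optimal-image heap. A rough, driver-independent illustration of where typical RGBA8888 images would fall (not part of the patch; real memReqs.size can be larger because of tiling and padding):

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t kMaxSmallImageSize = 16 * 1024;  // mirrors the constant above
    const uint64_t small = 64 * 64 * 4;             // 64x64 RGBA8888   = 16384 bytes
    const uint64_t large = 256 * 256 * 4;           // 256x256 RGBA8888 = 262144 bytes
    printf("64x64 RGBA8888   -> %s\n", small <= kMaxSmallImageSize ? "small optimal heap" : "optimal heap");
    printf("256x256 RGBA8888 -> %s\n", large <= kMaxSmallImageSize ? "small optimal heap" : "optimal heap");
    return 0;
}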
Old:
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
}

New:
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    GrVkHeap* heap;
    if (linearTiling) {
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else if (alloc.fSize <= kMaxSmallImageSize) {
        heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
    } else {
        heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
    }
    if (!heap->free(alloc)) {
        // must be an adopted allocation
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        gTotalImageMemory -= alloc.fSize;
        VkDeviceSize pageAlignedSize = align_size(alloc.fSize, kMinVulkanPageSize);
        gTotalImageMemoryFullPage -= pageAlignedSize;
    }
}

VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
(...skipping 31 matching lines...)
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}

New:
GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                         VkDeviceSize size, VkDeviceSize alignment)
    : fGpu(gpu)
    , fMemoryTypeIndex(memoryTypeIndex) {

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,    // sType
        NULL,                                      // pNext
        size,                                      // allocationSize
        memoryTypeIndex,                           // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 &fAlloc));

    if (VK_SUCCESS == err) {
        fSize = size;
        fAlignment = alignment;
        fFreeSize = size;
        fLargestBlockSize = size;
        fLargestBlockOffset = 0;

        Block* block = fFreeList.addToTail();
        block->fOffset = 0;
        block->fSize = fSize;
    } else {
        fSize = 0;
        fAlignment = 0;
        fFreeSize = 0;
        fLargestBlockSize = 0;
    }
}

GrVkSubHeap::~GrVkSubHeap() {
    const GrVkInterface* iface = fGpu->vkInterface();
    GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));

    fFreeList.reset();
}

bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, fAlignment);

    // find the smallest block big enough for our allocation
    FreeList::Iter iter = fFreeList.headIter();
    FreeList::Iter bestFitIter;
    VkDeviceSize bestFitSize = fSize + 1;
    VkDeviceSize secondLargestSize = 0;
    VkDeviceSize secondLargestOffset = 0;
    while (iter.get()) {
        Block* block = iter.get();
        // need to adjust size to match desired alignment
        SkASSERT(align_size(block->fOffset, fAlignment) - block->fOffset == 0);
        if (block->fSize >= alignedSize && block->fSize < bestFitSize) {
            bestFitIter = iter;
            bestFitSize = block->fSize;
        }
        if (secondLargestSize < block->fSize && block->fOffset != fLargestBlockOffset) {
            secondLargestSize = block->fSize;
            secondLargestOffset = block->fOffset;
        }
        iter.next();
    }
    SkASSERT(secondLargestSize <= fLargestBlockSize);

    Block* bestFit = bestFitIter.get();
    if (bestFit) {
        alloc->fMemory = fAlloc;
        SkASSERT(align_size(bestFit->fOffset, fAlignment) == bestFit->fOffset);
        alloc->fOffset = bestFit->fOffset;
        alloc->fSize = alignedSize;
        // adjust or remove current block
        VkDeviceSize originalBestFitOffset = bestFit->fOffset;
        if (bestFit->fSize > alignedSize) {
            bestFit->fOffset += alignedSize;
            bestFit->fSize -= alignedSize;
            if (fLargestBlockOffset == originalBestFitOffset) {
                if (bestFit->fSize >= secondLargestSize) {
                    fLargestBlockSize = bestFit->fSize;
                    fLargestBlockOffset = bestFit->fOffset;
                } else {
                    fLargestBlockSize = secondLargestSize;
                    fLargestBlockOffset = secondLargestOffset;
                }
            }
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        } else {
            SkASSERT(bestFit->fSize == alignedSize);
            if (fLargestBlockOffset == originalBestFitOffset) {
                fLargestBlockSize = secondLargestSize;
                fLargestBlockOffset = secondLargestOffset;
            }
            fFreeList.remove(bestFit);
#ifdef SK_DEBUG
            VkDeviceSize largestSize = 0;
            iter = fFreeList.headIter();
            while (iter.get()) {
                Block* block = iter.get();
                if (largestSize < block->fSize) {
                    largestSize = block->fSize;
                }
                iter.next();
            }
            SkASSERT(largestSize == fLargestBlockSize);
#endif
        }
        fFreeSize -= alignedSize;

        return true;
    }

    SkDebugf("Can't allocate %d bytes, %d bytes available, largest free block %d\n",
             alignedSize, fFreeSize, fLargestBlockSize);

    return false;
}

void GrVkSubHeap::free(const GrVkAlloc& alloc) {
    SkASSERT(alloc.fMemory == fAlloc);

    // find the block right after this allocation
    FreeList::Iter iter = fFreeList.headIter();
    while (iter.get() && iter.get()->fOffset < alloc.fOffset) {
        iter.next();
    }
    FreeList::Iter prev = iter;
    prev.prev();
    // we have four cases:
    // we exactly follow the previous one
    Block* block;
    if (prev.get() && prev.get()->fOffset + prev.get()->fSize == alloc.fOffset) {
        block = prev.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockSize = block->fSize;
        }
        // and additionally we may exactly precede the next one
        if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
            block->fSize += iter.get()->fSize;
            if (iter.get()->fOffset == fLargestBlockOffset) {
                fLargestBlockOffset = block->fOffset;
                fLargestBlockSize = block->fSize;
            }
            fFreeList.remove(iter.get());
        }
    // or we only exactly precede the next one
    } else if (iter.get() && iter.get()->fOffset == alloc.fOffset + alloc.fSize) {
        block = iter.get();
        block->fSize += alloc.fSize;
        if (block->fOffset == fLargestBlockOffset) {
            fLargestBlockOffset = alloc.fOffset;
            fLargestBlockSize = block->fSize;
        }
        block->fOffset = alloc.fOffset;
    // or we fall somewhere in between, with gaps
    } else {
        block = fFreeList.addBefore(iter);
        block->fOffset = alloc.fOffset;
        block->fSize = alloc.fSize;
    }
    fFreeSize += alloc.fSize;
    if (block->fSize > fLargestBlockSize) {
        fLargestBlockSize = block->fSize;
        fLargestBlockOffset = block->fOffset;
    }

#ifdef SK_DEBUG
    VkDeviceSize largestSize = 0;
    iter = fFreeList.headIter();
    while (iter.get()) {
        Block* block = iter.get();
        if (largestSize < block->fSize) {
            largestSize = block->fSize;
        }
        iter.next();
    }
    SkASSERT(fLargestBlockSize == largestSize);
#endif
}

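The free routine above merges the returned range with its neighbors so the free list does not fragment into many small blocks. A simplified standalone model of the same best-fit-plus-coalescing strategy (plain std::list and raw offsets, not the Skia types; illustration only, not part of the patch):

#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>

struct Block { uint64_t offset, size; };

struct FreeListModel {
    std::list<Block> freeList;  // kept sorted by offset

    explicit FreeListModel(uint64_t heapSize) { freeList.push_back({0, heapSize}); }

    bool alloc(uint64_t size, uint64_t* outOffset) {
        // best fit: smallest free block that is still large enough
        auto best = freeList.end();
        for (auto it = freeList.begin(); it != freeList.end(); ++it) {
            if (it->size >= size && (best == freeList.end() || it->size < best->size)) {
                best = it;
            }
        }
        if (best == freeList.end()) return false;
        *outOffset = best->offset;
        best->offset += size;
        best->size -= size;
        if (best->size == 0) freeList.erase(best);
        return true;
    }

    void free(uint64_t offset, uint64_t size) {
        // find the first free block that starts after the returned range
        auto next = freeList.begin();
        while (next != freeList.end() && next->offset < offset) ++next;
        auto prev = (next == freeList.begin()) ? freeList.end() : std::prev(next);
        bool mergePrev = prev != freeList.end() && prev->offset + prev->size == offset;
        bool mergeNext = next != freeList.end() && offset + size == next->offset;
        if (mergePrev && mergeNext) {          // bridges two free blocks into one
            prev->size += size + next->size;
            freeList.erase(next);
        } else if (mergePrev) {                // extends the previous block
            prev->size += size;
        } else if (mergeNext) {                // extends the next block downward
            next->offset = offset;
            next->size += size;
        } else {                               // isolated: insert a new block
            freeList.insert(next, {offset, size});
        }
    }
};

int main() {
    FreeListModel heap(64);
    uint64_t a, b, c;
    bool ok = heap.alloc(16, &a) && heap.alloc(16, &b) && heap.alloc(16, &c);
    assert(ok); (void)ok;
    heap.free(b, 16);   // leaves a hole between a and c
    heap.free(a, 16);   // merges with that hole
    heap.free(c, 16);   // everything coalesces back into one 64-byte block
    assert(heap.freeList.size() == 1 && heap.freeList.front().size == 64);
    return 0;
}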
GrVkHeap::~GrVkHeap() {
}

bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find a subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex) {
            VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize();
            if (heapSize > alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
    fAllocSize += fSubHeapSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
    VkDeviceSize alignedSize = align_size(size, alignment);

    // first try to find an unallocated subheap that fits our allocation request
    int bestFitIndex = -1;
    VkDeviceSize bestFitSize = 0x7FFFFFFF;
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && fSubHeaps[i]->unallocated()) {
            VkDeviceSize heapSize = fSubHeaps[i]->size();
            if (heapSize > alignedSize && heapSize < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = heapSize;
            }
        }
    }

    if (bestFitIndex >= 0) {
        SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment);
        if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) {
            fUsedSize += alloc->fSize;
            return true;
        }
        return false;
    }

    // need to allocate a new subheap
    SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
        return true;
    }

    return false;
}

bool GrVkHeap::free(const GrVkAlloc& alloc) {
    for (auto i = 0; i < fSubHeaps.count(); ++i) {
        if (fSubHeaps[i]->memory() == alloc.fMemory) {
            fSubHeaps[i]->free(alloc);
            fUsedSize -= alloc.fSize;
            return true;
        }
    }

    return false;
}