| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkMemory.h" | 8 #include "GrVkMemory.h" |
| 9 | 9 |
| 10 #include "GrVkGpu.h" | 10 #include "GrVkGpu.h" |
| 11 #include "GrVkUtil.h" | 11 #include "GrVkUtil.h" |
| 12 | 12 |
| 13 #ifdef SK_DEBUG | |
| 14 // for simple tracking of how much we're using in each heap | |
| 15 // last counter is for non-subheap allocations | |
| 16 VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 }; | |
| 17 #endif | |
| 18 | |
| 19 static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps, | 13 static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps, |
| 20 uint32_t typeBits, | 14 uint32_t typeBits, |
| 21 VkMemoryPropertyFlags requestedMemFlags, | 15 VkMemoryPropertyFlags requestedMemFlags, |
| 22 uint32_t* typeIndex, | 16 uint32_t* typeIndex) { |
| 23 uint32_t* heapIndex) { | |
| 24 for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) { | 17 for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) { |
| 25 if (typeBits & (1 << i)) { | 18 if (typeBits & (1 << i)) { |
| 26 uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags & | 19 uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags & |
| 27 requestedMemFlags; | 20 requestedMemFlags; |
| 28 if (supportedFlags == requestedMemFlags) { | 21 if (supportedFlags == requestedMemFlags) { |
| 29 *typeIndex = i; | 22 *typeIndex = i; |
| 30 *heapIndex = physDevMemProps.memoryTypes[i].heapIndex; | |
| 31 return true; | 23 return true; |
| 32 } | 24 } |
| 33 } | 25 } |
| 34 } | 26 } |
| 35 return false; | 27 return false; |
| 36 } | 28 } |
| 37 | 29 |
| 38 static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) { | 30 static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) { |
| 39 const GrVkGpu::Heap kBufferToHeap[]{ | 31 const GrVkGpu::Heap kBufferToHeap[]{ |
| 40 GrVkGpu::kVertexBuffer_Heap, | 32 GrVkGpu::kVertexBuffer_Heap, |
| (...skipping 16 matching lines...) |
| 57 GrVkBuffer::Type type, | 49 GrVkBuffer::Type type, |
| 58 bool dynamic, | 50 bool dynamic, |
| 59 GrVkAlloc* alloc) { | 51 GrVkAlloc* alloc) { |
| 60 const GrVkInterface* iface = gpu->vkInterface(); | 52 const GrVkInterface* iface = gpu->vkInterface(); |
| 61 VkDevice device = gpu->device(); | 53 VkDevice device = gpu->device(); |
| 62 | 54 |
| 63 VkMemoryRequirements memReqs; | 55 VkMemoryRequirements memReqs; |
| 64 GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs)); | 56 GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs)); |
| 65 | 57 |
| 66 uint32_t typeIndex = 0; | 58 uint32_t typeIndex = 0; |
| 67 uint32_t heapIndex = 0; | |
| 68 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); | 59 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); |
| 69 if (dynamic) { | 60 if (dynamic) { |
| 70 // try to get cached and ideally non-coherent memory first | 61 // try to get cached and ideally non-coherent memory first |
| 71 if (!get_valid_memory_type_index(phDevMemProps, | 62 if (!get_valid_memory_type_index(phDevMemProps, |
| 72 memReqs.memoryTypeBits, | 63 memReqs.memoryTypeBits, |
| 73 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | | 64 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | |
| 74 VK_MEMORY_PROPERTY_HOST_CACHED_BIT, | 65 VK_MEMORY_PROPERTY_HOST_CACHED_BIT, |
| 75 &typeIndex, | 66 &typeIndex)) { |
| 76 &heapIndex)) { | |
| 77 // some sort of host-visible memory type should always be available for dynamic buffers | 67 // some sort of host-visible memory type should always be available for dynamic buffers |
| 78 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 68 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
| 79 memReqs.memoryTypeBits, | 69 memReqs.memoryTypeBits, |
| 80 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, | 70 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, |
| 81 &typeIndex, | 71 &typeIndex)); |
| 82 &heapIndex)); | |
| 83 } | 72 } |
| 84 | 73 |
| 85 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; | 74 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; |
| 86 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 | 75 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 |
| 87 : GrVkAlloc::kNoncoherent_Flag; | 76 : GrVkAlloc::kNoncoherent_Flag; |
| 88 } else { | 77 } else { |
| 89 // device-local memory should always be available for static buffers | 78 // device-local memory should always be available for static buffers |
| 90 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 79 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
| 91 memReqs.memoryTypeBits, | 80 memReqs.memoryTypeBits, |
| 92 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, | 81 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, |
| 93 &typeIndex, | 82 &typeIndex)); |
| 94 &heapIndex)); | |
| 95 alloc->fFlags = 0x0; | 83 alloc->fFlags = 0x0; |
| 96 } | 84 } |
| 97 | 85 |
| 98 GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type)); | 86 GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type)); |
| 99 | 87 |
| 100 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { | 88 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) { |
| 101 // if static, try to allocate from non-host-visible non-device-local memory instead | 89 SkDebugf("Failed to alloc buffer\n"); |
| 102 if (dynamic || | 90 return false; |
| 103 !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits, | |
| 104 0, &typeIndex, &heapIndex) || | |
| 105 !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { | |
| 106 SkDebugf("Failed to alloc buffer\n"); | |
| 107 return false; | |
| 108 } | |
| 109 } | 91 } |
| 110 | 92 |
| 111 // Bind buffer | 93 // Bind buffer |
| 112 VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, | 94 VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, |
| 113 alloc->fMemory, alloc->fOffset)); | 95 alloc->fMemory, alloc->fOffset)); |
| 114 if (err) { | 96 if (err) { |
| 115 SkASSERT_RELEASE(heap->free(*alloc)); | 97 SkASSERT_RELEASE(heap->free(*alloc)); |
| 116 return false; | 98 return false; |
| 117 } | 99 } |
| 118 | 100 |
| (...skipping 22 matching lines...) |
| 141 VkImage image, | 123 VkImage image, |
| 142 bool linearTiling, | 124 bool linearTiling, |
| 143 GrVkAlloc* alloc) { | 125 GrVkAlloc* alloc) { |
| 144 const GrVkInterface* iface = gpu->vkInterface(); | 126 const GrVkInterface* iface = gpu->vkInterface(); |
| 145 VkDevice device = gpu->device(); | 127 VkDevice device = gpu->device(); |
| 146 | 128 |
| 147 VkMemoryRequirements memReqs; | 129 VkMemoryRequirements memReqs; |
| 148 GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs)); | 130 GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs)); |
| 149 | 131 |
| 150 uint32_t typeIndex = 0; | 132 uint32_t typeIndex = 0; |
| 151 uint32_t heapIndex = 0; | |
| 152 GrVkHeap* heap; | 133 GrVkHeap* heap; |
| 153 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); | 134 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); |
| 154 if (linearTiling) { | 135 if (linearTiling) { |
| 155 VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | | 136 VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | |
| 156 VK_MEMORY_PROPERTY_HOST_CACHED_BIT; | 137 VK_MEMORY_PROPERTY_HOST_CACHED_BIT; |
| 157 if (!get_valid_memory_type_index(phDevMemProps, | 138 if (!get_valid_memory_type_index(phDevMemProps, |
| 158 memReqs.memoryTypeBits, | 139 memReqs.memoryTypeBits, |
| 159 desiredMemProps, | 140 desiredMemProps, |
| 160 &typeIndex, | 141 &typeIndex)) { |
| 161 &heapIndex)) { | |
| 162 // some sort of host-visible memory type should always be available | 142 // some sort of host-visible memory type should always be available |
| 163 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 143 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
| 164 memReqs.memoryTypeBits, | 144 memReqs.memoryTypeBits, |
| 165 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, | 145 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, |
| 166 &typeIndex, | 146 &typeIndex)); |
| 167 &heapIndex)); | |
| 168 } | 147 } |
| 169 heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap); | 148 heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap); |
| 170 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; | 149 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; |
| 171 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 | 150 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 |
| 172 : GrVkAlloc::kNoncoherent_Flag; | 151 : GrVkAlloc::kNoncoherent_Flag; |
| 173 } else { | 152 } else { |
| 174 // this memory type should always be available | 153 // this memory type should always be available |
| 175 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 154 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
| 176 memReqs.memoryTypeBits, | 155 memReqs.memoryTypeBits, |
| 177 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, | 156 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, |
| 178 &typeIndex, | 157 &typeIndex)); |
| 179 &heapIndex)); | |
| 180 if (memReqs.size <= kMaxSmallImageSize) { | 158 if (memReqs.size <= kMaxSmallImageSize) { |
| 181 heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap); | 159 heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap); |
| 182 } else { | 160 } else { |
| 183 heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap); | 161 heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap); |
| 184 } | 162 } |
| 185 alloc->fFlags = 0x0; | 163 alloc->fFlags = 0x0; |
| 186 } | 164 } |
| 187 | 165 |
| 188 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { | 166 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) { |
| 189 // if optimal, try to allocate from non-host-visible non-device-local memory instead | 167 SkDebugf("Failed to alloc image\n"); |
| 190 if (linearTiling || | 168 return false; |
| 191 !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits, | |
| 192 0, &typeIndex, &heapIndex) || | |
| 193 !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { | |
| 194 SkDebugf("Failed to alloc image\n"); | |
| 195 return false; | |
| 196 } | |
| 197 } | 169 } |
| 198 | 170 |
| 199 // Bind image | 171 // Bind image |
| 200 VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, | 172 VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, |
| 201 alloc->fMemory, alloc->fOffset)); | 173 alloc->fMemory, alloc->fOffset)); |
| 202 if (err) { | 174 if (err) { |
| 203 SkASSERT_RELEASE(heap->free(*alloc)); | 175 SkASSERT_RELEASE(heap->free(*alloc)); |
| 204 return false; | 176 return false; |
| 205 } | 177 } |
| 206 | 178 |
| (...skipping 245 matching lines...) |
| 452 Block* block = iter.get(); | 424 Block* block = iter.get(); |
| 453 if (largestSize < block->fSize) { | 425 if (largestSize < block->fSize) { |
| 454 largestSize = block->fSize; | 426 largestSize = block->fSize; |
| 455 } | 427 } |
| 456 iter.next(); | 428 iter.next(); |
| 457 } | 429 } |
| 458 SkASSERT(fLargestBlockSize == largestSize); | 430 SkASSERT(fLargestBlockSize == largestSize); |
| 459 #endif | 431 #endif |
| 460 } | 432 } |
| 461 | 433 |
| 462 GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex, | 434 GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, |
| 463 VkDeviceSize size, VkDeviceSize alignment) | 435 VkDeviceSize size, VkDeviceSize alignment) |
| 464 : INHERITED(size, alignment) | 436 : INHERITED(size, alignment) |
| 465 , fGpu(gpu) | 437 , fGpu(gpu) |
| 466 , fMemoryTypeIndex(memoryTypeIndex) | 438 , fMemoryTypeIndex(memoryTypeIndex) { |
| 467 , fHeapIndex(heapIndex) { | |
| 468 | 439 |
| 469 VkMemoryAllocateInfo allocInfo = { | 440 VkMemoryAllocateInfo allocInfo = { |
| 470 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType | 441 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType |
| 471 NULL, // pNext | 442 NULL, // pNext |
| 472 size, // allocationSize | 443 size, // allocationSize |
| 473 memoryTypeIndex, // memoryTypeIndex | 444 memoryTypeIndex, // memoryTypeIndex |
| 474 }; | 445 }; |
| 475 | 446 |
| 476 VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(), | 447 VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(), |
| 477 &allocInfo, | 448 &allocInfo, |
| 478 nullptr, | 449 nullptr, |
| 479 &fAlloc)); | 450 &fAlloc)); |
| 480 if (VK_SUCCESS != err) { | 451 if (VK_SUCCESS != err) { |
| 481 this->reset(); | 452 this->reset(); |
| 482 } | |
| 483 #ifdef SK_DEBUG | |
| 484 else { | |
| 485 gHeapUsage[heapIndex] += size; | |
| 486 } | 453 } |
| 487 #endif | |
| 488 } | 454 } |
| 489 | 455 |
| 490 GrVkSubHeap::~GrVkSubHeap() { | 456 GrVkSubHeap::~GrVkSubHeap() { |
| 491 const GrVkInterface* iface = fGpu->vkInterface(); | 457 const GrVkInterface* iface = fGpu->vkInterface(); |
| 492 GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr)); | 458 GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr)); |
| 493 #ifdef SK_DEBUG | |
| 494 gHeapUsage[fHeapIndex] -= fSize; | |
| 495 #endif | |
| 496 } | 459 } |
| 497 | 460 |
| 498 bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) { | 461 bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) { |
| 499 alloc->fMemory = fAlloc; | 462 alloc->fMemory = fAlloc; |
| 500 return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize); | 463 return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize); |
| 501 } | 464 } |
| 502 | 465 |
| 503 void GrVkSubHeap::free(const GrVkAlloc& alloc) { | 466 void GrVkSubHeap::free(const GrVkAlloc& alloc) { |
| 504 SkASSERT(alloc.fMemory == fAlloc); | 467 SkASSERT(alloc.fMemory == fAlloc); |
| 505 | 468 |
| 506 INHERITED::free(alloc.fOffset, alloc.fSize); | 469 INHERITED::free(alloc.fOffset, alloc.fSize); |
| 507 } | 470 } |
| 508 | 471 |
| 509 bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment, | 472 bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment, |
| 510 uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) { | 473 uint32_t memoryTypeIndex, GrVkAlloc* alloc) { |
| 511 VkDeviceSize alignedSize = align_size(size, alignment); | 474 VkDeviceSize alignedSize = align_size(size, alignment); |
| 512 | 475 |
| 513 // if requested is larger than our subheap allocation, just alloc directly | 476 // if requested is larger than our subheap allocation, just alloc directly |
| 514 if (alignedSize > fSubHeapSize) { | 477 if (alignedSize > fSubHeapSize) { |
| 515 VkMemoryAllocateInfo allocInfo = { | 478 VkMemoryAllocateInfo allocInfo = { |
| 516 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType | 479 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType |
| 517 NULL, // pNext | 480 NULL, // pNext |
| 518 size, // allocationSize | 481 size, // allocationSize |
| 519 memoryTypeIndex, // memoryTypeIndex | 482 memoryTypeIndex, // memoryTypeIndex |
| 520 }; | 483 }; |
| 521 | 484 |
| 522 VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(), | 485 VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(), |
| 523 &allocInfo, | 486 &allocInfo, |
| 524 nullptr, | 487 nullptr, |
| 525 &alloc->fMemory)); | 488 &alloc->fMemory)); |
| 526 if (VK_SUCCESS != err) { | 489 if (VK_SUCCESS != err) { |
| 527 return false; | 490 return false; |
| 528 } | 491 } |
| 529 alloc->fOffset = 0; | 492 alloc->fOffset = 0; |
| 530 alloc->fSize = 0; // hint that this is not a subheap allocation | 493 alloc->fSize = 0; // hint that this is not a subheap allocation |
| 531 #ifdef SK_DEBUG | |
| 532 gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize; | |
| 533 #endif | |
| 534 | 494 |
| 535 return true; | 495 return true; |
| 536 } | 496 } |
| 537 | 497 |
| 538 // first try to find a subheap that fits our allocation request | 498 // first try to find a subheap that fits our allocation request |
| 539 int bestFitIndex = -1; | 499 int bestFitIndex = -1; |
| 540 VkDeviceSize bestFitSize = 0x7FFFFFFF; | 500 VkDeviceSize bestFitSize = 0x7FFFFFFF; |
| 541 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 501 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
| 542 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && | 502 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && |
| 543 fSubHeaps[i]->alignment() == alignment) { | 503 fSubHeaps[i]->alignment() == alignment) { |
| 544 VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize(); | 504 VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize(); |
| 545 if (heapSize >= alignedSize && heapSize < bestFitSize) { | 505 if (heapSize >= alignedSize && heapSize < bestFitSize) { |
| 546 bestFitIndex = i; | 506 bestFitIndex = i; |
| 547 bestFitSize = heapSize; | 507 bestFitSize = heapSize; |
| 548 } | 508 } |
| 549 } | 509 } |
| 550 } | 510 } |
| 551 | 511 |
| 552 if (bestFitIndex >= 0) { | 512 if (bestFitIndex >= 0) { |
| 553 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); | 513 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); |
| 554 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { | 514 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { |
| 555 fUsedSize += alloc->fSize; | 515 fUsedSize += alloc->fSize; |
| 556 return true; | 516 return true; |
| 557 } | 517 } |
| 558 return false; | 518 return false; |
| 559 } | 519 } |
| 560 | 520 |
| 561 // need to allocate a new subheap | 521 // need to allocate a new subheap |
| 562 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); | 522 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); |
| 563 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment)); | 523 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment)); |
| 564 // try to recover from failed allocation by only allocating what we need | 524 // try to recover from failed allocation by only allocating what we need |
| 565 if (subHeap->size() == 0) { | 525 if (subHeap->size() == 0) { |
| 566 VkDeviceSize alignedSize = align_size(size, alignment); | 526 VkDeviceSize alignedSize = align_size(size, alignment); |
| 567 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment)); | 527 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment)); |
| 568 if (subHeap->size() == 0) { | 528 if (subHeap->size() == 0) { |
| 569 return false; | 529 return false; |
| 570 } | 530 } |
| 571 } | 531 } |
| 572 fAllocSize += fSubHeapSize; | 532 fAllocSize += fSubHeapSize; |
| 573 if (subHeap->alloc(size, alloc)) { | 533 if (subHeap->alloc(size, alloc)) { |
| 574 fUsedSize += alloc->fSize; | 534 fUsedSize += alloc->fSize; |
| 575 return true; | 535 return true; |
| 576 } | 536 } |
| 577 | 537 |
| 578 return false; | 538 return false; |
| 579 } | 539 } |
| 580 | 540 |
| 581 bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment, | 541 bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment, |
| 582 uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) { | 542 uint32_t memoryTypeIndex, GrVkAlloc* alloc) { |
| 583 VkDeviceSize alignedSize = align_size(size, alignment); | 543 VkDeviceSize alignedSize = align_size(size, alignment); |
| 584 | 544 |
| 585 // first try to find an unallocated subheap that fits our allocation request | 545 // first try to find an unallocated subheap that fits our allocation request |
| 586 int bestFitIndex = -1; | 546 int bestFitIndex = -1; |
| 587 VkDeviceSize bestFitSize = 0x7FFFFFFF; | 547 VkDeviceSize bestFitSize = 0x7FFFFFFF; |
| 588 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 548 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
| 589 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && | 549 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && |
| 590 fSubHeaps[i]->alignment() == alignment && | 550 fSubHeaps[i]->alignment() == alignment && |
| 591 fSubHeaps[i]->unallocated()) { | 551 fSubHeaps[i]->unallocated()) { |
| 592 VkDeviceSize heapSize = fSubHeaps[i]->size(); | 552 VkDeviceSize heapSize = fSubHeaps[i]->size(); |
| 593 if (heapSize >= alignedSize && heapSize < bestFitSize) { | 553 if (heapSize >= alignedSize && heapSize < bestFitSize) { |
| 594 bestFitIndex = i; | 554 bestFitIndex = i; |
| 595 bestFitSize = heapSize; | 555 bestFitSize = heapSize; |
| 596 } | 556 } |
| 597 } | 557 } |
| 598 } | 558 } |
| 599 | 559 |
| 600 if (bestFitIndex >= 0) { | 560 if (bestFitIndex >= 0) { |
| 601 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); | 561 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); |
| 602 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { | 562 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { |
| 603 fUsedSize += alloc->fSize; | 563 fUsedSize += alloc->fSize; |
| 604 return true; | 564 return true; |
| 605 } | 565 } |
| 606 return false; | 566 return false; |
| 607 } | 567 } |
| 608 | 568 |
| 609 // need to allocate a new subheap | 569 // need to allocate a new subheap |
| 610 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); | 570 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); |
| 611 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment)); | 571 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment)); |
| 612 fAllocSize += alignedSize; | 572 fAllocSize += alignedSize; |
| 613 if (subHeap->alloc(size, alloc)) { | 573 if (subHeap->alloc(size, alloc)) { |
| 614 fUsedSize += alloc->fSize; | 574 fUsedSize += alloc->fSize; |
| 615 return true; | 575 return true; |
| 616 } | 576 } |
| 617 | 577 |
| 618 return false; | 578 return false; |
| 619 } | 579 } |
| 620 | 580 |
| 621 bool GrVkHeap::free(const GrVkAlloc& alloc) { | 581 bool GrVkHeap::free(const GrVkAlloc& alloc) { |
| 622 // a size of 0 means we're using the system heap | 582 // a size of 0 means we're using the system heap |
| 623 if (0 == alloc.fSize) { | 583 if (0 == alloc.fSize) { |
| 624 const GrVkInterface* iface = fGpu->vkInterface(); | 584 const GrVkInterface* iface = fGpu->vkInterface(); |
| 625 GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr)); | 585 GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr)); |
| 626 return true; | 586 return true; |
| 627 } | 587 } |
| 628 | 588 |
| 629 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 589 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
| 630 if (fSubHeaps[i]->memory() == alloc.fMemory) { | 590 if (fSubHeaps[i]->memory() == alloc.fMemory) { |
| 631 fSubHeaps[i]->free(alloc); | 591 fSubHeaps[i]->free(alloc); |
| 632 fUsedSize -= alloc.fSize; | 592 fUsedSize -= alloc.fSize; |
| 633 return true; | 593 return true; |
| 634 } | 594 } |
| 635 } | 595 } |
| 636 | 596 |
| 637 return false; | 597 return false; |
| 638 } | 598 } |
| 639 | 599 |
| 640 | 600 |
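
Note: the lookup this patch trims to a single out-parameter follows the standard Vulkan pattern: scan memoryTypes, keep only indices set in memoryTypeBits, and require that every requested property flag is supported. A minimal standalone sketch of the same pattern with the new signature (the helper name pickMemoryType is hypothetical, not Skia's):

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Sketch of the post-patch lookup: heapIndex is no longer reported to the caller.
    static bool pickMemoryType(const VkPhysicalDeviceMemoryProperties& props,
                               uint32_t typeBits,               // VkMemoryRequirements::memoryTypeBits
                               VkMemoryPropertyFlags requested, // e.g. HOST_VISIBLE | HOST_CACHED
                               uint32_t* typeIndex) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            // Bit i of typeBits marks memory type i as usable for this resource.
            if (typeBits & (1u << i)) {
                // Accept only if all requested flags are supported by this type.
                if ((props.memoryTypes[i].propertyFlags & requested) == requested) {
                    *typeIndex = i;
                    return true;
                }
            }
        }
        return false;
    }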
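The kNoncoherent_Flag bookkeeping kept by both allocation paths matters because CPU writes to mapped non-coherent memory are not visible to the device until flushed. A hedged sketch of the raw Vulkan flush such a flag would gate (device and a GrVkAlloc alloc assumed in scope; offset and size must respect nonCoherentAtomSize per the Vulkan spec):

    // Flush CPU writes in a mapped, non-coherent allocation so the GPU sees them.
    VkMappedMemoryRange range;
    range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.pNext  = nullptr;
    range.memory = alloc.fMemory;
    range.offset = alloc.fOffset;  // assumed atom-aligned for this sketch
    range.size   = VK_WHOLE_SIZE;  // to the end of the memory object
    vkFlushMappedMemoryRanges(device, 1, &range);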
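Both subAlloc() and singleAlloc() round requests with align_size(), whose definition falls in one of the elided regions above. The conventional power-of-two rounding it presumably performs (a guess consistent with Vulkan's power-of-two memReqs.alignment, not the verbatim Skia definition):

    // Round size up to the next multiple of alignment (alignment a power of two).
    static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
        return (size + alignment - 1) & ~(alignment - 1);
    }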