Index: src/gpu/vk/GrVkMemory.cpp
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index 4f96fcdba034b54c4efcbf881a663353df480a70..98b2f89e243a0964228fd54f3fba9e0f51b6b0cc 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -10,24 +10,16 @@
 #include "GrVkGpu.h"
 #include "GrVkUtil.h"
 
-#ifdef SK_DEBUG
-// for simple tracking of how much we're using in each heap
-// last counter is for non-subheap allocations
-VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 };
-#endif
-
 static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps,
                                         uint32_t typeBits,
                                         VkMemoryPropertyFlags requestedMemFlags,
-                                        uint32_t* typeIndex,
-                                        uint32_t* heapIndex) {
+                                        uint32_t* typeIndex) {
     for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) {
         if (typeBits & (1 << i)) {
             uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                       requestedMemFlags;
             if (supportedFlags == requestedMemFlags) {
                 *typeIndex = i;
-                *heapIndex = physDevMemProps.memoryTypes[i].heapIndex;
                 return true;
             }
         }
@@ -64,7 +56,6 @@
     GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));
 
     uint32_t typeIndex = 0;
-    uint32_t heapIndex = 0;
     const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
     if (dynamic) {
         // try to get cached and ideally non-coherent memory first
@@ -72,14 +63,12 @@
                                          memReqs.memoryTypeBits,
                                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
-                                         &typeIndex,
-                                         &heapIndex)) {
+                                         &typeIndex)) {
             // some sort of host-visible memory type should always be available for dynamic buffers
             SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                          memReqs.memoryTypeBits,
                                                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
-                                                         &typeIndex,
-                                                         &heapIndex));
+                                                         &typeIndex));
         }
 
         VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
@@ -90,22 +79,15 @@
         SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                      memReqs.memoryTypeBits,
                                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
-                                                     &typeIndex,
-                                                     &heapIndex));
+                                                     &typeIndex));
         alloc->fFlags = 0x0;
     }
 
     GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
 
-    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
-        // if static, try to allocate from non-host-visible non-device-local memory instead
-        if (dynamic ||
-            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
-                                         0, &typeIndex, &heapIndex) ||
-            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
-            SkDebugf("Failed to alloc buffer\n");
-            return false;
-        }
+    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
+        SkDebugf("Failed to alloc buffer\n");
+        return false;
     }
 
     // Bind buffer
@@ -148,7 +130,6 @@
     GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));
 
     uint32_t typeIndex = 0;
-    uint32_t heapIndex = 0;
     GrVkHeap* heap;
     const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties();
     if (linearTiling) {
@@ -157,14 +138,12 @@
         if (!get_valid_memory_type_index(phDevMemProps,
                                          memReqs.memoryTypeBits,
                                          desiredMemProps,
-                                         &typeIndex,
-                                         &heapIndex)) {
+                                         &typeIndex)) {
             // some sort of host-visible memory type should always be available
             SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                          memReqs.memoryTypeBits,
                                                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
-                                                         &typeIndex,
-                                                         &heapIndex));
+                                                         &typeIndex));
         }
         heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
         VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags;
@@ -175,8 +154,7 @@
         SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps,
                                                      memReqs.memoryTypeBits,
                                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
-                                                     &typeIndex,
-                                                     &heapIndex));
+                                                     &typeIndex));
         if (memReqs.size <= kMaxSmallImageSize) {
             heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
         } else {
@@ -185,15 +163,9 @@
         alloc->fFlags = 0x0;
     }
 
-    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
-        // if optimal, try to allocate from non-host-visible non-device-local memory instead
-        if (linearTiling ||
-            !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits,
-                                         0, &typeIndex, &heapIndex) ||
-            !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
-            SkDebugf("Failed to alloc image\n");
-            return false;
-        }
+    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
+        SkDebugf("Failed to alloc image\n");
+        return false;
     }
 
     // Bind image
@@ -459,12 +431,11 @@
 #endif
 }
 
-GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex,
+GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
                          VkDeviceSize size, VkDeviceSize alignment)
     : INHERITED(size, alignment)
     , fGpu(gpu)
-    , fMemoryTypeIndex(memoryTypeIndex)
-    , fHeapIndex(heapIndex) {
+    , fMemoryTypeIndex(memoryTypeIndex) {
 
     VkMemoryAllocateInfo allocInfo = {
         VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,      // sType
@@ -479,20 +450,12 @@
                                                     &fAlloc));
     if (VK_SUCCESS != err) {
         this->reset();
-    }
-#ifdef SK_DEBUG
-    else {
-        gHeapUsage[heapIndex] += size;
-    }
-#endif
+    }
 }
 
 GrVkSubHeap::~GrVkSubHeap() {
     const GrVkInterface* iface = fGpu->vkInterface();
     GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr));
-#ifdef SK_DEBUG
-    gHeapUsage[fHeapIndex] -= fSize;
-#endif
 }
 
 bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) {
@@ -507,7 +470,7 @@
 }
 
 bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment,
-                        uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
+                        uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
     VkDeviceSize alignedSize = align_size(size, alignment);
 
     // if requested is larger than our subheap allocation, just alloc directly
@@ -528,9 +491,6 @@
         }
         alloc->fOffset = 0;
         alloc->fSize = 0;    // hint that this is not a subheap allocation
-#ifdef SK_DEBUG
-        gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize;
-#endif
 
         return true;
     }
@@ -560,11 +520,11 @@
 
     // need to allocate a new subheap
     SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
-    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment));
+    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment));
     // try to recover from failed allocation by only allocating what we need
     if (subHeap->size() == 0) {
         VkDeviceSize alignedSize = align_size(size, alignment);
-        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
+        subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
         if (subHeap->size() == 0) {
             return false;
         }
@@ -579,7 +539,7 @@
 }
 
 bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
-                           uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) {
+                           uint32_t memoryTypeIndex, GrVkAlloc* alloc) {
     VkDeviceSize alignedSize = align_size(size, alignment);
 
     // first try to find an unallocated subheap that fits our allocation request
@@ -608,7 +568,7 @@
 
     // need to allocate a new subheap
     SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back();
-    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment));
+    subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment));
    fAllocSize += alignedSize;
    if (subHeap->alloc(size, alloc)) {
        fUsedSize += alloc->fSize;
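For context on the API being simplified: after this patch, callers select only a memory type index, because the owning heap can always be recovered from VkMemoryType::heapIndex when it is actually needed. The sketch below is a minimal standalone illustration of that selection loop, not Skia's code; the helper name pickMemoryType is hypothetical, and only the Vulkan types (VkPhysicalDeviceMemoryProperties, VkMemoryPropertyFlags) come from the real API.

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Minimal sketch: pick the first memory type permitted by typeBits whose
    // property flags cover everything the caller requested. No heap index is
    // threaded through; memProps.memoryTypes[i].heapIndex recovers it on demand.
    static bool pickMemoryType(const VkPhysicalDeviceMemoryProperties& memProps,
                               uint32_t typeBits,               // VkMemoryRequirements::memoryTypeBits
                               VkMemoryPropertyFlags requested, // e.g. HOST_VISIBLE | HOST_CACHED
                               uint32_t* typeIndex) {
        for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
            // bit i of typeBits marks memory type i as usable for this resource
            if ((typeBits & (1u << i)) &&
                (memProps.memoryTypes[i].propertyFlags & requested) == requested) {
                *typeIndex = i;
                return true;
            }
        }
        return false;
    }

The patch applies the same observation throughout: GrVkSubHeap no longer stores fHeapIndex, and the gHeapUsage[] debug counters keyed on heap index are removed along with it.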