/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"

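// Vulkan exposes at most 32 memory types; bit i of typeBits (taken from
// VkMemoryRequirements::memoryTypeBits) is set when memory type i is legal for
// the resource. This helper picks the first legal type whose propertyFlags
// include every bit in requestedMemFlags.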
static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
                                        uint32_t typeBits,
                                        VkMemoryPropertyFlags requestedMemFlags,
                                        uint32_t* typeIndex) {
    uint32_t checkBit = 1;
    for (uint32_t i = 0; i < 32; ++i) {
        if (typeBits & checkBit) {
            uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
                                      requestedMemFlags;
            if (supportedFlags == requestedMemFlags) {
                *typeIndex = i;
                return true;
            }
        }
        checkBit <<= 1;
    }
    return false;
}

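// Allocates a single VkDeviceMemory block that satisfies memReqs and the
// requested property flags. Returns false if no compatible memory type exists
// or if vkAllocateMemory fails; on success *memory holds the new allocation.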
static bool alloc_device_memory(const GrVkGpu* gpu,
                                VkMemoryRequirements* memReqs,
                                const VkMemoryPropertyFlags flags,
                                VkDeviceMemory* memory) {
    uint32_t typeIndex;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs->memoryTypeBits,
                                     flags,
                                     &typeIndex)) {
        return false;
    }

    VkMemoryAllocateInfo allocInfo = {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
        NULL,                                   // pNext
        memReqs->size,                          // allocationSize
        typeIndex,                              // memoryTypeIndex
    };

    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
                                                                 &allocInfo,
                                                                 nullptr,
                                                                 memory));
    if (err) {
        return false;
    }
    return true;
}

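// Queries the buffer's memory requirements, allocates matching device memory,
// and binds it to the buffer at offset 0. A minimal caller sketch (the flag
// choice is hypothetical and error handling is elided):
//
//     VkBuffer buffer;  // previously created with vkCreateBuffer
//     VkDeviceMemory memory;
//     if (GrVkMemory::AllocAndBindBufferMemory(gpu, buffer,
//                                              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
//                                              &memory)) {
//         // buffer is now backed by host-visible memory
//     }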
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          const VkMemoryPropertyFlags flags,
                                          VkDeviceMemory* memory) {
    const GrVkInterface* interface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(interface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
        return false;
    }

    // Bind the allocated memory to the buffer
    VkResult err = GR_VK_CALL(interface, BindBufferMemory(device, buffer, *memory, 0));
    if (err) {
        GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
        return false;
    }
    return true;
}

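// Image analogue of AllocAndBindBufferMemory: queries the image's memory
// requirements, allocates matching device memory, and binds it at offset 0,
// freeing the allocation again if the bind fails.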
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         const VkMemoryPropertyFlags flags,
                                         VkDeviceMemory* memory) {
    const GrVkInterface* interface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(interface, GetImageMemoryRequirements(device, image, &memReqs));

    if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
        return false;
    }

    // Bind the allocated memory to the image
    VkResult err = GR_VK_CALL(interface, BindImageMemory(device, image, *memory, 0));
    if (err) {
        GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
        return false;
    }
    return true;
}

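// Maps an image layout to the pipeline stages that may access an image in that
// layout, for use as a stage mask in a pipeline barrier. Attachment and
// shader-read layouts conservatively map to ALL_COMMANDS here rather than to
// their specific stages.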
VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

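// Maps an image's current layout to the access types that may have touched the
// image while it was in that layout, for use as srcAccessMask when
// transitioning out of the layout.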
VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes), so we ignore
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in the preinitialized or general
    // layout, and the image is linear.
    // TODO: Add a check for linear here so we are not always adding host access to general, and
    // we should only be in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        flags = VK_ACCESS_SHADER_READ_BIT;
    }
    return flags;
}