Chromium Code Reviews

Index: src/gpu/vk/GrVkMemory.cpp
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index c9663ed1502393c3e185f2b384ce64917be8fa4e..927453dbad63a91ba0eb8e8ab4baf8fc81a181c7 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -61,7 +61,8 @@ static bool alloc_device_memory(const GrVkGpu* gpu,
 bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                           VkBuffer buffer,
                                           const VkMemoryPropertyFlags flags,
-                                          VkDeviceMemory* memory) {
+                                          VkDeviceMemory* memory,
+                                          VkDeviceSize* offset) {
     const GrVkInterface* iface = gpu->vkInterface();
     VkDevice device = gpu->device();
@@ -71,9 +72,11 @@ bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
     if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
         return false;
     }
+    // for now, offset is always 0
+    *offset = 0;
-    // Bind Memory to queue
-    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, *memory, 0));
+    // Bind Memory to device
+    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, *memory, *offset));
     if (err) {
         GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
         return false;
@@ -81,10 +84,17 @@ bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
     return true;
 }
+void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, VkDeviceMemory memory,
egdaniel (2016/05/31 13:22:14):
Is the plan to treat buffer and image memory/freei

jvanverth1 (2016/05/31 22:05:57):
At the very least they'll have different heaps. Bu
+                                  VkDeviceSize offset) {
+    const GrVkInterface* iface = gpu->vkInterface();
+    GR_VK_CALL(iface, FreeMemory(gpu->device(), memory, nullptr));
+}
+
 bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                          VkImage image,
                                          const VkMemoryPropertyFlags flags,
-                                         VkDeviceMemory* memory) {
+                                         VkDeviceMemory* memory,
+                                         VkDeviceSize* offset) {
     const GrVkInterface* iface = gpu->vkInterface();
     VkDevice device = gpu->device();
@@ -94,9 +104,11 @@ bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
     if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
         return false;
     }
+    // for now, offset is always 0
+    *offset = 0;
-    // Bind Memory to queue
-    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, *memory, 0));
+    // Bind Memory to device
+    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, *memory, *offset));
     if (err) {
         GR_VK_CALL(iface, FreeMemory(device, *memory, nullptr));
         return false;
@@ -104,6 +116,12 @@ bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
     return true;
 }
+void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, VkDeviceMemory memory,
+                                 VkDeviceSize offset) {
+    const GrVkInterface* iface = gpu->vkInterface();
+    GR_VK_CALL(iface, FreeMemory(gpu->device(), memory, nullptr));
+}
+
 VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
     if (VK_IMAGE_LAYOUT_GENERAL == layout) {
         return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
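For context on how the new signatures are meant to be used together, here is a minimal caller-side sketch. This is hypothetical code, not part of this CL: the GrVkGpu pointer and VkBuffer are assumed to come from the surrounding buffer-creation path, the helper name is made up, and the HOST_VISIBLE flag is only an example property request.

    // Sketch only: a hypothetical caller of the new API. Assumes a valid
    // GrVkGpu* and a VkBuffer created elsewhere.
    static bool alloc_use_and_free(const GrVkGpu* gpu, VkBuffer buffer) {
        VkDeviceMemory memory = VK_NULL_HANDLE;
        VkDeviceSize offset = 0;
        // The bind offset now comes back through the out-parameter
        // (always 0 for the time being) instead of being assumed by the caller.
        if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer,
                                                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                  &memory, &offset)) {
            return false;
        }
        // ... map, upload, and draw with the buffer ...
        // The remembered offset is handed back on the free path so a future
        // sub-allocating version can locate the block inside a shared heap.
        GrVkMemory::FreeBufferMemory(gpu, memory, offset);
        return true;
    }

Carrying the offset through both the bind and the free path, even while it is hard-coded to 0, means callers would not need to change again if buffer and image allocations later move to separate sub-allocated heaps, as discussed in the review thread above.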