| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
| 9 #include "GrVkImage.h" | 9 #include "GrVkImage.h" |
| 10 #include "GrVkMemory.h" | 10 #include "GrVkMemory.h" |
| 11 #include "GrVkUtil.h" | 11 #include "GrVkUtil.h" |
| 12 | 12 |
| 13 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X) | 13 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X) |
| 14 | 14 |
| 15 VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) { | 15 VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) { |
| 16 switch (format) { | 16 switch (format) { |
| 17 case VK_FORMAT_S8_UINT: | 17 case VK_FORMAT_S8_UINT: |
| 18 return VK_IMAGE_ASPECT_STENCIL_BIT; | 18 return VK_IMAGE_ASPECT_STENCIL_BIT; |
| 19 case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough | 19 case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough |
| 20 case VK_FORMAT_D32_SFLOAT_S8_UINT: | 20 case VK_FORMAT_D32_SFLOAT_S8_UINT: |
| 21 return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; | 21 return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; |
| 22 default: | 22 default: |
| 23 SkASSERT(GrVkFormatToPixelConfig(format, nullptr)); | 23 SkASSERT(GrVkFormatToPixelConfig(format, nullptr)); |
| 24 return VK_IMAGE_ASPECT_COLOR_BIT; | 24 return VK_IMAGE_ASPECT_COLOR_BIT; |
| 25 } | 25 } |
| 26 } | 26 } |
| 27 | 27 |
| 28 void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout, | 28 void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout, |
| 29 VkAccessFlags srcAccessMask, | |
| 30 VkAccessFlags dstAccessMask, | 29 VkAccessFlags dstAccessMask, |
| 31 VkPipelineStageFlags srcStageMask, | |
| 32 VkPipelineStageFlags dstStageMask, | 30 VkPipelineStageFlags dstStageMask, |
| 33 uint32_t baseMipLevel, | |
| 34 uint32_t levelCount, | |
| 35 bool byRegion) { | 31 bool byRegion) { |
| 36 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout && | 32 SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout && |
| 37 VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout); | 33 VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout); |
| 38 // Is this reasonable? Could someone want to keep the same layout but use th
e masks to force | 34 // Is this reasonable? Could someone want to keep the same layout but use th
e masks to force |
| 39 // a barrier on certain things? | 35 // a barrier on certain things? |
| 40 if (newLayout == fCurrentLayout) { | 36 if (newLayout == fCurrentLayout) { |
| 41 return; | 37 return; |
| 42 } | 38 } |
| 39 |
| 40 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(fCurrentLayo
ut); |
| 41 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(f
CurrentLayout); |
| 42 |
| 43 VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fResource->fForma
t); | 43 VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fResource->fForma
t); |
| 44 VkImageMemoryBarrier imageMemoryBarrier = { | 44 VkImageMemoryBarrier imageMemoryBarrier = { |
| 45 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType | 45 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType |
| 46 NULL, // pNext | 46 NULL, // pNext |
| 47 srcAccessMask, // outputMask | 47 srcAccessMask, // outputMask |
| 48 dstAccessMask, // inputMask | 48 dstAccessMask, // inputMask |
| 49 fCurrentLayout, // oldLayout | 49 fCurrentLayout, // oldLayout |
| 50 newLayout, // newLayout | 50 newLayout, // newLayout |
| 51 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex | 51 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex |
| 52 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex | 52 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex |
| 53 fResource->fImage, // image | 53 fResource->fImage, // image |
| 54 { aspectFlags, baseMipLevel, levelCount, 0, 1 } // subresourceRange | 54 { aspectFlags, 0, fResource->fLevelCount, 0, 1 } // subresourceRange |
| 55 }; | 55 }; |
| 56 | 56 |
| 57 // TODO: restrict to area of image we're interested in | |
| 58 gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemor
yBarrier); | 57 gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemor
yBarrier); |
| 59 | 58 |
| 60 fCurrentLayout = newLayout; | 59 fCurrentLayout = newLayout; |
| 61 } | 60 } |
| 62 | 61 |
| 63 const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu, | 62 const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu, |
| 64 const ImageDesc& imageDesc)
{ | 63 const ImageDesc& imageDesc)
{ |
| 65 VkImage image = 0; | 64 VkImage image = 0; |
| 66 VkDeviceMemory alloc; | 65 VkDeviceMemory alloc; |
| 67 | 66 |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 129 } | 128 } |
| 130 } | 129 } |
| 131 | 130 |
// Releases the Vulkan objects this Resource owns: the VkImage handle is
// destroyed first, then its backing VkDeviceMemory is freed.
void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
}
| 136 | 135 |
// Intentionally a no-op: a BorrowedResource presumably wraps an image and
// memory owned elsewhere (the name suggests external ownership — confirm
// against the header), so unlike Resource::freeGPUData it destroys nothing.
void GrVkImage::BorrowedResource::freeGPUData(const GrVkGpu* gpu) const {
}
| OLD | NEW |