Index: src/gpu/vk/GrVkMemory.h
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
index 279dd58dd5b4a5489115fec2b47de04de9051eeb..197bbe8719cc28f0e1ecc95742a84e45066d32fc 100644
--- a/src/gpu/vk/GrVkMemory.h
+++ b/src/gpu/vk/GrVkMemory.h
@@ -8,6 +8,9 @@
 #ifndef GrVkMemory_DEFINED
 #define GrVkMemory_DEFINED

+#include "GrVkBuffer.h"
+#include "SkTArray.h"
+#include "SkTLList.h"
 #include "vk/GrVkDefines.h"
 #include "vk/GrVkTypes.h"
@@ -16,23 +19,102 @@ class GrVkGpu;
 namespace GrVkMemory {
     /**
      * Allocates vulkan device memory and binds it to the gpu's device for the given object.
-     * Returns true of allocation succeeded.
+     * Returns true if allocation succeeded.
      */
     bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                   VkBuffer buffer,
-                                  const VkMemoryPropertyFlags flags,
+                                  GrVkBuffer::Type type,
                                   GrVkAlloc* alloc);
-    void FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+    void FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type, const GrVkAlloc& alloc);

     bool AllocAndBindImageMemory(const GrVkGpu* gpu,
                                  VkImage image,
-                                 const VkMemoryPropertyFlags flags,
+                                 bool linearTiling,
                                  GrVkAlloc* alloc);
-    void FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+    void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);

     VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);

     VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
 }
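For context, a minimal sketch of how a caller might drive the reworked buffer entry points, which are now keyed by GrVkBuffer::Type instead of raw VkMemoryPropertyFlags so the allocator can route each request to a matching heap. The kVertex_Type value, the surrounding setup, and the error handling are illustrative assumptions, not part of this patch:

    // Sketch only: allocate/bind memory for a vertex buffer via the new
    // type-keyed API, then release it through the matching free call.
    // 'gpu' (GrVkGpu*) and 'buffer' (VkBuffer) are assumed to exist.
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer,
                                              GrVkBuffer::kVertex_Type,
                                              &alloc)) {
        return false;  // caller destroys 'buffer' and bails
    }
    // ... use the buffer ...
    GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);

The image variants follow the same pattern, with the linearTiling flag playing the same routing role for linear vs. optimal tiling.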
+class GrVkSubHeap {
+public:
+    GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex,
+                VkDeviceSize size, VkDeviceSize alignment);
+    ~GrVkSubHeap();
+
+    uint32_t memoryTypeIndex() const { return fMemoryTypeIndex; }
+    VkDeviceSize size() const { return fSize; }
+    VkDeviceSize alignment() const { return fAlignment; }
+    VkDeviceSize freeSize() const { return fFreeSize; }
+    VkDeviceSize largestBlockSize() const { return fLargestBlockSize; }
+    VkDeviceMemory memory() { return fAlloc; }
+
+    bool unallocated() const { return fSize == fFreeSize; }
+
+    bool alloc(VkDeviceSize size, GrVkAlloc* alloc);
+    void free(const GrVkAlloc& alloc);
+
+private:
+    struct Block {
+        VkDeviceSize fOffset;
+        VkDeviceSize fSize;
+    };
+    typedef SkTLList<Block, 16> FreeList;
+
+    const GrVkGpu* fGpu;
+    uint32_t fMemoryTypeIndex;
+    VkDeviceSize fSize;
+    VkDeviceSize fAlignment;
+    VkDeviceSize fFreeSize;
+    VkDeviceSize fLargestBlockSize;
+    VkDeviceSize fLargestBlockOffset;
+    VkDeviceMemory fAlloc;
+    FreeList fFreeList;
+};
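This patch only declares GrVkSubHeap's alloc() and free(); the bodies land in GrVkMemory.cpp. As a rough, standalone sketch of the first-fit suballocation that the Block/FreeList members suggest (std::list standing in for SkTLList, power-of-two alignment assumed, and the fFreeSize/fLargestBlockSize bookkeeping omitted), one plausible shape is:

    #include <cstdint>
    #include <list>

    struct Block {
        uint64_t fOffset;
        uint64_t fSize;
    };

    // First-fit scan over a free list ordered by offset. Hands out the
    // front of the first block big enough, shrinking or removing it.
    bool subHeapAlloc(std::list<Block>* freeList, uint64_t alignment,
                      uint64_t size, uint64_t* outOffset) {
        // pad the request to the subheap's fixed, power-of-two alignment
        uint64_t alignedSize = (size + alignment - 1) & ~(alignment - 1);
        for (auto iter = freeList->begin(); iter != freeList->end(); ++iter) {
            if (iter->fSize < alignedSize) {
                continue;                  // too small, keep scanning
            }
            *outOffset = iter->fOffset;    // take the front of this block
            iter->fOffset += alignedSize;
            iter->fSize -= alignedSize;
            if (0 == iter->fSize) {
                freeList->erase(iter);     // block fully consumed
            }
            return true;
        }
        return false;  // nothing large enough; caller grows a new subheap
    }

free() would return the block and merge it with adjacent free blocks, which is why keeping the list sorted by offset pays off.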
+
+class GrVkHeap {
+public:
+    enum Strategy {
+        kSubAlloc_Strategy,       // alloc large subheaps and suballoc within them
+        kSingleAlloc_Strategy     // alloc/recycle an individual subheap per object
+    };
+
+    GrVkHeap(const GrVkGpu* gpu, Strategy strategy, VkDeviceSize subHeapSize)
+        : fGpu(gpu)
+        , fSubHeapSize(subHeapSize)
+        , fAllocSize(0)
+        , fUsedSize(0) {
+        if (strategy == kSubAlloc_Strategy) {
+            fAllocFunc = &GrVkHeap::subAlloc;
+        } else {
+            fAllocFunc = &GrVkHeap::singleAlloc;
+        }
+    }
+
+    ~GrVkHeap();
+
+    bool alloc(VkDeviceSize size, VkDeviceSize alignment, uint32_t memoryTypeIndex,
+               GrVkAlloc* alloc) {
+        return (*this.*fAllocFunc)(size, alignment, memoryTypeIndex, alloc);
+    }
+    bool free(const GrVkAlloc& alloc);
+
+private:
+    typedef bool (GrVkHeap::*AllocFunc)(VkDeviceSize size, VkDeviceSize alignment,
+                                        uint32_t memoryTypeIndex, GrVkAlloc* alloc);
+
+    bool subAlloc(VkDeviceSize size, VkDeviceSize alignment,
+                  uint32_t memoryTypeIndex, GrVkAlloc* alloc);
+    bool singleAlloc(VkDeviceSize size, VkDeviceSize alignment,
+                     uint32_t memoryTypeIndex, GrVkAlloc* alloc);
+
+    const GrVkGpu* fGpu;
+    VkDeviceSize fSubHeapSize;
+    VkDeviceSize fAllocSize;
+    VkDeviceSize fUsedSize;
+    AllocFunc fAllocFunc;
+    SkTArray<SkAutoTDelete<GrVkSubHeap>> fSubHeaps;
+};
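Taken together, a GrVkGpu could own one heap per usage pattern, picking the strategy per heap: suballocation for small, frequent buffer allocations, and one subheap per object for large resources such as images. A hedged usage sketch, with invented sizes and assuming GrVkAlloc carries fMemory/fOffset fields:

    // Sketch: two heaps with different strategies. Sizes are invented.
    GrVkHeap bufferHeap(gpu, GrVkHeap::kSubAlloc_Strategy, 8 * 1024 * 1024);
    GrVkHeap imageHeap(gpu, GrVkHeap::kSingleAlloc_Strategy, 64 * 1024 * 1024);

    // memReqs and memoryTypeIndex come from the usual Vulkan queries,
    // e.g. vkGetBufferMemoryRequirements().
    GrVkAlloc alloc;
    if (bufferHeap.alloc(memReqs.size, memReqs.alignment,
                         memoryTypeIndex, &alloc)) {
        // alloc names a VkDeviceMemory plus an offset into it; bind with
        // vkBindBufferMemory(device, buffer, alloc.fMemory, alloc.fOffset).
    }
    // later:
    bufferHeap.free(alloc);

Binding the strategy to a member-function pointer in the constructor means the strategy test happens once per heap rather than on every allocation.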

 #endif