OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkBuffer.h" | 8 #include "GrVkBuffer.h" |
9 #include "GrVkGpu.h" | 9 #include "GrVkGpu.h" |
10 #include "GrVkMemory.h" | 10 #include "GrVkMemory.h" |
(...skipping 11 matching lines...) |
22 VkBuffer buffer; | 22 VkBuffer buffer; |
23 VkDeviceMemory alloc; | 23 VkDeviceMemory alloc; |
24 | 24 |
25 // create the buffer object | 25 // create the buffer object |
26 VkBufferCreateInfo bufInfo; | 26 VkBufferCreateInfo bufInfo; |
27 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo)); | 27 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo)); |
28 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; | 28 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; |
29 bufInfo.flags = 0; | 29 bufInfo.flags = 0; |
30 bufInfo.size = desc.fSizeInBytes; | 30 bufInfo.size = desc.fSizeInBytes; |
31 switch (desc.fType) { | 31 switch (desc.fType) { |
32 case kVertex_Type: | 32 case kVertex_GrBufferType: |
33 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; | 33 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; |
34 break; | 34 break; |
35 case kIndex_Type: | 35 case kIndex_GrBufferType: |
36 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; | 36 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; |
37 break; | 37 break; |
38 case kUniform_Type: | 38 case kUniform_GrBufferType: |
39 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; | 39 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; |
40 break; | 40 break; |
41 case kCopyRead_Type: | 41 case kXferCpuToGpu_GrBufferType: |
42 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; | 42 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; |
43 break; | 43 break; |
44 case kCopyWrite_Type: | 44 case kXferGpuToCpu_GrBufferType: |
45 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; | 45 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; |
46 break; | 46 break; |
47 | 47 |
48 } | 48 } |
49 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; | 49 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; |
50 bufInfo.queueFamilyIndexCount = 0; | 50 bufInfo.queueFamilyIndexCount = 0; |
51 bufInfo.pQueueFamilyIndices = nullptr; | 51 bufInfo.pQueueFamilyIndices = nullptr; |
52 | 52 |
53 VkResult err; | 53 VkResult err; |
54 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer)); | 54 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer)); |
(...skipping 15 matching lines...) |
70 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc); | 70 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc); |
71 if (!resource) { | 71 if (!resource) { |
72 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr)); | 72 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr)); |
73 VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr)); | 73 VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr)); |
74 return nullptr; | 74 return nullptr; |
75 } | 75 } |
76 | 76 |
77 return resource; | 77 return resource; |
78 } | 78 } |
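
The fifteen skipped lines above sit between vkCreateBuffer succeeding and the Resource wrapper at line 70, so they must allocate device memory and bind it to the buffer (the alloc handle that Resource takes ownership of). A minimal sketch of that allocate-and-bind sequence, assuming host-visible memory and raw Vulkan 1.0 entry points; the helper below is hypothetical, not this CL's GrVkMemory code:

    // Sketch only (requires <vulkan/vulkan.h> and <string.h>): allocate
    // host-visible memory for `buffer` and bind it. The four vk* calls are
    // standard Vulkan 1.0; the helper itself is a hypothetical stand-in.
    static bool alloc_and_bind(VkDevice device, VkPhysicalDevice physDev,
                               VkBuffer buffer, VkDeviceMemory* alloc) {
        VkMemoryRequirements memReqs;
        vkGetBufferMemoryRequirements(device, buffer, &memReqs);

        VkPhysicalDeviceMemoryProperties memProps;
        vkGetPhysicalDeviceMemoryProperties(physDev, &memProps);

        // Pick the first host-visible memory type the buffer may live in.
        uint32_t typeIndex = UINT32_MAX;
        for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
            if ((memReqs.memoryTypeBits & (1u << i)) &&
                (memProps.memoryTypes[i].propertyFlags &
                 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
                typeIndex = i;
                break;
            }
        }
        if (UINT32_MAX == typeIndex) {
            return false;
        }

        VkMemoryAllocateInfo allocInfo;
        memset(&allocInfo, 0, sizeof(VkMemoryAllocateInfo));
        allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        allocInfo.allocationSize = memReqs.size;
        allocInfo.memoryTypeIndex = typeIndex;

        if (VK_SUCCESS != vkAllocateMemory(device, &allocInfo, nullptr, alloc)) {
            return false;
        }
        return VK_SUCCESS == vkBindBufferMemory(device, buffer, *alloc, 0);
    }
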
79 | 79 |
 | 80 GrVkBuffer::GrVkBuffer(GrVkGpu* gpu, const Desc& desc, const GrVkBuffer::Resource* resource) |
| 81 : INHERITED(gpu, desc.fType, desc.fSizeInBytes, desc.fAccessPattern, false), |
| 82 fDesc(desc), |
| 83 fResource(resource), |
| 84 fMapPtr(nullptr) { |
| 85 } |
| 86 |
| 87 GrVkGpu* GrVkBuffer::vkGpu() const { |
| 88 return static_cast<GrVkGpu*>(this->getGpu()); |
| 89 } |
80 | 90 |
81 void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu, | 91 void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu, |
82 VkAccessFlags srcAccessMask, | 92 VkAccessFlags srcAccessMask, |
83 VkAccessFlags dstAccessMask, | 93 VkAccessFlags dstAccessMask, |
84 VkPipelineStageFlags srcStageMask, | 94 VkPipelineStageFlags srcStageMask, |
85 VkPipelineStageFlags dstStageMask, | 95 VkPipelineStageFlags dstStageMask, |
86 bool byRegion) const { | 96 bool byRegion) const { |
87 VkBufferMemoryBarrier bufferMemoryBarrier = { | 97 VkBufferMemoryBarrier bufferMemoryBarrier = { |
88 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType | 98 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType |
89 NULL, // pNext | 99 NULL, // pNext |
(...skipping 10 matching lines...) |
100 gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier); | 110 gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier); |
101 } | 111 } |
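
addMemoryBarrier() wraps this buffer in a VkBufferMemoryBarrier and forwards it to the GrVkGpu, which presumably records a vkCmdPipelineBarrier on the current command buffer. A hypothetical call site (not from this CL) that makes a transfer write visible to vertex fetch before a draw:

    // Sketch only: publish a transfer write into this buffer to the
    // vertex-input stage. All flag constants are standard Vulkan 1.0.
    this->addMemoryBarrier(gpu,
                           VK_ACCESS_TRANSFER_WRITE_BIT,         // srcAccessMask
                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,  // dstAccessMask
                           VK_PIPELINE_STAGE_TRANSFER_BIT,       // srcStageMask
                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,   // dstStageMask
                           false);                               // byRegion
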
102 | 112 |
103 void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const { | 113 void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const { |
104 SkASSERT(fBuffer); | 114 SkASSERT(fBuffer); |
105 SkASSERT(fAlloc); | 115 SkASSERT(fAlloc); |
106 VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr)); | 116 VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr)); |
107 VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr)); | 117 VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr)); |
108 } | 118 } |
109 | 119 |
110 void GrVkBuffer::vkRelease(const GrVkGpu* gpu) { | 120 void GrVkBuffer::onRelease() { |
111 VALIDATE(); | 121 if (!this->wasDestroyed()) { |
112 fResource->unref(gpu); | 122 VALIDATE(); |
113 fResource = nullptr; | 123 fResource->unref(this->vkGpu()); |
114 fMapPtr = nullptr; | 124 fResource = nullptr; |
115 VALIDATE(); | 125 fMapPtr = nullptr; |
| 126 VALIDATE(); |
| 127 } |
| 128 INHERITED::onRelease(); |
116 } | 129 } |
117 | 130 |
118 void GrVkBuffer::vkAbandon() { | 131 void GrVkBuffer::onAbandon() { |
119 fResource->unrefAndAbandon(); | 132 fResource->unrefAndAbandon(); |
120 fMapPtr = nullptr; | 133 fMapPtr = nullptr; |
121 VALIDATE(); | 134 VALIDATE(); |
| 135 INHERITED::onAbandon(); |
122 } | 136 } |
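
This is the heart of the CL: the hand-rolled vkRelease/vkAbandon entry points become onRelease/onAbandon overrides, so the base class drives teardown and the Vulkan backend only supplies hooks. onRelease() skips the Vulkan calls when wasDestroyed() is already true, while onAbandon() drops the ref without touching the device at all; both then chain to INHERITED. A sketch of the assumed override set (names follow Skia's GrBuffer/GrGpuResource conventions; this diff does not show the header):

    // Sketch only, not the verbatim header from this CL.
    class GrVkBuffer : public GrBuffer {
    protected:
        void onRelease() override;   // free VkBuffer/VkDeviceMemory, chain up
        void onAbandon() override;   // handles already lost; drop refs, chain up
        void onMap() override;       // sets fMapPtr on success
        void onUnmap() override;
        bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
    private:
        typedef GrBuffer INHERITED;
    };
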
123 | 137 |
124 void* GrVkBuffer::vkMap(const GrVkGpu* gpu) { | 138 void GrVkBuffer::onMap() { |
| 139 if (this->wasDestroyed()) { |
| 140 return; |
| 141 } |
| 142 |
125 VALIDATE(); | 143 VALIDATE(); |
126 SkASSERT(!this->vkIsMapped()); | 144 SkASSERT(!this->vkIsMapped()); |
127 | 145 |
128 if (!fResource->unique()) { | 146 if (!fResource->unique()) { |
129 // in use by the command buffer, so we need to create a new one | 147 // in use by the command buffer, so we need to create a new one |
130 fResource->unref(gpu); | 148 fResource->unref(this->vkGpu()); |
131 fResource = Create(gpu, fDesc); | 149 fResource = Create(this->vkGpu(), fDesc); |
132 } | 150 } |
133 | 151 |
134 VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr)); | 152 VkResult err = VK_CALL(this->vkGpu(), MapMemory(this->vkGpu()->device(), alloc(), 0, |
 | 153 VK_WHOLE_SIZE, 0, &fMapPtr)); |
135 if (err) { | 154 if (err) { |
136 fMapPtr = nullptr; | 155 fMapPtr = nullptr; |
137 } | 156 } |
138 | 157 |
139 VALIDATE(); | 158 VALIDATE(); |
140 return fMapPtr; | |
141 } | 159 } |
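
The unique() check implements buffer renaming: if a recorded command buffer still holds a ref on the old Resource, mapping it would race pending GPU work, so a fresh VkBuffer/VkDeviceMemory pair is created and the old one dies once the command buffer finishes with it. One caveat worth flagging: Create() can return nullptr (see lines 71-75), and the MapMemory call above would then dereference a null fResource. A hedged hardening sketch, not part of this CL:

    // Sketch only (not in this CL): bail out if renaming fails, so the
    // MapMemory call below never dereferences a null fResource.
    if (!fResource->unique()) {
        fResource->unref(this->vkGpu());
        fResource = Create(this->vkGpu(), fDesc);
        if (!fResource) {
            return;   // fMapPtr stays null; the caller sees the map as failed
        }
    }
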
142 | 160 |
143 void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) { | 161 void GrVkBuffer::onUnmap() { |
144 VALIDATE(); | 162 VALIDATE(); |
145 SkASSERT(this->vkIsMapped()); | 163 SkASSERT(this->vkIsMapped()); |
146 | 164 |
147 VK_CALL(gpu, UnmapMemory(gpu->device(), alloc())); | 165 VK_CALL(this->vkGpu(), UnmapMemory(this->vkGpu()->device(), alloc())); |
148 | 166 |
149 fMapPtr = nullptr; | 167 fMapPtr = nullptr; |
150 } | 168 } |
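
Neither onMap() nor onUnmap() flushes or invalidates the mapped range, so this code relies on the allocation landing in HOST_COHERENT memory. If the allocator could ever pick a non-coherent type (an assumption, not something this diff shows), CPU writes would need an explicit flush before unmapping; vkFlushMappedMemoryRanges is the standard Vulkan 1.0 call for that:

    // Sketch only: flush the mapped range before unmapping when the memory
    // type lacks VK_MEMORY_PROPERTY_HOST_COHERENT_BIT.
    VkMappedMemoryRange range;
    memset(&range, 0, sizeof(VkMappedMemoryRange));
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = this->alloc();
    range.offset = 0;
    range.size = VK_WHOLE_SIZE;
    VK_CALL(this->vkGpu(), FlushMappedMemoryRanges(this->vkGpu()->device(), 1, &range));
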
151 | 169 |
152 bool GrVkBuffer::vkIsMapped() const { | 170 bool GrVkBuffer::vkIsMapped() const { |
153 VALIDATE(); | 171 VALIDATE(); |
154 return SkToBool(fMapPtr); | 172 return SkToBool(fMapPtr); |
155 } | 173 } |
156 | 174 |
157 bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) { | 175 bool GrVkBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { |
| 176 if (this->wasDestroyed()) { |
| 177 return false; |
| 178 } |
| 179 |
158 SkASSERT(!this->vkIsMapped()); | 180 SkASSERT(!this->vkIsMapped()); |
159 VALIDATE(); | 181 VALIDATE(); |
160 if (srcSizeInBytes > fDesc.fSizeInBytes) { | 182 if (srcSizeInBytes > fDesc.fSizeInBytes) { |
161 return false; | 183 return false; |
162 } | 184 } |
163 | 185 |
164 if (!fResource->unique()) { | 186 if (!fResource->unique()) { |
165 // in use by the command buffer, so we need to create a new one | 187 // in use by the command buffer, so we need to create a new one |
166 fResource->unref(gpu); | 188 fResource->unref(this->vkGpu()); |
167 fResource = Create(gpu, fDesc); | 189 fResource = Create(this->vkGpu(), fDesc); |
168 } | 190 } |
169 | 191 |
170 void* mapPtr; | 192 void* mapPtr; |
171 VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr)); | 193 VkResult err = VK_CALL(this->vkGpu(), MapMemory(this->vkGpu()->device(), alloc(), 0, |
 | 194 srcSizeInBytes, 0, &mapPtr)); |
172 | 195 |
173 if (VK_SUCCESS != err) { | 196 if (VK_SUCCESS != err) { |
174 return false; | 197 return false; |
175 } | 198 } |
176 | 199 |
177 memcpy(mapPtr, src, srcSizeInBytes); | 200 memcpy(mapPtr, src, srcSizeInBytes); |
178 | 201 |
179 VK_CALL(gpu, UnmapMemory(gpu->device(), alloc())); | 202 VK_CALL(this->vkGpu(), UnmapMemory(this->vkGpu()->device(), alloc())); |
180 | 203 |
181 return true; | 204 return true; |
182 } | 205 } |
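
onUpdateData() maps only srcSizeInBytes rather than VK_WHOLE_SIZE, memcpys, and unmaps, with the same renaming dance as onMap() when the Resource is still referenced by a command buffer. From the caller's side the entry points are the public base-class methods; a hypothetical usage sketch (the updateData/map/unmap names are assumed from the GrBuffer interface, which this diff does not show):

    // Hypothetical caller (sketch): upload vertices via the public interface,
    // which routes to onUpdateData(); fall back to map/memcpy/unmap on failure.
    static const float kVerts[] = { 0.f, 0.f,  1.f, 0.f,  1.f, 1.f };
    if (!buffer->updateData(kVerts, sizeof(kVerts))) {
        if (void* ptr = buffer->map()) {
            memcpy(ptr, kVerts, sizeof(kVerts));
            buffer->unmap();
        }
    }
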
183 | 206 |
184 void GrVkBuffer::validate() const { | 207 void GrVkBuffer::validate() const { |
185 SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType | 208 SkASSERT(!fResource || kVertex_GrBufferType == fDesc.fType || |
186 || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType | 209 kIndex_GrBufferType == fDesc.fType || |
187 || kUniform_Type == fDesc.fType); | 210 kXferCpuToGpu_GrBufferType == fDesc.fType || |
| 211 kXferGpuToCpu_GrBufferType == fDesc.fType || |
| 212 kUniform_GrBufferType == fDesc.fType); |
188 } | 213 } |
189 | 214 |