OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkBuffer.h" | 8 #include "GrVkBuffer.h" |
9 #include "GrVkGpu.h" | 9 #include "GrVkGpu.h" |
10 #include "GrVkMemory.h" | 10 #include "GrVkMemory.h" |
(...skipping 11 matching lines...) Expand all Loading... | |
22 VkBuffer buffer; | 22 VkBuffer buffer; |
23 GrVkAlloc alloc; | 23 GrVkAlloc alloc; |
24 | 24 |
25 // create the buffer object | 25 // create the buffer object |
26 VkBufferCreateInfo bufInfo; | 26 VkBufferCreateInfo bufInfo; |
27 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo)); | 27 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo)); |
28 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; | 28 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; |
29 bufInfo.flags = 0; | 29 bufInfo.flags = 0; |
30 bufInfo.size = desc.fSizeInBytes; | 30 bufInfo.size = desc.fSizeInBytes; |
31 switch (desc.fType) { | 31 switch (desc.fType) { |
32 case kVertex_Type: | 32 case kVertex_Type: |
egdaniel
2016/06/30 13:51:19
nit (from not your change), should these case's al
jvanverth1
2016/06/30 14:56:38
Done.
| |
33 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; | 33 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; |
34 break; | 34 break; |
35 case kIndex_Type: | 35 case kIndex_Type: |
36 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; | 36 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; |
37 break; | 37 break; |
38 case kUniform_Type: | 38 case kUniform_Type: |
39 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; | 39 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; |
40 break; | 40 break; |
41 case kCopyRead_Type: | 41 case kCopyRead_Type: |
42 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; | 42 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; |
43 break; | 43 break; |
44 case kCopyWrite_Type: | 44 case kCopyWrite_Type: |
45 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; | 45 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; |
46 break; | 46 break; |
47 } | |
48 if (!desc.fDynamic) { | |
49 bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT; | |
50 } | |
47 | 51 |
48 } | |
49 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; | 52 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; |
50 bufInfo.queueFamilyIndexCount = 0; | 53 bufInfo.queueFamilyIndexCount = 0; |
51 bufInfo.pQueueFamilyIndices = nullptr; | 54 bufInfo.pQueueFamilyIndices = nullptr; |
52 | 55 |
53 VkResult err; | 56 VkResult err; |
54 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer)); | 57 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer)); |
55 if (err) { | 58 if (err) { |
56 return nullptr; | 59 return nullptr; |
57 } | 60 } |
58 | 61 |
59 if (!GrVkMemory::AllocAndBindBufferMemory(gpu, | 62 if (!GrVkMemory::AllocAndBindBufferMemory(gpu, |
60 buffer, | 63 buffer, |
61 desc.fType, | 64 desc.fType, |
65 desc.fDynamic, | |
62 &alloc)) { | 66 &alloc)) { |
63 return nullptr; | 67 return nullptr; |
64 } | 68 } |
65 | 69 |
66 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType); | 70 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType); |
67 if (!resource) { | 71 if (!resource) { |
68 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr)); | 72 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr)); |
69 GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc); | 73 GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc); |
70 return nullptr; | 74 return nullptr; |
71 } | 75 } |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
113 void GrVkBuffer::vkAbandon() { | 117 void GrVkBuffer::vkAbandon() { |
114 fResource->unrefAndAbandon(); | 118 fResource->unrefAndAbandon(); |
115 fResource = nullptr; | 119 fResource = nullptr; |
116 fMapPtr = nullptr; | 120 fMapPtr = nullptr; |
117 VALIDATE(); | 121 VALIDATE(); |
118 } | 122 } |
119 | 123 |
120 void* GrVkBuffer::vkMap(const GrVkGpu* gpu) { | 124 void* GrVkBuffer::vkMap(const GrVkGpu* gpu) { |
121 VALIDATE(); | 125 VALIDATE(); |
122 SkASSERT(!this->vkIsMapped()); | 126 SkASSERT(!this->vkIsMapped()); |
127 if (!fDesc.fDynamic) { | |
128 return nullptr; | |
129 } | |
123 | 130 |
124 if (!fResource->unique()) { | 131 if (!fResource->unique()) { |
125 // in use by the command buffer, so we need to create a new one | 132 // in use by the command buffer, so we need to create a new one |
126 fResource->unref(gpu); | 133 fResource->unref(gpu); |
127 fResource = Create(gpu, fDesc); | 134 fResource = Create(gpu, fDesc); |
128 } | 135 } |
129 | 136 |
130 const GrVkAlloc& alloc = this->alloc(); | 137 const GrVkAlloc& alloc = this->alloc(); |
131 VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset, | 138 VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset, |
132 VK_WHOLE_SIZE, 0, &fMapPtr)); | 139 VK_WHOLE_SIZE, 0, &fMapPtr)); |
133 if (err) { | 140 if (err) { |
134 fMapPtr = nullptr; | 141 fMapPtr = nullptr; |
135 } | 142 } |
136 | 143 |
137 VALIDATE(); | 144 VALIDATE(); |
138 return fMapPtr; | 145 return fMapPtr; |
139 } | 146 } |
140 | 147 |
141 void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) { | 148 void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) { |
142 VALIDATE(); | 149 VALIDATE(); |
143 SkASSERT(this->vkIsMapped()); | 150 SkASSERT(this->vkIsMapped()); |
151 SkASSERT(fDesc.fDynamic); | |
144 | 152 |
145 VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory)); | 153 VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory)); |
146 | 154 |
147 fMapPtr = nullptr; | 155 fMapPtr = nullptr; |
148 } | 156 } |
149 | 157 |
150 bool GrVkBuffer::vkIsMapped() const { | 158 bool GrVkBuffer::vkIsMapped() const { |
151 VALIDATE(); | 159 VALIDATE(); |
152 return SkToBool(fMapPtr); | 160 return SkToBool(fMapPtr); |
153 } | 161 } |
154 | 162 |
155 bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes, | 163 bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes, |
156 bool* createdNewBuffer) { | 164 bool* createdNewBuffer) { |
157 SkASSERT(!this->vkIsMapped()); | 165 SkASSERT(!this->vkIsMapped()); |
158 VALIDATE(); | 166 VALIDATE(); |
159 if (srcSizeInBytes > fDesc.fSizeInBytes) { | 167 if (srcSizeInBytes > fDesc.fSizeInBytes) { |
160 return false; | 168 return false; |
161 } | 169 } |
162 | 170 |
171 if (!fDesc.fDynamic) { | |
172 return gpu->updateBuffer(this, src, srcSizeInBytes); | |
173 } | |
174 | |
163 if (!fResource->unique()) { | 175 if (!fResource->unique()) { |
164 // in use by the command buffer, so we need to create a new one | 176 // in use by the command buffer, so we need to create a new one |
165 fResource->unref(gpu); | 177 fResource->unref(gpu); |
166 fResource = Create(gpu, fDesc); | 178 fResource = Create(gpu, fDesc); |
167 if (createdNewBuffer) { | 179 if (createdNewBuffer) { |
168 *createdNewBuffer = true; | 180 *createdNewBuffer = true; |
169 } | 181 } |
170 } | 182 } |
171 | 183 |
172 void* mapPtr; | 184 void* mapPtr; |
(...skipping 10 matching lines...) Expand all Loading... | |
183 VK_CALL(gpu, UnmapMemory(gpu->device(), alloc.fMemory)); | 195 VK_CALL(gpu, UnmapMemory(gpu->device(), alloc.fMemory)); |
184 | 196 |
185 return true; | 197 return true; |
186 } | 198 } |
187 | 199 |
188 void GrVkBuffer::validate() const { | 200 void GrVkBuffer::validate() const { |
189 SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType | 201 SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType |
190 || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType | 202 || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType |
191 || kUniform_Type == fDesc.fType); | 203 || kUniform_Type == fDesc.fType); |
192 } | 204 } |
OLD | NEW |