OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkProgram.h" | 8 #include "GrVkPipelineState.h" |
9 | 9 |
10 #include "GrPipeline.h" | 10 #include "GrPipeline.h" |
11 #include "GrVkCommandBuffer.h" | 11 #include "GrVkCommandBuffer.h" |
12 #include "GrVkDescriptorPool.h" | 12 #include "GrVkDescriptorPool.h" |
13 #include "GrVkGpu.h" | 13 #include "GrVkGpu.h" |
14 #include "GrVkImageView.h" | 14 #include "GrVkImageView.h" |
15 #include "GrVkMemory.h" | 15 #include "GrVkMemory.h" |
16 #include "GrVkPipeline.h" | 16 #include "GrVkPipeline.h" |
| 17 #include "GrVkRenderTarget.h" |
17 #include "GrVkSampler.h" | 18 #include "GrVkSampler.h" |
18 #include "GrVkTexture.h" | 19 #include "GrVkTexture.h" |
19 #include "GrVkUniformBuffer.h" | 20 #include "GrVkUniformBuffer.h" |
20 #include "glsl/GrGLSLFragmentProcessor.h" | 21 #include "glsl/GrGLSLFragmentProcessor.h" |
21 #include "glsl/GrGLSLGeometryProcessor.h" | 22 #include "glsl/GrGLSLGeometryProcessor.h" |
22 #include "glsl/GrGLSLXferProcessor.h" | 23 #include "glsl/GrGLSLXferProcessor.h" |
23 | 24 |
24 GrVkProgram::GrVkProgram(GrVkGpu* gpu, | 25 GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu, |
25 GrVkPipeline* pipeline, | 26 const GrVkPipelineState::Desc& desc, |
26 VkPipelineLayout layout, | 27 GrVkPipeline* pipeline, |
27 VkDescriptorSetLayout dsLayout[2], | 28 VkPipelineLayout layout, |
28 const BuiltinUniformHandles& builtinUniformHandles, | 29 VkDescriptorSetLayout dsLayout[2], |
29 const UniformInfoArray& uniforms, | 30 const BuiltinUniformHandles& builtinUniformHandles, |
30 uint32_t vertexUniformSize, | 31 const UniformInfoArray& uniforms, |
31 uint32_t fragmentUniformSize, | 32 uint32_t vertexUniformSize, |
32 uint32_t numSamplers, | 33 uint32_t fragmentUniformSize, |
33 GrGLSLPrimitiveProcessor* geometryProcessor, | 34 uint32_t numSamplers, |
34 GrGLSLXferProcessor* xferProcessor, | 35 GrGLSLPrimitiveProcessor* geometryProcessor, |
35 const GrGLSLFragProcs& fragmentProcessors) | 36 GrGLSLXferProcessor* xferProcessor, |
| 37 const GrGLSLFragProcs& fragmentProcessors) |
36 : fPipeline(pipeline) | 38 : fPipeline(pipeline) |
37 , fPipelineLayout(layout) | 39 , fPipelineLayout(layout) |
38 , fBuiltinUniformHandles(builtinUniformHandles) | 40 , fBuiltinUniformHandles(builtinUniformHandles) |
39 , fGeometryProcessor(geometryProcessor) | 41 , fGeometryProcessor(geometryProcessor) |
40 , fXferProcessor(xferProcessor) | 42 , fXferProcessor(xferProcessor) |
41 , fFragmentProcessors(fragmentProcessors) | 43 , fFragmentProcessors(fragmentProcessors) |
42 , fProgramDataManager(uniforms, vertexUniformSize, fragmentUniformSize) | 44 , fDesc(desc) |
| 45 , fDataManager(uniforms, vertexUniformSize, fragmentUniformSize) |
43 , fSamplerPoolManager(dsLayout[GrVkUniformHandler::kSamplerDescSet], | 46 , fSamplerPoolManager(dsLayout[GrVkUniformHandler::kSamplerDescSet], |
44 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers, gpu) | 47 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers, gpu) |
45 , fUniformPoolManager(dsLayout[GrVkUniformHandler::kUniformBufferDescSet], | 48 , fUniformPoolManager(dsLayout[GrVkUniformHandler::kUniformBufferDescSet], |
46 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, gpu) { | 49 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, gpu) { |
47 fSamplers.setReserve(numSamplers); | 50 fSamplers.setReserve(numSamplers); |
48 fTextureViews.setReserve(numSamplers); | 51 fTextureViews.setReserve(numSamplers); |
49 fTextures.setReserve(numSamplers); | 52 fTextures.setReserve(numSamplers); |
50 | 53 |
51 fDescriptorSets[0] = VK_NULL_HANDLE; | 54 fDescriptorSets[0] = VK_NULL_HANDLE; |
52 fDescriptorSets[1] = VK_NULL_HANDLE; | 55 fDescriptorSets[1] = VK_NULL_HANDLE; |
53 | 56 |
54 // Currently we are always binding a descriptor set for uniform buffers. | 57 // Currently we are always binding a descriptor set for uniform buffers. |
55 fStartDS = GrVkUniformHandler::kUniformBufferDescSet; | 58 fStartDS = GrVkUniformHandler::kUniformBufferDescSet; |
56 fDSCount = 1; | 59 fDSCount = 1; |
57 if (numSamplers) { | 60 if (numSamplers) { |
58 fDSCount++; | 61 fDSCount++; |
59 fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet); | 62 fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet); |
60 } | 63 } |
61 | 64 |
62 fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize, true)); | 65 fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize, true)); |
63 fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize, true)); | 66 fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize, true)); |
64 | 67 |
65 fNumSamplers = numSamplers; | 68 fNumSamplers = numSamplers; |
66 } | 69 } |
67 | 70 |
68 GrVkProgram::~GrVkProgram() { | 71 GrVkPipelineState::~GrVkPipelineState() { |
69 // Must have freed all GPU resources before this is destroyed | 72 // Must have freed all GPU resources before this is destroyed |
70 SkASSERT(!fPipeline); | 73 SkASSERT(!fPipeline); |
71 SkASSERT(!fPipelineLayout); | 74 SkASSERT(!fPipelineLayout); |
72 SkASSERT(!fSamplers.count()); | 75 SkASSERT(!fSamplers.count()); |
73 SkASSERT(!fTextureViews.count()); | 76 SkASSERT(!fTextureViews.count()); |
74 SkASSERT(!fTextures.count()); | 77 SkASSERT(!fTextures.count()); |
75 } | 78 } |
76 | 79 |
77 void GrVkProgram::freeTempResources(const GrVkGpu* gpu) { | 80 void GrVkPipelineState::freeTempResources(const GrVkGpu* gpu) { |
78 for (int i = 0; i < fSamplers.count(); ++i) { | 81 for (int i = 0; i < fSamplers.count(); ++i) { |
79 fSamplers[i]->unref(gpu); | 82 fSamplers[i]->unref(gpu); |
80 } | 83 } |
81 fSamplers.rewind(); | 84 fSamplers.rewind(); |
82 | 85 |
83 for (int i = 0; i < fTextureViews.count(); ++i) { | 86 for (int i = 0; i < fTextureViews.count(); ++i) { |
84 fTextureViews[i]->unref(gpu); | 87 fTextureViews[i]->unref(gpu); |
85 } | 88 } |
86 fTextureViews.rewind(); | 89 fTextureViews.rewind(); |
87 | 90 |
88 for (int i = 0; i < fTextures.count(); ++i) { | 91 for (int i = 0; i < fTextures.count(); ++i) { |
89 fTextures[i]->unref(gpu); | 92 fTextures[i]->unref(gpu); |
90 } | 93 } |
91 fTextures.rewind(); | 94 fTextures.rewind(); |
92 } | 95 } |
93 | 96 |
94 void GrVkProgram::freeGPUResources(const GrVkGpu* gpu) { | 97 void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) { |
95 if (fPipeline) { | 98 if (fPipeline) { |
96 fPipeline->unref(gpu); | 99 fPipeline->unref(gpu); |
97 fPipeline = nullptr; | 100 fPipeline = nullptr; |
98 } | 101 } |
99 | 102 |
100 if (fPipelineLayout) { | 103 if (fPipelineLayout) { |
101 GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(), | 104 GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(), |
102 fPipelineLayout, | 105 fPipelineLayout, |
103 nullptr)); | 106 nullptr)); |
104 fPipelineLayout = VK_NULL_HANDLE; | 107 fPipelineLayout = VK_NULL_HANDLE; |
105 } | 108 } |
106 | 109 |
107 if (fVertexUniformBuffer) { | 110 if (fVertexUniformBuffer) { |
108 fVertexUniformBuffer->release(gpu); | 111 fVertexUniformBuffer->release(gpu); |
109 } | 112 } |
110 | 113 |
111 if (fFragmentUniformBuffer) { | 114 if (fFragmentUniformBuffer) { |
112 fFragmentUniformBuffer->release(gpu); | 115 fFragmentUniformBuffer->release(gpu); |
113 } | 116 } |
114 | 117 |
115 fSamplerPoolManager.freeGPUResources(gpu); | 118 fSamplerPoolManager.freeGPUResources(gpu); |
116 fUniformPoolManager.freeGPUResources(gpu); | 119 fUniformPoolManager.freeGPUResources(gpu); |
117 | 120 |
118 this->freeTempResources(gpu); | 121 this->freeTempResources(gpu); |
119 } | 122 } |
120 | 123 |
121 void GrVkProgram::abandonGPUResources() { | 124 void GrVkPipelineState::abandonGPUResources() { |
122 fPipeline->unrefAndAbandon(); | 125 fPipeline->unrefAndAbandon(); |
123 fPipeline = nullptr; | 126 fPipeline = nullptr; |
124 | 127 |
125 fPipelineLayout = VK_NULL_HANDLE; | 128 fPipelineLayout = VK_NULL_HANDLE; |
126 | 129 |
127 fVertexUniformBuffer->abandon(); | 130 fVertexUniformBuffer->abandon(); |
128 fFragmentUniformBuffer->abandon(); | 131 fFragmentUniformBuffer->abandon(); |
129 | 132 |
130 for (int i = 0; i < fSamplers.count(); ++i) { | 133 for (int i = 0; i < fSamplers.count(); ++i) { |
131 fSamplers[i]->unrefAndAbandon(); | 134 fSamplers[i]->unrefAndAbandon(); |
(...skipping 18 matching lines...)
150 SkTArray<const GrTextureAccess*>* textureBindings) { | 153 SkTArray<const GrTextureAccess*>* textureBindings) { |
151 if (int numTextures = processor.numTextures()) { | 154 if (int numTextures = processor.numTextures()) { |
152 const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures); | 155 const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures); |
153 int i = 0; | 156 int i = 0; |
154 do { | 157 do { |
155 bindings[i] = &processor.textureAccess(i); | 158 bindings[i] = &processor.textureAccess(i); |
156 } while (++i < numTextures); | 159 } while (++i < numTextures); |
157 } | 160 } |
158 } | 161 } |
159 | 162 |
160 void GrVkProgram::setData(GrVkGpu* gpu, | 163 void GrVkPipelineState::setData(GrVkGpu* gpu, |
161 const GrPrimitiveProcessor& primProc, | 164 const GrPrimitiveProcessor& primProc, |
162 const GrPipeline& pipeline) { | 165 const GrPipeline& pipeline) { |
163 // This is here to protect against someone calling setData multiple times in a row without | 166 // This is here to protect against someone calling setData multiple times in a row without |
164 // freeing the tempData between calls. | 167 // freeing the tempData between calls. |
165 this->freeTempResources(gpu); | 168 this->freeTempResources(gpu); |
166 | 169 |
167 this->setRenderTargetState(pipeline); | 170 this->setRenderTargetState(pipeline); |
168 | 171 |
169 SkSTArray<8, const GrTextureAccess*> textureBindings; | 172 SkSTArray<8, const GrTextureAccess*> textureBindings; |
170 | 173 |
171 fGeometryProcessor->setData(fProgramDataManager, primProc); | 174 fGeometryProcessor->setData(fDataManager, primProc); |
172 append_texture_bindings(primProc, &textureBindings); | 175 append_texture_bindings(primProc, &textureBindings); |
173 | 176 |
174 for (int i = 0; i < fFragmentProcessors.count(); ++i) { | 177 for (int i = 0; i < fFragmentProcessors.count(); ++i) { |
175 const GrFragmentProcessor& processor = pipeline.getFragmentProcessor(i); | 178 const GrFragmentProcessor& processor = pipeline.getFragmentProcessor(i); |
176 fFragmentProcessors[i]->setData(fProgramDataManager, processor); | 179 fFragmentProcessors[i]->setData(fDataManager, processor); |
177 fGeometryProcessor->setTransformData(primProc, fProgramDataManager, i, | 180 fGeometryProcessor->setTransformData(primProc, fDataManager, i, |
178 processor.coordTransforms()); | 181 processor.coordTransforms()); |
179 append_texture_bindings(processor, &textureBindings); | 182 append_texture_bindings(processor, &textureBindings); |
180 } | 183 } |
181 | 184 |
182 fXferProcessor->setData(fProgramDataManager, pipeline.getXferProcessor()); | 185 fXferProcessor->setData(fDataManager, pipeline.getXferProcessor()); |
183 append_texture_bindings(pipeline.getXferProcessor(), &textureBindings); | 186 append_texture_bindings(pipeline.getXferProcessor(), &textureBindings); |
184 | 187 |
185 // Get new descriptor sets | 188 // Get new descriptor sets |
186 if (fNumSamplers) { | 189 if (fNumSamplers) { |
187 fSamplerPoolManager.getNewDescriptorSet(gpu, | 190 fSamplerPoolManager.getNewDescriptorSet(gpu, |
188 &fDescriptorSets[GrVkUniformHandler::kSamplerDescSet]); | 191 &fDescriptorSets[GrVkUniformHandler::kSamplerDescSet]); |
189 } | 192 } |
190 fUniformPoolManager.getNewDescriptorSet(gpu, | 193 fUniformPoolManager.getNewDescriptorSet(gpu, |
191 &fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]); | 194 &fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]); |
192 | 195 |
193 this->writeUniformBuffers(gpu); | 196 this->writeUniformBuffers(gpu); |
194 | 197 |
195 this->writeSamplers(gpu, textureBindings); | 198 this->writeSamplers(gpu, textureBindings); |
196 } | 199 } |
197 | 200 |
198 void GrVkProgram::writeUniformBuffers(const GrVkGpu* gpu) { | 201 void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) { |
199 fProgramDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer); | 202 fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer); |
200 | 203 |
201 VkWriteDescriptorSet descriptorWrites[2]; | 204 VkWriteDescriptorSet descriptorWrites[2]; |
202 memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet)); | 205 memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet)); |
203 | 206 |
204 uint32_t firstUniformWrite = 0; | 207 uint32_t firstUniformWrite = 0; |
205 uint32_t uniformBindingUpdateCount = 0; | 208 uint32_t uniformBindingUpdateCount = 0; |
206 | 209 |
207 VkDescriptorBufferInfo vertBufferInfo; | 210 VkDescriptorBufferInfo vertBufferInfo; |
208 // Vertex Uniform Buffer | 211 // Vertex Uniform Buffer |
209 if (fVertexUniformBuffer.get()) { | 212 if (fVertexUniformBuffer.get()) { |
210 ++uniformBindingUpdateCount; | 213 ++uniformBindingUpdateCount; |
211 memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); | 214 memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); |
212 vertBufferInfo.buffer = fVertexUniformBuffer->buffer(); | 215 vertBufferInfo.buffer = fVertexUniformBuffer->buffer(); |
213 vertBufferInfo.offset = 0; | 216 vertBufferInfo.offset = 0; |
214 vertBufferInfo.range = fVertexUniformBuffer->size(); | 217 vertBufferInfo.range = fVertexUniformBuffer->size(); |
215 | 218 |
216 descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; | 219 descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; |
217 descriptorWrites[0].pNext = nullptr; | 220 descriptorWrites[0].pNext = nullptr; |
218 descriptorWrites[0].dstSet = fDescriptorSets[1]; | 221 descriptorWrites[0].dstSet = fDescriptorSets[1]; |
219 descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding; | 222 descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding; |
220 descriptorWrites[0].dstArrayElement = 0; | 223 descriptorWrites[0].dstArrayElement = 0; |
221 descriptorWrites[0].descriptorCount = 1; | 224 descriptorWrites[0].descriptorCount = 1; |
222 descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; | 225 descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; |
223 descriptorWrites[0].pImageInfo = nullptr; | 226 descriptorWrites[0].pImageInfo = nullptr; |
224 descriptorWrites[0].pBufferInfo = &vertBufferInfo; | 227 descriptorWrites[0].pBufferInfo = &vertBufferInfo; |
225 descriptorWrites[0].pTexelBufferView = nullptr; | 228 descriptorWrites[0].pTexelBufferView = nullptr; |
| 229 |
| 230 fVertexUniformBuffer->addMemoryBarrier(gpu, |
| 231 VK_ACCESS_HOST_WRITE_BIT, |
| 232 VK_ACCESS_UNIFORM_READ_BIT, |
| 233 VK_PIPELINE_STAGE_HOST_BIT, |
| 234 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, |
| 235 false); |
226 } | 236 } |
227 | 237 |
228 VkDescriptorBufferInfo fragBufferInfo; | 238 VkDescriptorBufferInfo fragBufferInfo; |
229 // Fragment Uniform Buffer | 239 // Fragment Uniform Buffer |
230 if (fFragmentUniformBuffer.get()) { | 240 if (fFragmentUniformBuffer.get()) { |
231 if (0 == uniformBindingUpdateCount) { | 241 if (0 == uniformBindingUpdateCount) { |
232 firstUniformWrite = 1; | 242 firstUniformWrite = 1; |
233 } | 243 } |
234 ++uniformBindingUpdateCount; | 244 ++uniformBindingUpdateCount; |
235 memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); | 245 memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo)); |
236 fragBufferInfo.buffer = fFragmentUniformBuffer->buffer(); | 246 fragBufferInfo.buffer = fFragmentUniformBuffer->buffer(); |
237 fragBufferInfo.offset = 0; | 247 fragBufferInfo.offset = 0; |
238 fragBufferInfo.range = fFragmentUniformBuffer->size(); | 248 fragBufferInfo.range = fFragmentUniformBuffer->size(); |
239 | 249 |
240 descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; | 250 descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; |
241 descriptorWrites[1].pNext = nullptr; | 251 descriptorWrites[1].pNext = nullptr; |
242 descriptorWrites[1].dstSet = fDescriptorSets[1]; | 252 descriptorWrites[1].dstSet = fDescriptorSets[1]; |
243 descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding; | 253 descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding; |
244 descriptorWrites[1].dstArrayElement = 0; | 254 descriptorWrites[1].dstArrayElement = 0; |
245 descriptorWrites[1].descriptorCount = 1; | 255 descriptorWrites[1].descriptorCount = 1; |
246 descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; | 256 descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; |
247 descriptorWrites[1].pImageInfo = nullptr; | 257 descriptorWrites[1].pImageInfo = nullptr; |
248 descriptorWrites[1].pBufferInfo = &fragBufferInfo; | 258 descriptorWrites[1].pBufferInfo = &fragBufferInfo; |
249 descriptorWrites[1].pTexelBufferView = nullptr; | 259 descriptorWrites[1].pTexelBufferView = nullptr; |
| 260 |
| 261 fFragmentUniformBuffer->addMemoryBarrier(gpu, |
| 262 VK_ACCESS_HOST_WRITE_BIT, |
| 263 VK_ACCESS_UNIFORM_READ_BIT, |
| 264 VK_PIPELINE_STAGE_HOST_BIT, |
| 265 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, |
| 266 false); |
250 } | 267 } |
251 | 268 |
252 if (uniformBindingUpdateCount) { | 269 if (uniformBindingUpdateCount) { |
253 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), | 270 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), |
254 uniformBindingUpdateCount, | 271 uniformBindingUpdateCount, |
255 &descriptorWrites[firstUniformWrite], | 272 &descriptorWrites[firstUniformWrite], |
256 0, nullptr)); | 273 0, nullptr)); |
257 } | 274 } |
258 } | 275 } |
259 | 276 |
260 void GrVkProgram::writeSamplers(GrVkGpu* gpu, | 277 void GrVkPipelineState::writeSamplers(GrVkGpu* gpu, |
261 const SkTArray<const GrTextureAccess*>& textureBindings) { | 278 const SkTArray<const GrTextureAccess*>& textureBindings) { |
262 SkASSERT(fNumSamplers == textureBindings.count()); | 279 SkASSERT(fNumSamplers == textureBindings.count()); |
263 | 280 |
264 for (int i = 0; i < textureBindings.count(); ++i) { | 281 for (int i = 0; i < textureBindings.count(); ++i) { |
265 const GrTextureParams& params = textureBindings[i]->getParams(); | 282 const GrTextureParams& params = textureBindings[i]->getParams(); |
266 fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(params)); | 283 fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(params)); |
267 | 284 |
268 GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture()); | 285 GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture()); |
269 | 286 |
270 const GrVkImage::Resource* textureResource = texture->resource(); | 287 const GrVkImage::Resource* textureResource = texture->resource(); |
271 textureResource->ref(); | 288 textureResource->ref(); |
(...skipping 37 matching lines...)
309 writeInfo.pTexelBufferView = nullptr; | 326 writeInfo.pTexelBufferView = nullptr; |
310 | 327 |
311 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), | 328 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), |
312 1, | 329 1, |
313 &writeInfo, | 330 &writeInfo, |
314 0, | 331 0, |
315 nullptr)); | 332 nullptr)); |
316 } | 333 } |
317 } | 334 } |
318 | 335 |
319 void GrVkProgram::setRenderTargetState(const GrPipeline& pipeline) { | 336 void GrVkPipelineState::setRenderTargetState(const GrPipeline& pipeline) { |
320 // Load the RT height uniform if it is needed to y-flip gl_FragCoord. | 337 // Load the RT height uniform if it is needed to y-flip gl_FragCoord. |
321 if (fBuiltinUniformHandles.fRTHeightUni.isValid() && | 338 if (fBuiltinUniformHandles.fRTHeightUni.isValid() && |
322 fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) { | 339 fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) { |
323 fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, | 340 fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, |
324 SkIntToScalar(pipeline.getRenderTarget()->height())); | 341 SkIntToScalar(pipeline.getRenderTarget()->height())); |
325 } | 342 } |
326 | 343 |
327 // set RT adjustment | 344 // set RT adjustment |
328 const GrRenderTarget* rt = pipeline.getRenderTarget(); | 345 const GrRenderTarget* rt = pipeline.getRenderTarget(); |
329 SkISize size; | 346 SkISize size; |
330 size.set(rt->width(), rt->height()); | 347 size.set(rt->width(), rt->height()); |
331 SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid()); | 348 SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid()); |
332 if (fRenderTargetState.fRenderTargetOrigin != rt->origin() || | 349 if (fRenderTargetState.fRenderTargetOrigin != rt->origin() || |
333 fRenderTargetState.fRenderTargetSize != size) { | 350 fRenderTargetState.fRenderTargetSize != size) { |
334 fRenderTargetState.fRenderTargetSize = size; | 351 fRenderTargetState.fRenderTargetSize = size; |
335 fRenderTargetState.fRenderTargetOrigin = rt->origin(); | 352 fRenderTargetState.fRenderTargetOrigin = rt->origin(); |
336 | 353 |
337 float rtAdjustmentVec[4]; | 354 float rtAdjustmentVec[4]; |
338 fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec); | 355 fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec); |
339 fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec); | 356 fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec); |
340 } | 357 } |
341 } | 358 } |
342 | 359 |
343 void GrVkProgram::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) { | 360 void GrVkPipelineState::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) { |
344 commandBuffer->bindPipeline(gpu, fPipeline); | 361 commandBuffer->bindPipeline(gpu, fPipeline); |
345 | 362 |
346 if (fDSCount) { | 363 if (fDSCount) { |
347 commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, fStartDS, fDSCount, | 364 commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, fStartDS, fDSCount, |
348 &fDescriptorSets[fStartDS], 0, nullptr); | 365 &fDescriptorSets[fStartDS], 0, nullptr); |
349 } | 366 } |
350 } | 367 } |
351 | 368 |
352 void GrVkProgram::addUniformResources(GrVkCommandBuffer& commandBuffer) { | 369 void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) { |
353 if (fSamplerPoolManager.fPool) { | 370 if (fSamplerPoolManager.fPool) { |
354 commandBuffer.addResource(fSamplerPoolManager.fPool); | 371 commandBuffer.addResource(fSamplerPoolManager.fPool); |
355 } | 372 } |
356 if (fUniformPoolManager.fPool) { | 373 if (fUniformPoolManager.fPool) { |
357 commandBuffer.addResource(fUniformPoolManager.fPool); | 374 commandBuffer.addResource(fUniformPoolManager.fPool); |
358 } | 375 } |
359 | 376 |
360 if (fVertexUniformBuffer.get()) { | 377 if (fVertexUniformBuffer.get()) { |
361 commandBuffer.addResource(fVertexUniformBuffer->resource()); | 378 commandBuffer.addResource(fVertexUniformBuffer->resource()); |
362 } | 379 } |
363 if (fFragmentUniformBuffer.get()) { | 380 if (fFragmentUniformBuffer.get()) { |
364 commandBuffer.addResource(fFragmentUniformBuffer->resource()); | 381 commandBuffer.addResource(fFragmentUniformBuffer->resource()); |
365 } | 382 } |
366 for (int i = 0; i < fSamplers.count(); ++i) { | 383 for (int i = 0; i < fSamplers.count(); ++i) { |
367 commandBuffer.addResource(fSamplers[i]); | 384 commandBuffer.addResource(fSamplers[i]); |
368 } | 385 } |
369 | 386 |
370 for (int i = 0; i < fTextureViews.count(); ++i) { | 387 for (int i = 0; i < fTextureViews.count(); ++i) { |
371 commandBuffer.addResource(fTextureViews[i]); | 388 commandBuffer.addResource(fTextureViews[i]); |
372 } | 389 } |
373 | 390 |
374 for (int i = 0; i < fTextures.count(); ++i) { | 391 for (int i = 0; i < fTextures.count(); ++i) { |
375 commandBuffer.addResource(fTextures[i]); | 392 commandBuffer.addResource(fTextures[i]); |
376 } | 393 } |
377 } | 394 } |
378 | 395 |
379 //////////////////////////////////////////////////////////////////////////////// | 396 //////////////////////////////////////////////////////////////////////////////// |
380 | 397 |
381 void GrVkProgram::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) { | 398 void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) { |
382 if (fPool) { | 399 if (fPool) { |
383 fPool->unref(gpu); | 400 fPool->unref(gpu); |
384 SkASSERT(fMaxDescriptorSets < (SK_MaxU32 >> 1)); | 401 SkASSERT(fMaxDescriptorSets < (SK_MaxU32 >> 1)); |
385 fMaxDescriptorSets = fMaxDescriptorSets << 1; | 402 fMaxDescriptorSets = fMaxDescriptorSets << 1; |
386 | 403 |
387 } | 404 } |
388 if (fMaxDescriptorSets) { | 405 if (fMaxDescriptorSets) { |
389 fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType, | 406 fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType, |
390 fMaxDescriptorSets); | 407 fMaxDescriptorSets); |
391 } | 408 } |
392 SkASSERT(fPool || !fMaxDescriptorSets); | 409 SkASSERT(fPool || !fMaxDescriptorSets); |
393 } | 410 } |
394 | 411 |
395 void GrVkProgram::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds) { | 412 void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds) { |
396 if (!fMaxDescriptorSets) { | 413 if (!fMaxDescriptorSets) { |
397 return; | 414 return; |
398 } | 415 } |
399 if (fCurrentDescriptorSet == fMaxDescriptorSets) { | 416 if (fCurrentDescriptorSet == fMaxDescriptorSets) { |
400 this->getNewPool(gpu); | 417 this->getNewPool(gpu); |
401 fCurrentDescriptorSet = 0; | 418 fCurrentDescriptorSet = 0; |
402 } | 419 } |
403 fCurrentDescriptorSet++; | 420 fCurrentDescriptorSet++; |
404 | 421 |
405 VkDescriptorSetAllocateInfo dsAllocateInfo; | 422 VkDescriptorSetAllocateInfo dsAllocateInfo; |
406 memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo)); | 423 memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo)); |
407 dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; | 424 dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; |
408 dsAllocateInfo.pNext = nullptr; | 425 dsAllocateInfo.pNext = nullptr; |
409 dsAllocateInfo.descriptorPool = fPool->descPool(); | 426 dsAllocateInfo.descriptorPool = fPool->descPool(); |
410 dsAllocateInfo.descriptorSetCount = 1; | 427 dsAllocateInfo.descriptorSetCount = 1; |
411 dsAllocateInfo.pSetLayouts = &fDescLayout; | 428 dsAllocateInfo.pSetLayouts = &fDescLayout; |
412 | |
413 GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(), | 429 GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(), |
414 &dsAllocateInfo, | 430 &dsAllocateInfo, |
415 ds)); | 431 ds)); |
416 } | 432 } |
417 | 433 |
418 void GrVkProgram::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) { | 434 void GrVkPipelineState::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) { |
419 if (fDescLayout) { | 435 if (fDescLayout) { |
420 GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout, | 436 GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout, |
421 nullptr)); | 437 nullptr)); |
422 fDescLayout = VK_NULL_HANDLE; | 438 fDescLayout = VK_NULL_HANDLE; |
423 } | 439 } |
424 | 440 |
425 if (fPool) { | 441 if (fPool) { |
426 fPool->unref(gpu); | 442 fPool->unref(gpu); |
427 fPool = nullptr; | 443 fPool = nullptr; |
428 } | 444 } |
429 } | 445 } |
430 | 446 |
431 void GrVkProgram::DescriptorPoolManager::abandonGPUResources() { | 447 void GrVkPipelineState::DescriptorPoolManager::abandonGPUResources() { |
432 fDescLayout = VK_NULL_HANDLE; | 448 fDescLayout = VK_NULL_HANDLE; |
433 if (fPool) { | 449 if (fPool) { |
434 fPool->unrefAndAbandon(); | 450 fPool->unrefAndAbandon(); |
435 fPool = nullptr; | 451 fPool = nullptr; |
436 } | 452 } |
437 } | 453 } |
| 454 |
| 455 uint32_t get_blend_info_key(const GrPipeline& pipeline) { |
| 456 GrXferProcessor::BlendInfo blendInfo; |
| 457 pipeline.getXferProcessor().getBlendInfo(&blendInfo); |
| 458 |
| 459 static const uint32_t kBlendWriteShift = 1; |
| 460 static const uint32_t kBlendCoeffShift = 5; |
| 461 GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift)); |
| 462 GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4); |
| 463 |
| 464 uint32_t key = blendInfo.fWriteColor; |
| 465 key |= (blendInfo.fSrcBlend << kBlendWriteShift); |
| 466 key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift)); |
| 467 key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift)); |
| 468 |
| 469 return key; |
| 470 } |
| 471 |
| 472 void GrVkPipelineState::BuildStateKey(const GrPipeline& pipeline, GrPrimitiveType primitiveType, |
| 473 SkTArray<uint8_t, true>* key) { |
| 474 // Save room for the key length and key header |
| 475 key->reset(); |
| 476 key->push_back_n(kData_StateKeyOffset); |
| 477 |
| 478 GrProcessorKeyBuilder b(key); |
| 479 |
| 480 GrVkRenderTarget* vkRT = (GrVkRenderTarget*)pipeline.getRenderTarget(); |
| 481 vkRT->simpleRenderPass()->genKey(&b); |
| 482 |
| 483 pipeline.getStencil().genKey(&b); |
| 484 |
| 485 SkASSERT(sizeof(GrPipelineBuilder::DrawFace) <= sizeof(uint32_t)); |
| 486 b.add32(pipeline.getDrawFace()); |
| 487 |
| 488 b.add32(get_blend_info_key(pipeline)); |
| 489 |
| 490 b.add32(primitiveType); |
| 491 |
| 492 // Set key length |
| 493 int keyLength = key->count(); |
| 494 SkASSERT(0 == (keyLength % 4)); |
| 495 *reinterpret_cast<uint32_t*>(key->begin()) = SkToU32(keyLength); |
| 496 } |