--- a/GrVkPipelineStateDataManager.cpp
+++ b/GrVkPipelineStateDataManager.cpp
@@ -1,10 +1,10 @@
 /*
  * Copyright 2016 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 #include "GrVkPipelineStateDataManager.h"
 
 #include "GrVkGpu.h"
@@ -246,36 +246,39 @@
 };
 
 template<> struct set_uniform_matrix<4> {
     inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
         GR_STATIC_ASSERT(sizeof(float) == 4);
         buffer = static_cast<char*>(buffer) + uniformOffset;
         memcpy(buffer, matrices, count * 16 * sizeof(float));
     }
 };
 
-void GrVkPipelineStateDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
+bool GrVkPipelineStateDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
                                                         GrVkUniformBuffer* vertexBuffer,
                                                         GrVkUniformBuffer* fragmentBuffer) const {
+    bool updatedBuffer = false;
     if (vertexBuffer && fVertexUniformsDirty) {
         vertexBuffer->addMemoryBarrier(gpu,
                                        VK_ACCESS_UNIFORM_READ_BIT,
                                        VK_ACCESS_HOST_WRITE_BIT,
                                        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                        VK_PIPELINE_STAGE_HOST_BIT,
                                        false);
-        SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize));
+        SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize,
+                                                &updatedBuffer));
         fVertexUniformsDirty = false;
     }
 
     if (fragmentBuffer && fFragmentUniformsDirty) {
         fragmentBuffer->addMemoryBarrier(gpu,
                                          VK_ACCESS_UNIFORM_READ_BIT,
                                          VK_ACCESS_HOST_WRITE_BIT,
                                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                          VK_PIPELINE_STAGE_HOST_BIT,
                                          false);
         SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
-                                                  fFragmentUniformSize));
+                                                  fFragmentUniformSize, &updatedBuffer));
         fFragmentUniformsDirty = false;
     }
+    return updatedBuffer;
 }
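
Note on the change: with the new signature, uploadUniformBuffers() returns whether either updateData() call set the shared updatedBuffer flag, so a caller can find out that an underlying uniform buffer changed during the upload. Below is a minimal sketch of a caller reacting to that return value, assuming a pipeline-state object that owns the data manager and both uniform buffers; every name in it other than uploadUniformBuffers() is hypothetical and not taken from this diff.

// Minimal caller sketch. Only uploadUniformBuffers() and its bool return
// come from this diff; the class, members, and the descriptor-set helper
// are invented for illustration.
void HypotheticalPipelineState::setData(const GrVkGpu* gpu) {
    // ... uniform values have already been written into fDataManager ...
    bool buffersChanged = fDataManager.uploadUniformBuffers(gpu,
                                                            fVertexUniformBuffer,
                                                            fFragmentUniformBuffer);
    if (buffersChanged) {
        // The flag suggests an underlying buffer object changed during the
        // upload, so a descriptor set cached against the old buffer handle
        // would now be stale and needs to be rewritten before the next draw.
        this->rewriteUniformDescriptorSet(gpu);  // hypothetical helper
    }
}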