| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkUniformHandler.h" | 8 #include "GrVkUniformHandler.h" |
| 9 #include "glsl/GrGLSLProgramBuilder.h" | 9 #include "glsl/GrGLSLProgramBuilder.h" |
| 10 | 10 |
| 11 // To determine whether a current offset is aligned, we can just 'and' the lowest bits with the | 11 // To determine whether a current offset is aligned, we can just 'and' the lowest bits with the |
| 12 // alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we | 12 // alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we |
| 13 // are. This works since all alignments are powers of 2. The mask is always (alignment - 1). | 13 // are. This works since all alignments are powers of 2. The mask is always (alignment - 1). |
| 14 // This alignment mask will give correct alignments for using the std430 block layout. If you want | 14 // This alignment mask will give correct alignments for using the std430 block layout. If you want |
| 15 // the std140 alignment, you can use this, but then make sure if you have an array type it is | 15 // the std140 alignment, you can use this, but then make sure if you have an array type it is |
| 16 // aligned to 16 bytes (i.e. has mask of 0xF). | 16 // aligned to 16 bytes (i.e. has mask of 0xF). |
| 17 uint32_t grsltype_to_alignment_mask(GrSLType type) { | 17 uint32_t grsltype_to_alignment_mask(GrSLType type) { |
| 18 SkASSERT(GrSLTypeIsFloatType(type)); | 18 SkASSERT(GrSLTypeIsFloatType(type)); |
| 19 static const uint32_t kAlignments[kGrSLTypeCount] = { | 19 static const uint32_t kAlignments[kGrSLTypeCount] = { |
| 20 0x0, // kVoid_GrSLType, should never return this | 20 0x0, // kVoid_GrSLType, should never return this |
| 21 0x3, // kFloat_GrSLType | 21 0x3, // kFloat_GrSLType |
| 22 0x7, // kVec2f_GrSLType | 22 0x7, // kVec2f_GrSLType |
| 23 0xF, // kVec3f_GrSLType | 23 0xF, // kVec3f_GrSLType |
| 24 0xF, // kVec4f_GrSLType | 24 0xF, // kVec4f_GrSLType |
| 25 0x7, // kMat22f_GrSLType |
| 25 0xF, // kMat33f_GrSLType | 26 0xF, // kMat33f_GrSLType |
| 26 0xF, // kMat44f_GrSLType | 27 0xF, // kMat44f_GrSLType |
| 27 0x0, // Sampler2D_GrSLType, should never return this | 28 0x0, // Sampler2D_GrSLType, should never return this |
| 28 0x0, // SamplerExternal_GrSLType, should never return this | 29 0x0, // SamplerExternal_GrSLType, should never return this |
| 29 }; | 30 }; |
| 30 GR_STATIC_ASSERT(0 == kVoid_GrSLType); | 31 GR_STATIC_ASSERT(0 == kVoid_GrSLType); |
| 31 GR_STATIC_ASSERT(1 == kFloat_GrSLType); | 32 GR_STATIC_ASSERT(1 == kFloat_GrSLType); |
| 32 GR_STATIC_ASSERT(2 == kVec2f_GrSLType); | 33 GR_STATIC_ASSERT(2 == kVec2f_GrSLType); |
| 33 GR_STATIC_ASSERT(3 == kVec3f_GrSLType); | 34 GR_STATIC_ASSERT(3 == kVec3f_GrSLType); |
| 34 GR_STATIC_ASSERT(4 == kVec4f_GrSLType); | 35 GR_STATIC_ASSERT(4 == kVec4f_GrSLType); |
| 35 GR_STATIC_ASSERT(5 == kMat33f_GrSLType); | 36 GR_STATIC_ASSERT(5 == kMat22f_GrSLType); |
| 36 GR_STATIC_ASSERT(6 == kMat44f_GrSLType); | 37 GR_STATIC_ASSERT(6 == kMat33f_GrSLType); |
| 37 GR_STATIC_ASSERT(7 == kSampler2D_GrSLType); | 38 GR_STATIC_ASSERT(7 == kMat44f_GrSLType); |
| 38 GR_STATIC_ASSERT(8 == kSamplerExternal_GrSLType); | 39 GR_STATIC_ASSERT(8 == kSampler2D_GrSLType); |
| 40 GR_STATIC_ASSERT(9 == kSamplerExternal_GrSLType); |
| 39 GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAlignments) == kGrSLTypeCount); | 41 GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAlignments) == kGrSLTypeCount); |
| 40 return kAlignments[type]; | 42 return kAlignments[type]; |
| 41 } | 43 } |
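
(Editor's note, not part of the CL under review.) The block comment above and get_ubo_aligned_offset further down both use the same trick: for a power-of-two alignment, offset & (alignment - 1) is zero when the offset is aligned and otherwise gives how far past the last boundary it sits. A minimal, self-contained sketch of that rounding, assuming 4-byte floats; align_up is a hypothetical helper name, not something defined in this CL:

    #include <cstdint>
    #include <cassert>

    // Round 'offset' up to the next multiple of a power-of-two alignment,
    // where 'mask' is (alignment - 1), e.g. 0xF for 16-byte alignment.
    static uint32_t align_up(uint32_t offset, uint32_t mask) {
        uint32_t diff = offset & mask;   // 0 means already aligned
        if (diff != 0) {
            diff = mask - diff + 1;      // bytes needed to reach the next boundary
        }
        return offset + diff;
    }

    int main() {
        assert(align_up(20, 0xF) == 32); // vec4/mat3/mat4: 4 bytes past a 16-byte boundary
        assert(align_up(32, 0xF) == 32); // already aligned, unchanged
        assert(align_up(6,  0x7) ==  8); // vec2: 8-byte alignment
        return 0;
    }

This mirrors the offsetDiff computation in get_ubo_aligned_offset (new lines 99-103 below).
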
| 42 | 44 |
| 43 /** Returns the size in bytes taken up in vulkanbuffers for floating point GrSLTypes. | 45 /** Returns the size in bytes taken up in vulkanbuffers for floating point GrSLTypes. |
| 44 For non floating point type returns 0 */ | 46 For non floating point type returns 0 */ |
| 45 static inline uint32_t grsltype_to_vk_size(GrSLType type) { | 47 static inline uint32_t grsltype_to_vk_size(GrSLType type) { |
| 46 SkASSERT(GrSLTypeIsFloatType(type)); | 48 SkASSERT(GrSLTypeIsFloatType(type)); |
| 49 SkASSERT(kMat22f_GrSLType != type); // TODO: handle mat2 differences between std140 and std430. |
| 47 static const uint32_t kSizes[] = { | 50 static const uint32_t kSizes[] = { |
| 48 0, // kVoid_GrSLType | 51 0, // kVoid_GrSLType |
| 49 sizeof(float), // kFloat_GrSLType | 52 sizeof(float), // kFloat_GrSLType |
| 50 2 * sizeof(float), // kVec2f_GrSLType | 53 2 * sizeof(float), // kVec2f_GrSLType |
| 51 3 * sizeof(float), // kVec3f_GrSLType | 54 3 * sizeof(float), // kVec3f_GrSLType |
| 52 4 * sizeof(float), // kVec4f_GrSLType | 55 4 * sizeof(float), // kVec4f_GrSLType |
| 56 8 * sizeof(float), // kMat22f_GrSLType. TODO: this will be 4 * sizeof(float) on std430. |
| 53 12 * sizeof(float), // kMat33f_GrSLType | 57 12 * sizeof(float), // kMat33f_GrSLType |
| 54 16 * sizeof(float), // kMat44f_GrSLType | 58 16 * sizeof(float), // kMat44f_GrSLType |
| 55 0, // kSampler2D_GrSLType | 59 0, // kSampler2D_GrSLType |
| 56 0, // kSamplerExternal_GrSLType | 60 0, // kSamplerExternal_GrSLType |
| 57 0, // kSampler2DRect_GrSLType | 61 0, // kSampler2DRect_GrSLType |
| 58 0, // kBool_GrSLType | 62 0, // kBool_GrSLType |
| 59 0, // kInt_GrSLType | 63 0, // kInt_GrSLType |
| 60 0, // kUint_GrSLType | 64 0, // kUint_GrSLType |
| 61 }; | 65 }; |
| 62 return kSizes[type]; | 66 return kSizes[type]; |
| 63 | 67 |
| 64 GR_STATIC_ASSERT(0 == kVoid_GrSLType); | 68 GR_STATIC_ASSERT(0 == kVoid_GrSLType); |
| 65 GR_STATIC_ASSERT(1 == kFloat_GrSLType); | 69 GR_STATIC_ASSERT(1 == kFloat_GrSLType); |
| 66 GR_STATIC_ASSERT(2 == kVec2f_GrSLType); | 70 GR_STATIC_ASSERT(2 == kVec2f_GrSLType); |
| 67 GR_STATIC_ASSERT(3 == kVec3f_GrSLType); | 71 GR_STATIC_ASSERT(3 == kVec3f_GrSLType); |
| 68 GR_STATIC_ASSERT(4 == kVec4f_GrSLType); | 72 GR_STATIC_ASSERT(4 == kVec4f_GrSLType); |
| 69 GR_STATIC_ASSERT(5 == kMat33f_GrSLType); | 73 GR_STATIC_ASSERT(5 == kMat22f_GrSLType); |
| 70 GR_STATIC_ASSERT(6 == kMat44f_GrSLType); | 74 GR_STATIC_ASSERT(6 == kMat33f_GrSLType); |
| 71 GR_STATIC_ASSERT(7 == kSampler2D_GrSLType); | 75 GR_STATIC_ASSERT(7 == kMat44f_GrSLType); |
| 72 GR_STATIC_ASSERT(8 == kSamplerExternal_GrSLType); | 76 GR_STATIC_ASSERT(8 == kSampler2D_GrSLType); |
| 73 GR_STATIC_ASSERT(9 == kSampler2DRect_GrSLType); | 77 GR_STATIC_ASSERT(9 == kSamplerExternal_GrSLType); |
| 74 GR_STATIC_ASSERT(10 == kBool_GrSLType); | 78 GR_STATIC_ASSERT(10 == kSampler2DRect_GrSLType); |
| 75 GR_STATIC_ASSERT(11 == kInt_GrSLType); | 79 GR_STATIC_ASSERT(11 == kBool_GrSLType); |
| 76 GR_STATIC_ASSERT(12 == kUint_GrSLType); | 80 GR_STATIC_ASSERT(12 == kInt_GrSLType); |
| 77 GR_STATIC_ASSERT(13 == kGrSLTypeCount); | 81 GR_STATIC_ASSERT(13 == kUint_GrSLType); |
| 82 GR_STATIC_ASSERT(SK_ARRAY_COUNT(kSizes) == kGrSLTypeCount); |
| 78 } | 83 } |
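
(Editor's note, not part of the CL under review.) The mat2 special-casing above reflects a difference between the two block layouts: std140 pads each matrix column out to a vec4, while std430 lets a mat2 column keep vec2 alignment. A small illustration under the same 4-byte-float assumption; the constant names are illustrative only:

    #include <cstdint>

    // std140: each mat2 column is padded to a vec4, so the matrix occupies
    //         2 columns * 16 bytes = 32 bytes and is 16-byte aligned (mask 0xF).
    // std430: a mat2 column only needs vec2 alignment, so the matrix occupies
    //         2 columns * 8 bytes = 16 bytes and is 8-byte aligned (mask 0x7).
    constexpr uint32_t kMat22Std140Size = 8 * sizeof(float); // 32 bytes
    constexpr uint32_t kMat22Std430Size = 4 * sizeof(float); // 16 bytes
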
| 79 | 84 |
| 80 | 85 |
| 81 // Given the current offset into the ubo, calculate the offset for the uniform we're trying to add | 86 // Given the current offset into the ubo, calculate the offset for the uniform we're trying to add |
| 82 // taking into consideration all alignment requirements. The uniformOffset is set to the offset for | 87 // taking into consideration all alignment requirements. The uniformOffset is set to the offset for |
| 83 // the new uniform, and currentOffset is updated to be the offset to the end of the new uniform. | 88 // the new uniform, and currentOffset is updated to be the offset to the end of the new uniform. |
| 84 void get_ubo_aligned_offset(uint32_t* uniformOffset, | 89 void get_ubo_aligned_offset(uint32_t* uniformOffset, |
| 85 uint32_t* currentOffset, | 90 uint32_t* currentOffset, |
| 86 GrSLType type, | 91 GrSLType type, |
| 87 int arrayCount) { | 92 int arrayCount) { |
| 88 uint32_t alignmentMask = grsltype_to_alignment_mask(type); | 93 uint32_t alignmentMask = grsltype_to_alignment_mask(type); |
| 89 // We want to use the std140 layout here, so we must make arrays align to 16 bytes. | 94 // We want to use the std140 layout here, so we must make arrays align to 16 bytes. |
| 95 SkASSERT(type != kMat22f_GrSLType); // TODO: support mat2. |
| 90 if (arrayCount) { | 96 if (arrayCount) { |
| 91 alignmentMask = 0xF; | 97 alignmentMask = 0xF; |
| 92 } | 98 } |
| 93 uint32_t offsetDiff = *currentOffset & alignmentMask; | 99 uint32_t offsetDiff = *currentOffset & alignmentMask; |
| 94 if (offsetDiff != 0) { | 100 if (offsetDiff != 0) { |
| 95 offsetDiff = alignmentMask - offsetDiff + 1; | 101 offsetDiff = alignmentMask - offsetDiff + 1; |
| 96 } | 102 } |
| 97 *uniformOffset = *currentOffset + offsetDiff; | 103 *uniformOffset = *currentOffset + offsetDiff; |
| 98 SkASSERT(sizeof(float) == 4); | 104 SkASSERT(sizeof(float) == 4); |
| 99 if (arrayCount) { | 105 if (arrayCount) { |
| (...skipping 89 matching lines...) |
| 189 } | 195 } |
| 190 } | 196 } |
| 191 if (!uniformsString.isEmpty()) { | 197 if (!uniformsString.isEmpty()) { |
| 192 const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment"; | 198 const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment"; |
| 193 out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n", | 199 out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n", |
| 194 kUniformBufferDescSet, uniformBinding, stage); | 200 kUniformBufferDescSet, uniformBinding, stage); |
| 195 out->appendf("%s\n};\n", uniformsString.c_str()); | 201 out->appendf("%s\n};\n", uniformsString.c_str()); |
| 196 } | 202 } |
| 197 } | 203 } |
| 198 | 204 |
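
(Editor's note, not part of the CL under review.) For orientation, the appendf calls at new lines 199-201 wrap the accumulated declarations in one GLSL uniform block per stage. The emitted text has roughly the following shape; the set/binding numbers and the block contents here are placeholders, not values taken from this CL:

    layout (set=0, binding=0) uniform vertexUniformBuffer
    {
        // ...declarations accumulated in uniformsString...
    };
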