/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Random uniform block layout case.
 *//*--------------------------------------------------------------------*/

#include "vktRandomUniformBlockCase.hpp"
#include "deRandom.hpp"

namespace vkt
{
namespace ubo
{

namespace
{

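// Build a short name from a 1-based index by cycling through the characters in
// [first, last]: for 'a'..'z' this yields "a", "b", ..., "z", "aa", "ab", and so on.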
static std::string genName(char first, char last, int ndx)
{
    std::string str = "";
    int alphabetLen = last - first + 1;

    while (ndx > alphabetLen)
    {
        str.insert(str.begin(), (char)(first + ((ndx - 1) % alphabetLen)));
        ndx = (ndx - 1) / alphabetLen;
    }

    str.insert(str.begin(), (char)(first + (ndx % (alphabetLen + 1)) - 1));

    return str;
}

} // namespace

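// Constructor: seed the RNG and build the shader interface. Depending on the feature
// mask, a random number of uniform blocks is generated for the vertex stage, the
// fragment stage, and shared between both stages.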
RandomUniformBlockCase::RandomUniformBlockCase(tcu::TestContext &testCtx, const std::string &name,
                                               BufferMode bufferMode, uint32_t features, uint32_t seed)
    : UniformBlockCase(testCtx, name, bufferMode, LOAD_FULL_MATRIX, (features & FEATURE_OUT_OF_ORDER_OFFSETS) != 0u)
    , m_features(features)
    , m_maxVertexBlocks((features & FEATURE_VERTEX_BLOCKS) ? 4 : 0)
    , m_maxFragmentBlocks((features & FEATURE_FRAGMENT_BLOCKS) ? 4 : 0)
    , m_maxSharedBlocks((features & FEATURE_SHARED_BLOCKS) ? 4 : 0)
    , m_maxInstances((features & FEATURE_INSTANCE_ARRAYS) ? 3 : 0)
    , m_maxArrayLength((features & FEATURE_ARRAYS) ? 8 : 0)
    , m_maxStructDepth((features & FEATURE_STRUCTS) ? 2 : 0)
    , m_maxBlockMembers(5)
    , m_maxStructMembers(4)
    , m_seed(seed)
    , m_blockNdx(1)
    , m_uniformNdx(1)
    , m_structNdx(1)
    , m_availableDescriptorUniformBuffers(12)
{
    de::Random rnd(m_seed);

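    // Decide how many blocks to declare per stage; shared blocks are visible to both
    // stages and count against both the vertex and fragment block budgets.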
    int numShared = m_maxSharedBlocks > 0 ? rnd.getInt(1, m_maxSharedBlocks) : 0;
    int numVtxBlocks = m_maxVertexBlocks - numShared > 0 ? rnd.getInt(1, m_maxVertexBlocks - numShared) : 0;
    int numFragBlocks = m_maxFragmentBlocks - numShared > 0 ? rnd.getInt(1, m_maxFragmentBlocks - numShared) : 0;

    // Calculate how many additional descriptors we can use for arrays. This matters for
    // descriptor_indexing testing: we must respect the maxPerStageDescriptorUniformBuffers
    // limit, but cannot query it because shaders are generated before the Context is
    // created. The minimum value of this limit is 12.
    m_availableDescriptorUniformBuffers -= numVtxBlocks + numFragBlocks;

    for (int ndx = 0; ndx < numShared; ndx++)
        generateBlock(rnd, DECLARE_VERTEX | DECLARE_FRAGMENT);

    for (int ndx = 0; ndx < numVtxBlocks; ndx++)
        generateBlock(rnd, DECLARE_VERTEX);

    for (int ndx = 0; ndx < numFragBlocks; ndx++)
        generateBlock(rnd, DECLARE_FRAGMENT);

    init();
}

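// Add one uniform block ("BlockA", "BlockB", ...) to the interface, with randomized
// instance array size, layout flags and members.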
void RandomUniformBlockCase::generateBlock(de::Random &rnd, uint32_t layoutFlags)
{
    DE_ASSERT(m_blockNdx <= 'z' - 'a');

    const float instanceArrayWeight = 0.3f;
    UniformBlock &block = m_interface.allocBlock(std::string("Block") + (char)('A' + m_blockNdx));
    int numInstances = (m_maxInstances > 0 && rnd.getFloat() < instanceArrayWeight) ? rnd.getInt(0, m_maxInstances) : 0;
    int numUniforms = rnd.getInt(1, m_maxBlockMembers);

    if (m_features & FEATURE_DESCRIPTOR_INDEXING)
    {
        // Generate arrays only when we are within the limit.
        if (m_availableDescriptorUniformBuffers > 3)
            numInstances = rnd.getInt(2, 4);
        else if (m_availableDescriptorUniformBuffers > 1)
            numInstances = m_availableDescriptorUniformBuffers;
        else
            numInstances = 0;
        m_availableDescriptorUniformBuffers -= numInstances;
    }

    if (numInstances > 0)
        block.setArraySize(numInstances);

    if (numInstances > 0 || rnd.getBool())
        block.setInstanceName(std::string("block") + (char)('A' + m_blockNdx));

    // Layout flag candidates.
    std::vector<uint32_t> layoutFlagCandidates;
    layoutFlagCandidates.push_back(0);

    if (m_features & FEATURE_STD140_LAYOUT)
        layoutFlagCandidates.push_back(LAYOUT_STD140);

    if (m_features & FEATURE_STD430_LAYOUT)
        layoutFlagCandidates.push_back(LAYOUT_STD430);

    if (m_features & FEATURE_SCALAR_LAYOUT)
        layoutFlagCandidates.push_back(LAYOUT_SCALAR);

    if (m_features & FEATURE_16BIT_STORAGE)
        layoutFlags |= LAYOUT_16BIT_STORAGE;

    if (m_features & FEATURE_8BIT_STORAGE)
        layoutFlags |= LAYOUT_8BIT_STORAGE;

    if (m_features & FEATURE_DESCRIPTOR_INDEXING)
        layoutFlags |= LAYOUT_DESCRIPTOR_INDEXING;

    layoutFlags |= rnd.choose<uint32_t>(layoutFlagCandidates.begin(), layoutFlagCandidates.end());

    if (m_features & FEATURE_MATRIX_LAYOUT)
    {
        static const uint32_t matrixCandidates[] = {0, LAYOUT_ROW_MAJOR, LAYOUT_COLUMN_MAJOR};
        layoutFlags |=
            rnd.choose<uint32_t>(&matrixCandidates[0], &matrixCandidates[DE_LENGTH_OF_ARRAY(matrixCandidates)]);
    }

    block.setFlags(layoutFlags);

    for (int ndx = 0; ndx < numUniforms; ndx++)
        generateUniform(rnd, block, numInstances ? numInstances : 1);

    m_blockNdx += 1;
}

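// Add a single uniform of random type to the given block; it may be flagged as unused
// in the vertex and/or fragment stage when FEATURE_UNUSED_UNIFORMS is set.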
void RandomUniformBlockCase::generateUniform(de::Random &rnd, UniformBlock &block, uint32_t complexity)
{
    const float unusedVtxWeight = 0.15f;
    const float unusedFragWeight = 0.15f;
    bool unusedOk = (m_features & FEATURE_UNUSED_UNIFORMS) != 0;
    uint32_t flags = 0;
    std::string name = genName('a', 'z', m_uniformNdx);
    VarType type = generateType(rnd, 0, true, complexity);

    flags |= (unusedOk && rnd.getFloat() < unusedVtxWeight) ? UNUSED_VERTEX : 0;
    flags |= (unusedOk && rnd.getFloat() < unusedFragWeight) ? UNUSED_FRAGMENT : 0;

    block.addUniform(Uniform(name, type, flags));

    m_uniformNdx += 1;
}

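// Recursively generate a random type: a struct (up to m_maxStructDepth levels), an
// array (with length trimmed to bound overall complexity), or a basic type.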
VarType RandomUniformBlockCase::generateType(de::Random &rnd, int typeDepth, bool arrayOk, uint32_t complexity)
{
    const float structWeight = 0.1f;
    const float arrayWeight = 0.1f;

    if (typeDepth < m_maxStructDepth && rnd.getFloat() < structWeight)
    {
        const float unusedVtxWeight = 0.15f;
        const float unusedFragWeight = 0.15f;
        bool unusedOk = (m_features & FEATURE_UNUSED_MEMBERS) != 0;
        std::vector<VarType> memberTypes;
        int numMembers = rnd.getInt(1, m_maxStructMembers);

        // Generate members first so nested struct declarations are in correct order.
        for (int ndx = 0; ndx < numMembers; ndx++)
            memberTypes.push_back(generateType(rnd, typeDepth + 1, true, complexity));

        StructType &structType = m_interface.allocStruct(std::string("s") + genName('A', 'Z', m_structNdx));
        m_structNdx += 1;

        DE_ASSERT(numMembers <= 'Z' - 'A');
        for (int ndx = 0; ndx < numMembers; ndx++)
        {
            uint32_t flags = 0;

            flags |= (unusedOk && rnd.getFloat() < unusedVtxWeight) ? UNUSED_VERTEX : 0;
            flags |= (unusedOk && rnd.getFloat() < unusedFragWeight) ? UNUSED_FRAGMENT : 0;

            structType.addMember(std::string("m") + (char)('A' + ndx), memberTypes[ndx], flags);
        }

        return VarType(&structType, m_shuffleUniformMembers ? static_cast<uint32_t>(LAYOUT_OFFSET) : 0u);
    }
    else if (m_maxArrayLength > 0 && arrayOk && rnd.getFloat() < arrayWeight)
    {
        const bool arraysOfArraysOk = (m_features & FEATURE_ARRAYS_OF_ARRAYS) != 0;
        int arrayLength = rnd.getInt(1, m_maxArrayLength);

        if (complexity * arrayLength >= 70)
        {
            // Trim overly complicated cases (affects 18 cases out of 1576)
            arrayLength = 1;
        }

        VarType elementType = generateType(rnd, typeDepth, arraysOfArraysOk, complexity * arrayLength);
        return VarType(elementType, arrayLength);
    }
    else
    {
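        // Basic type: pick from the scalar, vector and matrix candidates enabled by the feature mask.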
        std::vector<glu::DataType> typeCandidates;

        typeCandidates.push_back(glu::TYPE_FLOAT);
        typeCandidates.push_back(glu::TYPE_INT);
        typeCandidates.push_back(glu::TYPE_UINT);
        typeCandidates.push_back(glu::TYPE_BOOL);

        if (m_features & FEATURE_16BIT_STORAGE)
        {
            typeCandidates.push_back(glu::TYPE_UINT16);
            typeCandidates.push_back(glu::TYPE_INT16);
            typeCandidates.push_back(glu::TYPE_FLOAT16);
        }

        if (m_features & FEATURE_8BIT_STORAGE)
        {
            typeCandidates.push_back(glu::TYPE_UINT8);
            typeCandidates.push_back(glu::TYPE_INT8);
        }

        if (m_features & FEATURE_VECTORS)
        {
            typeCandidates.push_back(glu::TYPE_FLOAT_VEC2);
            typeCandidates.push_back(glu::TYPE_FLOAT_VEC3);
            typeCandidates.push_back(glu::TYPE_FLOAT_VEC4);
            typeCandidates.push_back(glu::TYPE_INT_VEC2);
            typeCandidates.push_back(glu::TYPE_INT_VEC3);
            typeCandidates.push_back(glu::TYPE_INT_VEC4);
            typeCandidates.push_back(glu::TYPE_UINT_VEC2);
            typeCandidates.push_back(glu::TYPE_UINT_VEC3);
            typeCandidates.push_back(glu::TYPE_UINT_VEC4);
            typeCandidates.push_back(glu::TYPE_BOOL_VEC2);
            typeCandidates.push_back(glu::TYPE_BOOL_VEC3);
            typeCandidates.push_back(glu::TYPE_BOOL_VEC4);
            if (m_features & FEATURE_16BIT_STORAGE)
            {
                typeCandidates.push_back(glu::TYPE_FLOAT16_VEC2);
                typeCandidates.push_back(glu::TYPE_FLOAT16_VEC3);
                typeCandidates.push_back(glu::TYPE_FLOAT16_VEC4);
                typeCandidates.push_back(glu::TYPE_INT16_VEC2);
                typeCandidates.push_back(glu::TYPE_INT16_VEC3);
                typeCandidates.push_back(glu::TYPE_INT16_VEC4);
                typeCandidates.push_back(glu::TYPE_UINT16_VEC2);
                typeCandidates.push_back(glu::TYPE_UINT16_VEC3);
                typeCandidates.push_back(glu::TYPE_UINT16_VEC4);
            }
            if (m_features & FEATURE_8BIT_STORAGE)
            {
                typeCandidates.push_back(glu::TYPE_INT8_VEC2);
                typeCandidates.push_back(glu::TYPE_INT8_VEC3);
                typeCandidates.push_back(glu::TYPE_INT8_VEC4);
                typeCandidates.push_back(glu::TYPE_UINT8_VEC2);
                typeCandidates.push_back(glu::TYPE_UINT8_VEC3);
                typeCandidates.push_back(glu::TYPE_UINT8_VEC4);
            }
        }

        if (m_features & FEATURE_MATRICES)
        {
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT2);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT2X3);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT3X2);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT3);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT3X4);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT4X2);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT4X3);
            typeCandidates.push_back(glu::TYPE_FLOAT_MAT4);
        }

        glu::DataType type = rnd.choose<glu::DataType>(typeCandidates.begin(), typeCandidates.end());
        uint32_t flags = (m_shuffleUniformMembers ? static_cast<uint32_t>(LAYOUT_OFFSET) : 0u);

        if (glu::dataTypeSupportsPrecisionModifier(type))
        {
            // Precision.
            static const uint32_t precisionCandidates[] = {PRECISION_LOW, PRECISION_MEDIUM, PRECISION_HIGH};
            flags |= rnd.choose<uint32_t>(&precisionCandidates[0],
                                          &precisionCandidates[DE_LENGTH_OF_ARRAY(precisionCandidates)]);
        }

        return VarType(type, flags);
    }
}

} // namespace ubo
} // namespace vkt