1 /*-------------------------------------------------------------------------
2 * drawElements Quality Program OpenGL ES 3.1 Module
3 * -------------------------------------------------
4 *
5 * Copyright 2014 The Android Open Source Project
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
22 *
23 * \todo [2014-03-05 pyry] Extend with following:
24 * + sampler: different filtering modes, multiple sizes, incomplete textures
25 * + SSBO: write, atomic op, unsized array .length()
26 *//*--------------------------------------------------------------------*/
27
28 #include "es31fOpaqueTypeIndexingTests.hpp"
29 #include "tcuTexture.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuFormatUtil.hpp"
32 #include "tcuVectorUtil.hpp"
33 #include "gluShaderUtil.hpp"
34 #include "gluShaderProgram.hpp"
35 #include "gluObjectWrapper.hpp"
36 #include "gluTextureUtil.hpp"
37 #include "gluRenderContext.hpp"
38 #include "gluProgramInterfaceQuery.hpp"
39 #include "gluContextInfo.hpp"
40 #include "glsShaderExecUtil.hpp"
41 #include "glwFunctions.hpp"
42 #include "glwEnums.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
46
47 #include <sstream>
48
49 namespace deqp
50 {
51 namespace gles31
52 {
53 namespace Functional
54 {
55
56 namespace
57 {
58
59 using namespace gls::ShaderExecUtil;
60 using namespace glu;
61 using std::string;
62 using std::vector;
63 using tcu::TestLog;
64 using tcu::TextureFormat;
65
66 typedef de::UniquePtr<ShaderExecutor> ShaderExecutorPtr;
67
68 enum IndexExprType
69 {
70 INDEX_EXPR_TYPE_CONST_LITERAL = 0,
71 INDEX_EXPR_TYPE_CONST_EXPRESSION,
72 INDEX_EXPR_TYPE_UNIFORM,
73 INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,
74
75 INDEX_EXPR_TYPE_LAST
76 };
77
78 enum TextureType
79 {
80 TEXTURE_TYPE_1D = 0,
81 TEXTURE_TYPE_2D,
82 TEXTURE_TYPE_CUBE,
83 TEXTURE_TYPE_2D_ARRAY,
84 TEXTURE_TYPE_3D,
85 TEXTURE_TYPE_CUBE_ARRAY,
86
87 TEXTURE_TYPE_LAST
88 };
89
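// Helpers for the INDEX_EXPR_TYPE_UNIFORM index expressions: declareUniformIndexVars() emits one
// "uniform highp int <prefix>N;" declaration per index (e.g. "uniform highp int index0;"), and
// uploadUniformIndices() later assigns the chosen array indices to those uniforms with glUniform1i().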
90 static void declareUniformIndexVars(std::ostream &str, const char *varPrefix, int numVars)
91 {
92 for (int varNdx = 0; varNdx < numVars; varNdx++)
93 str << "uniform highp int " << varPrefix << varNdx << ";\n";
94 }
95
96 static void uploadUniformIndices(const glw::Functions &gl, uint32_t program, const char *varPrefix, int numIndices,
97 const int *indices)
98 {
99 for (int varNdx = 0; varNdx < numIndices; varNdx++)
100 {
101 const string varName = varPrefix + de::toString(varNdx);
102 const int loc = gl.getUniformLocation(program, varName.c_str());
103 TCU_CHECK_MSG(loc >= 0, ("No location assigned for uniform '" + varName + "'").c_str());
104
105 gl.uniform1i(loc, indices[varNdx]);
106 }
107 }
108
109 template <typename T>
110 static T maxElement(const std::vector<T> &elements)
111 {
112 T maxElem = elements[0];
113
114 for (size_t ndx = 1; ndx < elements.size(); ndx++)
115 maxElem = de::max(maxElem, elements[ndx]);
116
117 return maxElem;
118 }
119
120 static TextureType getTextureType(glu::DataType samplerType)
121 {
122 switch (samplerType)
123 {
124 case glu::TYPE_SAMPLER_1D:
125 case glu::TYPE_INT_SAMPLER_1D:
126 case glu::TYPE_UINT_SAMPLER_1D:
127 case glu::TYPE_SAMPLER_1D_SHADOW:
128 return TEXTURE_TYPE_1D;
129
130 case glu::TYPE_SAMPLER_2D:
131 case glu::TYPE_INT_SAMPLER_2D:
132 case glu::TYPE_UINT_SAMPLER_2D:
133 case glu::TYPE_SAMPLER_2D_SHADOW:
134 return TEXTURE_TYPE_2D;
135
136 case glu::TYPE_SAMPLER_CUBE:
137 case glu::TYPE_INT_SAMPLER_CUBE:
138 case glu::TYPE_UINT_SAMPLER_CUBE:
139 case glu::TYPE_SAMPLER_CUBE_SHADOW:
140 return TEXTURE_TYPE_CUBE;
141
142 case glu::TYPE_SAMPLER_2D_ARRAY:
143 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
144 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
145 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
146 return TEXTURE_TYPE_2D_ARRAY;
147
148 case glu::TYPE_SAMPLER_3D:
149 case glu::TYPE_INT_SAMPLER_3D:
150 case glu::TYPE_UINT_SAMPLER_3D:
151 return TEXTURE_TYPE_3D;
152
153 case glu::TYPE_SAMPLER_CUBE_ARRAY:
154 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
155 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
156 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
157 return TEXTURE_TYPE_CUBE_ARRAY;
158
159 default:
160 TCU_THROW(InternalError, "Invalid sampler type");
161 }
162 }
163
164 static bool isShadowSampler(glu::DataType samplerType)
165 {
166 return samplerType == glu::TYPE_SAMPLER_1D_SHADOW || samplerType == glu::TYPE_SAMPLER_2D_SHADOW ||
167 samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW || samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW ||
168 samplerType == glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW;
169 }
170
171 static glu::DataType getSamplerOutputType(glu::DataType samplerType)
172 {
173 switch (samplerType)
174 {
175 case glu::TYPE_SAMPLER_1D:
176 case glu::TYPE_SAMPLER_2D:
177 case glu::TYPE_SAMPLER_CUBE:
178 case glu::TYPE_SAMPLER_2D_ARRAY:
179 case glu::TYPE_SAMPLER_3D:
180 case glu::TYPE_SAMPLER_CUBE_ARRAY:
181 return glu::TYPE_FLOAT_VEC4;
182
183 case glu::TYPE_SAMPLER_1D_SHADOW:
184 case glu::TYPE_SAMPLER_2D_SHADOW:
185 case glu::TYPE_SAMPLER_CUBE_SHADOW:
186 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
187 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
188 return glu::TYPE_FLOAT;
189
190 case glu::TYPE_INT_SAMPLER_1D:
191 case glu::TYPE_INT_SAMPLER_2D:
192 case glu::TYPE_INT_SAMPLER_CUBE:
193 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
194 case glu::TYPE_INT_SAMPLER_3D:
195 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
196 return glu::TYPE_INT_VEC4;
197
198 case glu::TYPE_UINT_SAMPLER_1D:
199 case glu::TYPE_UINT_SAMPLER_2D:
200 case glu::TYPE_UINT_SAMPLER_CUBE:
201 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
202 case glu::TYPE_UINT_SAMPLER_3D:
203 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
204 return glu::TYPE_UINT_VEC4;
205
206 default:
207 TCU_THROW(InternalError, "Invalid sampler type");
208 }
209 }
210
211 static tcu::TextureFormat getSamplerTextureFormat(glu::DataType samplerType)
212 {
213 const glu::DataType outType = getSamplerOutputType(samplerType);
214 const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);
215
216 switch (outScalarType)
217 {
218 case glu::TYPE_FLOAT:
219 if (isShadowSampler(samplerType))
220 return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
221 else
222 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
223
224 case glu::TYPE_INT:
225 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
226 case glu::TYPE_UINT:
227 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
228
229 default:
230 TCU_THROW(InternalError, "Invalid sampler type");
231 }
232 }
233
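// Coordinate type used for the lookups: one float component per texture dimension, plus one extra
// component carrying the shadow compare value for shadow samplers (except cube array shadow, where
// the compare value is passed to texture() as a separate argument instead).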
234 static glu::DataType getSamplerCoordType(glu::DataType samplerType)
235 {
236 const TextureType texType = getTextureType(samplerType);
237 int numCoords = 0;
238
239 switch (texType)
240 {
241 case TEXTURE_TYPE_1D:
242 numCoords = 1;
243 break;
244 case TEXTURE_TYPE_2D:
245 numCoords = 2;
246 break;
247 case TEXTURE_TYPE_2D_ARRAY:
248 numCoords = 3;
249 break;
250 case TEXTURE_TYPE_CUBE:
251 numCoords = 3;
252 break;
253 case TEXTURE_TYPE_3D:
254 numCoords = 3;
255 break;
256 case TEXTURE_TYPE_CUBE_ARRAY:
257 numCoords = 4;
258 break;
259 default:
260 TCU_THROW(InternalError, "Invalid texture type");
261 }
262
263 if (isShadowSampler(samplerType) && samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW)
264 numCoords += 1;
265
266 DE_ASSERT(de::inRange(numCoords, 1, 4));
267
268 return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
269 }
270
271 static uint32_t getGLTextureTarget(TextureType texType)
272 {
273 switch (texType)
274 {
275 case TEXTURE_TYPE_1D:
276 return GL_TEXTURE_1D;
277 case TEXTURE_TYPE_2D:
278 return GL_TEXTURE_2D;
279 case TEXTURE_TYPE_2D_ARRAY:
280 return GL_TEXTURE_2D_ARRAY;
281 case TEXTURE_TYPE_CUBE:
282 return GL_TEXTURE_CUBE_MAP;
283 case TEXTURE_TYPE_3D:
284 return GL_TEXTURE_3D;
285 case TEXTURE_TYPE_CUBE_ARRAY:
286 return GL_TEXTURE_CUBE_MAP_ARRAY;
287 default:
288 TCU_THROW(InternalError, "Invalid texture type");
289 }
290 }
291
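// Allocates a minimal 1x1 (or 1x1x1 / six 1x1 faces) texture for the given sampler type, uploads the
// single texel 'color', selects NEAREST filtering and, for shadow samplers, enables
// GL_COMPARE_REF_TO_TEXTURE.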
292 static void setupTexture(const glw::Functions &gl, uint32_t texture, glu::DataType samplerType,
293 tcu::TextureFormat texFormat, const void *color)
294 {
295 const TextureType texType = getTextureType(samplerType);
296 const uint32_t texTarget = getGLTextureTarget(texType);
297 const uint32_t intFormat = glu::getInternalFormat(texFormat);
298 const glu::TransferFormat transferFmt = glu::getTransferFormat(texFormat);
299
300 // \todo [2014-03-04 pyry] Use larger than 1x1 textures?
301
302 gl.bindTexture(texTarget, texture);
303
304 switch (texType)
305 {
306 case TEXTURE_TYPE_1D:
307 gl.texStorage1D(texTarget, 1, intFormat, 1);
308 gl.texSubImage1D(texTarget, 0, 0, 1, transferFmt.format, transferFmt.dataType, color);
309 break;
310
311 case TEXTURE_TYPE_2D:
312 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
313 gl.texSubImage2D(texTarget, 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
314 break;
315
316 case TEXTURE_TYPE_2D_ARRAY:
317 case TEXTURE_TYPE_3D:
318 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 1);
319 gl.texSubImage3D(texTarget, 0, 0, 0, 0, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
320 break;
321
322 case TEXTURE_TYPE_CUBE_ARRAY:
323 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 6);
324 for (int zoffset = 0; zoffset < 6; ++zoffset)
325 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
326 gl.texSubImage3D(texTarget, 0, 0, 0, zoffset, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
327 break;
328
329 case TEXTURE_TYPE_CUBE:
330 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
331 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
332 gl.texSubImage2D(glu::getGLCubeFace((tcu::CubeFace)face), 0, 0, 0, 1, 1, transferFmt.format,
333 transferFmt.dataType, color);
334 break;
335
336 default:
337 TCU_THROW(InternalError, "Invalid texture type");
338 }
339
340 gl.texParameteri(texTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
341 gl.texParameteri(texTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
342
343 if (isShadowSampler(samplerType))
344 gl.texParameteri(texTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
345
346 GLU_EXPECT_NO_ERROR(gl.getError(), "Texture setup failed");
347 }
348
349 class SamplerIndexingCase : public TestCase
350 {
351 public:
352 SamplerIndexingCase(Context &context, const char *name, const char *description, glu::ShaderType shaderType,
353 glu::DataType samplerType, IndexExprType indexExprType);
354 ~SamplerIndexingCase(void);
355
356 void init(void);
357 IterateResult iterate(void);
358
359 private:
360 SamplerIndexingCase(const SamplerIndexingCase &);
361 SamplerIndexingCase &operator=(const SamplerIndexingCase &);
362
363 void getShaderSpec(ShaderSpec *spec, int numSamplers, int numLookups, const int *lookupIndices,
364 const RenderContext &renderContext) const;
365
366 const glu::ShaderType m_shaderType;
367 const glu::DataType m_samplerType;
368 const IndexExprType m_indexExprType;
369 };
370
371 SamplerIndexingCase::SamplerIndexingCase(Context &context, const char *name, const char *description,
372 glu::ShaderType shaderType, glu::DataType samplerType,
373 IndexExprType indexExprType)
374 : TestCase(context, name, description)
375 , m_shaderType(shaderType)
376 , m_samplerType(samplerType)
377 , m_indexExprType(indexExprType)
378 {
379 }
380
381 SamplerIndexingCase::~SamplerIndexingCase(void)
382 {
383 }
384
385 void SamplerIndexingCase::init(void)
386 {
387 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
388 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5),
389 "GL_ARB_ES3_2_compatibility");
390
391 if (!supportsES32)
392 {
393 if (m_shaderType == SHADERTYPE_GEOMETRY)
394 TCU_CHECK_AND_THROW(NotSupportedError,
395 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
396 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
397
398 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
399 TCU_CHECK_AND_THROW(NotSupportedError,
400 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
401 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
402
403 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
404 TCU_CHECK_AND_THROW(NotSupportedError,
405 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
406 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of sampler arrays.");
407
408 if (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY || m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ||
409 m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY || m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY)
410 {
411 TCU_CHECK_AND_THROW(NotSupportedError,
412 m_context.getContextInfo().isExtensionSupported("GL_EXT_texture_cube_map_array"),
413 "GL_EXT_texture_cube_map_array extension is required for cube map arrays.");
414 }
415 }
416 }
417
418 void SamplerIndexingCase::getShaderSpec(ShaderSpec *spec, int numSamplers, int numLookups, const int *lookupIndices,
419 const RenderContext &renderContext) const
420 {
421 const char *samplersName = "sampler";
422 const char *coordsName = "coords";
423 const char *indicesPrefix = "index";
424 const char *resultPrefix = "result";
425 const DataType coordType = getSamplerCoordType(m_samplerType);
426 const DataType outType = getSamplerOutputType(m_samplerType);
427 const bool supportsES32 =
428 contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
429 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
430 std::ostringstream global;
431 std::ostringstream code;
432
433 spec->inputs.push_back(Symbol(coordsName, VarType(coordType, PRECISION_HIGHP)));
434
435 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
436 m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
437 global << "#extension GL_EXT_gpu_shader5 : require\n";
438
439 if (!supportsES32 &&
440 (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY || m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ||
441 m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY || m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY))
442 {
443 global << "#extension GL_EXT_texture_cube_map_array: require\n";
444 }
445
446 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
447 global << "const highp int indexBase = 1;\n";
448
449 global << "uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << numSamplers << "];\n";
450
451 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
452 {
453 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
454 {
455 const string varName = indicesPrefix + de::toString(lookupNdx);
456 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
457 }
458 }
459 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
460 declareUniformIndexVars(global, indicesPrefix, numLookups);
461
462 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
463 {
464 const string varName = resultPrefix + de::toString(lookupNdx);
465 spec->outputs.push_back(Symbol(varName, VarType(outType, PRECISION_HIGHP)));
466 }
467
468 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
469 {
470 code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
471
472 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
473 code << lookupIndices[lookupNdx];
474 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
475 code << "indexBase + " << (lookupIndices[lookupNdx] - 1);
476 else
477 code << indicesPrefix << lookupNdx;
478
479 code << "], " << coordsName << (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ? ", 0.0" : "") << ");\n";
480 }
481
482 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
483 spec->globalDeclarations = global.str();
484 spec->source = code.str();
485 }
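// For reference, with TYPE_SAMPLER_2D, eight samplers, four lookups and a dynamically uniform index
// expression, the declarations and body assembled above come out roughly as:
//
//     #extension GL_EXT_gpu_shader5 : require      // only when ES 3.2 is not supported
//     uniform highp sampler2D sampler[8];
//     ...
//     result0 = texture(sampler[index0], coords);
//     result1 = texture(sampler[index1], coords);
//
// Here index0..index3 are per-invocation inputs; for TYPE_SAMPLER_CUBE_ARRAY_SHADOW an extra
// compare value of 0.0 is appended to the texture() call.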
486
487 static void fillTextureData(const tcu::PixelBufferAccess &access, de::Random &rnd)
488 {
489 DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
490
491 if (access.getFormat().order == TextureFormat::D)
492 {
493 // \note Texture uses odd values, lookup even values to avoid precision issues.
494 const float values[] = {0.1f, 0.3f, 0.5f, 0.7f, 0.9f};
495
496 for (int ndx = 0; ndx < access.getWidth(); ndx++)
497 access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
498 }
499 else
500 {
501 TCU_CHECK_INTERNAL(access.getFormat().order == TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
502
503 for (int ndx = 0; ndx < access.getWidth(); ndx++)
504 *((uint32_t *)access.getDataPtr() + ndx) = rnd.getUint32();
505 }
506 }
507
508 SamplerIndexingCase::IterateResult SamplerIndexingCase::iterate(void)
509 {
510 const int numInvocations = 64;
511 const int numSamplers = 8;
512 const int numLookups = 4;
513 const DataType coordType = getSamplerCoordType(m_samplerType);
514 const DataType outputType = getSamplerOutputType(m_samplerType);
515 const TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
516 const int outLookupStride = numInvocations * getDataTypeScalarSize(outputType);
517 vector<int> lookupIndices(numLookups);
518 vector<float> coords;
519 vector<uint32_t> outData;
520 vector<uint8_t> texData(numSamplers * texFormat.getPixelSize());
521 const tcu::PixelBufferAccess refTexAccess(texFormat, numSamplers, 1, 1, &texData[0]);
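// texData holds one texel per sampler; refTexAccess views it as a numSamplers x 1 image so that pixel
// x is the reference color of sampler x. outData is later sized to numLookups blocks of
// outLookupStride 32-bit values, block i holding the per-invocation results of lookup i.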
522
523 ShaderSpec shaderSpec;
524 de::Random rnd(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
525
526 for (int ndx = 0; ndx < numLookups; ndx++)
527 lookupIndices[ndx] = rnd.getInt(0, numSamplers - 1);
528
529 getShaderSpec(&shaderSpec, numSamplers, numLookups, &lookupIndices[0], m_context.getRenderContext());
530
531 coords.resize(numInvocations * getDataTypeScalarSize(coordType));
532
533 if (m_samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW && isShadowSampler(m_samplerType))
534 {
535 // Use different comparison value per invocation.
536 // \note Texture uses odd values, comparison even values.
537 const int numCoordComps = getDataTypeScalarSize(coordType);
538 const float cmpValues[] = {0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f};
539
540 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
541 coords[invocationNdx * numCoordComps + (numCoordComps - 1)] =
542 rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
543 }
544
545 fillTextureData(refTexAccess, rnd);
546
547 outData.resize(numLookups * outLookupStride);
548
549 {
550 const RenderContext &renderCtx = m_context.getRenderContext();
551 const glw::Functions &gl = renderCtx.getFunctions();
552 ShaderExecutorPtr executor(createExecutor(m_context.getRenderContext(), m_shaderType, shaderSpec));
553 TextureVector textures(renderCtx, numSamplers);
554 vector<void *> inputs;
555 vector<void *> outputs;
556 vector<int> expandedIndices;
557 const int maxIndex = maxElement(lookupIndices);
558
559 m_testCtx.getLog() << *executor;
560
561 if (!executor->isOk())
562 TCU_FAIL("Compile failed");
563
564 executor->useProgram();
565
566 // \todo [2014-03-05 pyry] Do we want to randomize tex unit assignments?
567 for (int samplerNdx = 0; samplerNdx < numSamplers; samplerNdx++)
568 {
569 const string samplerName = string("sampler[") + de::toString(samplerNdx) + "]";
570 const int samplerLoc = gl.getUniformLocation(executor->getProgram(), samplerName.c_str());
571
572 if (samplerNdx > maxIndex && samplerLoc < 0)
573 continue; // Unused uniform eliminated by compiler
574
575 TCU_CHECK_MSG(samplerLoc >= 0, (string("No location for uniform '") + samplerName + "' found").c_str());
576
577 gl.activeTexture(GL_TEXTURE0 + samplerNdx);
578 setupTexture(gl, textures[samplerNdx], m_samplerType, texFormat,
579 &texData[samplerNdx * texFormat.getPixelSize()]);
580
581 gl.uniform1i(samplerLoc, samplerNdx);
582 }
583
584 inputs.push_back(&coords[0]);
585
586 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
587 {
588 expandedIndices.resize(numInvocations * lookupIndices.size());
589 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
590 {
591 for (int invNdx = 0; invNdx < numInvocations; invNdx++)
592 expandedIndices[lookupNdx * numInvocations + invNdx] = lookupIndices[lookupNdx];
593 }
594
595 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
596 inputs.push_back(&expandedIndices[lookupNdx * numInvocations]);
597 }
598 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
599 uploadUniformIndices(gl, executor->getProgram(), "index", numLookups, &lookupIndices[0]);
600
601 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
602 outputs.push_back(&outData[outLookupStride * lookupNdx]);
603
604 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
605
606 executor->execute(numInvocations, &inputs[0], &outputs[0]);
607 }
608
609 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
610
611 if (isShadowSampler(m_samplerType))
612 {
613 const tcu::Sampler refSampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
614 tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::NEAREST, tcu::Sampler::NEAREST, 0.0f,
615 false /* non-normalized */, tcu::Sampler::COMPAREMODE_LESS);
616 const int numCoordComps = getDataTypeScalarSize(coordType);
617
618 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
619
620 // Each invocation may have different results.
621 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
622 {
623 const float coord = coords[invocationNdx * numCoordComps + (numCoordComps - 1)];
624
625 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
626 {
627 const int texNdx = lookupIndices[lookupNdx];
628 const float result =
629 *((const float *)(const uint8_t *)&outData[lookupNdx * outLookupStride + invocationNdx]);
630 const float reference = refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord,
631 (float)texNdx, 0.0f, tcu::IVec3(0));
632
633 if (de::abs(result - reference) > 0.005f)
634 {
635 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup "
636 << lookupNdx << ": expected " << reference << ", got " << result
637 << TestLog::EndMessage;
638
639 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
640 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
641 }
642 }
643 }
644 }
645 else
646 {
647 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
648
649 // Validate results from first invocation
650 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
651 {
652 const int texNdx = lookupIndices[lookupNdx];
653 const uint8_t *resPtr = (const uint8_t *)&outData[lookupNdx * outLookupStride];
654 bool isOk;
655
656 if (outputType == TYPE_FLOAT_VEC4)
657 {
658 const float threshold = 1.0f / 256.0f;
659 const tcu::Vec4 reference = refTexAccess.getPixel(texNdx, 0);
660 const float *floatPtr = (const float *)resPtr;
661 const tcu::Vec4 result(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
662
663 isOk = boolAll(lessThanEqual(abs(reference - result), tcu::Vec4(threshold)));
664
665 if (!isOk)
666 {
667 m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
668 << reference << ", got " << result << TestLog::EndMessage;
669 }
670 }
671 else
672 {
673 const tcu::UVec4 reference = refTexAccess.getPixelUint(texNdx, 0);
674 const uint32_t *uintPtr = (const uint32_t *)resPtr;
675 const tcu::UVec4 result(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
676
677 isOk = boolAll(equal(reference, result));
678
679 if (!isOk)
680 {
681 m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
682 << reference << ", got " << result << TestLog::EndMessage;
683 }
684 }
685
686 if (!isOk && m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
687 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");
688 }
689
690 // Check results of other invocations against first one
691 for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
692 {
693 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
694 {
695 const uint32_t *refPtr = &outData[lookupNdx * outLookupStride];
696 const uint32_t *resPtr = refPtr + invocationNdx * 4;
697 bool isOk = true;
698
699 for (int ndx = 0; ndx < 4; ndx++)
700 isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
701
702 if (!isOk)
703 {
704 m_testCtx.getLog() << TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
705 << tcu::formatArray(tcu::Format::HexIterator<uint32_t>(resPtr),
706 tcu::Format::HexIterator<uint32_t>(resPtr + 4))
707 << " for lookup " << lookupNdx << " doesn't match result from first invocation "
708 << tcu::formatArray(tcu::Format::HexIterator<uint32_t>(refPtr),
709 tcu::Format::HexIterator<uint32_t>(refPtr + 4))
710 << TestLog::EndMessage;
711
712 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
713 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Inconsistent lookup results");
714 }
715 }
716 }
717 }
718
719 return STOP;
720 }
721
722 class BlockArrayIndexingCase : public TestCase
723 {
724 public:
725 enum BlockType
726 {
727 BLOCKTYPE_UNIFORM = 0,
728 BLOCKTYPE_BUFFER,
729
730 BLOCKTYPE_LAST
731 };
732 BlockArrayIndexingCase(Context &context, const char *name, const char *description, BlockType blockType,
733 IndexExprType indexExprType, ShaderType shaderType);
734 ~BlockArrayIndexingCase(void);
735
736 void init(void);
737 IterateResult iterate(void);
738
739 private:
740 BlockArrayIndexingCase(const BlockArrayIndexingCase &);
741 BlockArrayIndexingCase &operator=(const BlockArrayIndexingCase &);
742
743 void getShaderSpec(ShaderSpec *spec, int numInstances, int numReads, const int *readIndices,
744 const RenderContext &renderContext) const;
745
746 const BlockType m_blockType;
747 const IndexExprType m_indexExprType;
748 const ShaderType m_shaderType;
749
750 const int m_numInstances;
751 };
752
753 BlockArrayIndexingCase::BlockArrayIndexingCase(Context &context, const char *name, const char *description,
754 BlockType blockType, IndexExprType indexExprType, ShaderType shaderType)
755 : TestCase(context, name, description)
756 , m_blockType(blockType)
757 , m_indexExprType(indexExprType)
758 , m_shaderType(shaderType)
759 , m_numInstances(4)
760 {
761 }
762
763 BlockArrayIndexingCase::~BlockArrayIndexingCase(void)
764 {
765 }
766
767 void BlockArrayIndexingCase::init(void)
768 {
769 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
770 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5),
771 "GL_ARB_ES3_2_compatibility");
772
773 if (!supportsES32)
774 {
775 if (m_shaderType == SHADERTYPE_GEOMETRY)
776 TCU_CHECK_AND_THROW(NotSupportedError,
777 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
778 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
779
780 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
781 TCU_CHECK_AND_THROW(NotSupportedError,
782 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
783 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
784
785 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
786 TCU_CHECK_AND_THROW(NotSupportedError,
787 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
788 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of interface blocks.");
789 }
790
791 if (m_blockType == BLOCKTYPE_BUFFER)
792 {
793 const uint32_t limitPnames[] = {
794 GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS,
795 GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
796 GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS};
797
798 const glw::Functions &gl = m_context.getRenderContext().getFunctions();
799 int maxBlocks = 0;
800
801 gl.getIntegerv(limitPnames[m_shaderType], &maxBlocks);
802 GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv()");
803
804 if (maxBlocks < 2 + m_numInstances)
805 throw tcu::NotSupportedError("Not enough shader storage blocks supported for shader type");
806 }
807 }
808
809 void BlockArrayIndexingCase::getShaderSpec(ShaderSpec *spec, int numInstances, int numReads, const int *readIndices,
810 const RenderContext &renderContext) const
811 {
812 const int binding = 2;
813 const char *blockName = "Block";
814 const char *instanceName = "block";
815 const char *indicesPrefix = "index";
816 const char *resultPrefix = "result";
817 const char *interfaceName = m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
818 const char *layout = m_blockType == BLOCKTYPE_UNIFORM ? "std140" : "std430";
819 const bool supportsES32 =
820 contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
821 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
822 std::ostringstream global;
823 std::ostringstream code;
824
825 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
826 m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
827 global << "#extension GL_EXT_gpu_shader5 : require\n";
828
829 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
830 global << "const highp int indexBase = 1;\n";
831
832 global << "layout(" << layout << ", binding = " << binding << ") " << interfaceName << " " << blockName
833 << "\n"
834 "{\n"
835 " uint value;\n"
836 "} "
837 << instanceName << "[" << numInstances << "];\n";
838
839 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
840 {
841 for (int readNdx = 0; readNdx < numReads; readNdx++)
842 {
843 const string varName = indicesPrefix + de::toString(readNdx);
844 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
845 }
846 }
847 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
848 declareUniformIndexVars(global, indicesPrefix, numReads);
849
850 for (int readNdx = 0; readNdx < numReads; readNdx++)
851 {
852 const string varName = resultPrefix + de::toString(readNdx);
853 spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
854 }
855
856 for (int readNdx = 0; readNdx < numReads; readNdx++)
857 {
858 code << resultPrefix << readNdx << " = " << instanceName << "[";
859
860 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
861 code << readIndices[readNdx];
862 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
863 code << "indexBase + " << (readIndices[readNdx] - 1);
864 else
865 code << indicesPrefix << readNdx;
866
867 code << "].value;\n";
868 }
869
870 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
871 spec->globalDeclarations = global.str();
872 spec->source = code.str();
873 }
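// For reference, a BLOCKTYPE_UNIFORM variant with four instances and a uniform index expression
// produces declarations and reads roughly of this form (BLOCKTYPE_BUFFER uses "buffer" with std430):
//
//     layout(std140, binding = 2) uniform Block
//     {
//         uint value;
//     } block[4];
//     uniform highp int index0;
//     ...
//     result0 = block[index0].value;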
874
875 BlockArrayIndexingCase::IterateResult BlockArrayIndexingCase::iterate(void)
876 {
877 const int numInvocations = 32;
878 const int numInstances = m_numInstances;
879 const int numReads = 4;
880 vector<int> readIndices(numReads);
881 vector<uint32_t> inValues(numInstances);
882 vector<uint32_t> outValues(numInvocations * numReads);
883 ShaderSpec shaderSpec;
884 de::Random rnd(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
885
886 for (int readNdx = 0; readNdx < numReads; readNdx++)
887 readIndices[readNdx] = rnd.getInt(0, numInstances - 1);
888
889 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
890 inValues[instanceNdx] = rnd.getUint32();
891
892 getShaderSpec(&shaderSpec, numInstances, numReads, &readIndices[0], m_context.getRenderContext());
893
894 {
895 const RenderContext &renderCtx = m_context.getRenderContext();
896 const glw::Functions &gl = renderCtx.getFunctions();
897 const int baseBinding = 2;
898 const BufferVector buffers(renderCtx, numInstances);
899 const uint32_t bufTarget = m_blockType == BLOCKTYPE_BUFFER ? GL_SHADER_STORAGE_BUFFER : GL_UNIFORM_BUFFER;
900 ShaderExecutorPtr shaderExecutor(createExecutor(renderCtx, m_shaderType, shaderSpec));
901 vector<int> expandedIndices;
902 vector<void *> inputs;
903 vector<void *> outputs;
904
905 m_testCtx.getLog() << *shaderExecutor;
906
907 if (!shaderExecutor->isOk())
908 TCU_FAIL("Compile failed");
909
910 shaderExecutor->useProgram();
911
912 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
913 {
914 gl.bindBuffer(bufTarget, buffers[instanceNdx]);
915 gl.bufferData(bufTarget, (glw::GLsizeiptr)sizeof(uint32_t), &inValues[instanceNdx], GL_STATIC_DRAW);
916 gl.bindBufferBase(bufTarget, baseBinding + instanceNdx, buffers[instanceNdx]);
917 }
918
919 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
920 {
921 expandedIndices.resize(numInvocations * readIndices.size());
922
923 for (int readNdx = 0; readNdx < numReads; readNdx++)
924 {
925 int *dst = &expandedIndices[numInvocations * readNdx];
926 std::fill(dst, dst + numInvocations, readIndices[readNdx]);
927 }
928
929 for (int readNdx = 0; readNdx < numReads; readNdx++)
930 inputs.push_back(&expandedIndices[readNdx * numInvocations]);
931 }
932 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
933 uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numReads, &readIndices[0]);
934
935 for (int readNdx = 0; readNdx < numReads; readNdx++)
936 outputs.push_back(&outValues[readNdx * numInvocations]);
937
938 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
939
940 shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
941 }
942
943 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
944
945 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
946 {
947 for (int readNdx = 0; readNdx < numReads; readNdx++)
948 {
949 const uint32_t refValue = inValues[readIndices[readNdx]];
950 const uint32_t resValue = outValues[readNdx * numInvocations + invocationNdx];
951
952 if (refValue != resValue)
953 {
954 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", read "
955 << readNdx << ": expected " << tcu::toHex(refValue) << ", got "
956 << tcu::toHex(resValue) << TestLog::EndMessage;
957
958 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
959 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
960 }
961 }
962 }
963
964 return STOP;
965 }
966
967 class AtomicCounterIndexingCase : public TestCase
968 {
969 public:
970 AtomicCounterIndexingCase(Context &context, const char *name, const char *description, IndexExprType indexExprType,
971 ShaderType shaderType);
972 ~AtomicCounterIndexingCase(void);
973
974 void init(void);
975 IterateResult iterate(void);
976
977 private:
978 AtomicCounterIndexingCase(const AtomicCounterIndexingCase &);
979 AtomicCounterIndexingCase &operator=(const AtomicCounterIndexingCase &);
980
981 void getShaderSpec(ShaderSpec *spec, int numCounters, int numOps, const int *opIndices,
982 const RenderContext &renderContext) const;
983
984 const IndexExprType m_indexExprType;
985 const glu::ShaderType m_shaderType;
986 int32_t m_numCounters;
987 };
988
989 AtomicCounterIndexingCase::AtomicCounterIndexingCase(Context &context, const char *name, const char *description,
990 IndexExprType indexExprType, ShaderType shaderType)
991 : TestCase(context, name, description)
992 , m_indexExprType(indexExprType)
993 , m_shaderType(shaderType)
994 , m_numCounters(0)
995 {
996 }
997
998 AtomicCounterIndexingCase::~AtomicCounterIndexingCase(void)
999 {
1000 }
1001
1002 uint32_t getMaxAtomicCounterEnum(glu::ShaderType type)
1003 {
1004 switch (type)
1005 {
1006 case glu::SHADERTYPE_VERTEX:
1007 return GL_MAX_VERTEX_ATOMIC_COUNTERS;
1008 case glu::SHADERTYPE_FRAGMENT:
1009 return GL_MAX_FRAGMENT_ATOMIC_COUNTERS;
1010 case glu::SHADERTYPE_GEOMETRY:
1011 return GL_MAX_GEOMETRY_ATOMIC_COUNTERS;
1012 case glu::SHADERTYPE_COMPUTE:
1013 return GL_MAX_COMPUTE_ATOMIC_COUNTERS;
1014 case glu::SHADERTYPE_TESSELLATION_CONTROL:
1015 return GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS;
1016 case glu::SHADERTYPE_TESSELLATION_EVALUATION:
1017 return GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS;
1018
1019 default:
1020 DE_FATAL("Unknown shader type");
1021 return -1;
1022 }
1023 }
1024
1025 void AtomicCounterIndexingCase::init(void)
1026 {
1027 const bool supportsES32 = contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)) ||
1028 hasExtension(m_context.getRenderContext().getFunctions(), glu::ApiType::core(4, 5),
1029 "GL_ARB_ES3_2_compatibility");
1030
1031 if (!supportsES32)
1032 {
1033 if (m_shaderType == SHADERTYPE_GEOMETRY)
1034 TCU_CHECK_AND_THROW(NotSupportedError,
1035 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
1036 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
1037
1038 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
1039 TCU_CHECK_AND_THROW(NotSupportedError,
1040 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
1041 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
1042
1043 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1044 TCU_CHECK_AND_THROW(NotSupportedError,
1045 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
1046 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of atomic counters.");
1047 }
1048
1049 {
1050 m_context.getRenderContext().getFunctions().getIntegerv(getMaxAtomicCounterEnum(m_shaderType), &m_numCounters);
1051
1052 if (m_numCounters < 1)
1053 {
1054 const string message =
1055 "Atomic counters not supported in " + string(glu::getShaderTypeName(m_shaderType)) + " shader";
1056 TCU_THROW(NotSupportedError, message.c_str());
1057 }
1058 }
1059 }
1060
1061 void AtomicCounterIndexingCase::getShaderSpec(ShaderSpec *spec, int numCounters, int numOps, const int *opIndices,
1062 const RenderContext &renderContext) const
1063 {
1064 const char *indicesPrefix = "index";
1065 const char *resultPrefix = "result";
1066 const bool supportsES32 =
1067 contextSupports(renderContext.getType(), glu::ApiType::es(3, 2)) ||
1068 hasExtension(renderContext.getFunctions(), glu::ApiType::core(4, 5), "GL_ARB_ES3_2_compatibility");
1069 std::ostringstream global;
1070 std::ostringstream code;
1071
1072 if (!supportsES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL &&
1073 m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1074 global << "#extension GL_EXT_gpu_shader5 : require\n";
1075
1076 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1077 global << "const highp int indexBase = 1;\n";
1078
1079 global << "layout(binding = 0) uniform atomic_uint counter[" << numCounters << "];\n";
1080
1081 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1082 {
1083 for (int opNdx = 0; opNdx < numOps; opNdx++)
1084 {
1085 const string varName = indicesPrefix + de::toString(opNdx);
1086 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
1087 }
1088 }
1089 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1090 declareUniformIndexVars(global, indicesPrefix, numOps);
1091
1092 for (int opNdx = 0; opNdx < numOps; opNdx++)
1093 {
1094 const string varName = resultPrefix + de::toString(opNdx);
1095 spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
1096 }
1097
1098 for (int opNdx = 0; opNdx < numOps; opNdx++)
1099 {
1100 code << resultPrefix << opNdx << " = atomicCounterIncrement(counter[";
1101
1102 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1103 code << opIndices[opNdx];
1104 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1105 code << "indexBase + " << (opIndices[opNdx] - 1);
1106 else
1107 code << indicesPrefix << opNdx;
1108
1109 code << "]);\n";
1110 }
1111
1112 spec->version = supportsES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
1113 spec->globalDeclarations = global.str();
1114 spec->source = code.str();
1115 }
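// For reference, with a constant-expression index the generated code looks roughly like this, where
// the array size is the GL_MAX_*_ATOMIC_COUNTERS limit queried for the shader stage:
//
//     const highp int indexBase = 1;
//     layout(binding = 0) uniform atomic_uint counter[8];
//     ...
//     result0 = atomicCounterIncrement(counter[indexBase + 2]);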
1116
1117 AtomicCounterIndexingCase::IterateResult AtomicCounterIndexingCase::iterate(void)
1118 {
1119 const RenderContext &renderCtx = m_context.getRenderContext();
1120 const glw::Functions &gl = renderCtx.getFunctions();
1121 const Buffer counterBuffer(renderCtx);
1122
1123 const int numInvocations = 32;
1124 const int numOps = 4;
1125 vector<int> opIndices(numOps);
1126 vector<uint32_t> outValues(numInvocations * numOps);
1127 ShaderSpec shaderSpec;
1128 de::Random rnd(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1129
1130 for (int opNdx = 0; opNdx < numOps; opNdx++)
1131 opIndices[opNdx] = rnd.getInt(0, numOps - 1);
1132
1133 getShaderSpec(&shaderSpec, m_numCounters, numOps, &opIndices[0], m_context.getRenderContext());
1134
1135 {
1136 const BufferVector buffers(renderCtx, m_numCounters);
1137 ShaderExecutorPtr shaderExecutor(createExecutor(renderCtx, m_shaderType, shaderSpec));
1138 vector<int> expandedIndices;
1139 vector<void *> inputs;
1140 vector<void *> outputs;
1141
1142 m_testCtx.getLog() << *shaderExecutor;
1143
1144 if (!shaderExecutor->isOk())
1145 TCU_FAIL("Compile failed");
1146
1147 {
1148 const int bufSize = getProgramResourceInt(gl, shaderExecutor->getProgram(), GL_ATOMIC_COUNTER_BUFFER, 0,
1149 GL_BUFFER_DATA_SIZE);
1150 const int maxNdx = maxElement(opIndices);
1151 std::vector<uint8_t> emptyData(m_numCounters * 4, 0);
1152
1153 if (bufSize < (maxNdx + 1) * 4)
1154 TCU_FAIL((string("GL reported invalid buffer size " + de::toString(bufSize)).c_str()));
1155
1156 gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, *counterBuffer);
1157 gl.bufferData(GL_ATOMIC_COUNTER_BUFFER, (glw::GLsizeiptr)emptyData.size(), &emptyData[0], GL_STATIC_DRAW);
1158 gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, *counterBuffer);
1159 GLU_EXPECT_NO_ERROR(gl.getError(), "Atomic counter buffer initialization failed");
1160 }
1161
1162 shaderExecutor->useProgram();
1163
1164 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1165 {
1166 expandedIndices.resize(numInvocations * opIndices.size());
1167
1168 for (int opNdx = 0; opNdx < numOps; opNdx++)
1169 {
1170 int *dst = &expandedIndices[numInvocations * opNdx];
1171 std::fill(dst, dst + numInvocations, opIndices[opNdx]);
1172 }
1173
1174 for (int opNdx = 0; opNdx < numOps; opNdx++)
1175 inputs.push_back(&expandedIndices[opNdx * numInvocations]);
1176 }
1177 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1178 uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numOps, &opIndices[0]);
1179
1180 for (int opNdx = 0; opNdx < numOps; opNdx++)
1181 outputs.push_back(&outValues[opNdx * numInvocations]);
1182
1183 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
1184
1185 shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
1186 }
1187
1188 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
1189
1190 {
1191 vector<int> numHits(m_numCounters, 0); // Number of hits per counter.
1192 vector<uint32_t> counterValues(m_numCounters);
1193 vector<vector<bool>> counterMasks(m_numCounters);
1194
1195 for (int opNdx = 0; opNdx < numOps; opNdx++)
1196 numHits[opIndices[opNdx]] += 1;
1197
1198 // Read counter values
1199 {
1200 const void *mapPtr = DE_NULL;
1201
1202 try
1203 {
1204 mapPtr = gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, m_numCounters * 4, GL_MAP_READ_BIT);
1205 GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange(GL_ATOMIC_COUNTER_BUFFER)");
1206 TCU_CHECK(mapPtr);
1207 std::copy((const uint32_t *)mapPtr, (const uint32_t *)mapPtr + m_numCounters, &counterValues[0]);
1208 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1209 }
1210 catch (...)
1211 {
1212 if (mapPtr)
1213 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1214 throw;
1215 }
1216 }
1217
1218 // Verify counter values
1219 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1220 {
1221 const uint32_t refCount = (uint32_t)(numHits[counterNdx] * numInvocations);
1222 const uint32_t resCount = counterValues[counterNdx];
1223
1224 if (refCount != resCount)
1225 {
1226 m_testCtx.getLog() << TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value "
1227 << resCount << ", expected " << refCount << TestLog::EndMessage;
1228
1229 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1230 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid atomic counter value");
1231 }
1232 }
1233
1234 // Allocate bitmasks - one bit per each valid result value
1235 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1236 {
1237 const int counterValue = numHits[counterNdx] * numInvocations;
1238 counterMasks[counterNdx].resize(counterValue, false);
1239 }
1240
1241 // Verify result values from shaders
1242 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1243 {
1244 for (int opNdx = 0; opNdx < numOps; opNdx++)
1245 {
1246 const int counterNdx = opIndices[opNdx];
1247 const uint32_t resValue = outValues[opNdx * numInvocations + invocationNdx];
1248 const bool rangeOk = de::inBounds(resValue, 0u, (uint32_t)counterMasks[counterNdx].size());
1249 const bool notSeen = rangeOk && !counterMasks[counterNdx][resValue];
1250 const bool isOk = rangeOk && notSeen;
1251
1252 if (!isOk)
1253 {
1254 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", op "
1255 << opNdx << ": got invalid result value " << resValue << TestLog::EndMessage;
1256
1257 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1258 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
1259 }
1260 else
1261 {
1262 // Mark as used - no other invocation should see this value from same counter.
1263 counterMasks[counterNdx][resValue] = true;
1264 }
1265 }
1266 }
1267
1268 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1269 {
1270 // Consistency check - all masks should be 1 now
1271 for (int counterNdx = 0; counterNdx < m_numCounters; counterNdx++)
1272 {
1273 for (vector<bool>::const_iterator i = counterMasks[counterNdx].begin();
1274 i != counterMasks[counterNdx].end(); i++)
1275 TCU_CHECK_INTERNAL(*i);
1276 }
1277 }
1278 }
1279
1280 return STOP;
1281 }
1282
1283 } // namespace
1284
1285 OpaqueTypeIndexingTests::OpaqueTypeIndexingTests(Context &context)
1286 : TestCaseGroup(context, "opaque_type_indexing", "Opaque Type Indexing Tests")
1287 {
1288 }
1289
1290 OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests(void)
1291 {
1292 }
1293
1294 void OpaqueTypeIndexingTests::init(void)
1295 {
1296 static const struct
1297 {
1298 IndexExprType type;
1299 const char *name;
1300 const char *description;
1301 } indexingTypes[] = {
1302 {INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal", "Indexing by constant literal"},
1303 {INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression", "Indexing by constant expression"},
1304 {INDEX_EXPR_TYPE_UNIFORM, "uniform", "Indexing by uniform value"},
1305 {INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform", "Indexing by dynamically uniform expression"}};
1306
1307 static const struct
1308 {
1309 ShaderType type;
1310 const char *name;
1311 } shaderTypes[] = {{SHADERTYPE_VERTEX, "vertex"},
1312 {SHADERTYPE_FRAGMENT, "fragment"},
1313 {SHADERTYPE_COMPUTE, "compute"},
1314 {SHADERTYPE_GEOMETRY, "geometry"},
1315 {SHADERTYPE_TESSELLATION_CONTROL, "tessellation_control"},
1316 {SHADERTYPE_TESSELLATION_EVALUATION, "tessellation_evaluation"}};
1317
1318 // .sampler
1319 {
1320 static const DataType samplerTypes[] = {
1321 // \note 1D images will be added by a later extension.
1322 // TYPE_SAMPLER_1D,
1323 TYPE_SAMPLER_2D, TYPE_SAMPLER_CUBE, TYPE_SAMPLER_2D_ARRAY, TYPE_SAMPLER_3D,
1324 // TYPE_SAMPLER_1D_SHADOW,
1325 TYPE_SAMPLER_2D_SHADOW, TYPE_SAMPLER_CUBE_SHADOW, TYPE_SAMPLER_2D_ARRAY_SHADOW,
1326 // TYPE_INT_SAMPLER_1D,
1327 TYPE_INT_SAMPLER_2D, TYPE_INT_SAMPLER_CUBE, TYPE_INT_SAMPLER_2D_ARRAY, TYPE_INT_SAMPLER_3D,
1328 // TYPE_UINT_SAMPLER_1D,
1329 TYPE_UINT_SAMPLER_2D, TYPE_UINT_SAMPLER_CUBE, TYPE_UINT_SAMPLER_2D_ARRAY, TYPE_UINT_SAMPLER_3D,
1330 TYPE_SAMPLER_CUBE_ARRAY, TYPE_SAMPLER_CUBE_ARRAY_SHADOW, TYPE_INT_SAMPLER_CUBE_ARRAY,
1331 TYPE_UINT_SAMPLER_CUBE_ARRAY};
1332
1333 tcu::TestCaseGroup *const samplerGroup =
1334 new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
1335 addChild(samplerGroup);
1336
1337 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1338 {
1339 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1340 tcu::TestCaseGroup *const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name,
1341 indexingTypes[indexTypeNdx].description);
1342 samplerGroup->addChild(indexGroup);
1343
1344 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1345 {
1346 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1347 tcu::TestCaseGroup *const shaderGroup =
1348 new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
1349 indexGroup->addChild(shaderGroup);
1350
1351 for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
1352 {
1353 const DataType samplerType = samplerTypes[samplerTypeNdx];
1354 const char *samplerName = getDataTypeName(samplerType);
1355 const string caseName = de::toLower(samplerName);
1356
1357 shaderGroup->addChild(new SamplerIndexingCase(m_context, caseName.c_str(), "", shaderType,
1358 samplerType, indexExprType));
1359 }
1360 }
1361 }
1362 }
1363
1364 // .ubo / .ssbo / .atomic_counter
1365 {
1366 tcu::TestCaseGroup *const uboGroup =
1367 new tcu::TestCaseGroup(m_testCtx, "ubo", "Uniform Block Instance Array Indexing Tests");
1368 tcu::TestCaseGroup *const ssboGroup =
1369 new tcu::TestCaseGroup(m_testCtx, "ssbo", "Buffer Block Instance Array Indexing Tests");
1370 tcu::TestCaseGroup *const acGroup =
1371 new tcu::TestCaseGroup(m_testCtx, "atomic_counter", "Atomic Counter Array Indexing Tests");
1372 addChild(uboGroup);
1373 addChild(ssboGroup);
1374 addChild(acGroup);
1375
1376 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1377 {
1378 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1379 const char *indexExprName = indexingTypes[indexTypeNdx].name;
1380 const char *indexExprDesc = indexingTypes[indexTypeNdx].description;
1381
1382 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1383 {
1384 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1385 const string name = string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
1386
1387 uboGroup->addChild(new BlockArrayIndexingCase(m_context, name.c_str(), indexExprDesc,
1388 BlockArrayIndexingCase::BLOCKTYPE_UNIFORM, indexExprType,
1389 shaderType));
1390 acGroup->addChild(
1391 new AtomicCounterIndexingCase(m_context, name.c_str(), indexExprDesc, indexExprType, shaderType));
1392
1393 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1394 ssboGroup->addChild(new BlockArrayIndexingCase(m_context, name.c_str(), indexExprDesc,
1395 BlockArrayIndexingCase::BLOCKTYPE_BUFFER,
1396 indexExprType, shaderType));
1397 }
1398 }
1399 }
1400 }
1401
1402 } // namespace Functional
1403 } // namespace gles31
1404 } // namespace deqp
1405