1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Samsung Electronics Co., Ltd.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Protected memory image access tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktProtectedMemShaderImageAccessTests.hpp"
26
27 #include "vktProtectedMemContext.hpp"
28 #include "vktProtectedMemUtils.hpp"
29 #include "vktProtectedMemImageValidator.hpp"
30 #include "vktTestCase.hpp"
31 #include "vktTestGroupUtil.hpp"
32
33 #include "vkPrograms.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkImageUtil.hpp"
37 #include "vkCmdUtil.hpp"
38 #include "vkObjUtil.hpp"
39
40 #include "tcuTestLog.hpp"
41 #include "tcuVector.hpp"
42 #include "tcuTextureUtil.hpp"
43 #include "tcuStringTemplate.hpp"
44
45 #include "gluTextureTestUtil.hpp"
46
47 #include "deRandom.hpp"
48
49 namespace vkt
50 {
51 namespace ProtectedMem
52 {
53
54 namespace
55 {
56
// Framebuffer and test image dimensions in texels; the compute path
// dispatches one invocation per texel of IMAGE_WIDTH x IMAGE_HEIGHT.
enum
{
    RENDER_WIDTH = 128,
    RENDER_HEIGHT = 128,
    IMAGE_WIDTH = 128,
    IMAGE_HEIGHT = 128,
};
64
// How the shader under test accesses the protected image.
enum AccessType
{
    ACCESS_TYPE_SAMPLING = 0,  // texture() through a combined image sampler
    ACCESS_TYPE_TEXEL_FETCH,   // texelFetch() through a combined image sampler
    ACCESS_TYPE_IMAGE_LOAD,    // imageLoad() from a storage image
    ACCESS_TYPE_IMAGE_STORE,   // imageLoad() from one storage image, imageStore() to another
    ACCESS_TYPE_IMAGE_ATOMICS, // imageAtomic*() on a single storage image

    ACCESS_TYPE_LAST
};
75
// Atomic operation exercised when accessType is ACCESS_TYPE_IMAGE_ATOMICS;
// maps one-to-one onto the GLSL imageAtomic* built-ins.
enum AtomicOperation
{
    ATOMIC_OPERATION_ADD = 0,
    ATOMIC_OPERATION_MIN,
    ATOMIC_OPERATION_MAX,
    ATOMIC_OPERATION_AND,
    ATOMIC_OPERATION_OR,
    ATOMIC_OPERATION_XOR,
    ATOMIC_OPERATION_EXCHANGE,

    ATOMIC_OPERATION_LAST
};
88
89 struct Params
90 {
91 glu::ShaderType shaderType;
92 AccessType accessType;
93 vk::VkFormat imageFormat;
94 AtomicOperation atomicOperation;
95 bool pipelineProtectedAccess;
96 bool useMaintenance5;
97 vk::VkPipelineCreateFlags flags;
98 ProtectionMode protectionMode;
99
Paramsvkt::ProtectedMem::__anon1668ec230111::Params100 Params(void)
101 : shaderType(glu::SHADERTYPE_LAST)
102 , accessType(ACCESS_TYPE_LAST)
103 , imageFormat(vk::VK_FORMAT_UNDEFINED)
104 , atomicOperation(ATOMIC_OPERATION_LAST)
105 , pipelineProtectedAccess(false)
106 , useMaintenance5(false)
107 , flags((vk::VkPipelineCreateFlags)0u)
108 , protectionMode(PROTECTION_ENABLED)
109 {
110 }
111
Paramsvkt::ProtectedMem::__anon1668ec230111::Params112 Params(const glu::ShaderType shaderType_, const AccessType accessType_, const vk::VkFormat imageFormat_,
113 const AtomicOperation atomicOperation_, const bool pipelineProtectedAccess_,
114 const vk::VkPipelineCreateFlags flags_)
115 : shaderType(shaderType_)
116 , accessType(accessType_)
117 , imageFormat(imageFormat_)
118 , atomicOperation(atomicOperation_)
119 , pipelineProtectedAccess(pipelineProtectedAccess_)
120 , flags(flags_)
121 , protectionMode(PROTECTION_ENABLED)
122 {
123 #ifndef CTS_USES_VULKANSC
124 if ((flags_ & vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT) != 0)
125 {
126 protectionMode = PROTECTION_DISABLED;
127 }
128 #endif
129 }
130 };
131
getSeedValue(const Params & params)132 static uint32_t getSeedValue(const Params ¶ms)
133 {
134 return deInt32Hash(params.shaderType) ^ deInt32Hash(params.accessType) ^ deInt32Hash(params.imageFormat) ^
135 deInt32Hash(params.atomicOperation);
136 }
137
getAtomicOperationCaseName(const AtomicOperation op)138 static std::string getAtomicOperationCaseName(const AtomicOperation op)
139 {
140 switch (op)
141 {
142 case ATOMIC_OPERATION_ADD:
143 return "add";
144 case ATOMIC_OPERATION_MIN:
145 return "min";
146 case ATOMIC_OPERATION_MAX:
147 return "max";
148 case ATOMIC_OPERATION_AND:
149 return "and";
150 case ATOMIC_OPERATION_OR:
151 return "or";
152 case ATOMIC_OPERATION_XOR:
153 return "xor";
154 case ATOMIC_OPERATION_EXCHANGE:
155 return "exchange";
156 default:
157 DE_FATAL("Impossible");
158 return "";
159 }
160 }
161
getAtomicOperationShaderFuncName(const AtomicOperation op)162 static std::string getAtomicOperationShaderFuncName(const AtomicOperation op)
163 {
164 switch (op)
165 {
166 case ATOMIC_OPERATION_ADD:
167 return "imageAtomicAdd";
168 case ATOMIC_OPERATION_MIN:
169 return "imageAtomicMin";
170 case ATOMIC_OPERATION_MAX:
171 return "imageAtomicMax";
172 case ATOMIC_OPERATION_AND:
173 return "imageAtomicAnd";
174 case ATOMIC_OPERATION_OR:
175 return "imageAtomicOr";
176 case ATOMIC_OPERATION_XOR:
177 return "imageAtomicXor";
178 case ATOMIC_OPERATION_EXCHANGE:
179 return "imageAtomicExchange";
180 default:
181 DE_FATAL("Impossible");
182 return "";
183 }
184 }
185
186 //! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
computeBinaryAtomicOperationResult(const AtomicOperation op,const int32_t a,const int32_t b)187 static int32_t computeBinaryAtomicOperationResult(const AtomicOperation op, const int32_t a, const int32_t b)
188 {
189 switch (op)
190 {
191 case ATOMIC_OPERATION_ADD:
192 return a + b;
193 case ATOMIC_OPERATION_MIN:
194 return de::min(a, b);
195 case ATOMIC_OPERATION_MAX:
196 return de::max(a, b);
197 case ATOMIC_OPERATION_AND:
198 return a & b;
199 case ATOMIC_OPERATION_OR:
200 return a | b;
201 case ATOMIC_OPERATION_XOR:
202 return a ^ b;
203 case ATOMIC_OPERATION_EXCHANGE:
204 return b;
205 default:
206 DE_FATAL("Impossible");
207 return -1;
208 }
209 }
210
getShaderImageFormatQualifier(const tcu::TextureFormat & format)211 static std::string getShaderImageFormatQualifier(const tcu::TextureFormat &format)
212 {
213 const char *orderPart;
214 const char *typePart;
215
216 switch (format.order)
217 {
218 case tcu::TextureFormat::R:
219 orderPart = "r";
220 break;
221 case tcu::TextureFormat::RG:
222 orderPart = "rg";
223 break;
224 case tcu::TextureFormat::RGB:
225 orderPart = "rgb";
226 break;
227 case tcu::TextureFormat::RGBA:
228 orderPart = "rgba";
229 break;
230
231 default:
232 DE_FATAL("Impossible");
233 orderPart = DE_NULL;
234 }
235
236 switch (format.type)
237 {
238 case tcu::TextureFormat::FLOAT:
239 typePart = "32f";
240 break;
241 case tcu::TextureFormat::HALF_FLOAT:
242 typePart = "16f";
243 break;
244
245 case tcu::TextureFormat::UNSIGNED_INT32:
246 typePart = "32ui";
247 break;
248 case tcu::TextureFormat::UNSIGNED_INT16:
249 typePart = "16ui";
250 break;
251 case tcu::TextureFormat::UNSIGNED_INT8:
252 typePart = "8ui";
253 break;
254
255 case tcu::TextureFormat::SIGNED_INT32:
256 typePart = "32i";
257 break;
258 case tcu::TextureFormat::SIGNED_INT16:
259 typePart = "16i";
260 break;
261 case tcu::TextureFormat::SIGNED_INT8:
262 typePart = "8i";
263 break;
264
265 case tcu::TextureFormat::UNORM_INT16:
266 typePart = "16";
267 break;
268 case tcu::TextureFormat::UNORM_INT8:
269 typePart = "8";
270 break;
271
272 case tcu::TextureFormat::SNORM_INT16:
273 typePart = "16_snorm";
274 break;
275 case tcu::TextureFormat::SNORM_INT8:
276 typePart = "8_snorm";
277 break;
278
279 default:
280 DE_FATAL("Impossible");
281 typePart = DE_NULL;
282 }
283
284 return std::string() + orderPart + typePart;
285 }
286
getShaderSamplerOrImageType(const tcu::TextureFormat & format,bool isSampler)287 static std::string getShaderSamplerOrImageType(const tcu::TextureFormat &format, bool isSampler)
288 {
289 const std::string formatPart =
290 tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
291 tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" :
292 "";
293
294 return formatPart + (isSampler ? "sampler2D" : "image2D");
295 }
296
// Instance that performs the requested image access in a fragment or compute
// shader on protected images and validates the resulting image contents.
class ImageAccessTestInstance : public ProtectedTestInstance
{
public:
    ImageAccessTestInstance(Context &ctx, const ImageValidator &validator, const Params &params);
    virtual tcu::TestStatus iterate(void);

private:
    // Builds the single-level reference texture filled with random color tiles.
    de::MovePtr<tcu::Texture2D> createTestTexture2D(void);
    // Applies the atomic operation on the CPU so the reference texture matches
    // what the shader produced (used only for ACCESS_TYPE_IMAGE_ATOMICS).
    void calculateAtomicRef(tcu::Texture2D &texture2D);
    // Compares the given device image against the reference texture.
    tcu::TestStatus validateResult(vk::VkImage image, vk::VkImageLayout imageLayout, const tcu::Texture2D &texture2D,
                                   const tcu::Sampler &refSampler);

    // Stage-specific test bodies, selected by m_params.shaderType in iterate().
    tcu::TestStatus executeFragmentTest(void);
    tcu::TestStatus executeComputeTest(void);

    const ImageValidator &m_validator;
    const Params m_params;
};
315
// Test case wrapper: owns the validator and parameters, generates the
// shaders, and creates an ImageAccessTestInstance to execute them.
class ImageAccessTestCase : public TestCase
{
public:
    ImageAccessTestCase(tcu::TestContext &testCtx, const std::string &name, const Params &params)
        : TestCase(testCtx, name)
        , m_validator(params.imageFormat)
        , m_params(params)
    {
    }

    virtual ~ImageAccessTestCase(void)
    {
    }
    virtual TestInstance *createInstance(Context &ctx) const
    {
        return new ImageAccessTestInstance(ctx, m_validator, m_params);
    }
    virtual void initPrograms(vk::SourceCollections &programCollection) const;
    virtual void checkSupport(Context &context) const
    {
        // Every case needs a protected-capable queue; maintenance5 variants
        // additionally require the VK_KHR_maintenance5 extension.
        checkProtectedQueueSupport(context);
        if (m_params.useMaintenance5)
            context.requireDeviceFunctionality("VK_KHR_maintenance5");
    }

private:
    ImageValidator m_validator;
    Params m_params;
};
345
// Generates the vertex/fragment or compute shader for the configured access
// type, specializing format/type placeholders from the image format.
void ImageAccessTestCase::initPrograms(vk::SourceCollections &programCollection) const
{
    // Template substitution values derived from the tested image format.
    const tcu::TextureFormat &texFormat = mapVkFormat(m_params.imageFormat);
    const std::string imageFormat = getShaderImageFormatQualifier(texFormat);
    const std::string imageType = getShaderSamplerOrImageType(texFormat, false);
    const std::string samplerType = getShaderSamplerOrImageType(texFormat, true);
    const std::string colorVecType = isIntFormat(m_params.imageFormat) ? "ivec4" :
                                     isUintFormat(m_params.imageFormat) ? "uvec4" :
                                                                          "vec4";

    // The validator contributes its own programs to the collection.
    m_validator.initPrograms(programCollection);

    if (m_params.shaderType == glu::SHADERTYPE_FRAGMENT)
    {
        {
            // Vertex shader: pass-through position and texture coordinate.
            const char *vert = "#version 450\n"
                               "layout(location = 0) in mediump vec2 a_position;\n"
                               "layout(location = 1) in mediump vec2 a_texCoord;\n"
                               "layout(location = 0) out mediump vec2 v_texCoord;\n"
                               "\n"
                               "void main() {\n"
                               "    gl_Position = vec4(a_position, 0.0, 1.0);\n"
                               "    v_texCoord = a_texCoord;\n"
                               "}\n";

            programCollection.glslSources.add("vert") << glu::VertexSource(vert);
        }

        {
            // Fragment shader: declarations depend on the access type
            // (sampler vs. one or two storage images).
            std::ostringstream frag;
            frag << "#version 450\n"
                    "layout(location = 0) in mediump vec2 v_texCoord;\n"
                    "layout(location = 0) out highp ${COLOR_VEC_TYPE} o_color;\n";

            switch (m_params.accessType)
            {
            case ACCESS_TYPE_SAMPLING:
            case ACCESS_TYPE_TEXEL_FETCH:
                frag << "layout(set = 0, binding = 0) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
                break;
            case ACCESS_TYPE_IMAGE_LOAD:
                frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_image;\n";
                break;
            case ACCESS_TYPE_IMAGE_STORE:
                frag
                    << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_imageA;\n";
                frag << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) writeonly uniform highp ${IMAGE_TYPE} "
                        "u_imageB;\n";
                break;
            case ACCESS_TYPE_IMAGE_ATOMICS:
                frag << "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) coherent uniform highp ${IMAGE_TYPE} u_image;\n";
                break;
            default:
                DE_FATAL("Impossible");
                break;
            }

            frag << "\n"
                    "void main() {\n";

            // Body: perform the access and write the result to o_color.
            switch (m_params.accessType)
            {
            case ACCESS_TYPE_SAMPLING:
                frag << "    o_color = texture(u_sampler, v_texCoord);\n";
                break;
            case ACCESS_TYPE_TEXEL_FETCH:
                frag << "    const highp int lod = 0;\n";
                frag << "    o_color = texelFetch(u_sampler, ivec2(v_texCoord), lod);\n";
                break;
            case ACCESS_TYPE_IMAGE_LOAD:
                frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
                break;
            case ACCESS_TYPE_IMAGE_STORE:
                frag << "    o_color = imageLoad(u_imageA, ivec2(v_texCoord));\n";
                frag << "    imageStore(u_imageB, ivec2(v_texCoord), o_color);\n";
                break;
            case ACCESS_TYPE_IMAGE_ATOMICS:
                // Atomic operand is gx*gx + gy*gy, mirrored by the CPU
                // reference in calculateAtomicRef().
                frag << "    int gx = int(v_texCoord.x);\n";
                frag << "    int gy = int(v_texCoord.y);\n";
                frag << "    " << getAtomicOperationShaderFuncName(m_params.atomicOperation)
                     << "(u_image, ivec2(v_texCoord), " << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
                     << "(gx*gx + gy*gy));\n";
                frag << "    o_color = imageLoad(u_image, ivec2(v_texCoord));\n";
                break;
            default:
                DE_FATAL("Impossible");
                break;
            }

            frag << "}\n";

            std::map<std::string, std::string> fragParams;

            fragParams["IMAGE_FORMAT"] = imageFormat;
            fragParams["IMAGE_TYPE"] = imageType;
            fragParams["SAMPLER_TYPE"] = samplerType;
            fragParams["COLOR_VEC_TYPE"] = colorVecType;

            programCollection.glslSources.add("frag")
                << glu::FragmentSource(tcu::StringTemplate(frag.str()).specialize(fragParams));
        }
    }
    else if (m_params.shaderType == glu::SHADERTYPE_COMPUTE)
    {
        // Compute shader: one invocation per texel; binding 0 is always the
        // result image, binding 1 (when present) is the source.
        std::ostringstream comp;
        comp << "#version 450\n"
                "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
                "layout(set = 0, binding = 0, ${IMAGE_FORMAT}) ${RES_MEM_QUALIFIER} uniform highp ${IMAGE_TYPE} "
                "u_resultImage;\n";

        switch (m_params.accessType)
        {
        case ACCESS_TYPE_SAMPLING:
        case ACCESS_TYPE_TEXEL_FETCH:
            comp << "layout(set = 0, binding = 1) uniform highp ${SAMPLER_TYPE} u_sampler;\n";
            break;
        case ACCESS_TYPE_IMAGE_LOAD:
        case ACCESS_TYPE_IMAGE_STORE:
            comp << "layout(set = 0, binding = 1, ${IMAGE_FORMAT}) readonly uniform highp ${IMAGE_TYPE} u_srcImage;\n";
            break;
        case ACCESS_TYPE_IMAGE_ATOMICS:
            // Atomics operate in place on the result image; no source binding.
            break;
        default:
            DE_FATAL("Impossible");
            break;
        }

        comp << "\n"
                "void main() {\n"
                "    int gx = int(gl_GlobalInvocationID.x);\n"
                "    int gy = int(gl_GlobalInvocationID.y);\n";

        switch (m_params.accessType)
        {
        case ACCESS_TYPE_SAMPLING:
            // Normalize the invocation coordinate into [0,1) for texture().
            comp << "    ${COLOR_VEC_TYPE} color = texture(u_sampler, vec2(float(gx)/" << de::toString((int)IMAGE_WIDTH)
                 << ", float(gy)/" << de::toString((int)IMAGE_HEIGHT) << "));\n";
            comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
            break;
        case ACCESS_TYPE_TEXEL_FETCH:
            comp << "    const highp int lod = 0;\n";
            comp << "    ${COLOR_VEC_TYPE} color = texelFetch(u_sampler, ivec2(gx, gy), lod);\n";
            comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
            break;
        case ACCESS_TYPE_IMAGE_LOAD:
        case ACCESS_TYPE_IMAGE_STORE:
            comp << "    ${COLOR_VEC_TYPE} color = imageLoad(u_srcImage, ivec2(gx, gy));\n";
            comp << "    imageStore(u_resultImage, ivec2(gx, gy), color);\n";
            break;
        case ACCESS_TYPE_IMAGE_ATOMICS:
            // Atomic operand is gx*gx + gy*gy, mirrored by calculateAtomicRef().
            comp << "    " << getAtomicOperationShaderFuncName(m_params.atomicOperation)
                 << "(u_resultImage, ivec2(gx, gy), " << (isUintFormat(m_params.imageFormat) ? "uint" : "int")
                 << "(gx*gx + gy*gy));\n";
            break;
        default:
            DE_FATAL("Impossible");
            break;
        }

        comp << "}\n";

        std::map<std::string, std::string> compParams;

        compParams["IMAGE_FORMAT"] = imageFormat;
        compParams["IMAGE_TYPE"] = imageType;
        compParams["SAMPLER_TYPE"] = samplerType;
        compParams["COLOR_VEC_TYPE"] = colorVecType;
        compParams["RES_MEM_QUALIFIER"] = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? "coherent" : "writeonly";

        programCollection.glslSources.add("comp")
            << glu::ComputeSource(tcu::StringTemplate(comp.str()).specialize(compParams));
    }
    else
        DE_FATAL("Impossible");
}
524
ImageAccessTestInstance::ImageAccessTestInstance(Context &ctx, const ImageValidator &validator, const Params &params)
    // Pipeline-protected-access cases need the protected device created with
    // the VK_EXT_pipeline_protected_access extension enabled.
    : ProtectedTestInstance(ctx, params.pipelineProtectedAccess ?
                                     std::vector<std::string>({"VK_EXT_pipeline_protected_access"}) :
                                     std::vector<std::string>())
    , m_validator(validator)
    , m_params(params)
{
}
533
createTestTexture2D(void)534 de::MovePtr<tcu::Texture2D> ImageAccessTestInstance::createTestTexture2D(void)
535 {
536 const tcu::TextureFormat texFmt = mapVkFormat(m_params.imageFormat);
537 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(texFmt);
538 de::MovePtr<tcu::Texture2D> texture2D(new tcu::Texture2D(texFmt, IMAGE_WIDTH, IMAGE_HEIGHT));
539
540 // \note generate only the base level
541 texture2D->allocLevel(0);
542
543 const tcu::PixelBufferAccess &level = texture2D->getLevel(0);
544
545 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
546 {
547 // use a smaller range than the format would allow
548 const float cMin = isIntFormat(m_params.imageFormat) ? -1000.0f : 0.0f;
549 const float cMax = +1000.0f;
550
551 fillWithRandomColorTiles(level, tcu::Vec4(cMin, 0, 0, 0), tcu::Vec4(cMax, 0, 0, 0), getSeedValue(m_params));
552 }
553 else
554 fillWithRandomColorTiles(level, fmtInfo.valueMin, fmtInfo.valueMax, getSeedValue(m_params));
555
556 return texture2D;
557 }
558
iterate(void)559 tcu::TestStatus ImageAccessTestInstance::iterate(void)
560 {
561 switch (m_params.shaderType)
562 {
563 case glu::SHADERTYPE_FRAGMENT:
564 return executeFragmentTest();
565 case glu::SHADERTYPE_COMPUTE:
566 return executeComputeTest();
567 default:
568 DE_FATAL("Impossible");
569 return tcu::TestStatus::fail("");
570 }
571 }
572
// Compute path: uploads the reference texture to a protected source image,
// performs the configured access in a compute shader (one invocation per
// texel), then validates the result image against the CPU reference.
tcu::TestStatus ImageAccessTestInstance::executeComputeTest(void)
{
    ProtectedContext &ctx(m_protectedContext);
    const vk::DeviceInterface &vk = ctx.getDeviceInterface();
    const vk::VkDevice device = ctx.getDevice();
    const vk::VkQueue queue = ctx.getQueue();
    const uint32_t queueFamilyIndex = ctx.getQueueFamilyIndex();

    vk::Unique<vk::VkCommandPool> cmdPool(makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));

    de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
    // Nearest-filtering clamped sampler used both for the VkSampler and the
    // reference comparison.
    const tcu::Sampler refSampler = tcu::Sampler(
        tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::NEAREST,
        tcu::Sampler::NEAREST, 00.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
        0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);

    vk::Unique<vk::VkShaderModule> computeShader(
        vk::createShaderModule(vk, device, ctx.getBinaryCollection().get("comp"), 0));

    de::MovePtr<vk::ImageWithMemory> imageSrc;
    de::MovePtr<vk::ImageWithMemory> imageDst;
    vk::Move<vk::VkSampler> sampler;
    vk::Move<vk::VkImageView> imageViewSrc;
    vk::Move<vk::VkImageView> imageViewDst;

    vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
    vk::Move<vk::VkDescriptorPool> descriptorPool;
    vk::Move<vk::VkDescriptorSet> descriptorSet;

    // Create src and dst images
    {
        vk::VkImageUsageFlags imageUsageFlags = vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                                vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT | vk::VK_IMAGE_USAGE_SAMPLED_BIT |
                                                vk::VK_IMAGE_USAGE_STORAGE_BIT;

        imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT,
                                 m_params.imageFormat, imageUsageFlags);

        // Atomics operate in place on the source image, so no separate
        // destination image is needed in that mode.
        if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
        {
            imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT,
                                     m_params.imageFormat, imageUsageFlags);
        }
    }

    // Upload source image
    {
        // Protected images cannot be written directly from the host, so stage
        // the data through an unprotected image and copy it over.
        de::MovePtr<vk::ImageWithMemory> unprotectedImage =
            createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.imageFormat,
                          vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);

        // Upload data to an unprotected image
        uploadImage(m_protectedContext, **unprotectedImage, *texture2D);

        // Select vkImageLayout based upon accessType
        vk::VkImageLayout imageSrcLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;

        switch (m_params.accessType)
        {
        case ACCESS_TYPE_SAMPLING:
        case ACCESS_TYPE_TEXEL_FETCH:
        {
            // Sampled access reads through a combined image sampler.
            imageSrcLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
            break;
        }
        case ACCESS_TYPE_IMAGE_LOAD:
        case ACCESS_TYPE_IMAGE_STORE:
        case ACCESS_TYPE_IMAGE_ATOMICS:
        {
            // Storage image access requires GENERAL layout.
            imageSrcLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
            break;
        }
        default:
            DE_FATAL("Impossible");
            break;
        }

        // Copy unprotected image to protected image
        copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageSrcLayout, IMAGE_WIDTH,
                             IMAGE_HEIGHT, m_params.protectionMode);
    }

    // Clear dst image
    // NOTE(review): the clear is skipped when protection is disabled
    // (no-protected-access variants) — presumably intentional; confirm the
    // destination contents do not need clearing in that configuration.
    if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS && m_params.protectionMode == PROTECTION_ENABLED)
        clearImage(m_protectedContext, **imageDst);

    // Create descriptors
    {
        vk::DescriptorSetLayoutBuilder layoutBuilder;
        vk::DescriptorPoolBuilder poolBuilder;

        // Binding 0 is always the storage result image; binding 1 (when
        // present) is the source — matching the compute shader's layout.
        switch (m_params.accessType)
        {
        case ACCESS_TYPE_SAMPLING:
        case ACCESS_TYPE_TEXEL_FETCH:
            layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
            layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                                                  vk::VK_SHADER_STAGE_COMPUTE_BIT, DE_NULL);
            poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
            poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
            break;
        case ACCESS_TYPE_IMAGE_LOAD:
        case ACCESS_TYPE_IMAGE_STORE:
            layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
            layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
            poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
            break;
        case ACCESS_TYPE_IMAGE_ATOMICS:
            layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_COMPUTE_BIT);
            poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
            break;
        default:
            DE_FATAL("Impossible");
            break;
        }

        descriptorSetLayout = layoutBuilder.build(vk, device);
        descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
        descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
    }

    // Create pipeline layout
    vk::Unique<vk::VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));

    // Create sampler and image views
    {
        if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
        {
            const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
            const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);

            sampler = createSampler(vk, device, &samplerParams);
        }

        imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);

        if (m_params.accessType != ACCESS_TYPE_IMAGE_ATOMICS)
            imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
    }

    // Update descriptor set information
    {
        vk::DescriptorSetUpdateBuilder updateBuilder;

        switch (m_params.accessType)
        {
        case ACCESS_TYPE_SAMPLING:
        case ACCESS_TYPE_TEXEL_FETCH:
        {
            // Binding 0: storage result image, binding 1: sampled source.
            vk::VkDescriptorImageInfo descStorageImgDst =
                makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
            vk::VkDescriptorImageInfo descSampledImgSrc =
                makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

            updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                                      vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
            updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u),
                                      vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImgSrc);
            break;
        }
        case ACCESS_TYPE_IMAGE_LOAD:
        case ACCESS_TYPE_IMAGE_STORE:
        {
            // Binding 0: storage result image, binding 1: storage source.
            vk::VkDescriptorImageInfo descStorageImgDst =
                makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
            vk::VkDescriptorImageInfo descStorageImgSrc =
                makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

            updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                                      vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
            updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u),
                                      vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
            break;
        }
        case ACCESS_TYPE_IMAGE_ATOMICS:
        {
            // Single binding: the source image is mutated in place.
            vk::VkDescriptorImageInfo descStorageImg =
                makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);

            updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
                                      vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
            break;
        }
        default:
            DE_FATAL("Impossible");
            break;
        }

        updateBuilder.update(vk, device);
    }

    // Create validation compute commands & submit
    {
        const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams{
            vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                                 // const void* pNext;
            0u,                                                      // VkPipelineShaderStageCreateFlags flags;
            vk::VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
            *computeShader,                                          // VkShaderModule module;
            "main",                                                  // const char* pName;
            DE_NULL,                                                 // const VkSpecializationInfo* pSpecializationInfo;
        };

        vk::VkComputePipelineCreateInfo pipelineCreateInfo{
            vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                            // const void* pNext;
            m_params.flags,                                     // VkPipelineCreateFlags flags;
            pipelineShaderStageParams,                          // VkPipelineShaderStageCreateInfo stage;
            *pipelineLayout,                                    // VkPipelineLayout layout;
            DE_NULL,                                            // VkPipeline basePipelineHandle;
            0,                                                  // int32_t basePipelineIndex;
        };

#ifndef CTS_USES_VULKANSC
        // Maintenance5 variants pass the create flags through
        // VkPipelineCreateFlags2CreateInfoKHR instead of the legacy field.
        vk::VkPipelineCreateFlags2CreateInfoKHR pipelineFlags2CreateInfo = vk::initVulkanStructure();
        if (m_params.useMaintenance5)
        {
            pipelineFlags2CreateInfo.flags = (vk::VkPipelineCreateFlagBits2KHR)m_params.flags;
            pipelineCreateInfo.pNext = &pipelineFlags2CreateInfo;
            pipelineCreateInfo.flags = 0;
        }
#endif // CTS_USES_VULKANSC

        vk::Unique<vk::VkPipeline> pipeline(createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo));

        const vk::Unique<vk::VkFence> fence(vk::createFence(vk, device));
        vk::Unique<vk::VkCommandBuffer> cmdBuffer(
            vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

        beginCommandBuffer(vk, *cmdBuffer);

        vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
        vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
                                 &*descriptorSet, 0u, DE_NULL);
        // One workgroup (local size 1x1x1) per texel.
        vk.cmdDispatch(*cmdBuffer, (uint32_t)IMAGE_WIDTH, (uint32_t)IMAGE_HEIGHT, 1u);
        endCommandBuffer(vk, *cmdBuffer);

        // Wait indefinitely for completion before validating.
        VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
    }

    // Calculate reference image
    if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
        calculateAtomicRef(*texture2D);

    // Validate result
    {
        // Atomics mutate the source image in place; all other modes write
        // into the separate destination image.
        const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc : **imageDst;

        return validateResult(resultImage, vk::VK_IMAGE_LAYOUT_GENERAL, *texture2D, refSampler);
    }
}
824
executeFragmentTest(void)825 tcu::TestStatus ImageAccessTestInstance::executeFragmentTest(void)
826 {
827 ProtectedContext &ctx(m_protectedContext);
828 const vk::DeviceInterface &vk = ctx.getDeviceInterface();
829 const vk::VkDevice device = ctx.getDevice();
830 const vk::VkQueue queue = ctx.getQueue();
831 const uint32_t queueFamilyIndex = ctx.getQueueFamilyIndex();
832
833 // Create output image
834 de::MovePtr<vk::ImageWithMemory> colorImage(
835 createImage2D(ctx, m_params.protectionMode, queueFamilyIndex, RENDER_WIDTH, RENDER_HEIGHT, m_params.imageFormat,
836 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_SAMPLED_BIT));
837 vk::Unique<vk::VkImageView> colorImageView(createImageView(ctx, **colorImage, m_params.imageFormat));
838
839 vk::Unique<vk::VkRenderPass> renderPass(createRenderPass(ctx, m_params.imageFormat));
840 vk::Unique<vk::VkFramebuffer> framebuffer(
841 createFramebuffer(ctx, RENDER_WIDTH, RENDER_HEIGHT, *renderPass, *colorImageView));
842
843 vk::Unique<vk::VkCommandPool> cmdPool(makeCommandPool(vk, device, m_params.protectionMode, queueFamilyIndex));
844 vk::Unique<vk::VkCommandBuffer> cmdBuffer(
845 vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
846
847 de::MovePtr<tcu::Texture2D> texture2D = createTestTexture2D();
848 const tcu::Sampler refSampler = tcu::Sampler(
849 tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::NEAREST,
850 tcu::Sampler::NEAREST, 00.0f /* LOD threshold */, true /* normalized coords */, tcu::Sampler::COMPAREMODE_NONE,
851 0 /* cmp channel */, tcu::Vec4(0.0f) /* border color */, true /* seamless cube map */);
852
853 vk::Move<vk::VkShaderModule> vertexShader =
854 createShaderModule(vk, device, ctx.getBinaryCollection().get("vert"), 0);
855 vk::Move<vk::VkShaderModule> fragmentShader =
856 createShaderModule(vk, device, ctx.getBinaryCollection().get("frag"), 0);
857
858 de::MovePtr<vk::ImageWithMemory> imageSrc;
859 de::MovePtr<vk::ImageWithMemory> imageDst;
860 vk::Move<vk::VkSampler> sampler;
861 vk::Move<vk::VkImageView> imageViewSrc;
862 vk::Move<vk::VkImageView> imageViewDst;
863
864 vk::Move<vk::VkPipeline> graphicsPipeline;
865 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
866 vk::Move<vk::VkDescriptorPool> descriptorPool;
867 vk::Move<vk::VkDescriptorSet> descriptorSet;
868
869 // Create src and dst images
870 {
871 vk::VkImageUsageFlags imageUsageFlags =
872 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT | vk::VK_IMAGE_USAGE_SAMPLED_BIT;
873
874 switch (m_params.accessType)
875 {
876 case ACCESS_TYPE_IMAGE_LOAD:
877 case ACCESS_TYPE_IMAGE_STORE:
878 case ACCESS_TYPE_IMAGE_ATOMICS:
879 imageUsageFlags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
880 break;
881 default:
882 break;
883 }
884
885 imageSrc = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT,
886 m_params.imageFormat, imageUsageFlags);
887
888 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
889 {
890 imageDst = createImage2D(ctx, m_params.protectionMode, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT,
891 m_params.imageFormat, imageUsageFlags);
892 }
893 }
894
895 // Select vkImageLayout based upon accessType
896 vk::VkImageLayout imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
897
898 switch (m_params.accessType)
899 {
900 case ACCESS_TYPE_SAMPLING:
901 case ACCESS_TYPE_TEXEL_FETCH:
902 {
903 imageLayout = vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
904 break;
905 }
906 case ACCESS_TYPE_IMAGE_LOAD:
907 case ACCESS_TYPE_IMAGE_STORE:
908 case ACCESS_TYPE_IMAGE_ATOMICS:
909 {
910 imageLayout = vk::VK_IMAGE_LAYOUT_GENERAL;
911 break;
912 }
913 default:
914 DE_FATAL("Impossible");
915 break;
916 }
917
918 // Upload source image
919 {
920 de::MovePtr<vk::ImageWithMemory> unprotectedImage =
921 createImage2D(ctx, PROTECTION_DISABLED, queueFamilyIndex, IMAGE_WIDTH, IMAGE_HEIGHT, m_params.imageFormat,
922 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
923
924 // Upload data to an unprotected image
925 uploadImage(m_protectedContext, **unprotectedImage, *texture2D);
926
927 // Copy unprotected image to protected image
928 copyToProtectedImage(m_protectedContext, **unprotectedImage, **imageSrc, imageLayout, IMAGE_WIDTH, IMAGE_HEIGHT,
929 m_params.protectionMode);
930 }
931
932 // Clear dst image
933 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE && m_params.protectionMode == PROTECTION_ENABLED)
934 clearImage(m_protectedContext, **imageDst);
935
936 // Create descriptors
937 {
938 vk::DescriptorSetLayoutBuilder layoutBuilder;
939 vk::DescriptorPoolBuilder poolBuilder;
940
941 switch (m_params.accessType)
942 {
943 case ACCESS_TYPE_SAMPLING:
944 case ACCESS_TYPE_TEXEL_FETCH:
945 layoutBuilder.addSingleSamplerBinding(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
946 vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
947 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u);
948 break;
949 case ACCESS_TYPE_IMAGE_LOAD:
950 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
951 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
952 break;
953 case ACCESS_TYPE_IMAGE_STORE:
954 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
955 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
956 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u);
957 break;
958 case ACCESS_TYPE_IMAGE_ATOMICS:
959 layoutBuilder.addSingleBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, vk::VK_SHADER_STAGE_FRAGMENT_BIT);
960 poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u);
961 break;
962 default:
963 DE_FATAL("Impossible");
964 break;
965 }
966
967 descriptorSetLayout = layoutBuilder.build(vk, device);
968 descriptorPool = poolBuilder.build(vk, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
969 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
970 }
971
972 // Create pipeline layout
973 vk::Unique<vk::VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
974
975 // Create sampler and image views
976 {
977 if (m_params.accessType == ACCESS_TYPE_SAMPLING || m_params.accessType == ACCESS_TYPE_TEXEL_FETCH)
978 {
979 const tcu::TextureFormat texFormat = mapVkFormat(m_params.imageFormat);
980 const vk::VkSamplerCreateInfo samplerParams = vk::mapSampler(refSampler, texFormat);
981
982 sampler = createSampler(vk, device, &samplerParams);
983 }
984
985 imageViewSrc = createImageView(ctx, **imageSrc, m_params.imageFormat);
986
987 if (m_params.accessType == ACCESS_TYPE_IMAGE_STORE)
988 imageViewDst = createImageView(ctx, **imageDst, m_params.imageFormat);
989 }
990
991 // Update descriptor set information
992 {
993 vk::DescriptorSetUpdateBuilder updateBuilder;
994
995 switch (m_params.accessType)
996 {
997 case ACCESS_TYPE_SAMPLING:
998 case ACCESS_TYPE_TEXEL_FETCH:
999 {
1000 vk::VkDescriptorImageInfo descSampledImg =
1001 makeDescriptorImageInfo(*sampler, *imageViewSrc, vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
1002
1003 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
1004 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descSampledImg);
1005 break;
1006 }
1007 case ACCESS_TYPE_IMAGE_LOAD:
1008 {
1009 vk::VkDescriptorImageInfo descStorageImg =
1010 makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
1011
1012 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
1013 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
1014 break;
1015 }
1016 case ACCESS_TYPE_IMAGE_STORE:
1017 {
1018 vk::VkDescriptorImageInfo descStorageImgSrc =
1019 makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
1020 vk::VkDescriptorImageInfo descStorageImgDst =
1021 makeDescriptorImageInfo((vk::VkSampler)0, *imageViewDst, vk::VK_IMAGE_LAYOUT_GENERAL);
1022
1023 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
1024 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgSrc);
1025 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u),
1026 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImgDst);
1027 break;
1028 }
1029 case ACCESS_TYPE_IMAGE_ATOMICS:
1030 {
1031 vk::VkDescriptorImageInfo descStorageImg =
1032 makeDescriptorImageInfo((vk::VkSampler)0, *imageViewSrc, vk::VK_IMAGE_LAYOUT_GENERAL);
1033
1034 updateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u),
1035 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descStorageImg);
1036 break;
1037 }
1038 default:
1039 DE_FATAL("Impossible");
1040 break;
1041 }
1042
1043 updateBuilder.update(vk, device);
1044 }
1045
1046 // Create vertex buffer and vertex input descriptors
1047 VertexBindings vertexBindings;
1048 VertexAttribs vertexAttribs;
1049 de::MovePtr<vk::BufferWithMemory> vertexBuffer;
1050 {
1051 const float positions[] = {
1052 -1.0f, -1.0f, -1.0f, +1.0f, +1.0f, -1.0f, +1.0f, +1.0f,
1053 };
1054
1055 std::vector<float> texCoord;
1056
1057 {
1058 const tcu::Vec2 minCoords(0.0f, 0.0f);
1059 const tcu::Vec2 maxCoords = m_params.accessType == ACCESS_TYPE_SAMPLING ?
1060 tcu::Vec2(1.0f, 1.0f) :
1061 tcu::Vec2((float)IMAGE_WIDTH - 0.1f, (float)IMAGE_HEIGHT - 0.1f);
1062
1063 glu::TextureTestUtil::computeQuadTexCoord2D(texCoord, minCoords, maxCoords);
1064 }
1065
1066 const uint32_t vertexPositionStrideSize = (uint32_t)sizeof(tcu::Vec2);
1067 const uint32_t vertexTextureStrideSize = (uint32_t)sizeof(tcu::Vec2);
1068 const uint32_t positionDataSize = 4 * vertexPositionStrideSize;
1069 const uint32_t textureCoordDataSize = 4 * vertexTextureStrideSize;
1070 const uint32_t vertexBufferSize = positionDataSize + textureCoordDataSize;
1071
1072 {
1073 const vk::VkVertexInputBindingDescription vertexInputBindingDescriptions[2] = {
1074 {
1075 0u, // uint32_t binding;
1076 vertexPositionStrideSize, // uint32_t strideInBytes;
1077 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1078 },
1079 {
1080 1u, // uint32_t binding;
1081 vertexTextureStrideSize, // uint32_t strideInBytes;
1082 vk::VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1083 }};
1084 vertexBindings.push_back(vertexInputBindingDescriptions[0]);
1085 vertexBindings.push_back(vertexInputBindingDescriptions[1]);
1086
1087 const vk::VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] = {
1088 {
1089 0u, // uint32_t location;
1090 0u, // uint32_t binding;
1091 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
1092 0u // uint32_t offsetInBytes;
1093 },
1094 {
1095 1u, // uint32_t location;
1096 1u, // uint32_t binding;
1097 vk::VK_FORMAT_R32G32_SFLOAT, // VkFormat format;
1098 positionDataSize // uint32_t offsetInBytes;
1099 }};
1100 vertexAttribs.push_back(vertexInputAttributeDescriptions[0]);
1101 vertexAttribs.push_back(vertexInputAttributeDescriptions[1]);
1102 }
1103
1104 vertexBuffer = makeBuffer(ctx, PROTECTION_DISABLED, queueFamilyIndex, vertexBufferSize,
1105 vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, vk::MemoryRequirement::HostVisible);
1106
1107 deMemcpy(vertexBuffer->getAllocation().getHostPtr(), positions, positionDataSize);
1108 deMemcpy(reinterpret_cast<uint8_t *>(vertexBuffer->getAllocation().getHostPtr()) + positionDataSize,
1109 texCoord.data(), textureCoordDataSize);
1110 vk::flushAlloc(vk, device, vertexBuffer->getAllocation());
1111 }
1112
1113 // Create pipeline
1114 graphicsPipeline = makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertexShader, *fragmentShader,
1115 vertexBindings, vertexAttribs, tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT),
1116 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, m_params.flags);
1117
1118 // Begin cmd buffer
1119 beginCommandBuffer(vk, *cmdBuffer);
1120
1121 // Start image barrier
1122 {
1123 const vk::VkImageMemoryBarrier startImgBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1124 DE_NULL, // pNext
1125 0, // srcAccessMask
1126 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
1127 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1128 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1129 queueFamilyIndex, // srcQueueFamilyIndex
1130 queueFamilyIndex, // dstQueueFamilyIndex
1131 **colorImage, // image
1132 {
1133 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1134 0u, // baseMipLevel
1135 1u, // mipLevels
1136 0u, // baseArraySlice
1137 1u, // subresourceRange
1138 }};
1139
1140 vk.cmdPipelineBarrier(*cmdBuffer,
1141 vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
1142 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // dstStageMask
1143 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
1144 (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &startImgBarrier);
1145 }
1146
1147 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, vk::makeRect2D(0, 0, RENDER_WIDTH, RENDER_HEIGHT),
1148 tcu::Vec4(0.0f));
1149
1150 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
1151 vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet,
1152 0u, DE_NULL);
1153
1154 {
1155 const vk::VkDeviceSize vertexBufferOffset = 0;
1156
1157 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1158 vk.cmdBindVertexBuffers(*cmdBuffer, 1u, 1u, &vertexBuffer->get(), &vertexBufferOffset);
1159 }
1160
1161 vk.cmdDraw(*cmdBuffer, /*vertexCount*/ 4u, 1u, 0u, 1u);
1162
1163 endRenderPass(vk, *cmdBuffer);
1164
1165 {
1166 const vk::VkImageMemoryBarrier endImgBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1167 DE_NULL, // pNext
1168 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1169 vk::VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
1170 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1171 imageLayout, // newLayout
1172 queueFamilyIndex, // srcQueueFamilyIndex
1173 queueFamilyIndex, // dstQueueFamilyIndex
1174 **colorImage, // image
1175 {
1176 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1177 0u, // baseMipLevel
1178 1u, // mipLevels
1179 0u, // baseArraySlice
1180 1u, // subresourceRange
1181 }};
1182 vk.cmdPipelineBarrier(*cmdBuffer,
1183 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
1184 vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, // dstStageMask
1185 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
1186 (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &endImgBarrier);
1187 }
1188
1189 endCommandBuffer(vk, *cmdBuffer);
1190
1191 // Submit command buffer
1192 {
1193 const vk::Unique<vk::VkFence> fence(vk::createFence(vk, device));
1194 VK_CHECK(queueSubmit(ctx, m_params.protectionMode, queue, *cmdBuffer, *fence, ~0ull));
1195 }
1196
1197 // Calculate reference image
1198 if (m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1199 calculateAtomicRef(*texture2D);
1200
1201 // Validate result
1202 {
1203 const vk::VkImage resultImage = m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS ? **imageSrc :
1204 m_params.accessType == ACCESS_TYPE_IMAGE_STORE ? **imageDst :
1205 **colorImage;
1206
1207 return validateResult(resultImage, imageLayout, *texture2D, refSampler);
1208 }
1209 }
1210
calculateAtomicRef(tcu::Texture2D & texture2D)1211 void ImageAccessTestInstance::calculateAtomicRef(tcu::Texture2D &texture2D)
1212 {
1213 DE_ASSERT(m_params.accessType == ACCESS_TYPE_IMAGE_ATOMICS);
1214
1215 const tcu::PixelBufferAccess &reference = texture2D.getLevel(0);
1216
1217 for (int x = 0; x < reference.getWidth(); ++x)
1218 for (int y = 0; y < reference.getHeight(); ++y)
1219 {
1220 const int32_t oldX = reference.getPixelInt(x, y).x();
1221 const int32_t atomicArg = x * x + y * y;
1222 const int32_t newX = computeBinaryAtomicOperationResult(m_params.atomicOperation, oldX, atomicArg);
1223
1224 reference.setPixel(tcu::IVec4(newX, 0, 0, 0), x, y);
1225 }
1226 }
1227
validateResult(vk::VkImage image,vk::VkImageLayout imageLayout,const tcu::Texture2D & texture2D,const tcu::Sampler & refSampler)1228 tcu::TestStatus ImageAccessTestInstance::validateResult(vk::VkImage image, vk::VkImageLayout imageLayout,
1229 const tcu::Texture2D &texture2D, const tcu::Sampler &refSampler)
1230 {
1231 de::Random rnd(getSeedValue(m_params));
1232 ValidationData refData;
1233
1234 for (int ndx = 0; ndx < 4; ++ndx)
1235 {
1236 const float lod = 0.0f;
1237 const float cx = rnd.getFloat(0.0f, 1.0f);
1238 const float cy = rnd.getFloat(0.0f, 1.0f);
1239
1240 refData.coords[ndx] = tcu::Vec4(cx, cy, 0.0f, 0.0f);
1241 refData.values[ndx] = texture2D.sample(refSampler, cx, cy, lod);
1242 }
1243
1244 if (!m_validator.validateImage(m_protectedContext, refData, image, m_params.imageFormat, imageLayout))
1245 return tcu::TestStatus::fail("Something went really wrong");
1246 else
1247 return tcu::TestStatus::pass("Everything went OK");
1248 }
1249
1250 } // namespace
1251
createShaderImageAccessTests(tcu::TestContext & testCtx)1252 tcu::TestCaseGroup *createShaderImageAccessTests(tcu::TestContext &testCtx)
1253 {
1254 de::MovePtr<tcu::TestCaseGroup> accessGroup(new tcu::TestCaseGroup(testCtx, "access"));
1255
1256 static const struct
1257 {
1258 glu::ShaderType type;
1259 const char *name;
1260 } shaderTypes[] = {
1261 // Image access from fragment shader
1262 {glu::SHADERTYPE_FRAGMENT, "fragment"},
1263 // Image access from compute shader
1264 {glu::SHADERTYPE_COMPUTE, "compute"},
1265 };
1266
1267 static const struct
1268 {
1269 AccessType type;
1270 const char *name;
1271 } accessTypes[] = {
1272 // Sampling test
1273 {ACCESS_TYPE_SAMPLING, "sampling"},
1274 // Texel fetch test
1275 {ACCESS_TYPE_TEXEL_FETCH, "texelfetch"},
1276 // Image load test
1277 {ACCESS_TYPE_IMAGE_LOAD, "imageload"},
1278 // Image store test
1279 {ACCESS_TYPE_IMAGE_STORE, "imagestore"},
1280 // Image atomics test
1281 {ACCESS_TYPE_IMAGE_ATOMICS, "imageatomics"},
1282 };
1283
1284 static const struct
1285 {
1286 vk::VkFormat format;
1287 const char *name;
1288 } formats[] = {
1289 {vk::VK_FORMAT_R8G8B8A8_UNORM, "rgba8"},
1290 {vk::VK_FORMAT_R32_SINT, "r32i"},
1291 {vk::VK_FORMAT_R32_UINT, "r32ui"},
1292 };
1293
1294 static const struct
1295 {
1296 bool pipelineProtectedAccess;
1297 const char *name;
1298 } protectedAccess[] = {
1299 {false, "default"},
1300 #ifndef CTS_USES_VULKANSC
1301 {true, "protected_access"},
1302 #endif
1303 };
1304 static const struct
1305 {
1306 vk::VkPipelineCreateFlags flags;
1307 const char *name;
1308 } flags[] = {
1309 {(vk::VkPipelineCreateFlagBits)0u, "none"},
1310 #ifndef CTS_USES_VULKANSC
1311 {vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, "protected_access_only"},
1312 {vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, "no_protected_access"},
1313 #endif
1314 };
1315
1316 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); ++shaderTypeNdx)
1317 {
1318 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1319 de::MovePtr<tcu::TestCaseGroup> shaderGroup(new tcu::TestCaseGroup(testCtx, shaderTypes[shaderTypeNdx].name));
1320
1321 for (int protectedAccessNdx = 0; protectedAccessNdx < DE_LENGTH_OF_ARRAY(protectedAccess); ++protectedAccessNdx)
1322 {
1323 de::MovePtr<tcu::TestCaseGroup> protectedAccessGroup(
1324 new tcu::TestCaseGroup(testCtx, protectedAccess[protectedAccessNdx].name));
1325 for (int flagsNdx = 0; flagsNdx < DE_LENGTH_OF_ARRAY(flags); ++flagsNdx)
1326 {
1327 de::MovePtr<tcu::TestCaseGroup> flagsGroup(new tcu::TestCaseGroup(testCtx, flags[flagsNdx].name));
1328 if (!protectedAccess[protectedAccessNdx].pipelineProtectedAccess && flags[flagsNdx].flags != 0u)
1329 continue;
1330 for (int accessNdx = 0; accessNdx < DE_LENGTH_OF_ARRAY(accessTypes); ++accessNdx)
1331 {
1332 const AccessType accessType = accessTypes[accessNdx].type;
1333
1334 if (shaderType == glu::SHADERTYPE_COMPUTE &&
1335 accessType == ACCESS_TYPE_IMAGE_STORE) // \note already tested in other tests
1336 continue;
1337
1338 de::MovePtr<tcu::TestCaseGroup> accessTypeGroup(
1339 new tcu::TestCaseGroup(testCtx, accessTypes[accessNdx].name));
1340
1341 if (accessType == ACCESS_TYPE_IMAGE_ATOMICS)
1342 {
1343 for (uint32_t atomicOpI = 0; atomicOpI < ATOMIC_OPERATION_LAST; ++atomicOpI)
1344 {
1345 const AtomicOperation atomicOp = (AtomicOperation)atomicOpI;
1346 de::MovePtr<tcu::TestCaseGroup> operationGroup(
1347 new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(atomicOp).c_str()));
1348
1349 for (uint32_t formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1350 {
1351 const vk::VkFormat format = formats[formatNdx].format;
1352
1353 if (format != vk::VK_FORMAT_R32_UINT && format != vk::VK_FORMAT_R32_SINT)
1354 continue;
1355
1356 operationGroup->addChild(new ImageAccessTestCase(
1357 testCtx, formats[formatNdx].name,
1358 Params(shaderType, accessType, format, atomicOp,
1359 protectedAccess[protectedAccessNdx].pipelineProtectedAccess,
1360 flags[flagsNdx].flags)));
1361 }
1362
1363 accessTypeGroup->addChild(operationGroup.release());
1364 }
1365 }
1366 else
1367 {
1368 for (uint32_t formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
1369 {
1370 const vk::VkFormat format = formats[formatNdx].format;
1371
1372 accessTypeGroup->addChild(new ImageAccessTestCase(
1373 testCtx, formats[formatNdx].name,
1374 Params(shaderType, accessType, format, ATOMIC_OPERATION_LAST,
1375 protectedAccess[protectedAccessNdx].pipelineProtectedAccess,
1376 flags[flagsNdx].flags)));
1377 }
1378 }
1379
1380 flagsGroup->addChild(accessTypeGroup.release());
1381 }
1382 protectedAccessGroup->addChild(flagsGroup.release());
1383 }
1384 shaderGroup->addChild(protectedAccessGroup.release());
1385 }
1386
1387 accessGroup->addChild(shaderGroup.release());
1388 }
1389
1390 #ifndef CTS_USES_VULKANSC
1391 {
1392 Params params(glu::SHADERTYPE_COMPUTE, ACCESS_TYPE_IMAGE_LOAD, vk::VK_FORMAT_R8G8B8A8_UNORM,
1393 ATOMIC_OPERATION_LAST, false, vk::VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT);
1394 params.useMaintenance5 = true;
1395 de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(testCtx, "misc"));
1396 miscGroup->addChild(new ImageAccessTestCase(testCtx, "maintenance5_protected_access", params));
1397 params.flags = vk::VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT;
1398 miscGroup->addChild(new ImageAccessTestCase(testCtx, "maintenance5_no_protected_access", params));
1399 accessGroup->addChild(miscGroup.release());
1400 }
1401 #endif // CTS_USES_VULKANSC
1402
1403 return accessGroup.release();
1404 }
1405
1406 } // namespace ProtectedMem
1407 } // namespace vkt
1408