1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2017 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file vktImageCompressionTranscodingSupport.cpp
21 * \brief Compression transcoding support
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktImageCompressionTranscodingSupport.hpp"
25 #include "vktImageLoadStoreUtil.hpp"
26
27 #include "deUniquePtr.hpp"
28 #include "deStringUtil.hpp"
29 #include "deSharedPtr.hpp"
30 #include "deRandom.hpp"
31
32 #include "vktTestCaseUtil.hpp"
33 #include "vkPrograms.hpp"
34 #include "vkImageUtil.hpp"
35 #include "vkBarrierUtil.hpp"
36 #include "vktImageTestsUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkRef.hpp"
39 #include "vkRefUtil.hpp"
40 #include "vkTypeUtil.hpp"
41 #include "vkQueryUtil.hpp"
42 #include "vkCmdUtil.hpp"
43 #include "vkObjUtil.hpp"
44 #include "vkBufferWithMemory.hpp"
45
46 #include "tcuTextureUtil.hpp"
47 #include "tcuTexture.hpp"
48 #include "tcuCompressedTexture.hpp"
49 #include "tcuVectorType.hpp"
50 #include "tcuResource.hpp"
51 #include "tcuImageIO.hpp"
52 #include "tcuImageCompare.hpp"
53 #include "tcuTestLog.hpp"
54 #include "tcuRGBA.hpp"
55 #include "tcuSurface.hpp"
56
57 #include <vector>
58
59 using namespace vk;
60 namespace vkt
61 {
62 namespace image
63 {
64 namespace
65 {
66 using de::MovePtr;
67 using de::Random;
68 using de::SharedPtr;
69 using std::string;
70 using std::vector;
71 using tcu::Archive;
72 using tcu::CompressedTexFormat;
73 using tcu::CompressedTexture;
74 using tcu::ConstPixelBufferAccess;
75 using tcu::IVec3;
76 using tcu::Resource;
77 using tcu::TestContext;
78 using tcu::TestStatus;
79 using tcu::UVec3;
80
81 typedef SharedPtr<MovePtr<Image>> ImageSp;
82 typedef SharedPtr<Move<VkImageView>> ImageViewSp;
83 typedef SharedPtr<Move<VkDescriptorSet>> SharedVkDescriptorSet;
84
85 enum ShaderType
86 {
87 SHADER_TYPE_COMPUTE,
88 SHADER_TYPE_FRAGMENT,
89 SHADER_TYPE_LAST
90 };
91
92 enum Operation
93 {
94 OPERATION_IMAGE_LOAD,
95 OPERATION_TEXEL_FETCH,
96 OPERATION_TEXTURE,
97 OPERATION_IMAGE_STORE,
98 OPERATION_ATTACHMENT_READ,
99 OPERATION_ATTACHMENT_WRITE,
100 OPERATION_TEXTURE_READ,
101 OPERATION_TEXTURE_WRITE,
102 OPERATION_LAST
103 };
104
105 struct TestParameters
106 {
107 Operation operation;
108 ShaderType shader;
109 UVec3 size;
110 uint32_t layers;
111 ImageType imageType;
112 VkFormat formatCompressed;
113 VkFormat formatUncompressed;
114 uint32_t imagesCount;
115 VkImageUsageFlags compressedImageUsage;
116 VkImageUsageFlags compressedImageViewUsage;
117 VkImageUsageFlags uncompressedImageUsage;
118 bool useMipmaps;
119 VkFormat formatForVerify;
120 const bool multiLayerView;
121
formatIsASTCvkt::image::__anon16cda86a0111::TestParameters122 bool formatIsASTC(void) const
123 {
124 return deInRange32(formatCompressed, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK);
125 }
126
useMultiLayerViewsvkt::image::__anon16cda86a0111::TestParameters127 bool useMultiLayerViews(void) const
128 {
129 return (multiLayerView && layers > 1u);
130 }
131 };
132
133 template <typename T>
makeVkSharedPtr(Move<T> move)134 inline SharedPtr<Move<T>> makeVkSharedPtr(Move<T> move)
135 {
136 return SharedPtr<Move<T>>(new Move<T>(move));
137 }
138
139 template <typename T>
makeVkSharedPtr(MovePtr<T> movePtr)140 inline SharedPtr<MovePtr<T>> makeVkSharedPtr(MovePtr<T> movePtr)
141 {
142 return SharedPtr<MovePtr<T>>(new MovePtr<T>(movePtr));
143 }
144
// Default mip-level / array-layer counts for single-subresource cases.
const uint32_t SINGLE_LEVEL = 1u;
const uint32_t SINGLE_LAYER = 1u;

// Controls whether BinaryCompare() tolerates ASTC error-colour mismatches.
enum BinaryCompareMode
{
    COMPARE_MODE_NORMAL,
    COMPARE_MODE_ALLOW_ASTC_ERROR_COLOUR_WARNING,
};

// Outcome of BinaryCompare().
enum BinaryCompareResult
{
    COMPARE_RESULT_OK,
    COMPARE_RESULT_ASTC_QUALITY_WARNING,
    COMPARE_RESULT_FAILED,
};

// Packed RGBA8 encodings of the ASTC error colours: LDR error colour
// (magenta) and HDR error colour (all zeros).
const uint32_t ASTC_LDR_ERROR_COLOUR = 0xFFFF00FF;
const uint32_t ASTC_HDR_ERROR_COLOUR = 0x00000000;
163
BinaryCompare(const void * reference,const void * result,VkDeviceSize sizeInBytes,VkFormat formatForVerify,BinaryCompareMode mode)164 static BinaryCompareResult BinaryCompare(const void *reference, const void *result, VkDeviceSize sizeInBytes,
165 VkFormat formatForVerify, BinaryCompareMode mode)
166 {
167 DE_UNREF(formatForVerify);
168
169 // Compare quickly using deMemCmp
170 if (deMemCmp(reference, result, (size_t)sizeInBytes) == 0)
171 {
172 return COMPARE_RESULT_OK;
173 }
174 // If deMemCmp indicated a mismatch, we can re-check with a manual comparison of
175 // the ref and res images that allows for ASTC error colour mismatches if the ASTC
176 // comparison mode was selected. This slows down the affected ASTC tests if you
177 // didn't pass in the first comparison, but means in the general case the
178 // comparion is still fast.
179 else if (mode == COMPARE_MODE_ALLOW_ASTC_ERROR_COLOUR_WARNING)
180 {
181 bool bWarn = false;
182 bool bFail = false;
183 const uint32_t *pui32RefVal = (uint32_t *)reference;
184 const uint32_t *pui32ResVal = (uint32_t *)result;
185
186 DE_ASSERT(formatForVerify == VK_FORMAT_R8G8B8A8_UNORM);
187 size_t numPixels = (size_t)(sizeInBytes / 4) /* bytes */;
188 for (size_t i = 0; i < numPixels; i++)
189 {
190 const uint32_t ref = *pui32RefVal++;
191 const uint32_t res = *pui32ResVal++;
192
193 if (ref != res)
194 {
195 // QualityWarning !1231: If the astc pixel was the ASTC LDR error colour
196 // and the result image has the HDR error colour (or vice versa as the test
197 // cases below sometimes reverse the operands) then issue a quality warning
198 // instead of a failure.
199 if ((ref == ASTC_LDR_ERROR_COLOUR && res == ASTC_HDR_ERROR_COLOUR) ||
200 (ref == ASTC_HDR_ERROR_COLOUR && res == ASTC_LDR_ERROR_COLOUR))
201 {
202 bWarn = true;
203 }
204 else
205 {
206 bFail = true;
207 }
208 }
209 }
210
211 if (!bFail)
212 {
213 return (bWarn) ? (COMPARE_RESULT_ASTC_QUALITY_WARNING) : (COMPARE_RESULT_OK);
214 }
215 }
216
217 return COMPARE_RESULT_FAILED;
218 }
219
TestStatusASTCQualityWarning()220 static TestStatus TestStatusASTCQualityWarning()
221 {
222 return TestStatus(QP_TEST_RESULT_QUALITY_WARNING, "ASTC HDR error colour output instead of LDR error colour");
223 }
224
// Common base for all transcoding test instances: owns the test parameters and
// derived geometry (compressed block size, usable mip level count, per-layer
// size) and provides helpers for generating deterministic input data.
class BasicTranscodingTestInstance : public TestInstance
{
public:
    BasicTranscodingTestInstance(Context &context, const TestParameters &parameters);
    virtual TestStatus iterate(void) = 0;

protected:
    // Fill 'toFill' with a fixed pattern (layer 0 / level 0 only) followed by
    // seeded pseudo-random data; values that would not round-trip through the
    // uncompressed view format (SNORM minimum, float INF/NaN/denorm) are scrubbed.
    void generateData(uint8_t *toFill, const size_t size, const VkFormat format, const uint32_t layer = 0u,
                      const uint32_t level = 0u);
    uint32_t getLevelCount();
    uint32_t getLayerCount();
    UVec3 getLayerDims();
    // Mip-chain sizes starting at baseSize (z is forced to 1).
    vector<UVec3> getMipLevelSizes(UVec3 baseSize);
    // The same chain expressed in compressed-format blocks.
    vector<UVec3> getCompressedMipLevelSizes(const VkFormat compressedFormat, const vector<UVec3> &uncompressedSizes);

    const TestParameters m_parameters;
    const uint32_t m_blockWidth;  // compressed block width in texels
    const uint32_t m_blockHeight; // compressed block height in texels
    const uint32_t m_levelCount;  // usable mip levels (see findMipMapLevelCount)
    const UVec3 m_layerSize;

    // Detected error colour mismatch while verifying image. Output
    // the ASTC quality warning instead of a pass
    bool m_bASTCErrorColourMismatch;

private:
    // Number of mip levels whose resolution stays above the block size.
    uint32_t findMipMapLevelCount();
};
253
findMipMapLevelCount()254 uint32_t BasicTranscodingTestInstance::findMipMapLevelCount()
255 {
256 uint32_t levelCount = 1;
257
258 // We cannot use mipmap levels which have resolution below block size.
259 // Reduce number of mipmap levels
260 if (m_parameters.useMipmaps)
261 {
262 uint32_t w = m_parameters.size.x();
263 uint32_t h = m_parameters.size.y();
264
265 DE_ASSERT(m_blockWidth > 0u && m_blockHeight > 0u);
266
267 while (w > m_blockWidth && h > m_blockHeight)
268 {
269 w >>= 1;
270 h >>= 1;
271
272 if (w > m_blockWidth && h > m_blockHeight)
273 levelCount++;
274 }
275
276 DE_ASSERT((m_parameters.size.x() >> (levelCount - 1u)) >= m_blockWidth);
277 DE_ASSERT((m_parameters.size.y() >> (levelCount - 1u)) >= m_blockHeight);
278 }
279
280 return levelCount;
281 }
282
// Derive the compressed block geometry, usable mip level count and per-layer
// size from the requested parameters. Width and height must have the same
// log2 magnitude so the mip chain shrinks uniformly (asserted below).
BasicTranscodingTestInstance::BasicTranscodingTestInstance(Context &context, const TestParameters &parameters)
    : TestInstance(context)
    , m_parameters(parameters)
    , m_blockWidth(getBlockWidth(m_parameters.formatCompressed))
    , m_blockHeight(getBlockHeight(m_parameters.formatCompressed))
    , m_levelCount(findMipMapLevelCount())
    , m_layerSize(getLayerSize(m_parameters.imageType, m_parameters.size))
    , m_bASTCErrorColourMismatch(false)
{
    DE_ASSERT(deLog2Floor32(m_parameters.size.x()) == deLog2Floor32(m_parameters.size.y()));
}
294
// Number of usable mip levels (1 unless mipmaps were requested).
uint32_t BasicTranscodingTestInstance::getLevelCount()
{
    return m_levelCount;
}

// Number of array layers requested by the test parameters.
uint32_t BasicTranscodingTestInstance::getLayerCount()
{
    return m_parameters.layers;
}

// Dimensions of a single layer of the test image.
UVec3 BasicTranscodingTestInstance::getLayerDims()
{
    return m_layerSize;
}
309
getMipLevelSizes(UVec3 baseSize)310 vector<UVec3> BasicTranscodingTestInstance::getMipLevelSizes(UVec3 baseSize)
311 {
312 vector<UVec3> levelSizes;
313 const uint32_t levelCount = getLevelCount();
314
315 baseSize.z() = 1u;
316
317 levelSizes.push_back(baseSize);
318
319 if (m_parameters.imageType == IMAGE_TYPE_1D)
320 {
321 baseSize.y() = 1u;
322
323 while (levelSizes.size() < levelCount && (baseSize.x() != 1))
324 {
325 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
326 levelSizes.push_back(baseSize);
327 }
328 }
329 else
330 {
331 while (levelSizes.size() < levelCount && (baseSize.x() != 1 || baseSize.y() != 1))
332 {
333 baseSize.x() = deMax32(baseSize.x() >> 1, 1);
334 baseSize.y() = deMax32(baseSize.y() >> 1, 1);
335 levelSizes.push_back(baseSize);
336 }
337 }
338
339 DE_ASSERT(levelSizes.size() == getLevelCount());
340
341 return levelSizes;
342 }
343
// Translate each uncompressed mip size into its extent measured in
// compressed-format blocks.
vector<UVec3> BasicTranscodingTestInstance::getCompressedMipLevelSizes(const VkFormat compressedFormat,
                                                                       const vector<UVec3> &uncompressedSizes)
{
    vector<UVec3> blockSizes;
    blockSizes.reserve(uncompressedSizes.size());

    for (const UVec3 &levelSize : uncompressedSizes)
        blockSizes.push_back(getCompressedImageResolutionInBlocks(compressedFormat, levelSize));

    return blockSizes;
}
355
generateData(uint8_t * toFill,const size_t size,const VkFormat format,const uint32_t layer,const uint32_t level)356 void BasicTranscodingTestInstance::generateData(uint8_t *toFill, const size_t size, const VkFormat format,
357 const uint32_t layer, const uint32_t level)
358 {
359 const uint8_t pattern[] = {
360 // 64-bit values
361 0x11,
362 0x11,
363 0x11,
364 0x11,
365 0x22,
366 0x22,
367 0x22,
368 0x22,
369 0x00,
370 0x00,
371 0x00,
372 0x00,
373 0x00,
374 0x00,
375 0x00,
376 0x00,
377 0x00,
378 0x00,
379 0x00,
380 0x00,
381 0x00,
382 0x00,
383 0x00,
384 0x01,
385 0x00,
386 0x00,
387 0x00,
388 0x00,
389 0x00,
390 0x00,
391 0x01,
392 0x00,
393 0x00,
394 0x00,
395 0x00,
396 0x00,
397 0x00,
398 0x01,
399 0x00,
400 0x00,
401 0x00,
402 0x00,
403 0x00,
404 0x00,
405 0x01,
406 0x00,
407 0x00,
408 0x00,
409 0x00,
410 0x00,
411 0x00,
412 0x00,
413 0x00,
414 0x00,
415 0x00,
416 0xFF,
417 0x00,
418 0x00,
419 0x00,
420 0x00,
421 0x00,
422 0x00,
423 0xFF,
424 0x00,
425 0x00,
426 0x00,
427 0x00,
428 0x00,
429 0x00,
430 0xFF,
431 0x00,
432 0x00,
433 0x00,
434 0x00,
435 0x00,
436 0x00,
437 0xFF,
438 0x00,
439 0x00,
440 0x00,
441 0x7F,
442 0xF0,
443 0x00,
444 0x00,
445 0x00,
446 0x00,
447 0x00,
448 0x00, // Positive infinity
449 0xFF,
450 0xF0,
451 0x00,
452 0x00,
453 0x00,
454 0x00,
455 0x00,
456 0x00, // Negative infinity
457 0x7F,
458 0xF0,
459 0x00,
460 0x00,
461 0x00,
462 0x00,
463 0x00,
464 0x01, // Start of a signalling NaN (NANS)
465 0x7F,
466 0xF7,
467 0xFF,
468 0xFF,
469 0xFF,
470 0xFF,
471 0xFF,
472 0xFF, // End of a signalling NaN (NANS)
473 0xFF,
474 0xF0,
475 0x00,
476 0x00,
477 0x00,
478 0x00,
479 0x00,
480 0x01, // Start of a signalling NaN (NANS)
481 0xFF,
482 0xF7,
483 0xFF,
484 0xFF,
485 0xFF,
486 0xFF,
487 0xFF,
488 0xFF, // End of a signalling NaN (NANS)
489 0x7F,
490 0xF8,
491 0x00,
492 0x00,
493 0x00,
494 0x00,
495 0x00,
496 0x00, // Start of a quiet NaN (NANQ)
497 0x7F,
498 0xFF,
499 0xFF,
500 0xFF,
501 0xFF,
502 0xFF,
503 0xFF,
504 0xFF, // End of of a quiet NaN (NANQ)
505 0xFF,
506 0xF8,
507 0x00,
508 0x00,
509 0x00,
510 0x00,
511 0x00,
512 0x00, // Start of a quiet NaN (NANQ)
513 0xFF,
514 0xFF,
515 0xFF,
516 0xFF,
517 0xFF,
518 0xFF,
519 0xFF,
520 0xFF, // End of a quiet NaN (NANQ)
521 // 32-bit values
522 0x7F,
523 0x80,
524 0x00,
525 0x00, // Positive infinity
526 0xFF,
527 0x80,
528 0x00,
529 0x00, // Negative infinity
530 0x7F,
531 0x80,
532 0x00,
533 0x01, // Start of a signalling NaN (NANS)
534 0x7F,
535 0xBF,
536 0xFF,
537 0xFF, // End of a signalling NaN (NANS)
538 0xFF,
539 0x80,
540 0x00,
541 0x01, // Start of a signalling NaN (NANS)
542 0xFF,
543 0xBF,
544 0xFF,
545 0xFF, // End of a signalling NaN (NANS)
546 0x7F,
547 0xC0,
548 0x00,
549 0x00, // Start of a quiet NaN (NANQ)
550 0x7F,
551 0xFF,
552 0xFF,
553 0xFF, // End of of a quiet NaN (NANQ)
554 0xFF,
555 0xC0,
556 0x00,
557 0x00, // Start of a quiet NaN (NANQ)
558 0xFF,
559 0xFF,
560 0xFF,
561 0xFF, // End of a quiet NaN (NANQ)
562 0xAA,
563 0xAA,
564 0xAA,
565 0xAA,
566 0x55,
567 0x55,
568 0x55,
569 0x55,
570 };
571
572 uint8_t *start = toFill;
573 size_t sizeToRnd = size;
574
575 // Pattern part
576 if (layer == 0 && level == 0 && size >= 2 * sizeof(pattern))
577 {
578 // Rotated pattern
579 for (size_t i = 0; i < sizeof(pattern); i++)
580 start[sizeof(pattern) - i - 1] = pattern[i];
581
582 start += sizeof(pattern);
583 sizeToRnd -= sizeof(pattern);
584
585 // Direct pattern
586 deMemcpy(start, pattern, sizeof(pattern));
587
588 start += sizeof(pattern);
589 sizeToRnd -= sizeof(pattern);
590 }
591
592 // Random part
593 {
594 DE_ASSERT(sizeToRnd % sizeof(uint32_t) == 0);
595
596 uint32_t *start32 = reinterpret_cast<uint32_t *>(start);
597 size_t sizeToRnd32 = sizeToRnd / sizeof(uint32_t);
598 uint32_t seed = (layer << 24) ^ (level << 16) ^ static_cast<uint32_t>(format);
599 Random rnd(seed);
600
601 for (size_t i = 0; i < sizeToRnd32; i++)
602 start32[i] = rnd.getUint32();
603 }
604
605 {
606 // Remove certain values that may not be preserved based on the uncompressed view format
607 if (isSnormFormat(m_parameters.formatUncompressed))
608 {
609 for (size_t i = 0; i < size; i += 2)
610 {
611 // SNORM fix: due to write operation in SNORM format
612 // replaces 0x00 0x80 to 0x01 0x80
613 if (toFill[i] == 0x00 && toFill[i + 1] == 0x80)
614 toFill[i + 1] = 0x81;
615 }
616 }
617 else if (isFloatFormat(m_parameters.formatUncompressed))
618 {
619 tcu::TextureFormat textureFormat = mapVkFormat(m_parameters.formatUncompressed);
620
621 if (textureFormat.type == tcu::TextureFormat::HALF_FLOAT)
622 {
623 for (size_t i = 0; i < size; i += 2)
624 {
625 // HALF_FLOAT fix: remove INF and NaN
626 if ((toFill[i + 1] & 0x7C) == 0x7C)
627 toFill[i + 1] = 0x00;
628 }
629 }
630 else if (textureFormat.type == tcu::TextureFormat::FLOAT)
631 {
632 for (size_t i = 0; i < size; i += 4)
633 {
634 // HALF_FLOAT fix: remove INF and NaN
635 if ((toFill[i + 1] & 0x7C) == 0x7C)
636 toFill[i + 1] = 0x00;
637 }
638
639 for (size_t i = 0; i < size; i += 4)
640 {
641 // FLOAT fix: remove INF, NaN, and denorm
642 // Little endian fix
643 if (((toFill[i + 3] & 0x7F) == 0x7F && (toFill[i + 2] & 0x80) == 0x80) ||
644 ((toFill[i + 3] & 0x7F) == 0x00 && (toFill[i + 2] & 0x80) == 0x00))
645 toFill[i + 3] = 0x01;
646 // Big endian fix
647 if (((toFill[i + 0] & 0x7F) == 0x7F && (toFill[i + 1] & 0x80) == 0x80) ||
648 ((toFill[i + 0] & 0x7F) == 0x00 && (toFill[i + 1] & 0x80) == 0x00))
649 toFill[i + 0] = 0x01;
650 }
651 }
652 }
653 }
654 }
655
// Compute-shader variant of the transcoding tests: the compressed image is
// accessed through uncompressed-format views from a compute shader and the
// output is verified against the generated source data.
class BasicComputeTestInstance : public BasicTranscodingTestInstance
{
public:
    BasicComputeTestInstance(Context &context, const TestParameters &parameters);
    TestStatus iterate(void);

protected:
    // Owns the images, their views and the corresponding create-infos for one
    // logical image slot. Note: the view count may exceed the image count
    // (e.g. one view per mip level and layer of a single image).
    struct ImageData
    {
        uint32_t getImagesCount(void)
        {
            return static_cast<uint32_t>(images.size());
        }
        uint32_t getImageViewCount(void)
        {
            return static_cast<uint32_t>(imagesViews.size());
        }
        uint32_t getImageInfoCount(void)
        {
            return static_cast<uint32_t>(imagesInfos.size());
        }
        VkImage getImage(const uint32_t ndx)
        {
            return **images[ndx]->get();
        }
        VkImageView getImageView(const uint32_t ndx)
        {
            return **imagesViews[ndx];
        }
        VkImageCreateInfo getImageInfo(const uint32_t ndx)
        {
            return imagesInfos[ndx];
        }
        void addImage(MovePtr<Image> image)
        {
            images.push_back(makeVkSharedPtr(image));
        }
        void addImageView(Move<VkImageView> imageView)
        {
            imagesViews.push_back(makeVkSharedPtr(imageView));
        }
        void addImageInfo(const VkImageCreateInfo imageInfo)
        {
            imagesInfos.push_back(imageInfo);
        }
        // Drop all views; the images and their create-infos are kept.
        void resetViews()
        {
            imagesViews.clear();
        }

    private:
        vector<ImageSp> images;
        vector<ImageViewSp> imagesViews;
        vector<VkImageCreateInfo> imagesInfos;
    };
    // Upload m_data into the given images through a host-visible staging buffer.
    void copyDataToImage(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer, ImageData &imageData,
                         const vector<UVec3> &mipMapSizes, const bool isCompressed);
    // Build descriptor sets for every view and dispatch the compute shader.
    virtual void executeShader(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
                               const VkDescriptorSetLayout &descriptorSetLayout, const VkDescriptorPool &descriptorPool,
                               vector<ImageData> &imageData);
    // Read back 'uncompressed' and compare its bytes with m_data at 'offset'.
    bool copyResultAndCompare(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
                              const VkImage &uncompressed, const VkDeviceSize offset, const UVec3 &size);
    void descriptorSetUpdate(VkDescriptorSet descriptorSet, const VkDescriptorImageInfo *descriptorImageInfos);
    void createImageInfos(ImageData &imageData, const vector<UVec3> &mipMapSizes, const bool isCompressed);
    // Decompress and verify the transcoded image contents.
    bool decompressImage(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer, vector<ImageData> &imageData,
                         const vector<UVec3> &mipMapSizes);
    vector<uint8_t> m_data; // generated source data for the whole mip chain
};
724
// All work happens in iterate(); the constructor only forwards to the base.
BasicComputeTestInstance::BasicComputeTestInstance(Context &context, const TestParameters &parameters)
    : BasicTranscodingTestInstance(context, parameters)
{
}
729
// Runs the whole compute transcoding flow:
//   1) create the compressed image (index 0) plus uncompressed helper/result
//      images, with uncompressed-format views of each,
//   2) generate and upload source data,
//   3) run the compute shader through the uncompressed views,
//   4) read back the result image and compare it against the source data,
//   5) decompress and verify texel contents.
TestStatus BasicComputeTestInstance::iterate(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    Allocator &allocator = m_context.getDefaultAllocator();
    const Unique<VkCommandPool> cmdPool(
        createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    const Unique<VkCommandBuffer> cmdBuffer(
        allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
    const UVec3 fullSize(m_parameters.size.x(), m_parameters.imageType == IMAGE_TYPE_1D ? 1 : m_parameters.size.y(), 1);
    const vector<UVec3> mipMapSizes =
        m_parameters.useMipmaps ? getMipLevelSizes(getLayerDims()) : vector<UVec3>(1, fullSize);
    vector<ImageData> imageData(m_parameters.imagesCount);
    const uint32_t compressedNdx = 0u;                             // image 0 is always the compressed one
    const uint32_t resultImageNdx = m_parameters.imagesCount - 1u; // last image receives the shader output

    // Create all images and their views (one view per mip/layer, or a single
    // multi-layer view when enabled).
    for (uint32_t imageNdx = 0u; imageNdx < m_parameters.imagesCount; ++imageNdx)
    {
        const bool isCompressed = compressedNdx == imageNdx ? true : false;
        createImageInfos(imageData[imageNdx], mipMapSizes, isCompressed);
        for (uint32_t infoNdx = 0u; infoNdx < imageData[imageNdx].getImageInfoCount(); ++infoNdx)
        {
            imageData[imageNdx].addImage(MovePtr<Image>(
                new Image(vk, device, allocator, imageData[imageNdx].getImageInfo(infoNdx), MemoryRequirement::Any)));
            if (isCompressed)
            {
                // Views of the compressed image use the uncompressed format, so
                // their usage is restricted via VkImageViewUsageCreateInfo.
                const VkImageViewUsageCreateInfo imageViewUsageKHR = {
                    VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, //VkStructureType sType;
                    DE_NULL,                                        //const void* pNext;
                    m_parameters.compressedImageUsage,              //VkImageUsageFlags usage;
                };
                if (m_parameters.useMultiLayerViews())
                {
                    // Single image with all layers.
                    DE_ASSERT(!m_parameters.useMipmaps);
                    DE_ASSERT(m_parameters.imageType == IMAGE_TYPE_2D);

                    imageData[imageNdx].addImageView(
                        makeImageView(vk, device, imageData[imageNdx].getImage(infoNdx), VK_IMAGE_VIEW_TYPE_2D_ARRAY,
                                      m_parameters.formatUncompressed,
                                      makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getLayerCount()),
                                      &imageViewUsageKHR));
                }
                else
                {
                    // One single-subresource view per (mip level, layer) pair.
                    for (uint32_t mipNdx = 0u; mipNdx < mipMapSizes.size(); ++mipNdx)
                        for (uint32_t layerNdx = 0u; layerNdx < getLayerCount(); ++layerNdx)
                        {
                            imageData[imageNdx].addImageView(makeImageView(
                                vk, device, imageData[imageNdx].getImage(infoNdx),
                                mapImageViewType(m_parameters.imageType), m_parameters.formatUncompressed,
                                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipNdx, 1u, layerNdx, 1u),
                                &imageViewUsageKHR));
                        }
                }
            }
            else
            {
                if (m_parameters.useMultiLayerViews())
                {
                    // Single image with all layers.
                    DE_ASSERT(!m_parameters.useMipmaps);
                    DE_ASSERT(m_parameters.imageType == IMAGE_TYPE_2D);

                    imageData[imageNdx].addImageView(makeImageView(
                        vk, device, imageData[imageNdx].getImage(infoNdx), VK_IMAGE_VIEW_TYPE_2D_ARRAY,
                        m_parameters.formatUncompressed,
                        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getLayerCount())));
                }
                else
                {
                    // Uncompressed helper images are single mip/layer: one view each.
                    imageData[imageNdx].addImageView(
                        makeImageView(vk, device, imageData[imageNdx].getImage(infoNdx),
                                      mapImageViewType(m_parameters.imageType), m_parameters.formatUncompressed,
                                      makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u)));
                }
            }
        }
    }

    // Generate source data covering the whole mip chain for every layer.
    {
        size_t size = 0ull;
        for (uint32_t mipNdx = 0u; mipNdx < mipMapSizes.size(); ++mipNdx)
        {
            size += static_cast<size_t>(
                getCompressedImageSizeInBytes(m_parameters.formatCompressed, mipMapSizes[mipNdx]) * getLayerCount());
        }
        m_data.resize(size);
        generateData(&m_data[0], m_data.size(), m_parameters.formatCompressed);
    }

    // Upload into the compressed source image for read operations, or into the
    // uncompressed input image (index 1) for the store operation.
    switch (m_parameters.operation)
    {
    case OPERATION_IMAGE_LOAD:
    case OPERATION_TEXEL_FETCH:
    case OPERATION_TEXTURE:
        copyDataToImage(*cmdPool, *cmdBuffer, imageData[compressedNdx], mipMapSizes, true);
        break;
    case OPERATION_IMAGE_STORE:
        copyDataToImage(*cmdPool, *cmdBuffer, imageData[1], mipMapSizes, false);
        break;
    default:
        DE_ASSERT(false);
        break;
    }

    bool pass = true;
    std::string failString;
    {
        Move<VkDescriptorSetLayout> descriptorSetLayout;
        Move<VkDescriptorPool> descriptorPool;

        // One binding per image: storage image everywhere except the sampled
        // compressed image for texel-fetch/texture operations.
        DescriptorSetLayoutBuilder descriptorSetLayoutBuilder;
        DescriptorPoolBuilder descriptorPoolBuilder;
        for (uint32_t imageNdx = 0u; imageNdx < m_parameters.imagesCount; ++imageNdx)
        {
            switch (m_parameters.operation)
            {
            case OPERATION_IMAGE_LOAD:
            case OPERATION_IMAGE_STORE:
                descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                                            VK_SHADER_STAGE_COMPUTE_BIT);
                descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageData[0].getImageViewCount());
                break;
            case OPERATION_TEXEL_FETCH:
            case OPERATION_TEXTURE:
                descriptorSetLayoutBuilder.addSingleBinding((compressedNdx == imageNdx) ?
                                                                VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER :
                                                                VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                                            VK_SHADER_STAGE_COMPUTE_BIT);
                descriptorPoolBuilder.addType((compressedNdx == imageNdx) ? VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER :
                                                                            VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                              imageData[0].getImageViewCount());
                break;
            default:
                DE_ASSERT(false);
                break;
            }
        }
        descriptorSetLayout = descriptorSetLayoutBuilder.build(vk, device);
        descriptorPool = descriptorPoolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
                                                     imageData[0].getImageViewCount());
        executeShader(*cmdPool, *cmdBuffer, *descriptorSetLayout, *descriptorPool, imageData);

        // Read back the result image (per mip/layer) and compare its bytes
        // against the generated source data.
        {
            // For multilayer views we'll do everything in a single iteration.
            const auto layerIterations = (m_parameters.useMultiLayerViews() ? 1u : getLayerCount());
            VkDeviceSize offset = 0ull;

            for (uint32_t mipNdx = 0u; mipNdx < mipMapSizes.size(); ++mipNdx)
                for (uint32_t layerNdx = 0u; layerNdx < layerIterations; ++layerNdx)
                {
                    const uint32_t imageNdx = layerNdx + mipNdx * getLayerCount();
                    const UVec3 size = UVec3(imageData[resultImageNdx].getImageInfo(imageNdx).extent.width,
                                             imageData[resultImageNdx].getImageInfo(imageNdx).extent.height,
                                             imageData[resultImageNdx].getImageInfo(imageNdx).extent.depth);
                    if (!copyResultAndCompare(*cmdPool, *cmdBuffer, imageData[resultImageNdx].getImage(imageNdx),
                                              offset, size))
                    {
                        pass = false;
                        failString = std::string("Uncompressed output mismatch at offset ") + de::toString(offset) +
                                     " even before executing decompression";
                    }
                    offset += getCompressedImageSizeInBytes(m_parameters.formatCompressed, mipMapSizes[mipNdx]);
                }
        }
    }
    // Decompress the transcoded image and verify the decoded texels.
    if (!decompressImage(*cmdPool, *cmdBuffer, imageData, mipMapSizes))
    {
        pass = false;
        failString = "Decompression failed";
    }

    if (!pass)
        return TestStatus::fail(failString);

    // Texels matched, but only up to ASTC error-colour substitution: report a
    // quality warning instead of a clean pass.
    if (m_bASTCErrorColourMismatch)
    {
        DE_ASSERT(m_parameters.formatIsASTC());
        return TestStatusASTCQualityWarning();
    }

    return TestStatus::pass("Pass");
}
915
// Uploads m_data into every image of 'imageData' via a host-visible staging
// buffer. For compressed images, 'mipMapSizes' supplies per-level copy extents;
// for uncompressed images the image's own extent is used, and the source
// offset still advances by the equivalent compressed byte size (extent scaled
// up by the block dimensions) so both layouts consume the same source bytes.
void BasicComputeTestInstance::copyDataToImage(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
                                               ImageData &imageData, const vector<UVec3> &mipMapSizes,
                                               const bool isCompressed)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    Allocator &allocator = m_context.getDefaultAllocator();

    // Staging buffer holding the generated data; flushed so the device sees it.
    BufferWithMemory imageBuffer(vk, device, allocator,
                                 makeBufferCreateInfo(m_data.size(), VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
                                 MemoryRequirement::HostVisible);
    VkDeviceSize offset = 0ull;
    {
        const Allocation &alloc = imageBuffer.getAllocation();
        deMemcpy(alloc.getHostPtr(), &m_data[0], m_data.size());
        flushAlloc(vk, device, alloc);
    }

    beginCommandBuffer(vk, cmdBuffer);
    // Whole-image range (all mips, all layers) for the layout transition.
    const VkImageSubresourceRange subresourceRange = {
        VK_IMAGE_ASPECT_COLOR_BIT,             //VkImageAspectFlags aspectMask
        0u,                                    //uint32_t baseMipLevel
        imageData.getImageInfo(0u).mipLevels,  //uint32_t levelCount
        0u,                                    //uint32_t baseArrayLayer
        imageData.getImageInfo(0u).arrayLayers //uint32_t layerCount
    };

    for (uint32_t imageNdx = 0u; imageNdx < imageData.getImagesCount(); ++imageNdx)
    {
        // Transition the destination image to TRANSFER_DST_OPTIMAL.
        const VkImageMemoryBarrier preCopyImageBarrier = makeImageMemoryBarrier(
            0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            imageData.getImage(imageNdx), subresourceRange);

        // Make the host write to the staging buffer visible to the transfer.
        const VkBufferMemoryBarrier FlushHostCopyBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, imageBuffer.get(), 0ull, m_data.size());

        vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                              (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL, 1u, &FlushHostCopyBarrier, 1u,
                              &preCopyImageBarrier);

        // Copy each mip level (all array layers at once) from the staging buffer.
        for (uint32_t mipNdx = 0u; mipNdx < imageData.getImageInfo(imageNdx).mipLevels; ++mipNdx)
        {
            const VkExtent3D imageExtent =
                isCompressed ? makeExtent3D(mipMapSizes[mipNdx]) : imageData.getImageInfo(imageNdx).extent;
            const VkBufferImageCopy copyRegion = {
                offset, //VkDeviceSize bufferOffset;
                0u,     //uint32_t bufferRowLength;
                0u,     //uint32_t bufferImageHeight;
                makeImageSubresourceLayers(
                    VK_IMAGE_ASPECT_COLOR_BIT, mipNdx, 0u,
                    imageData.getImageInfo(imageNdx).arrayLayers), //VkImageSubresourceLayers imageSubresource;
                makeOffset3D(0, 0, 0),                             //VkOffset3D imageOffset;
                imageExtent,                                       //VkExtent3D imageExtent;
            };

            vk.cmdCopyBufferToImage(cmdBuffer, imageBuffer.get(), imageData.getImage(imageNdx),
                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
            // Advance by the compressed byte size of this level times the layer
            // count; uncompressed extents are scaled by the block size first.
            offset +=
                getCompressedImageSizeInBytes(
                    m_parameters.formatCompressed,
                    UVec3(isCompressed ? imageExtent.width : imageExtent.width * m_blockWidth,
                          isCompressed ? imageExtent.height : imageExtent.height * m_blockHeight, imageExtent.depth)) *
                imageData.getImageInfo(imageNdx).arrayLayers;
        }
    }
    endCommandBuffer(vk, cmdBuffer);
    submitCommandsAndWait(vk, device, queue, cmdBuffer);
    m_context.resetCommandPoolForVKSC(device, cmdPool);
}
986
executeShader(const VkCommandPool & cmdPool,const VkCommandBuffer & cmdBuffer,const VkDescriptorSetLayout & descriptorSetLayout,const VkDescriptorPool & descriptorPool,vector<ImageData> & imageData)987 void BasicComputeTestInstance::executeShader(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
988 const VkDescriptorSetLayout &descriptorSetLayout,
989 const VkDescriptorPool &descriptorPool, vector<ImageData> &imageData)
990 {
991 const DeviceInterface &vk = m_context.getDeviceInterface();
992 const VkDevice device = m_context.getDevice();
993 const VkQueue queue = m_context.getUniversalQueue();
994 const Unique<VkShaderModule> shaderModule(
995 createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));
996 vector<SharedVkDescriptorSet> descriptorSets(imageData[0].getImageViewCount());
997 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
998 const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
999 Move<VkSampler> sampler;
1000 {
1001 const VkSamplerCreateInfo createInfo = {
1002 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, //VkStructureType sType;
1003 DE_NULL, //const void* pNext;
1004 0u, //VkSamplerCreateFlags flags;
1005 VK_FILTER_NEAREST, //VkFilter magFilter;
1006 VK_FILTER_NEAREST, //VkFilter minFilter;
1007 VK_SAMPLER_MIPMAP_MODE_NEAREST, //VkSamplerMipmapMode mipmapMode;
1008 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeU;
1009 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeV;
1010 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeW;
1011 0.0f, //float mipLodBias;
1012 VK_FALSE, //VkBool32 anisotropyEnable;
1013 1.0f, //float maxAnisotropy;
1014 VK_FALSE, //VkBool32 compareEnable;
1015 VK_COMPARE_OP_EQUAL, //VkCompareOp compareOp;
1016 0.0f, //float minLod;
1017 0.0f, //float maxLod;
1018 VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, //VkBorderColor borderColor;
1019 VK_FALSE, //VkBool32 unnormalizedCoordinates;
1020 };
1021 sampler = createSampler(vk, device, &createInfo);
1022 }
1023
1024 vector<VkDescriptorImageInfo> descriptorImageInfos(descriptorSets.size() * m_parameters.imagesCount);
1025 for (uint32_t viewNdx = 0u; viewNdx < descriptorSets.size(); ++viewNdx)
1026 {
1027 const uint32_t descriptorNdx = viewNdx * m_parameters.imagesCount;
1028 for (uint32_t imageNdx = 0; imageNdx < m_parameters.imagesCount; ++imageNdx)
1029 {
1030 descriptorImageInfos[descriptorNdx + imageNdx] =
1031 makeDescriptorImageInfo(*sampler, imageData[imageNdx].getImageView(viewNdx), VK_IMAGE_LAYOUT_GENERAL);
1032 }
1033 }
1034
1035 for (uint32_t ndx = 0u; ndx < descriptorSets.size(); ++ndx)
1036 descriptorSets[ndx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
1037
1038 beginCommandBuffer(vk, cmdBuffer);
1039 {
1040 const VkImageSubresourceRange compressedRange = {
1041 VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
1042 0u, //uint32_t baseMipLevel
1043 imageData[0].getImageInfo(0u).mipLevels, //uint32_t levelCount
1044 0u, //uint32_t baseArrayLayer
1045 imageData[0].getImageInfo(0u).arrayLayers //uint32_t layerCount
1046 };
1047
1048 if (m_parameters.useMultiLayerViews())
1049 DE_ASSERT(!m_parameters.useMipmaps);
1050
1051 const auto layerCount = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);
1052 const VkImageSubresourceRange uncompressedRange = {
1053 VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
1054 0u, //uint32_t baseMipLevel
1055 1u, //uint32_t levelCount
1056 0u, //uint32_t baseArrayLayer
1057 layerCount, //uint32_t layerCount
1058 };
1059
1060 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
1061
1062 vector<VkImageMemoryBarrier> preShaderImageBarriers;
1063 preShaderImageBarriers.resize(descriptorSets.size() + 1u);
1064 for (uint32_t imageNdx = 0u; imageNdx < imageData[1].getImagesCount(); ++imageNdx)
1065 {
1066 preShaderImageBarriers[imageNdx] = makeImageMemoryBarrier(
1067 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
1068 VK_IMAGE_LAYOUT_GENERAL, imageData[1].getImage(imageNdx), uncompressedRange);
1069 }
1070
1071 preShaderImageBarriers[descriptorSets.size()] = makeImageMemoryBarrier(
1072 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1073 VK_IMAGE_LAYOUT_GENERAL, imageData[0].getImage(0), compressedRange);
1074
1075 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
1076 (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL, 0u,
1077 (const VkBufferMemoryBarrier *)DE_NULL,
1078 static_cast<uint32_t>(preShaderImageBarriers.size()), &preShaderImageBarriers[0]);
1079
1080 for (uint32_t ndx = 0u; ndx < descriptorSets.size(); ++ndx)
1081 {
1082 descriptorSetUpdate(**descriptorSets[ndx], &descriptorImageInfos[ndx * m_parameters.imagesCount]);
1083 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
1084 &(**descriptorSets[ndx]), 0u, DE_NULL);
1085
1086 // For multilayer views, we'll use the Z dimension to handle layers. The extent depth is only 1 in that case.
1087 const auto &extent = imageData[1].getImageInfo(ndx).extent;
1088 vk.cmdDispatch(cmdBuffer, extent.width, extent.height,
1089 (m_parameters.useMultiLayerViews() ? getLayerCount() : extent.depth));
1090 }
1091 }
1092 endCommandBuffer(vk, cmdBuffer);
1093 submitCommandsAndWait(vk, device, queue, cmdBuffer);
1094 m_context.resetCommandPoolForVKSC(device, cmdPool);
1095 }
1096
copyResultAndCompare(const VkCommandPool & cmdPool,const VkCommandBuffer & cmdBuffer,const VkImage & uncompressed,const VkDeviceSize offset,const UVec3 & size)1097 bool BasicComputeTestInstance::copyResultAndCompare(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
1098 const VkImage &uncompressed, const VkDeviceSize offset,
1099 const UVec3 &size)
1100 {
1101 const DeviceInterface &vk = m_context.getDeviceInterface();
1102 const VkQueue queue = m_context.getUniversalQueue();
1103 const VkDevice device = m_context.getDevice();
1104 Allocator &allocator = m_context.getDefaultAllocator();
1105 const auto layerCount = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);
1106 const tcu::IVec3 imageDim(size.x(), size.y(), (m_parameters.useMultiLayerViews() ? layerCount : size.z()));
1107 // For multilayer views, the buffer will hold the full image, including all layers, instead of a single layer.
1108 VkDeviceSize imageResultSize = getImageSizeBytes(imageDim, m_parameters.formatUncompressed);
1109 BufferWithMemory imageBufferResult(vk, device, allocator,
1110 makeBufferCreateInfo(imageResultSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
1111 MemoryRequirement::HostVisible);
1112
1113 beginCommandBuffer(vk, cmdBuffer);
1114 {
1115 const VkImageSubresourceRange subresourceRange = {
1116 VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
1117 0u, //uint32_t baseMipLevel
1118 1u, //uint32_t levelCount
1119 0u, //uint32_t baseArrayLayer
1120 layerCount, //uint32_t layerCount
1121 };
1122
1123 const VkBufferImageCopy copyRegion = {
1124 0ull, // VkDeviceSize bufferOffset;
1125 0u, // uint32_t bufferRowLength;
1126 0u, // uint32_t bufferImageHeight;
1127 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u,
1128 layerCount), // VkImageSubresourceLayers imageSubresource;
1129 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1130 makeExtent3D(size), // VkExtent3D imageExtent;
1131 };
1132
1133 const VkImageMemoryBarrier prepareForTransferBarrier =
1134 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
1135 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, uncompressed, subresourceRange);
1136
1137 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
1138 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, imageBufferResult.get(), 0ull, imageResultSize);
1139
1140 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
1141 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
1142 (const VkBufferMemoryBarrier *)DE_NULL, 1u, &prepareForTransferBarrier);
1143 vk.cmdCopyImageToBuffer(cmdBuffer, uncompressed, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageBufferResult.get(),
1144 1u, ©Region);
1145 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
1146 (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL, 1, ©Barrier, 0u,
1147 (const VkImageMemoryBarrier *)DE_NULL);
1148 }
1149 endCommandBuffer(vk, cmdBuffer);
1150 submitCommandsAndWait(vk, device, queue, cmdBuffer);
1151 m_context.resetCommandPoolForVKSC(device, cmdPool);
1152
1153 const Allocation &allocResult = imageBufferResult.getAllocation();
1154 invalidateAlloc(vk, device, allocResult);
1155 if (deMemCmp((const void *)allocResult.getHostPtr(), (const void *)&m_data[static_cast<size_t>(offset)],
1156 static_cast<size_t>(imageResultSize)) == 0ull)
1157 return true;
1158 return false;
1159 }
1160
descriptorSetUpdate(VkDescriptorSet descriptorSet,const VkDescriptorImageInfo * descriptorImageInfos)1161 void BasicComputeTestInstance::descriptorSetUpdate(VkDescriptorSet descriptorSet,
1162 const VkDescriptorImageInfo *descriptorImageInfos)
1163 {
1164 const DeviceInterface &vk = m_context.getDeviceInterface();
1165 const VkDevice device = m_context.getDevice();
1166 DescriptorSetUpdateBuilder descriptorSetUpdateBuilder;
1167
1168 switch (m_parameters.operation)
1169 {
1170 case OPERATION_IMAGE_LOAD:
1171 case OPERATION_IMAGE_STORE:
1172 {
1173 for (uint32_t bindingNdx = 0u; bindingNdx < m_parameters.imagesCount; ++bindingNdx)
1174 descriptorSetUpdateBuilder.writeSingle(descriptorSet,
1175 DescriptorSetUpdateBuilder::Location::binding(bindingNdx),
1176 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfos[bindingNdx]);
1177
1178 break;
1179 }
1180
1181 case OPERATION_TEXEL_FETCH:
1182 case OPERATION_TEXTURE:
1183 {
1184 for (uint32_t bindingNdx = 0u; bindingNdx < m_parameters.imagesCount; ++bindingNdx)
1185 {
1186 descriptorSetUpdateBuilder.writeSingle(
1187 descriptorSet, DescriptorSetUpdateBuilder::Location::binding(bindingNdx),
1188 bindingNdx == 0u ? VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
1189 &descriptorImageInfos[bindingNdx]);
1190 }
1191
1192 break;
1193 }
1194
1195 default:
1196 DE_ASSERT(false);
1197 }
1198 descriptorSetUpdateBuilder.update(vk, device);
1199 }
1200
createImageInfos(ImageData & imageData,const vector<UVec3> & mipMapSizes,const bool isCompressed)1201 void BasicComputeTestInstance::createImageInfos(ImageData &imageData, const vector<UVec3> &mipMapSizes,
1202 const bool isCompressed)
1203 {
1204 const VkImageType imageType = mapImageType(m_parameters.imageType);
1205
1206 if (isCompressed)
1207 {
1208 const VkExtent3D extentCompressed = makeExtent3D(getLayerSize(m_parameters.imageType, m_parameters.size));
1209 const VkImageCreateInfo compressedInfo = {
1210 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1211 DE_NULL, // const void* pNext;
1212 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT |
1213 VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, // VkImageCreateFlags flags;
1214 imageType, // VkImageType imageType;
1215 m_parameters.formatCompressed, // VkFormat format;
1216 extentCompressed, // VkExtent3D extent;
1217 static_cast<uint32_t>(mipMapSizes.size()), // uint32_t mipLevels;
1218 getLayerCount(), // uint32_t arrayLayers;
1219 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1220 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1221 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1222 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1223 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1224 0u, // uint32_t queueFamilyIndexCount;
1225 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1226 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1227 };
1228
1229 VkImageFormatProperties imageFormatProperties;
1230 if (m_context.getInstanceInterface().getPhysicalDeviceImageFormatProperties(
1231 m_context.getPhysicalDevice(), compressedInfo.format, compressedInfo.imageType, compressedInfo.tiling,
1232 compressedInfo.usage, compressedInfo.flags, &imageFormatProperties) != VK_SUCCESS)
1233 TCU_THROW(NotSupportedError, "Image parameters not supported");
1234
1235 imageData.addImageInfo(compressedInfo);
1236 }
1237 else
1238 {
1239 UVec3 size = m_parameters.size;
1240 if (m_parameters.imageType == IMAGE_TYPE_1D)
1241 {
1242 size.y() = 1;
1243 }
1244 size.z() = 1;
1245
1246 const VkExtent3D originalResolutionInBlocks =
1247 makeExtent3D(getCompressedImageResolutionInBlocks(m_parameters.formatCompressed, size));
1248
1249 // For multilayer views, a single iteration will take care of all layers.
1250 const auto arrayLayers = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);
1251 const auto layerIterations = (m_parameters.useMultiLayerViews() ? 1u : getLayerCount());
1252
1253 for (size_t mipNdx = 0ull; mipNdx < mipMapSizes.size(); ++mipNdx)
1254 for (size_t layerNdx = 0ull; layerNdx < layerIterations; ++layerNdx)
1255 {
1256 const VkExtent3D extentUncompressed = m_parameters.useMipmaps ?
1257 makeExtent3D(getCompressedImageResolutionInBlocks(
1258 m_parameters.formatCompressed, mipMapSizes[mipNdx])) :
1259 originalResolutionInBlocks;
1260 const VkImageCreateInfo uncompressedInfo = {
1261 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1262 DE_NULL, // const void* pNext;
1263 0u, // VkImageCreateFlags flags;
1264 imageType, // VkImageType imageType;
1265 m_parameters.formatUncompressed, // VkFormat format;
1266 extentUncompressed, // VkExtent3D extent;
1267 1u, // uint32_t mipLevels;
1268 arrayLayers, // uint32_t arrayLayers;
1269 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1270 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1271 m_parameters.uncompressedImageUsage | VK_IMAGE_USAGE_SAMPLED_BIT, // VkImageUsageFlags usage;
1272 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1273 0u, // uint32_t queueFamilyIndexCount;
1274 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1275 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1276 };
1277 imageData.addImageInfo(uncompressedInfo);
1278 }
1279 }
1280 }
1281
// Verifies the transcoding round trip: for each mip level / layer iteration, the transcoded
// (uncompressed-format) image data is copied into a staging buffer, re-uploaded into a fresh image
// created with the compressed format, and then the "decompress" compute shader decodes both that
// re-uploaded image and the original compressed image into two storage images ("result" and
// "reference"). The two are read back and compared on the host; on mismatch a per-layer fuzzy
// comparison is logged. Returns false only when the fuzzy comparison also fails.
bool BasicComputeTestInstance::decompressImage(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
                                               vector<ImageData> &imageData, const vector<UVec3> &mipMapSizes)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    Allocator &allocator = m_context.getDefaultAllocator();
    const Unique<VkShaderModule> shaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("decompress"), 0));
    const VkImage &compressed = imageData[0].getImage(0);
    const VkImageType imageType = mapImageType(m_parameters.imageType);

    // Views from earlier passes are no longer needed; fresh ones are made below.
    for (uint32_t ndx = 0u; ndx < imageData.size(); ndx++)
        imageData[ndx].resetViews();

    if (m_parameters.useMultiLayerViews())
    {
        DE_ASSERT(!m_parameters.useMipmaps);
        DE_ASSERT(m_parameters.imageType == IMAGE_TYPE_2D);
    }

    // For multilayer views we'll create a single view with all layers.
    const auto layerIterations = (m_parameters.useMultiLayerViews() ? 1u : getLayerCount());

    for (uint32_t mipNdx = 0u; mipNdx < mipMapSizes.size(); ++mipNdx)
        for (uint32_t layerNdx = 0u; layerNdx < layerIterations; ++layerNdx)
        {
            // Alternate between SHADER_READ_ONLY_OPTIMAL and GENERAL on successive layers so both
            // sampling layouts get exercised.
            const bool layoutShaderReadOnly = (layerNdx % 2u) == 1;
            const uint32_t imageNdx = layerNdx + mipNdx * getLayerCount();
            const auto &mipMapSize = mipMapSizes[mipNdx];
            const VkExtent3D extentCompressed = makeExtent3D(mipMapSize);
            const VkImage &uncompressed = imageData[m_parameters.imagesCount - 1].getImage(imageNdx);
            const VkExtent3D extentUncompressed = imageData[m_parameters.imagesCount - 1].getImageInfo(imageNdx).extent;
            // For multilayer views the staging buffer must hold every layer of the level.
            const auto bufferSizeExtent =
                (m_parameters.useMultiLayerViews() ? UVec3(mipMapSize.x(), mipMapSize.y(), getLayerCount()) :
                                                     mipMapSize);
            const VkDeviceSize bufferSizeComp =
                getCompressedImageSizeInBytes(m_parameters.formatCompressed, bufferSizeExtent);
            const auto arrayLayers = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);

            // The verification format must be usable as a storage image.
            VkFormatProperties properties;
            m_context.getInstanceInterface().getPhysicalDeviceFormatProperties(
                m_context.getPhysicalDevice(), m_parameters.formatForVerify, &properties);
            if (!(properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
                TCU_THROW(NotSupportedError, "Format storage feature not supported");

            // Output images (result/reference) in the verification format.
            const VkImageCreateInfo decompressedImageInfo = {
                VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                             // const void* pNext;
                0u,                                  // VkImageCreateFlags flags;
                imageType,                           // VkImageType imageType;
                m_parameters.formatForVerify,        // VkFormat format;
                extentCompressed,                    // VkExtent3D extent;
                1u,                                  // uint32_t mipLevels;
                arrayLayers,                         // uint32_t arrayLayers;
                VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
                VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                    VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
                VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
                0u,                                  // uint32_t queueFamilyIndexCount;
                DE_NULL,                             // const uint32_t* pQueueFamilyIndices;
                VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
            };

            // Image that receives the transcoded bytes re-uploaded as compressed data.
            const VkImageCreateInfo compressedImageInfo = {
                VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                             // const void* pNext;
                0u,                                  // VkImageCreateFlags flags;
                imageType,                           // VkImageType imageType;
                m_parameters.formatCompressed,       // VkFormat format;
                extentCompressed,                    // VkExtent3D extent;
                1u,                                  // uint32_t mipLevels;
                arrayLayers,                         // uint32_t arrayLayers;
                VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
                VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
                VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
                0u,                                  // uint32_t queueFamilyIndexCount;
                DE_NULL,                             // const uint32_t* pQueueFamilyIndices;
                VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
            };
            const VkImageUsageFlags compressedViewUsageFlags =
                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
            const VkImageViewUsageCreateInfo compressedViewUsageCI = {
                VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, //VkStructureType sType;
                DE_NULL,                                        //const void* pNext;
                compressedViewUsageFlags,                       //VkImageUsageFlags usage;
            };
            const VkImageViewType imageViewType(m_parameters.useMultiLayerViews() ?
                                                    VK_IMAGE_VIEW_TYPE_2D_ARRAY :
                                                    mapImageViewType(m_parameters.imageType));
            Image resultImage(vk, device, allocator, decompressedImageInfo, MemoryRequirement::Any);
            Image referenceImage(vk, device, allocator, decompressedImageInfo, MemoryRequirement::Any);
            Image uncompressedImage(vk, device, allocator, compressedImageInfo, MemoryRequirement::Any);
            Move<VkImageView> resultView = makeImageView(
                vk, device, resultImage.get(), imageViewType, decompressedImageInfo.format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, decompressedImageInfo.extent.depth, 0u,
                                          decompressedImageInfo.arrayLayers));
            Move<VkImageView> referenceView = makeImageView(
                vk, device, referenceImage.get(), imageViewType, decompressedImageInfo.format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, decompressedImageInfo.extent.depth, 0u,
                                          decompressedImageInfo.arrayLayers));
            Move<VkImageView> uncompressedView =
                makeImageView(vk, device, uncompressedImage.get(), imageViewType, m_parameters.formatCompressed,
                              makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, compressedImageInfo.extent.depth,
                                                        0u, compressedImageInfo.arrayLayers));
            // NOTE(review): useMultiLayer is computed from imageType/layers directly rather than via
            // useMultiLayerViews() — presumably intentional (it also covers single-layer-view multi-layer
            // 2D images); confirm against the view/barrier expectations below.
            bool const useMultiLayer = m_parameters.imageType == IMAGE_TYPE_2D && m_parameters.layers > 1u;
            Move<VkImageView> compressedView =
                (useMultiLayer) ?
                    makeImageView(vk, device, compressed, VK_IMAGE_VIEW_TYPE_2D_ARRAY, m_parameters.formatCompressed,
                                  makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u,
                                                            static_cast<uint32_t>(mipMapSizes.size()), 0u,
                                                            m_parameters.layers),
                                  &compressedViewUsageCI) :
                    makeImageView(vk, device, compressed, imageViewType, m_parameters.formatCompressed,
                                  makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipNdx, 1u, layerNdx, 1u),
                                  &compressedViewUsageCI);
            // Bindings: 0 = re-uploaded image (sampled), 1 = original compressed image (sampled),
            // 2 = result storage image, 3 = reference storage image.
            Move<VkDescriptorSetLayout> descriptorSetLayout =
                DescriptorSetLayoutBuilder()
                    .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_COMPUTE_BIT)
                    .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_COMPUTE_BIT)
                    .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
                    .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
                    .build(vk, device);
            Move<VkDescriptorPool> descriptorPool =
                DescriptorPoolBuilder()
                    .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, decompressedImageInfo.arrayLayers)
                    .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, decompressedImageInfo.arrayLayers)
                    .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, decompressedImageInfo.arrayLayers)
                    .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, decompressedImageInfo.arrayLayers)
                    .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
                           decompressedImageInfo.arrayLayers);

            Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
            // Push constants tell the shader which layer/level this iteration decodes.
            const struct PushData
            {
                uint32_t layer;
                uint32_t level;
            } pushData = {layerNdx, mipNdx};
            const VkPushConstantRange pushConstantRange = {VK_SHADER_STAGE_COMPUTE_BIT, 0u,
                                                           static_cast<uint32_t>(sizeof pushData)};
            const Unique<VkPipelineLayout> pipelineLayout(
                makePipelineLayout(vk, device, 1u, &descriptorSetLayout.get(), 1u, &pushConstantRange));
            const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
            // For multilayer views, the buffer will contain the full image with all layers instead of a single layer.
            const auto bufferSizeExtentVerif =
                IVec3((int)extentCompressed.width, (int)extentCompressed.height,
                      (m_parameters.useMultiLayerViews() ? (int)getLayerCount() : (int)extentCompressed.depth));
            const VkDeviceSize bufferSize = getImageSizeBytes(bufferSizeExtentVerif, m_parameters.formatForVerify);
            BufferWithMemory resultBuffer(vk, device, allocator,
                                          makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
                                          MemoryRequirement::HostVisible);
            BufferWithMemory referenceBuffer(vk, device, allocator,
                                             makeBufferCreateInfo(bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
                                             MemoryRequirement::HostVisible);
            BufferWithMemory transferBuffer(vk, device, allocator,
                                            makeBufferCreateInfo(bufferSizeComp, VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
                                                                                     VK_BUFFER_USAGE_TRANSFER_DST_BIT),
                                            MemoryRequirement::HostVisible);
            // Nearest-filtered, edge-clamped sampler; maxLod spans all mip levels.
            Move<VkSampler> sampler;
            {
                const VkSamplerCreateInfo createInfo = {
                    VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,   //VkStructureType sType;
                    DE_NULL,                                 //const void* pNext;
                    0u,                                      //VkSamplerCreateFlags flags;
                    VK_FILTER_NEAREST,                       //VkFilter magFilter;
                    VK_FILTER_NEAREST,                       //VkFilter minFilter;
                    VK_SAMPLER_MIPMAP_MODE_NEAREST,          //VkSamplerMipmapMode mipmapMode;
                    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,   //VkSamplerAddressMode addressModeU;
                    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,   //VkSamplerAddressMode addressModeV;
                    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,   //VkSamplerAddressMode addressModeW;
                    0.0f,                                    //float mipLodBias;
                    VK_FALSE,                                //VkBool32 anisotropyEnable;
                    1.0f,                                    //float maxAnisotropy;
                    VK_FALSE,                                //VkBool32 compareEnable;
                    VK_COMPARE_OP_EQUAL,                     //VkCompareOp compareOp;
                    0.0f,                                    //float minLod;
                    (float)mipMapSizes.size(),               //float maxLod;
                    VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, //VkBorderColor borderColor;
                    VK_FALSE,                                //VkBool32 unnormalizedCoordinates;
                };
                sampler = createSampler(vk, device, &createInfo);
            }

            VkDescriptorImageInfo descriptorImageInfos[] = {
                makeDescriptorImageInfo(*sampler, *uncompressedView,
                                        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
                                                               VK_IMAGE_LAYOUT_GENERAL),
                makeDescriptorImageInfo(*sampler, *compressedView,
                                        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
                                                               VK_IMAGE_LAYOUT_GENERAL),
                makeDescriptorImageInfo(DE_NULL, *resultView, VK_IMAGE_LAYOUT_GENERAL),
                makeDescriptorImageInfo(DE_NULL, *referenceView, VK_IMAGE_LAYOUT_GENERAL)};
            DescriptorSetUpdateBuilder()
                .writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u),
                             VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorImageInfos[0])
                .writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u),
                             VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorImageInfos[1])
                .writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(2u),
                             VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfos[2])
                .writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(3u),
                             VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfos[3])
                .update(vk, device);

            beginCommandBuffer(vk, cmdBuffer);
            {
                const VkImageSubresourceRange subresourceRange = {
                    VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
                    0u,                        //uint32_t baseMipLevel
                    1u,                        //uint32_t levelCount
                    0u,                        //uint32_t baseArrayLayer
                    arrayLayers,               //uint32_t layerCount
                };

                // The original compressed image: whole mip/layer range for multi-layer views,
                // otherwise just the subresource this iteration touches.
                const VkImageSubresourceRange subresourceRangeComp = {
                    VK_IMAGE_ASPECT_COLOR_BIT,     //VkImageAspectFlags aspectMask
                    (useMultiLayer) ? 0u : mipNdx, //uint32_t baseMipLevel
                    (useMultiLayer) ? static_cast<uint32_t>(mipMapSizes.size()) :
                                      1u,                      //uint32_t levelCount
                    (useMultiLayer) ? 0u : layerNdx,           //uint32_t baseArrayLayer
                    (useMultiLayer) ? m_parameters.layers : 1u //uint32_t layerCount
                };

                const VkBufferImageCopy copyRegion = {
                    0ull, // VkDeviceSize bufferOffset;
                    0u,   // uint32_t bufferRowLength;
                    0u,   // uint32_t bufferImageHeight;
                    makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u,
                                               arrayLayers), // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0),                   // VkOffset3D imageOffset;
                    decompressedImageInfo.extent,            // VkExtent3D imageExtent;
                };

                const VkBufferImageCopy compressedCopyRegion = {
                    0ull, // VkDeviceSize bufferOffset;
                    0u,   // uint32_t bufferRowLength;
                    0u,   // uint32_t bufferImageHeight;
                    makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u,
                                               arrayLayers), // VkImageSubresourceLayers imageSubresource;
                    makeOffset3D(0, 0, 0),                   // VkOffset3D imageOffset;
                    extentUncompressed,                      // VkExtent3D imageExtent;
                };

                {

                    const VkBufferMemoryBarrier preCopyBufferBarriers = makeBufferMemoryBarrier(
                        0u, VK_ACCESS_TRANSFER_WRITE_BIT, transferBuffer.get(), 0ull, bufferSizeComp);

                    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1u,
                                          &preCopyBufferBarriers, 0u, (const VkImageMemoryBarrier *)DE_NULL);
                }

                // Download the transcoded data (raw block bytes) into the staging buffer.
                vk.cmdCopyImageToBuffer(cmdBuffer, uncompressed, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                        transferBuffer.get(), 1u, &compressedCopyRegion);

                {
                    const VkBufferMemoryBarrier postCopyBufferBarriers =
                        makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
                                                transferBuffer.get(), 0ull, bufferSizeComp);

                    const VkImageMemoryBarrier preCopyImageBarriers = makeImageMemoryBarrier(
                        0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, uncompressedImage.get(), subresourceRange);

                    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                          (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL, 1u,
                                          &postCopyBufferBarriers, 1u, &preCopyImageBarriers);
                }

                // Re-upload the same bytes into an image with the compressed format.
                vk.cmdCopyBufferToImage(cmdBuffer, transferBuffer.get(), uncompressedImage.get(),
                                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);

                vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
                vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
                                         &descriptorSet.get(), 0u, DE_NULL);

                {
                    const VkImageMemoryBarrier preShaderImageBarriers[] = {

                        makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
                                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                               layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
                                                                      VK_IMAGE_LAYOUT_GENERAL,
                                               uncompressedImage.get(), subresourceRange),

                        // In the multi-layer case the previous odd-layer iteration left the compressed
                        // image in SHADER_READ_ONLY_OPTIMAL, hence the conditional old layout.
                        makeImageMemoryBarrier(0, VK_ACCESS_SHADER_READ_BIT,
                                               (useMultiLayer && !layoutShaderReadOnly && layerNdx) ?
                                                   VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
                                                   VK_IMAGE_LAYOUT_GENERAL,
                                               layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL :
                                                                      VK_IMAGE_LAYOUT_GENERAL,
                                               compressed, subresourceRangeComp),

                        makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                               VK_IMAGE_LAYOUT_GENERAL, resultImage.get(), subresourceRange),

                        makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                               VK_IMAGE_LAYOUT_GENERAL, referenceImage.get(), subresourceRange)};

                    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                          VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0,
                                          (const VkMemoryBarrier *)DE_NULL, 0u, (const VkBufferMemoryBarrier *)DE_NULL,
                                          DE_LENGTH_OF_ARRAY(preShaderImageBarriers), preShaderImageBarriers);
                }

                vk.cmdPushConstants(cmdBuffer, *pipelineLayout, VK_SHADER_STAGE_COMPUTE_BIT, 0u, sizeof pushData,
                                    &pushData);

                // For multilayer views, we'll use the Z dimension to iterate over the layers. The extent depth would be 1 in that case.
                vk.cmdDispatch(cmdBuffer, extentCompressed.width, extentCompressed.height,
                               (m_parameters.useMultiLayerViews() ? getLayerCount() : extentCompressed.depth));

                {
                    const VkImageMemoryBarrier postShaderImageBarriers[] = {
                        makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
                                               VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                               resultImage.get(), subresourceRange),

                        makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
                                               VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                               referenceImage.get(), subresourceRange)};

                    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                          VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u,
                                          (const VkMemoryBarrier *)DE_NULL, 0u, (const VkBufferMemoryBarrier *)DE_NULL,
                                          DE_LENGTH_OF_ARRAY(postShaderImageBarriers), postShaderImageBarriers);
                }

                // Read both decoded images back for the host-side comparison.
                vk.cmdCopyImageToBuffer(cmdBuffer, resultImage.get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                        resultBuffer.get(), 1u, &copyRegion);
                vk.cmdCopyImageToBuffer(cmdBuffer, referenceImage.get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                        referenceBuffer.get(), 1u, &copyRegion);

                {
                    const VkBufferMemoryBarrier postCopyBufferBarrier[] = {
                        makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
                                                resultBuffer.get(), 0ull, bufferSize),

                        makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
                                                referenceBuffer.get(), 0ull, bufferSize),
                    };

                    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                                          (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL,
                                          DE_LENGTH_OF_ARRAY(postCopyBufferBarrier), postCopyBufferBarrier, 0u,
                                          (const VkImageMemoryBarrier *)DE_NULL);
                }
            }
            endCommandBuffer(vk, cmdBuffer);
            submitCommandsAndWait(vk, device, queue, cmdBuffer);
            m_context.resetCommandPoolForVKSC(device, cmdPool);

            const Allocation &resultAlloc = resultBuffer.getAllocation();
            const Allocation &referenceAlloc = referenceBuffer.getAllocation();
            invalidateAlloc(vk, device, resultAlloc);
            invalidateAlloc(vk, device, referenceAlloc);

            // ASTC decoders may legitimately differ on error-colour blocks; downgrade that to a warning.
            BinaryCompareMode compareMode =
                (m_parameters.formatIsASTC()) ? (COMPARE_MODE_ALLOW_ASTC_ERROR_COLOUR_WARNING) : (COMPARE_MODE_NORMAL);

            BinaryCompareResult res = BinaryCompare(referenceAlloc.getHostPtr(), resultAlloc.getHostPtr(),
                                                    (size_t)bufferSize, m_parameters.formatForVerify, compareMode);

            if (res == COMPARE_RESULT_FAILED)
            {
                // For multilayer views we'll iterate only once (see above) creating views that contain all layers. However, it's easier
                // to compare images layer by layer, and that also lets us log images which are easier to analyze, so in the multilayer
                // view case we have to run this comparison loop multiple times instead of once. In the single-layer views case, the
                // outer loop will iterate over the layers, and this inner loop only has to be run once.
                const auto innerIterations = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);

                for (uint32_t i = 0u; i < innerIterations; ++i)
                {
                    const auto tcuFormat = mapVkFormat(decompressedImageInfo.format);
                    const auto &extent = decompressedImageInfo.extent;
                    const auto pixelSize = tcu::getPixelSize(tcuFormat);
                    const auto layerSize =
                        extent.width * extent.height * extent.depth * static_cast<uint32_t>(pixelSize);
                    const auto imageName =
                        "Image Comparison Layer " + std::to_string(m_parameters.useMultiLayerViews() ? i : layerNdx);

                    const auto resLayerPtr = reinterpret_cast<const char *>(resultAlloc.getHostPtr()) + layerSize * i;
                    const auto refLayerPtr =
                        reinterpret_cast<const char *>(referenceAlloc.getHostPtr()) + layerSize * i;

                    ConstPixelBufferAccess resultPixels(tcuFormat, extent.width, extent.height, extent.depth,
                                                        resLayerPtr);
                    ConstPixelBufferAccess referencePixels(tcuFormat, extent.width, extent.height, extent.depth,
                                                           refLayerPtr);

                    if (!fuzzyCompare(m_context.getTestContext().getLog(), imageName.c_str(), "", resultPixels,
                                      referencePixels, 0.001f, tcu::COMPARE_LOG_EVERYTHING))
                        return false;
                }
            }
            else if (res == COMPARE_RESULT_ASTC_QUALITY_WARNING)
            {
                // Remember the mismatch so the test can report a quality warning instead of failing.
                m_bASTCErrorColourMismatch = true;
            }
        }

    return true;
}
1687
1688 class ImageStoreComputeTestInstance : public BasicComputeTestInstance
1689 {
1690 public:
1691 ImageStoreComputeTestInstance(Context &context, const TestParameters ¶meters);
1692
1693 protected:
1694 virtual void executeShader(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
1695 const VkDescriptorSetLayout &descriptorSetLayout, const VkDescriptorPool &descriptorPool,
1696 vector<ImageData> &imageData);
1697
1698 private:
1699 };
1700
ImageStoreComputeTestInstance(Context & context,const TestParameters & parameters)1701 ImageStoreComputeTestInstance::ImageStoreComputeTestInstance(Context &context, const TestParameters ¶meters)
1702 : BasicComputeTestInstance(context, parameters)
1703 {
1704 }
1705
executeShader(const VkCommandPool & cmdPool,const VkCommandBuffer & cmdBuffer,const VkDescriptorSetLayout & descriptorSetLayout,const VkDescriptorPool & descriptorPool,vector<ImageData> & imageData)1706 void ImageStoreComputeTestInstance::executeShader(const VkCommandPool &cmdPool, const VkCommandBuffer &cmdBuffer,
1707 const VkDescriptorSetLayout &descriptorSetLayout,
1708 const VkDescriptorPool &descriptorPool, vector<ImageData> &imageData)
1709 {
1710 const DeviceInterface &vk = m_context.getDeviceInterface();
1711 const VkDevice device = m_context.getDevice();
1712 const VkQueue queue = m_context.getUniversalQueue();
1713 const Unique<VkShaderModule> shaderModule(
1714 createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));
1715 vector<SharedVkDescriptorSet> descriptorSets(imageData[0].getImageViewCount());
1716 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
1717 const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
1718 const auto layerCount = (m_parameters.useMultiLayerViews() ? getLayerCount() : 1u);
1719 Move<VkSampler> sampler;
1720 {
1721 const VkSamplerCreateInfo createInfo = {
1722 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, //VkStructureType sType;
1723 DE_NULL, //const void* pNext;
1724 0u, //VkSamplerCreateFlags flags;
1725 VK_FILTER_NEAREST, //VkFilter magFilter;
1726 VK_FILTER_NEAREST, //VkFilter minFilter;
1727 VK_SAMPLER_MIPMAP_MODE_NEAREST, //VkSamplerMipmapMode mipmapMode;
1728 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeU;
1729 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeV;
1730 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, //VkSamplerAddressMode addressModeW;
1731 0.0f, //float mipLodBias;
1732 VK_FALSE, //VkBool32 anisotropyEnable;
1733 1.0f, //float maxAnisotropy;
1734 VK_FALSE, //VkBool32 compareEnable;
1735 VK_COMPARE_OP_EQUAL, //VkCompareOp compareOp;
1736 0.0f, //float minLod;
1737 0.0f, //float maxLod;
1738 VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, //VkBorderColor borderColor;
1739 VK_TRUE, //VkBool32 unnormalizedCoordinates;
1740 };
1741 sampler = createSampler(vk, device, &createInfo);
1742 }
1743
1744 vector<VkDescriptorImageInfo> descriptorImageInfos(descriptorSets.size() * m_parameters.imagesCount);
1745 for (uint32_t viewNdx = 0u; viewNdx < descriptorSets.size(); ++viewNdx)
1746 {
1747 const uint32_t descriptorNdx = viewNdx * m_parameters.imagesCount;
1748 for (uint32_t imageNdx = 0u; imageNdx < m_parameters.imagesCount; ++imageNdx)
1749 {
1750 descriptorImageInfos[descriptorNdx + imageNdx] =
1751 makeDescriptorImageInfo(*sampler, imageData[imageNdx].getImageView(viewNdx), VK_IMAGE_LAYOUT_GENERAL);
1752 }
1753 }
1754
1755 for (uint32_t ndx = 0u; ndx < descriptorSets.size(); ++ndx)
1756 descriptorSets[ndx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
1757
1758 beginCommandBuffer(vk, cmdBuffer);
1759 {
1760 const VkImageSubresourceRange compressedRange = {
1761 VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
1762 0u, //uint32_t baseMipLevel
1763 imageData[0].getImageInfo(0).mipLevels, //uint32_t levelCount
1764 0u, //uint32_t baseArrayLayer
1765 imageData[0].getImageInfo(0).arrayLayers //uint32_t layerCount
1766 };
1767
1768 const VkImageSubresourceRange uncompressedRange = {
1769 VK_IMAGE_ASPECT_COLOR_BIT, //VkImageAspectFlags aspectMask
1770 0u, //uint32_t baseMipLevel
1771 1u, //uint32_t levelCount
1772 0u, //uint32_t baseArrayLayer
1773 layerCount, //uint32_t layerCount
1774 };
1775
1776 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
1777
1778 vector<VkImageMemoryBarrier> preShaderImageBarriers(descriptorSets.size() * 2u + 1u);
1779 for (uint32_t imageNdx = 0u; imageNdx < imageData[1].getImagesCount(); ++imageNdx)
1780 {
1781 preShaderImageBarriers[imageNdx] = makeImageMemoryBarrier(
1782 VK_ACCESS_TRANSFER_WRITE_BIT, (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT),
1783 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, imageData[1].getImage(imageNdx),
1784 uncompressedRange);
1785
1786 preShaderImageBarriers[imageNdx + imageData[1].getImagesCount()] = makeImageMemoryBarrier(
1787 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
1788 VK_IMAGE_LAYOUT_GENERAL, imageData[2].getImage(imageNdx), uncompressedRange);
1789 }
1790
1791 preShaderImageBarriers[preShaderImageBarriers.size() - 1] = makeImageMemoryBarrier(
1792 VK_ACCESS_TRANSFER_WRITE_BIT, (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT),
1793 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, imageData[0].getImage(0u), compressedRange);
1794
1795 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
1796 (VkDependencyFlags)0, 0u, (const VkMemoryBarrier *)DE_NULL, 0u,
1797 (const VkBufferMemoryBarrier *)DE_NULL,
1798 static_cast<uint32_t>(preShaderImageBarriers.size()), &preShaderImageBarriers[0]);
1799
1800 for (uint32_t ndx = 0u; ndx < descriptorSets.size(); ++ndx)
1801 {
1802 descriptorSetUpdate(**descriptorSets[ndx], &descriptorImageInfos[ndx * m_parameters.imagesCount]);
1803 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
1804 &(**descriptorSets[ndx]), 0u, DE_NULL);
1805
1806 // For multilayer views, we'll use the Z dimension to iterate over the layers, while the extent depth is only 1.
1807 const auto &extent = imageData[1].getImageInfo(ndx).extent;
1808 vk.cmdDispatch(cmdBuffer, extent.width, extent.height,
1809 (m_parameters.useMultiLayerViews() ? getLayerCount() : extent.depth));
1810 }
1811 }
1812 endCommandBuffer(vk, cmdBuffer);
1813 submitCommandsAndWait(vk, device, queue, cmdBuffer);
1814 m_context.resetCommandPoolForVKSC(device, cmdPool);
1815 }
1816
1817 class GraphicsAttachmentsTestInstance : public BasicTranscodingTestInstance
1818 {
1819 public:
1820 GraphicsAttachmentsTestInstance(Context &context, const TestParameters ¶meters);
1821 virtual TestStatus iterate(void);
1822
1823 protected:
1824 virtual bool isWriteToCompressedOperation();
1825 VkImageCreateInfo makeCreateImageInfo(const VkFormat format, const ImageType type, const UVec3 &size,
1826 const VkImageUsageFlags usageFlags, const VkImageCreateFlags *createFlags,
1827 const uint32_t levels, const uint32_t layers);
1828 VkDeviceSize getCompressedImageData(const VkFormat format, const UVec3 &size, std::vector<uint8_t> &data,
1829 const uint32_t layer, const uint32_t level);
1830 VkDeviceSize getUncompressedImageData(const VkFormat format, const UVec3 &size, std::vector<uint8_t> &data,
1831 const uint32_t layer, const uint32_t level);
1832 virtual void prepareData();
1833 virtual void prepareVertexBuffer();
1834 virtual void transcodeRead(const VkCommandPool &cmdPool);
1835 virtual void transcodeWrite(const VkCommandPool &cmdPool);
1836 bool verifyDecompression(const VkCommandPool &cmdPool, const std::vector<uint8_t> &refCompressedData,
1837 const de::MovePtr<Image> &resCompressedImage, const uint32_t layer, const uint32_t level,
1838 const UVec3 &mipmapDims);
1839
1840 typedef std::vector<uint8_t> RawDataVector;
1841 typedef SharedPtr<RawDataVector> RawDataPtr;
1842 typedef std::vector<RawDataPtr> LevelData;
1843 typedef std::vector<LevelData> FullImageData;
1844
1845 FullImageData m_srcData;
1846 FullImageData m_dstData;
1847
1848 typedef SharedPtr<Image> ImagePtr;
1849 typedef std::vector<ImagePtr> LevelImages;
1850 typedef std::vector<LevelImages> ImagesArray;
1851
1852 ImagesArray m_uncompressedImages;
1853 MovePtr<Image> m_compressedImage;
1854
1855 VkImageViewUsageCreateInfo m_imageViewUsageKHR;
1856 VkImageViewUsageCreateInfo *m_srcImageViewUsageKHR;
1857 VkImageViewUsageCreateInfo *m_dstImageViewUsageKHR;
1858 std::vector<tcu::UVec3> m_compressedImageResVec;
1859 std::vector<tcu::UVec3> m_uncompressedImageResVec;
1860 VkFormat m_srcFormat;
1861 VkFormat m_dstFormat;
1862 VkImageUsageFlags m_srcImageUsageFlags;
1863 VkImageUsageFlags m_dstImageUsageFlags;
1864 std::vector<tcu::UVec3> m_srcImageResolutions;
1865 std::vector<tcu::UVec3> m_dstImageResolutions;
1866
1867 MovePtr<BufferWithMemory> m_vertexBuffer;
1868 uint32_t m_vertexCount;
1869 VkDeviceSize m_vertexBufferOffset;
1870 };
1871
GraphicsAttachmentsTestInstance(Context & context,const TestParameters & parameters)1872 GraphicsAttachmentsTestInstance::GraphicsAttachmentsTestInstance(Context &context, const TestParameters ¶meters)
1873 : BasicTranscodingTestInstance(context, parameters)
1874 , m_srcData()
1875 , m_dstData()
1876 , m_uncompressedImages()
1877 , m_compressedImage()
1878 , m_imageViewUsageKHR()
1879 , m_srcImageViewUsageKHR()
1880 , m_dstImageViewUsageKHR()
1881 , m_compressedImageResVec()
1882 , m_uncompressedImageResVec()
1883 , m_srcFormat()
1884 , m_dstFormat()
1885 , m_srcImageUsageFlags()
1886 , m_dstImageUsageFlags()
1887 , m_srcImageResolutions()
1888 , m_dstImageResolutions()
1889 , m_vertexBuffer()
1890 , m_vertexCount(0u)
1891 , m_vertexBufferOffset(0ull)
1892 {
1893 }
1894
iterate(void)1895 TestStatus GraphicsAttachmentsTestInstance::iterate(void)
1896 {
1897 const DeviceInterface &vk = m_context.getDeviceInterface();
1898 const VkDevice device = m_context.getDevice();
1899 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1900 const Unique<VkCommandPool> cmdPool(
1901 createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
1902
1903 prepareData();
1904 prepareVertexBuffer();
1905
1906 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
1907 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
1908 DE_ASSERT(m_srcData[levelNdx][layerNdx]->size() == m_dstData[levelNdx][layerNdx]->size());
1909
1910 if (isWriteToCompressedOperation())
1911 transcodeWrite(*cmdPool);
1912 else
1913 transcodeRead(*cmdPool);
1914
1915 bool pass = true;
1916 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
1917 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
1918 if (isWriteToCompressedOperation())
1919 {
1920 if (!verifyDecompression(*cmdPool, *m_srcData[levelNdx][layerNdx], m_compressedImage, levelNdx,
1921 layerNdx, m_compressedImageResVec[levelNdx]))
1922 pass = false;
1923 }
1924 else
1925 {
1926 if (!verifyDecompression(*cmdPool, *m_dstData[levelNdx][layerNdx], m_compressedImage, levelNdx,
1927 layerNdx, m_compressedImageResVec[levelNdx]))
1928 pass = false;
1929 }
1930
1931 if (!pass)
1932 return TestStatus::fail("Images difference detected");
1933 ;
1934
1935 if (m_bASTCErrorColourMismatch)
1936 {
1937 DE_ASSERT(m_parameters.formatIsASTC());
1938 return TestStatusASTCQualityWarning();
1939 }
1940
1941 return TestStatus::pass("Pass");
1942 }
1943
prepareData()1944 void GraphicsAttachmentsTestInstance::prepareData()
1945 {
1946 VkImageViewUsageCreateInfo *imageViewUsageKHRNull = (VkImageViewUsageCreateInfo *)DE_NULL;
1947
1948 m_imageViewUsageKHR = makeImageViewUsageCreateInfo(m_parameters.compressedImageViewUsage);
1949
1950 m_srcImageViewUsageKHR = isWriteToCompressedOperation() ? imageViewUsageKHRNull : &m_imageViewUsageKHR;
1951 m_dstImageViewUsageKHR = isWriteToCompressedOperation() ? &m_imageViewUsageKHR : imageViewUsageKHRNull;
1952
1953 m_srcFormat = isWriteToCompressedOperation() ? m_parameters.formatUncompressed : m_parameters.formatCompressed;
1954 m_dstFormat = isWriteToCompressedOperation() ? m_parameters.formatCompressed : m_parameters.formatUncompressed;
1955
1956 m_srcImageUsageFlags =
1957 isWriteToCompressedOperation() ? m_parameters.uncompressedImageUsage : m_parameters.compressedImageUsage;
1958 m_dstImageUsageFlags =
1959 isWriteToCompressedOperation() ? m_parameters.compressedImageUsage : m_parameters.uncompressedImageUsage;
1960
1961 m_compressedImageResVec = getMipLevelSizes(getLayerDims());
1962 m_uncompressedImageResVec = getCompressedMipLevelSizes(m_parameters.formatCompressed, m_compressedImageResVec);
1963
1964 m_srcImageResolutions = isWriteToCompressedOperation() ? m_uncompressedImageResVec : m_compressedImageResVec;
1965 m_dstImageResolutions = isWriteToCompressedOperation() ? m_compressedImageResVec : m_uncompressedImageResVec;
1966
1967 m_srcData.resize(getLevelCount());
1968 m_dstData.resize(getLevelCount());
1969 m_uncompressedImages.resize(getLevelCount());
1970
1971 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
1972 {
1973 m_srcData[levelNdx].resize(getLayerCount());
1974 m_dstData[levelNdx].resize(getLayerCount());
1975 m_uncompressedImages[levelNdx].resize(getLayerCount());
1976
1977 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
1978 {
1979 m_srcData[levelNdx][layerNdx] = SharedPtr<RawDataVector>(new RawDataVector);
1980 m_dstData[levelNdx][layerNdx] = SharedPtr<RawDataVector>(new RawDataVector);
1981
1982 if (isWriteToCompressedOperation())
1983 {
1984 getUncompressedImageData(m_srcFormat, m_srcImageResolutions[levelNdx], *m_srcData[levelNdx][layerNdx],
1985 layerNdx, levelNdx);
1986
1987 m_dstData[levelNdx][layerNdx]->resize(
1988 (size_t)getCompressedImageSizeInBytes(m_dstFormat, m_dstImageResolutions[levelNdx]));
1989 }
1990 else
1991 {
1992 getCompressedImageData(m_srcFormat, m_srcImageResolutions[levelNdx], *m_srcData[levelNdx][layerNdx],
1993 layerNdx, levelNdx);
1994
1995 m_dstData[levelNdx][layerNdx]->resize(
1996 (size_t)getUncompressedImageSizeInBytes(m_dstFormat, m_dstImageResolutions[levelNdx]));
1997 }
1998
1999 DE_ASSERT(m_srcData[levelNdx][layerNdx]->size() == m_dstData[levelNdx][layerNdx]->size());
2000 }
2001 }
2002 }
2003
prepareVertexBuffer()2004 void GraphicsAttachmentsTestInstance::prepareVertexBuffer()
2005 {
2006 const DeviceInterface &vk = m_context.getDeviceInterface();
2007 const VkDevice device = m_context.getDevice();
2008 Allocator &allocator = m_context.getDefaultAllocator();
2009
2010 const std::vector<tcu::Vec4> vertexArray = createFullscreenQuad();
2011 const size_t vertexBufferSizeInBytes = vertexArray.size() * sizeof(vertexArray[0]);
2012
2013 m_vertexCount = static_cast<uint32_t>(vertexArray.size());
2014 m_vertexBuffer = MovePtr<BufferWithMemory>(new BufferWithMemory(
2015 vk, device, allocator, makeBufferCreateInfo(vertexBufferSizeInBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
2016 MemoryRequirement::HostVisible));
2017
2018 // Upload vertex data
2019 const Allocation &vertexBufferAlloc = m_vertexBuffer->getAllocation();
2020 deMemcpy(vertexBufferAlloc.getHostPtr(), &vertexArray[0], vertexBufferSizeInBytes);
2021 flushAlloc(vk, device, vertexBufferAlloc);
2022 }
2023
transcodeRead(const VkCommandPool & cmdPool)2024 void GraphicsAttachmentsTestInstance::transcodeRead(const VkCommandPool &cmdPool)
2025 {
2026 const DeviceInterface &vk = m_context.getDeviceInterface();
2027 const VkDevice device = m_context.getDevice();
2028 const VkQueue queue = m_context.getUniversalQueue();
2029 Allocator &allocator = m_context.getDefaultAllocator();
2030
2031 const VkImageCreateFlags *imgCreateFlagsOverride = DE_NULL;
2032
2033 const VkImageCreateInfo srcImageCreateInfo =
2034 makeCreateImageInfo(m_srcFormat, m_parameters.imageType, m_srcImageResolutions[0], m_srcImageUsageFlags,
2035 imgCreateFlagsOverride, getLevelCount(), getLayerCount());
2036 MovePtr<Image> srcImage(new Image(vk, device, allocator, srcImageCreateInfo, MemoryRequirement::Any));
2037
2038 const Unique<VkShaderModule> vertShaderModule(
2039 createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
2040 const Unique<VkShaderModule> fragShaderModule(
2041 createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
2042
2043 const Unique<VkRenderPass> renderPass(
2044 vkt::image::makeRenderPass(vk, device, m_parameters.formatUncompressed, m_parameters.formatUncompressed));
2045
2046 const Move<VkDescriptorSetLayout> descriptorSetLayout(
2047 DescriptorSetLayoutBuilder()
2048 .addSingleBinding(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, VK_SHADER_STAGE_FRAGMENT_BIT)
2049 .build(vk, device));
2050 const Move<VkDescriptorPool> descriptorPool(
2051 DescriptorPoolBuilder()
2052 .addType(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
2053 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
2054 const Move<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
2055
2056 const VkExtent2D renderSizeUnused(makeExtent2D(1u, 1u));
2057 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
2058 const Unique<VkPipeline> pipeline(makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule,
2059 *fragShaderModule, renderSizeUnused, 1u, true));
2060
2061 const Unique<VkCommandBuffer> cmdBuffer(
2062 allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2063
2064 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
2065 {
2066 const UVec3 &uncompressedImageRes = m_uncompressedImageResVec[levelNdx];
2067 const UVec3 &srcImageResolution = m_srcImageResolutions[levelNdx];
2068 const UVec3 &dstImageResolution = m_dstImageResolutions[levelNdx];
2069 const size_t srcImageSizeInBytes = m_srcData[levelNdx][0]->size();
2070 const size_t dstImageSizeInBytes = m_dstData[levelNdx][0]->size();
2071 const UVec3 srcImageResBlocked =
2072 getCompressedImageResolutionBlockCeil(m_parameters.formatCompressed, srcImageResolution);
2073
2074 const VkImageCreateInfo dstImageCreateInfo =
2075 makeCreateImageInfo(m_dstFormat, m_parameters.imageType, dstImageResolution, m_dstImageUsageFlags,
2076 imgCreateFlagsOverride, SINGLE_LEVEL, SINGLE_LAYER);
2077
2078 const VkBufferCreateInfo srcImageBufferInfo =
2079 makeBufferCreateInfo(srcImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
2080 const MovePtr<BufferWithMemory> srcImageBuffer = MovePtr<BufferWithMemory>(
2081 new BufferWithMemory(vk, device, allocator, srcImageBufferInfo, MemoryRequirement::HostVisible));
2082
2083 const VkBufferCreateInfo dstImageBufferInfo =
2084 makeBufferCreateInfo(dstImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
2085 MovePtr<BufferWithMemory> dstImageBuffer = MovePtr<BufferWithMemory>(
2086 new BufferWithMemory(vk, device, allocator, dstImageBufferInfo, MemoryRequirement::HostVisible));
2087
2088 const VkExtent2D renderSize(makeExtent2D(uncompressedImageRes.x(), uncompressedImageRes.y()));
2089 const VkViewport viewport = makeViewport(renderSize);
2090 const VkRect2D scissor = makeRect2D(renderSize);
2091
2092 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
2093 {
2094 const VkImageSubresourceRange srcSubresourceRange =
2095 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, SINGLE_LEVEL, layerNdx, SINGLE_LAYER);
2096 const VkImageSubresourceRange dstSubresourceRange =
2097 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, SINGLE_LEVEL, 0u, SINGLE_LAYER);
2098
2099 Move<VkImageView> srcImageView(
2100 makeImageView(vk, device, srcImage->get(), mapImageViewType(m_parameters.imageType),
2101 m_parameters.formatUncompressed, srcSubresourceRange, m_srcImageViewUsageKHR));
2102
2103 de::MovePtr<Image> dstImage(new Image(vk, device, allocator, dstImageCreateInfo, MemoryRequirement::Any));
2104 Move<VkImageView> dstImageView(
2105 makeImageView(vk, device, dstImage->get(), mapImageViewType(m_parameters.imageType),
2106 m_parameters.formatUncompressed, dstSubresourceRange, m_dstImageViewUsageKHR));
2107
2108 const VkBufferImageCopy srcCopyRegion =
2109 makeBufferImageCopy(srcImageResolution.x(), srcImageResolution.y(), levelNdx, layerNdx,
2110 srcImageResBlocked.x(), srcImageResBlocked.y());
2111 const VkBufferMemoryBarrier srcCopyBufferBarrierPre =
2112 makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, srcImageBuffer->get(),
2113 0ull, srcImageSizeInBytes);
2114 const VkImageMemoryBarrier srcCopyImageBarrierPre =
2115 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2116 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, srcImage->get(), srcSubresourceRange);
2117 const VkImageMemoryBarrier srcCopyImageBarrierPost = makeImageMemoryBarrier(
2118 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2119 VK_IMAGE_LAYOUT_GENERAL, srcImage->get(), srcSubresourceRange);
2120 const VkBufferImageCopy dstCopyRegion = makeBufferImageCopy(dstImageResolution.x(), dstImageResolution.y());
2121 const VkImageMemoryBarrier dstInitImageBarrier =
2122 makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2123 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, dstImage->get(), dstSubresourceRange);
2124
2125 const VkImageView attachmentBindInfos[] = {*srcImageView, *dstImageView};
2126 const VkExtent2D framebufferSize(makeExtent2D(dstImageResolution[0], dstImageResolution[1]));
2127 const Move<VkFramebuffer> framebuffer(
2128 makeFramebuffer(vk, device, *renderPass, DE_LENGTH_OF_ARRAY(attachmentBindInfos), attachmentBindInfos,
2129 framebufferSize.width, framebufferSize.height, SINGLE_LAYER));
2130
2131 // Upload source image data
2132 const Allocation &alloc = srcImageBuffer->getAllocation();
2133 deMemcpy(alloc.getHostPtr(), &m_srcData[levelNdx][layerNdx]->at(0), srcImageSizeInBytes);
2134 flushAlloc(vk, device, alloc);
2135
2136 beginCommandBuffer(vk, *cmdBuffer);
2137 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
2138
2139 // Copy buffer to image
2140 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2141 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1u,
2142 &srcCopyBufferBarrierPre, 1u, &srcCopyImageBarrierPre);
2143 vk.cmdCopyBufferToImage(*cmdBuffer, srcImageBuffer->get(), srcImage->get(),
2144 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &srcCopyRegion);
2145 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2146 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
2147 &srcCopyImageBarrierPost);
2148
2149 // Define destination image layout
2150 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2151 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
2152 &dstInitImageBarrier);
2153
2154 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderSize);
2155
2156 const VkDescriptorImageInfo descriptorSrcImageInfo(
2157 makeDescriptorImageInfo(DE_NULL, *srcImageView, VK_IMAGE_LAYOUT_GENERAL));
2158 DescriptorSetUpdateBuilder()
2159 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
2160 VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, &descriptorSrcImageInfo)
2161 .update(vk, device);
2162
2163 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
2164 &descriptorSet.get(), 0u, DE_NULL);
2165 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &m_vertexBuffer->get(), &m_vertexBufferOffset);
2166
2167 vk.cmdSetViewport(*cmdBuffer, 0u, 1u, &viewport);
2168 vk.cmdSetScissor(*cmdBuffer, 0u, 1u, &scissor);
2169
2170 vk.cmdDraw(*cmdBuffer, (uint32_t)m_vertexCount, 1, 0, 0);
2171
2172 endRenderPass(vk, *cmdBuffer);
2173
2174 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
2175 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
2176 VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);
2177
2178 const VkBufferMemoryBarrier copyBarrier =
2179 makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, dstImageBuffer->get(),
2180 0ull, dstImageSizeInBytes);
2181
2182 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2183 VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0,
2184 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL, 1,
2185 &prepareForTransferBarrier);
2186 vk.cmdCopyImageToBuffer(*cmdBuffer, dstImage->get(), VK_IMAGE_LAYOUT_GENERAL, dstImageBuffer->get(), 1u,
2187 &dstCopyRegion);
2188 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
2189 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, ©Barrier, 0,
2190 (const VkImageMemoryBarrier *)DE_NULL);
2191
2192 endCommandBuffer(vk, *cmdBuffer);
2193
2194 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
2195 m_context.resetCommandPoolForVKSC(device, cmdPool);
2196
2197 const Allocation &dstImageBufferAlloc = dstImageBuffer->getAllocation();
2198 invalidateAlloc(vk, device, dstImageBufferAlloc);
2199 deMemcpy(&m_dstData[levelNdx][layerNdx]->at(0), dstImageBufferAlloc.getHostPtr(), dstImageSizeInBytes);
2200 }
2201 }
2202
2203 m_compressedImage = srcImage;
2204 }
2205
transcodeWrite(const VkCommandPool & cmdPool)2206 void GraphicsAttachmentsTestInstance::transcodeWrite(const VkCommandPool &cmdPool)
2207 {
2208 const DeviceInterface &vk = m_context.getDeviceInterface();
2209 const VkDevice device = m_context.getDevice();
2210 const VkQueue queue = m_context.getUniversalQueue();
2211 Allocator &allocator = m_context.getDefaultAllocator();
2212
2213 const VkImageCreateFlags *imgCreateFlagsOverride = DE_NULL;
2214
2215 const VkImageCreateInfo dstImageCreateInfo =
2216 makeCreateImageInfo(m_dstFormat, m_parameters.imageType, m_dstImageResolutions[0], m_dstImageUsageFlags,
2217 imgCreateFlagsOverride, getLevelCount(), getLayerCount());
2218 MovePtr<Image> dstImage(new Image(vk, device, allocator, dstImageCreateInfo, MemoryRequirement::Any));
2219
2220 const Unique<VkShaderModule> vertShaderModule(
2221 createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
2222 const Unique<VkShaderModule> fragShaderModule(
2223 createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
2224
2225 const Unique<VkRenderPass> renderPass(
2226 vkt::image::makeRenderPass(vk, device, m_parameters.formatUncompressed, m_parameters.formatUncompressed));
2227
2228 const Move<VkDescriptorSetLayout> descriptorSetLayout(
2229 DescriptorSetLayoutBuilder()
2230 .addSingleBinding(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, VK_SHADER_STAGE_FRAGMENT_BIT)
2231 .build(vk, device));
2232 const Move<VkDescriptorPool> descriptorPool(
2233 DescriptorPoolBuilder()
2234 .addType(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
2235 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
2236 const Move<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
2237
2238 const VkExtent2D renderSizeUnused(makeExtent2D(1u, 1u));
2239 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
2240 const Unique<VkPipeline> pipeline(makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule,
2241 *fragShaderModule, renderSizeUnused, 1u, true));
2242
2243 const Unique<VkCommandBuffer> cmdBuffer(
2244 allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2245
2246 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
2247 {
2248 const UVec3 &uncompressedImageRes = m_uncompressedImageResVec[levelNdx];
2249 const UVec3 &srcImageResolution = m_srcImageResolutions[levelNdx];
2250 const UVec3 &dstImageResolution = m_dstImageResolutions[levelNdx];
2251 const UVec3 dstImageResBlocked =
2252 getCompressedImageResolutionBlockCeil(m_parameters.formatCompressed, dstImageResolution);
2253 const size_t srcImageSizeInBytes = m_srcData[levelNdx][0]->size();
2254 const size_t dstImageSizeInBytes = m_dstData[levelNdx][0]->size();
2255
2256 const VkImageCreateInfo srcImageCreateInfo =
2257 makeCreateImageInfo(m_srcFormat, m_parameters.imageType, srcImageResolution, m_srcImageUsageFlags,
2258 imgCreateFlagsOverride, SINGLE_LEVEL, SINGLE_LAYER);
2259
2260 const VkExtent2D renderSize(makeExtent2D(uncompressedImageRes.x(), uncompressedImageRes.y()));
2261 const VkViewport viewport = makeViewport(renderSize);
2262 const VkRect2D scissor = makeRect2D(renderSize);
2263
2264 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
2265 {
2266 const VkBufferCreateInfo srcImageBufferInfo =
2267 makeBufferCreateInfo(srcImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
2268 const MovePtr<BufferWithMemory> srcImageBuffer = MovePtr<BufferWithMemory>(
2269 new BufferWithMemory(vk, device, allocator, srcImageBufferInfo, MemoryRequirement::HostVisible));
2270
2271 const VkBufferCreateInfo dstImageBufferInfo =
2272 makeBufferCreateInfo(dstImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
2273 MovePtr<BufferWithMemory> dstImageBuffer = MovePtr<BufferWithMemory>(
2274 new BufferWithMemory(vk, device, allocator, dstImageBufferInfo, MemoryRequirement::HostVisible));
2275
2276 const VkImageSubresourceRange srcSubresourceRange =
2277 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, SINGLE_LEVEL, 0u, SINGLE_LAYER);
2278 const VkImageSubresourceRange dstSubresourceRange =
2279 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, SINGLE_LEVEL, layerNdx, SINGLE_LAYER);
2280
2281 Move<VkImageView> dstImageView(
2282 makeImageView(vk, device, dstImage->get(), mapImageViewType(m_parameters.imageType),
2283 m_parameters.formatUncompressed, dstSubresourceRange, m_dstImageViewUsageKHR));
2284
2285 de::MovePtr<Image> srcImage(new Image(vk, device, allocator, srcImageCreateInfo, MemoryRequirement::Any));
2286 Move<VkImageView> srcImageView(
2287 makeImageView(vk, device, srcImage->get(), mapImageViewType(m_parameters.imageType),
2288 m_parameters.formatUncompressed, srcSubresourceRange, m_srcImageViewUsageKHR));
2289
2290 const VkBufferImageCopy srcCopyRegion =
2291 makeBufferImageCopy(srcImageResolution.x(), srcImageResolution.y(), 0u, 0u);
2292 const VkBufferMemoryBarrier srcCopyBufferBarrierPre =
2293 makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, srcImageBuffer->get(),
2294 0ull, srcImageSizeInBytes);
2295 const VkImageMemoryBarrier srcCopyImageBarrierPre =
2296 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2297 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, srcImage->get(), srcSubresourceRange);
2298 const VkImageMemoryBarrier srcCopyImageBarrierPost = makeImageMemoryBarrier(
2299 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2300 VK_IMAGE_LAYOUT_GENERAL, srcImage->get(), srcSubresourceRange);
2301 const VkBufferImageCopy dstCopyRegion =
2302 makeBufferImageCopy(dstImageResolution.x(), dstImageResolution.y(), levelNdx, layerNdx,
2303 dstImageResBlocked.x(), dstImageResBlocked.y());
2304 const VkImageMemoryBarrier dstInitImageBarrier =
2305 makeImageMemoryBarrier(0u, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2306 VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);
2307
2308 const VkImageView attachmentBindInfos[] = {*srcImageView, *dstImageView};
2309 const VkExtent2D framebufferSize(renderSize);
2310 const Move<VkFramebuffer> framebuffer(
2311 makeFramebuffer(vk, device, *renderPass, DE_LENGTH_OF_ARRAY(attachmentBindInfos), attachmentBindInfos,
2312 framebufferSize.width, framebufferSize.height, SINGLE_LAYER));
2313
2314 // Upload source image data
2315 const Allocation &alloc = srcImageBuffer->getAllocation();
2316 deMemcpy(alloc.getHostPtr(), &m_srcData[levelNdx][layerNdx]->at(0), srcImageSizeInBytes);
2317 flushAlloc(vk, device, alloc);
2318
2319 beginCommandBuffer(vk, *cmdBuffer);
2320 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
2321
2322 // Copy buffer to image
2323 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2324 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1u,
2325 &srcCopyBufferBarrierPre, 1u, &srcCopyImageBarrierPre);
2326 vk.cmdCopyBufferToImage(*cmdBuffer, srcImageBuffer->get(), srcImage->get(),
2327 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &srcCopyRegion);
2328 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2329 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
2330 &srcCopyImageBarrierPost);
2331
2332 // Define destination image layout
2333 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
2334 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0,
2335 (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u, &dstInitImageBarrier);
2336
2337 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderSize);
2338
2339 const VkDescriptorImageInfo descriptorSrcImageInfo(
2340 makeDescriptorImageInfo(DE_NULL, *srcImageView, VK_IMAGE_LAYOUT_GENERAL));
2341 DescriptorSetUpdateBuilder()
2342 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
2343 VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, &descriptorSrcImageInfo)
2344 .update(vk, device);
2345
2346 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
2347 &descriptorSet.get(), 0u, DE_NULL);
2348 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &m_vertexBuffer->get(), &m_vertexBufferOffset);
2349
2350 vk.cmdSetViewport(*cmdBuffer, 0u, 1u, &viewport);
2351 vk.cmdSetScissor(*cmdBuffer, 0u, 1u, &scissor);
2352
2353 vk.cmdDraw(*cmdBuffer, (uint32_t)m_vertexCount, 1, 0, 0);
2354
2355 endRenderPass(vk, *cmdBuffer);
2356
2357 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
2358 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
2359 VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);
2360
2361 const VkBufferMemoryBarrier copyBarrier =
2362 makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, dstImageBuffer->get(),
2363 0ull, dstImageSizeInBytes);
2364
2365 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2366 VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0,
2367 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL, 1,
2368 &prepareForTransferBarrier);
2369 vk.cmdCopyImageToBuffer(*cmdBuffer, dstImage->get(), VK_IMAGE_LAYOUT_GENERAL, dstImageBuffer->get(), 1u,
2370 &dstCopyRegion);
2371 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
2372 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, ©Barrier, 0,
2373 (const VkImageMemoryBarrier *)DE_NULL);
2374
2375 endCommandBuffer(vk, *cmdBuffer);
2376
2377 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
2378 m_context.resetCommandPoolForVKSC(device, cmdPool);
2379
2380 const Allocation &dstImageBufferAlloc = dstImageBuffer->getAllocation();
2381 invalidateAlloc(vk, device, dstImageBufferAlloc);
2382 deMemcpy(&m_dstData[levelNdx][layerNdx]->at(0), dstImageBufferAlloc.getHostPtr(), dstImageSizeInBytes);
2383 }
2384 }
2385
2386 m_compressedImage = dstImage;
2387 }
2388
isWriteToCompressedOperation()2389 bool GraphicsAttachmentsTestInstance::isWriteToCompressedOperation()
2390 {
2391 return (m_parameters.operation == OPERATION_ATTACHMENT_WRITE);
2392 }
2393
makeCreateImageInfo(const VkFormat format,const ImageType type,const UVec3 & size,const VkImageUsageFlags usageFlags,const VkImageCreateFlags * createFlags,const uint32_t levels,const uint32_t layers)2394 VkImageCreateInfo GraphicsAttachmentsTestInstance::makeCreateImageInfo(const VkFormat format, const ImageType type,
2395 const UVec3 &size,
2396 const VkImageUsageFlags usageFlags,
2397 const VkImageCreateFlags *createFlags,
2398 const uint32_t levels, const uint32_t layers)
2399 {
2400 const VkImageType imageType = mapImageType(type);
2401 const VkImageCreateFlags imageCreateFlagsBase = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2402 const VkImageCreateFlags imageCreateFlagsAddOn =
2403 isCompressedFormat(format) ?
2404 VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT :
2405 0;
2406 const VkImageCreateFlags imageCreateFlags =
2407 (createFlags != DE_NULL) ? *createFlags : (imageCreateFlagsBase | imageCreateFlagsAddOn);
2408
2409 VkFormatProperties properties;
2410 m_context.getInstanceInterface().getPhysicalDeviceFormatProperties(m_context.getPhysicalDevice(), format,
2411 &properties);
2412 if ((usageFlags & VK_IMAGE_USAGE_STORAGE_BIT) &&
2413 !(properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
2414 TCU_THROW(NotSupportedError, "Format storage feature not supported");
2415 if ((usageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
2416 !(properties.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
2417 TCU_THROW(NotSupportedError, "Format color attachment feature not supported");
2418 if ((usageFlags & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
2419 !(properties.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) &&
2420 !(properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
2421 TCU_THROW(NotSupportedError,
2422 "Format color/depth/stencil attachment feature not supported for input attachment usage");
2423
2424 const VkImageCreateInfo createImageInfo = {
2425 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2426 DE_NULL, // const void* pNext;
2427 imageCreateFlags, // VkImageCreateFlags flags;
2428 imageType, // VkImageType imageType;
2429 format, // VkFormat format;
2430 makeExtent3D(getLayerSize(type, size)), // VkExtent3D extent;
2431 levels, // uint32_t mipLevels;
2432 layers, // uint32_t arrayLayers;
2433 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2434 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2435 usageFlags, // VkImageUsageFlags usage;
2436 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2437 0u, // uint32_t queueFamilyIndexCount;
2438 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2439 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
2440 };
2441
2442 return createImageInfo;
2443 }
2444
getCompressedImageData(const VkFormat format,const UVec3 & size,std::vector<uint8_t> & data,const uint32_t layer,const uint32_t level)2445 VkDeviceSize GraphicsAttachmentsTestInstance::getCompressedImageData(const VkFormat format, const UVec3 &size,
2446 std::vector<uint8_t> &data, const uint32_t layer,
2447 const uint32_t level)
2448 {
2449 VkDeviceSize sizeBytes = getCompressedImageSizeInBytes(format, size);
2450
2451 data.resize((size_t)sizeBytes);
2452 generateData(&data[0], data.size(), format, layer, level);
2453
2454 return sizeBytes;
2455 }
2456
getUncompressedImageData(const VkFormat format,const UVec3 & size,std::vector<uint8_t> & data,const uint32_t layer,const uint32_t level)2457 VkDeviceSize GraphicsAttachmentsTestInstance::getUncompressedImageData(const VkFormat format, const UVec3 &size,
2458 std::vector<uint8_t> &data, const uint32_t layer,
2459 const uint32_t level)
2460 {
2461 tcu::IVec3 sizeAsIVec3 =
2462 tcu::IVec3(static_cast<int>(size[0]), static_cast<int>(size[1]), static_cast<int>(size[2]));
2463 VkDeviceSize sizeBytes = getImageSizeBytes(sizeAsIVec3, format);
2464
2465 data.resize((size_t)sizeBytes);
2466 generateData(&data[0], data.size(), format, layer, level);
2467
2468 return sizeBytes;
2469 }
2470
// Verifies one level/layer of the compressed image produced by the test.
//
// The host-side reference compressed data (refCompressedData) is uploaded
// into a freshly created compressed image; a render pass then runs the
// "frag_verify" fragment shader, which samples both the reference image and
// the result image (resCompressedImage at level/layer) and writes decoded
// texels into two storage images of m_parameters.formatForVerify. Both
// storage images are copied back to host-visible buffers and compared
// byte-wise with BinaryCompare.
//
// Returns true when the images match; on an ASTC error-colour quality
// warning also sets m_bASTCErrorColourMismatch. On mismatch, a fuzzy
// comparison is run purely to log an error mask, and false is returned.
bool GraphicsAttachmentsTestInstance::verifyDecompression(const VkCommandPool &cmdPool,
                                                          const std::vector<uint8_t> &refCompressedData,
                                                          const de::MovePtr<Image> &resCompressedImage,
                                                          const uint32_t level, const uint32_t layer,
                                                          const UVec3 &mipmapDims)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    Allocator &allocator = m_context.getDefaultAllocator();

    // Alternate sampling layout per layer: odd layers use
    // SHADER_READ_ONLY_OPTIMAL, even layers use GENERAL.
    const bool layoutShaderReadOnly = (layer % 2u) == 1;
    // Mip dimensions expressed in compressed blocks, rounded up.
    const UVec3 mipmapDimsBlocked = getCompressedImageResolutionBlockCeil(m_parameters.formatCompressed, mipmapDims);

    // Full-image range for the locally created single-level/single-layer
    // images; resSubresourceRange selects the subresource under test in the
    // externally supplied result image.
    const VkImageSubresourceRange subresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, SINGLE_LEVEL, 0u, SINGLE_LAYER);
    const VkImageSubresourceRange resSubresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, level, SINGLE_LEVEL, layer, SINGLE_LAYER);

    const VkDeviceSize dstBufferSize = getUncompressedImageSizeInBytes(m_parameters.formatForVerify, mipmapDims);
    const VkImageUsageFlags refSrcImageUsageFlags =
        VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Staging buffer holding the reference compressed bytes.
    const VkBufferCreateInfo refSrcImageBufferInfo(
        makeBufferCreateInfo(refCompressedData.size(), VK_BUFFER_USAGE_TRANSFER_SRC_BIT));
    const MovePtr<BufferWithMemory> refSrcImageBuffer = MovePtr<BufferWithMemory>(
        new BufferWithMemory(vk, device, allocator, refSrcImageBufferInfo, MemoryRequirement::HostVisible));

    // Reference compressed image + sampled view (explicit zero create flags
    // override the default mutable-format flags of makeCreateImageInfo).
    const VkImageCreateFlags refSrcImageCreateFlags = 0;
    const VkImageCreateInfo refSrcImageCreateInfo =
        makeCreateImageInfo(m_parameters.formatCompressed, m_parameters.imageType, mipmapDimsBlocked,
                            refSrcImageUsageFlags, &refSrcImageCreateFlags, SINGLE_LEVEL, SINGLE_LAYER);
    const MovePtr<Image> refSrcImage(new Image(vk, device, allocator, refSrcImageCreateInfo, MemoryRequirement::Any));
    Move<VkImageView> refSrcImageView(makeImageView(vk, device, refSrcImage->get(),
                                                    mapImageViewType(m_parameters.imageType),
                                                    m_parameters.formatCompressed, subresourceRange));

    // Sampled view onto the subresource under test of the result image.
    const VkImageUsageFlags resSrcImageUsageFlags =
        VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    const VkImageViewUsageCreateInfo resSrcImageViewUsageKHR = makeImageViewUsageCreateInfo(resSrcImageUsageFlags);
    Move<VkImageView> resSrcImageView(
        makeImageView(vk, device, resCompressedImage->get(), mapImageViewType(m_parameters.imageType),
                      m_parameters.formatCompressed, resSubresourceRange, &resSrcImageViewUsageKHR));

    // Storage image receiving the shader-decoded reference texels, plus the
    // readback buffer for it.
    const VkImageCreateFlags refDstImageCreateFlags = 0;
    const VkImageUsageFlags refDstImageUsageFlags =
        VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    const VkImageCreateInfo refDstImageCreateInfo =
        makeCreateImageInfo(m_parameters.formatForVerify, m_parameters.imageType, mipmapDims, refDstImageUsageFlags,
                            &refDstImageCreateFlags, SINGLE_LEVEL, SINGLE_LAYER);
    const MovePtr<Image> refDstImage(new Image(vk, device, allocator, refDstImageCreateInfo, MemoryRequirement::Any));
    const Move<VkImageView> refDstImageView(makeImageView(vk, device, refDstImage->get(),
                                                          mapImageViewType(m_parameters.imageType),
                                                          m_parameters.formatForVerify, subresourceRange));
    const VkImageMemoryBarrier refDstInitImageBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
                               refDstImage->get(), subresourceRange);
    const VkBufferCreateInfo refDstBufferInfo(makeBufferCreateInfo(dstBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
    const MovePtr<BufferWithMemory> refDstBuffer = MovePtr<BufferWithMemory>(
        new BufferWithMemory(vk, device, allocator, refDstBufferInfo, MemoryRequirement::HostVisible));

    // Storage image receiving the shader-decoded result texels, plus the
    // readback buffer for it.
    const VkImageCreateFlags resDstImageCreateFlags = 0;
    const VkImageUsageFlags resDstImageUsageFlags =
        VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    const VkImageCreateInfo resDstImageCreateInfo =
        makeCreateImageInfo(m_parameters.formatForVerify, m_parameters.imageType, mipmapDims, resDstImageUsageFlags,
                            &resDstImageCreateFlags, SINGLE_LEVEL, SINGLE_LAYER);
    const MovePtr<Image> resDstImage(new Image(vk, device, allocator, resDstImageCreateInfo, MemoryRequirement::Any));
    const Move<VkImageView> resDstImageView(makeImageView(vk, device, resDstImage->get(),
                                                          mapImageViewType(m_parameters.imageType),
                                                          m_parameters.formatForVerify, subresourceRange));
    const VkImageMemoryBarrier resDstInitImageBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
                               resDstImage->get(), subresourceRange);
    const VkBufferCreateInfo resDstBufferInfo(makeBufferCreateInfo(dstBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
    const MovePtr<BufferWithMemory> resDstBuffer = MovePtr<BufferWithMemory>(
        new BufferWithMemory(vk, device, allocator, resDstBufferInfo, MemoryRequirement::HostVisible));

    // Verification pipeline: pass-through vertex shader + "frag_verify".
    const Unique<VkShaderModule> vertShaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
    const Unique<VkShaderModule> fragShaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("frag_verify"), 0));

    const Unique<VkRenderPass> renderPass(vk::makeRenderPass(vk, device));

    // Bindings 0/1: sampled ref/res compressed images; 2/3: storage outputs.
    const Move<VkDescriptorSetLayout> descriptorSetLayout(
        DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_FRAGMENT_BIT)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_FRAGMENT_BIT)
            .build(vk, device));
    const Move<VkDescriptorPool> descriptorPool(
        DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
            .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
            .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
    const Move<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
    const VkSamplerCreateInfo refSrcSamplerInfo(makeSamplerCreateInfo());
    const Move<VkSampler> refSrcSampler = vk::createSampler(vk, device, &refSrcSamplerInfo);
    const VkSamplerCreateInfo resSrcSamplerInfo(makeSamplerCreateInfo());
    const Move<VkSampler> resSrcSampler = vk::createSampler(vk, device, &resSrcSamplerInfo);
    const VkDescriptorImageInfo descriptorRefSrcImage(makeDescriptorImageInfo(
        *refSrcSampler, *refSrcImageView,
        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL));
    const VkDescriptorImageInfo descriptorResSrcImage(makeDescriptorImageInfo(
        *resSrcSampler, *resSrcImageView,
        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL));
    const VkDescriptorImageInfo descriptorRefDstImage(
        makeDescriptorImageInfo(DE_NULL, *refDstImageView, VK_IMAGE_LAYOUT_GENERAL));
    const VkDescriptorImageInfo descriptorResDstImage(
        makeDescriptorImageInfo(DE_NULL, *resDstImageView, VK_IMAGE_LAYOUT_GENERAL));

    const VkExtent2D renderSize(makeExtent2D(mipmapDims.x(), mipmapDims.y()));
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
    const Unique<VkPipeline> pipeline(makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule,
                                                           *fragShaderModule, renderSize, 0u));

    const Unique<VkCommandBuffer> cmdBuffer(
        allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Copy regions and barriers: the buffer->image upload works on block
    // dimensions; the image->buffer readbacks work on texel dimensions.
    const VkBufferImageCopy copyBufferToImageRegion = makeBufferImageCopy(
        mipmapDimsBlocked.x(), mipmapDimsBlocked.y(), 0u, 0u, mipmapDimsBlocked.x(), mipmapDimsBlocked.y());
    const VkBufferImageCopy copyRegion = makeBufferImageCopy(mipmapDims.x(), mipmapDims.y(), 0u, 0u);
    const VkBufferMemoryBarrier refSrcCopyBufferBarrierPre =
        makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, refSrcImageBuffer->get(), 0ull,
                                refCompressedData.size());
    const VkImageMemoryBarrier refSrcCopyImageBarrierPre =
        makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
                               refSrcImage->get(), subresourceRange);
    const VkImageMemoryBarrier refSrcCopyImageBarrierPost = makeImageMemoryBarrier(
        VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL, refSrcImage->get(),
        subresourceRange);
    const VkImageMemoryBarrier resCompressedImageBarrier = makeImageMemoryBarrier(
        0, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
        layoutShaderReadOnly ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL,
        resCompressedImage->get(), resSubresourceRange);

    // The render pass has no attachments; output goes to the storage images.
    const Move<VkFramebuffer> framebuffer(
        makeFramebuffer(vk, device, *renderPass, 0, DE_NULL, renderSize.width, renderSize.height, getLayerCount()));

    // Upload source image data
    {
        const Allocation &refSrcImageBufferAlloc = refSrcImageBuffer->getAllocation();
        deMemcpy(refSrcImageBufferAlloc.getHostPtr(), &refCompressedData[0], refCompressedData.size());
        flushAlloc(vk, device, refSrcImageBufferAlloc);
    }

    beginCommandBuffer(vk, *cmdBuffer);
    vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

    // Copy buffer to image
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
                          0, (const VkMemoryBarrier *)DE_NULL, 1u, &refSrcCopyBufferBarrierPre, 1u,
                          &refSrcCopyImageBarrierPre);
    vk.cmdCopyBufferToImage(*cmdBuffer, refSrcImageBuffer->get(), refSrcImage->get(), VK_IMAGE_LAYOUT_GENERAL, 1u,
                            &copyBufferToImageRegion);
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0, DE_NULL, 1u,
                          &refSrcCopyImageBarrierPost);

    // Make reference and result images readable
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
                          &refDstInitImageBarrier);
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
                          &resDstInitImageBarrier);
    {
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
                              &resCompressedImageBarrier);
    }

    beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderSize);
    {
        DescriptorSetUpdateBuilder()
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
                         VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorRefSrcImage)
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u),
                         VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorResSrcImage)
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u),
                         VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorRefDstImage)
            .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(3u),
                         VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorResDstImage)
            .update(vk, device);

        vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
                                 &descriptorSet.get(), 0u, DE_NULL);
        vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &m_vertexBuffer->get(), &m_vertexBufferOffset);
        vk.cmdDraw(*cmdBuffer, m_vertexCount, 1, 0, 0);
    }
    endRenderPass(vk, *cmdBuffer);

    // Decompress reference image
    {
        const VkImageMemoryBarrier refDstImageBarrier =
            makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                                   VK_IMAGE_LAYOUT_GENERAL, refDstImage->get(), subresourceRange);

        const VkBufferMemoryBarrier refDstBufferBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, refDstBuffer->get(), 0ull, dstBufferSize);

        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                              (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                              (const VkBufferMemoryBarrier *)DE_NULL, 1, &refDstImageBarrier);
        vk.cmdCopyImageToBuffer(*cmdBuffer, refDstImage->get(), VK_IMAGE_LAYOUT_GENERAL, refDstBuffer->get(), 1u,
                                &copyRegion);
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                              (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &refDstBufferBarrier, 0,
                              (const VkImageMemoryBarrier *)DE_NULL);
    }

    // Decompress result image
    {
        const VkImageMemoryBarrier resDstImageBarrier =
            makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                                   VK_IMAGE_LAYOUT_GENERAL, resDstImage->get(), subresourceRange);

        const VkBufferMemoryBarrier resDstBufferBarrier = makeBufferMemoryBarrier(
            VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, resDstBuffer->get(), 0ull, dstBufferSize);

        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                              (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                              (const VkBufferMemoryBarrier *)DE_NULL, 1, &resDstImageBarrier);
        vk.cmdCopyImageToBuffer(*cmdBuffer, resDstImage->get(), VK_IMAGE_LAYOUT_GENERAL, resDstBuffer->get(), 1u,
                                &copyRegion);
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                              (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &resDstBufferBarrier, 0,
                              (const VkImageMemoryBarrier *)DE_NULL);
    }

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, *cmdBuffer);
    m_context.resetCommandPoolForVKSC(device, cmdPool);

    // Compare decompressed pixel data in reference and result images
    {
        const Allocation &refDstBufferAlloc = refDstBuffer->getAllocation();
        invalidateAlloc(vk, device, refDstBufferAlloc);

        const Allocation &resDstBufferAlloc = resDstBuffer->getAllocation();
        invalidateAlloc(vk, device, resDstBufferAlloc);

        // ASTC decoders may legitimately differ in the error colour they
        // produce for invalid blocks; allow that as a warning only.
        BinaryCompareMode compareMode =
            (m_parameters.formatIsASTC()) ? (COMPARE_MODE_ALLOW_ASTC_ERROR_COLOUR_WARNING) : (COMPARE_MODE_NORMAL);

        BinaryCompareResult res = BinaryCompare(refDstBufferAlloc.getHostPtr(), resDstBufferAlloc.getHostPtr(),
                                                dstBufferSize, m_parameters.formatForVerify, compareMode);

        if (res == COMPARE_RESULT_FAILED)
        {
            // Do fuzzy to log error mask
            invalidateAlloc(vk, device, resDstBufferAlloc);
            invalidateAlloc(vk, device, refDstBufferAlloc);

            tcu::ConstPixelBufferAccess resPixels(mapVkFormat(m_parameters.formatForVerify), renderSize.width,
                                                  renderSize.height, 1u, resDstBufferAlloc.getHostPtr());
            tcu::ConstPixelBufferAccess refPixels(mapVkFormat(m_parameters.formatForVerify), renderSize.width,
                                                  renderSize.height, 1u, refDstBufferAlloc.getHostPtr());

            string comment = string("Image Comparison (level=") + de::toString(level) + string(", layer=") +
                             de::toString(layer) + string(")");

            // Argument order mirrors the operation direction so the log shows
            // reference vs. result consistently.
            if (isWriteToCompressedOperation())
                tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", comment.c_str(), refPixels,
                                  resPixels, 0.001f, tcu::COMPARE_LOG_EVERYTHING);
            else
                tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", comment.c_str(), resPixels,
                                  refPixels, 0.001f, tcu::COMPARE_LOG_EVERYTHING);

            return false;
        }
        else if (res == COMPARE_RESULT_ASTC_QUALITY_WARNING)
        {
            m_bASTCErrorColourMismatch = true;
        }
    }

    return true;
}
2756
// Graphics transcoding test variant that reads the source through a combined
// image sampler and writes via a storage image (see transcodeRead below),
// rather than through attachments as in the base class.
class GraphicsTextureTestInstance : public GraphicsAttachmentsTestInstance
{
public:
    GraphicsTextureTestInstance(Context &context, const TestParameters &parameters);

protected:
    // True when the test writes into the compressed image (texture-write op).
    virtual bool isWriteToCompressedOperation();
    virtual void transcodeRead(const VkCommandPool &cmdPool);
    virtual void transcodeWrite(const VkCommandPool &cmdPool);
};
2767
// Simply forwards to the attachments base class; the texture variant differs
// only in its overridden virtuals.
GraphicsTextureTestInstance::GraphicsTextureTestInstance(Context &context, const TestParameters &parameters)
    : GraphicsAttachmentsTestInstance(context, parameters)
{
}
2772
isWriteToCompressedOperation()2773 bool GraphicsTextureTestInstance::isWriteToCompressedOperation()
2774 {
2775 return (m_parameters.operation == OPERATION_TEXTURE_WRITE);
2776 }
2777
transcodeRead(const VkCommandPool & cmdPool)2778 void GraphicsTextureTestInstance::transcodeRead(const VkCommandPool &cmdPool)
2779 {
2780 const DeviceInterface &vk = m_context.getDeviceInterface();
2781 const VkDevice device = m_context.getDevice();
2782 const VkQueue queue = m_context.getUniversalQueue();
2783 Allocator &allocator = m_context.getDefaultAllocator();
2784
2785 const VkImageCreateFlags *imgCreateFlagsOverride = DE_NULL;
2786
2787 const VkImageCreateInfo srcImageCreateInfo =
2788 makeCreateImageInfo(m_srcFormat, m_parameters.imageType, m_srcImageResolutions[0], m_srcImageUsageFlags,
2789 imgCreateFlagsOverride, getLevelCount(), getLayerCount());
2790 MovePtr<Image> srcImage(new Image(vk, device, allocator, srcImageCreateInfo, MemoryRequirement::Any));
2791
2792 const Unique<VkShaderModule> vertShaderModule(
2793 createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
2794 const Unique<VkShaderModule> fragShaderModule(
2795 createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
2796
2797 const Unique<VkRenderPass> renderPass(vk::makeRenderPass(vk, device));
2798
2799 const Move<VkDescriptorSetLayout> descriptorSetLayout(
2800 DescriptorSetLayoutBuilder()
2801 .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT)
2802 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_FRAGMENT_BIT)
2803 .build(vk, device));
2804 const Move<VkDescriptorPool> descriptorPool(
2805 DescriptorPoolBuilder()
2806 .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2807 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
2808 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
2809 const Move<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
2810
2811 const VkExtent2D renderSizeUnused(makeExtent2D(1u, 1u));
2812 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
2813 const Unique<VkPipeline> pipeline(makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule,
2814 *fragShaderModule, renderSizeUnused, 0u, true));
2815
2816 const Unique<VkCommandBuffer> cmdBuffer(
2817 allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2818
2819 for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
2820 {
2821 const UVec3 &uncompressedImageRes = m_uncompressedImageResVec[levelNdx];
2822 const UVec3 &srcImageResolution = m_srcImageResolutions[levelNdx];
2823 const UVec3 &dstImageResolution = m_dstImageResolutions[levelNdx];
2824 const size_t srcImageSizeInBytes = m_srcData[levelNdx][0]->size();
2825 const size_t dstImageSizeInBytes = m_dstData[levelNdx][0]->size();
2826 const UVec3 srcImageResBlocked =
2827 getCompressedImageResolutionBlockCeil(m_parameters.formatCompressed, srcImageResolution);
2828
2829 const VkImageCreateInfo dstImageCreateInfo =
2830 makeCreateImageInfo(m_dstFormat, m_parameters.imageType, dstImageResolution, m_dstImageUsageFlags,
2831 imgCreateFlagsOverride, SINGLE_LEVEL, SINGLE_LAYER);
2832
2833 const VkBufferCreateInfo srcImageBufferInfo =
2834 makeBufferCreateInfo(srcImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
2835 const MovePtr<BufferWithMemory> srcImageBuffer = MovePtr<BufferWithMemory>(
2836 new BufferWithMemory(vk, device, allocator, srcImageBufferInfo, MemoryRequirement::HostVisible));
2837
2838 const VkBufferCreateInfo dstImageBufferInfo =
2839 makeBufferCreateInfo(dstImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
2840 MovePtr<BufferWithMemory> dstImageBuffer = MovePtr<BufferWithMemory>(
2841 new BufferWithMemory(vk, device, allocator, dstImageBufferInfo, MemoryRequirement::HostVisible));
2842
2843 const VkExtent2D renderSize(makeExtent2D(uncompressedImageRes.x(), uncompressedImageRes.y()));
2844 const VkViewport viewport = makeViewport(renderSize);
2845 const VkRect2D scissor = makeRect2D(renderSize);
2846
2847 for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
2848 {
2849 const VkImageSubresourceRange srcSubresourceRange =
2850 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, SINGLE_LEVEL, layerNdx, SINGLE_LAYER);
2851 const VkImageSubresourceRange dstSubresourceRange =
2852 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, SINGLE_LEVEL, 0u, SINGLE_LAYER);
2853
2854 Move<VkImageView> srcImageView(
2855 makeImageView(vk, device, srcImage->get(), mapImageViewType(m_parameters.imageType),
2856 m_parameters.formatUncompressed, srcSubresourceRange, m_srcImageViewUsageKHR));
2857
2858 de::MovePtr<Image> dstImage(new Image(vk, device, allocator, dstImageCreateInfo, MemoryRequirement::Any));
2859 Move<VkImageView> dstImageView(
2860 makeImageView(vk, device, dstImage->get(), mapImageViewType(m_parameters.imageType),
2861 m_parameters.formatUncompressed, dstSubresourceRange, m_dstImageViewUsageKHR));
2862
2863 const VkSamplerCreateInfo srcSamplerInfo(makeSamplerCreateInfo());
2864 const Move<VkSampler> srcSampler = vk::createSampler(vk, device, &srcSamplerInfo);
2865 const VkDescriptorImageInfo descriptorSrcImage(
2866 makeDescriptorImageInfo(*srcSampler, *srcImageView, VK_IMAGE_LAYOUT_GENERAL));
2867 const VkDescriptorImageInfo descriptorDstImage(
2868 makeDescriptorImageInfo(DE_NULL, *dstImageView, VK_IMAGE_LAYOUT_GENERAL));
2869
2870 const VkBufferImageCopy srcCopyRegion =
2871 makeBufferImageCopy(srcImageResolution.x(), srcImageResolution.y(), levelNdx, layerNdx,
2872 srcImageResBlocked.x(), srcImageResBlocked.y());
2873 const VkBufferMemoryBarrier srcCopyBufferBarrierPre =
2874 makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, srcImageBuffer->get(),
2875 0ull, srcImageSizeInBytes);
2876 const VkImageMemoryBarrier srcCopyImageBarrierPre =
2877 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2878 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, srcImage->get(), srcSubresourceRange);
2879 const VkImageMemoryBarrier srcCopyImageBarrierPost = makeImageMemoryBarrier(
2880 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2881 VK_IMAGE_LAYOUT_GENERAL, srcImage->get(), srcSubresourceRange);
2882 const VkBufferImageCopy dstCopyRegion = makeBufferImageCopy(dstImageResolution.x(), dstImageResolution.y());
2883 const VkImageMemoryBarrier dstInitImageBarrier = makeImageMemoryBarrier(
2884 0u, (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT), VK_IMAGE_LAYOUT_UNDEFINED,
2885 VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);
2886
2887 const VkExtent2D framebufferSize(makeExtent2D(dstImageResolution[0], dstImageResolution[1]));
2888 const Move<VkFramebuffer> framebuffer(makeFramebuffer(
2889 vk, device, *renderPass, 0, DE_NULL, framebufferSize.width, framebufferSize.height, SINGLE_LAYER));
2890
2891 // Upload source image data
2892 const Allocation &alloc = srcImageBuffer->getAllocation();
2893 deMemcpy(alloc.getHostPtr(), &m_srcData[levelNdx][layerNdx]->at(0), srcImageSizeInBytes);
2894 flushAlloc(vk, device, alloc);
2895
2896 beginCommandBuffer(vk, *cmdBuffer);
2897 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
2898
2899 // Copy buffer to image
2900 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2901 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1u,
2902 &srcCopyBufferBarrierPre, 1u, &srcCopyImageBarrierPre);
2903 vk.cmdCopyBufferToImage(*cmdBuffer, srcImageBuffer->get(), srcImage->get(),
2904 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &srcCopyRegion);
2905 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2906 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
2907 &srcCopyImageBarrierPost);
2908
2909 // Define destination image layout
2910 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2911 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
2912 &dstInitImageBarrier);
2913
2914 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderSize);
2915
2916 DescriptorSetUpdateBuilder()
2917 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
2918 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorSrcImage)
2919 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u),
2920 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImage)
2921 .update(vk, device);
2922
2923 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
2924 &descriptorSet.get(), 0u, DE_NULL);
2925 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &m_vertexBuffer->get(), &m_vertexBufferOffset);
2926
2927 vk.cmdSetViewport(*cmdBuffer, 0u, 1u, &viewport);
2928 vk.cmdSetScissor(*cmdBuffer, 0u, 1u, &scissor);
2929
2930 vk.cmdDraw(*cmdBuffer, (uint32_t)m_vertexCount, 1, 0, 0);
2931
2932 endRenderPass(vk, *cmdBuffer);
2933
2934 const VkImageMemoryBarrier prepareForTransferBarrier =
2935 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
2936 VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);
2937
2938 const VkBufferMemoryBarrier copyBarrier =
2939 makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, dstImageBuffer->get(),
2940 0ull, dstImageSizeInBytes);
2941
2942 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2943 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2944 (const VkBufferMemoryBarrier *)DE_NULL, 1, &prepareForTransferBarrier);
2945 vk.cmdCopyImageToBuffer(*cmdBuffer, dstImage->get(), VK_IMAGE_LAYOUT_GENERAL, dstImageBuffer->get(), 1u,
2946 &dstCopyRegion);
2947 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
2948 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, ©Barrier, 0,
2949 (const VkImageMemoryBarrier *)DE_NULL);
2950
2951 endCommandBuffer(vk, *cmdBuffer);
2952
2953 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
2954 m_context.resetCommandPoolForVKSC(device, cmdPool);
2955
2956 const Allocation &dstImageBufferAlloc = dstImageBuffer->getAllocation();
2957 invalidateAlloc(vk, device, dstImageBufferAlloc);
2958 deMemcpy(&m_dstData[levelNdx][layerNdx]->at(0), dstImageBufferAlloc.getHostPtr(), dstImageSizeInBytes);
2959 }
2960 }
2961
2962 m_compressedImage = srcImage;
2963 }
2964
// Transcode by writing through an uncompressed view of the compressed destination image.
// For every mip level and layer: upload reference data into a small uncompressed source
// image, sample it in a fragment shader that stores into an uncompressed (texel-view
// compatible) view of one subresource of the compressed destination image, then read that
// subresource back into m_dstData for later verification. The filled compressed image is
// kept in m_compressedImage.
void GraphicsTextureTestInstance::transcodeWrite(const VkCommandPool &cmdPool)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    Allocator &allocator = m_context.getDefaultAllocator();

    const VkImageCreateFlags *imgCreateFlagsOverride = DE_NULL;

    // Destination image holds the full mip chain and all layers; it is created once and
    // reused across the per-level/per-layer iterations below.
    const VkImageCreateInfo dstImageCreateInfo =
        makeCreateImageInfo(m_dstFormat, m_parameters.imageType, m_dstImageResolutions[0], m_dstImageUsageFlags,
                            imgCreateFlagsOverride, getLevelCount(), getLayerCount());
    MovePtr<Image> dstImage(new Image(vk, device, allocator, dstImageCreateInfo, MemoryRequirement::Any));

    const Unique<VkShaderModule> vertShaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
    const Unique<VkShaderModule> fragShaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));

    const Unique<VkRenderPass> renderPass(vk::makeRenderPass(vk, device));

    // Binding 0: sampled source image; binding 1: storage-image view of the destination.
    const Move<VkDescriptorSetLayout> descriptorSetLayout(
        DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_FRAGMENT_BIT)
            .build(vk, device));
    const Move<VkDescriptorPool> descriptorPool(
        DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
            .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
    const Move<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));

    // Viewport/scissor are dynamic state; the extent passed here is unused by the pipeline.
    const VkExtent2D renderSizeUnused(makeExtent2D(1u, 1u));
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
    const Unique<VkPipeline> pipeline(makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule,
                                                           *fragShaderModule, renderSizeUnused, 0u, true));

    const Unique<VkCommandBuffer> cmdBuffer(
        allocateCommandBuffer(vk, device, cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    for (uint32_t levelNdx = 0; levelNdx < getLevelCount(); ++levelNdx)
    {
        const UVec3 &uncompressedImageRes = m_uncompressedImageResVec[levelNdx];
        const UVec3 &srcImageResolution = m_srcImageResolutions[levelNdx];
        const UVec3 &dstImageResolution = m_dstImageResolutions[levelNdx];
        const size_t srcImageSizeInBytes = m_srcData[levelNdx][0]->size();
        const size_t dstImageSizeInBytes = m_dstData[levelNdx][0]->size();
        // Destination readback copies whole compressed blocks, so round the resolution
        // up to the block grid.
        const UVec3 dstImageResBlocked =
            getCompressedImageResolutionBlockCeil(m_parameters.formatCompressed, dstImageResolution);

        // Per-level source image: single level, single layer, sized to this mip level.
        const VkImageCreateInfo srcImageCreateInfo =
            makeCreateImageInfo(m_srcFormat, m_parameters.imageType, srcImageResolution, m_srcImageUsageFlags,
                                imgCreateFlagsOverride, SINGLE_LEVEL, SINGLE_LAYER);

        const VkExtent2D renderSize(makeExtent2D(uncompressedImageRes.x(), uncompressedImageRes.y()));
        const VkViewport viewport = makeViewport(renderSize);
        const VkRect2D scissor = makeRect2D(renderSize);

        for (uint32_t layerNdx = 0; layerNdx < getLayerCount(); ++layerNdx)
        {
            // Host-visible staging buffers for upload (src) and readback (dst).
            const VkBufferCreateInfo srcImageBufferInfo =
                makeBufferCreateInfo(srcImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
            const MovePtr<BufferWithMemory> srcImageBuffer = MovePtr<BufferWithMemory>(
                new BufferWithMemory(vk, device, allocator, srcImageBufferInfo, MemoryRequirement::HostVisible));

            const VkBufferCreateInfo dstImageBufferInfo =
                makeBufferCreateInfo(dstImageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
            MovePtr<BufferWithMemory> dstImageBuffer = MovePtr<BufferWithMemory>(
                new BufferWithMemory(vk, device, allocator, dstImageBufferInfo, MemoryRequirement::HostVisible));

            // The source image is single-subresource; the destination view selects the
            // current level/layer of the shared compressed image.
            const VkImageSubresourceRange srcSubresourceRange =
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, SINGLE_LEVEL, 0u, SINGLE_LAYER);
            const VkImageSubresourceRange dstSubresourceRange =
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, SINGLE_LEVEL, layerNdx, SINGLE_LAYER);

            Move<VkImageView> dstImageView(
                makeImageView(vk, device, dstImage->get(), mapImageViewType(m_parameters.imageType),
                              m_parameters.formatUncompressed, dstSubresourceRange, m_dstImageViewUsageKHR));

            de::MovePtr<Image> srcImage(new Image(vk, device, allocator, srcImageCreateInfo, MemoryRequirement::Any));
            Move<VkImageView> srcImageView(
                makeImageView(vk, device, srcImage->get(), mapImageViewType(m_parameters.imageType),
                              m_parameters.formatUncompressed, srcSubresourceRange, m_srcImageViewUsageKHR));

            const VkSamplerCreateInfo srcSamplerInfo(makeSamplerCreateInfo());
            const Move<VkSampler> srcSampler = vk::createSampler(vk, device, &srcSamplerInfo);
            const VkDescriptorImageInfo descriptorSrcImage(
                makeDescriptorImageInfo(*srcSampler, *srcImageView, VK_IMAGE_LAYOUT_GENERAL));
            const VkDescriptorImageInfo descriptorDstImage(
                makeDescriptorImageInfo(DE_NULL, *dstImageView, VK_IMAGE_LAYOUT_GENERAL));

            const VkBufferImageCopy srcCopyRegion =
                makeBufferImageCopy(srcImageResolution.x(), srcImageResolution.y(), 0u, 0u);
            // Host write -> transfer read on the upload buffer.
            const VkBufferMemoryBarrier srcCopyBufferBarrierPre =
                makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, srcImageBuffer->get(),
                                        0ull, srcImageSizeInBytes);
            // UNDEFINED -> TRANSFER_DST for the copy, then -> GENERAL for shader sampling.
            const VkImageMemoryBarrier srcCopyImageBarrierPre =
                makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, srcImage->get(), srcSubresourceRange);
            const VkImageMemoryBarrier srcCopyImageBarrierPost = makeImageMemoryBarrier(
                VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                VK_IMAGE_LAYOUT_GENERAL, srcImage->get(), srcSubresourceRange);
            const VkBufferImageCopy dstCopyRegion =
                makeBufferImageCopy(dstImageResolution.x(), dstImageResolution.y(), levelNdx, layerNdx,
                                    dstImageResBlocked.x(), dstImageResBlocked.y());
            const VkImageMemoryBarrier dstInitImageBarrier =
                makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                       VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);

            const VkExtent2D framebufferSize(makeExtent2D(dstImageResolution[0], dstImageResolution[1]));
            // Attachment-less framebuffer: the fragment shader writes via imageStore only.
            const Move<VkFramebuffer> framebuffer(makeFramebuffer(
                vk, device, *renderPass, 0, DE_NULL, framebufferSize.width, framebufferSize.height, SINGLE_LAYER));

            // Upload source image data
            const Allocation &alloc = srcImageBuffer->getAllocation();
            deMemcpy(alloc.getHostPtr(), &m_srcData[levelNdx][layerNdx]->at(0), srcImageSizeInBytes);
            flushAlloc(vk, device, alloc);

            beginCommandBuffer(vk, *cmdBuffer);
            vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

            // Copy buffer to image
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1u,
                                  &srcCopyBufferBarrierPre, 1u, &srcCopyImageBarrierPre);
            vk.cmdCopyBufferToImage(*cmdBuffer, srcImageBuffer->get(), srcImage->get(),
                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &srcCopyRegion);
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                  (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
                                  &srcCopyImageBarrierPost);

            // Define destination image layout
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                  (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0u, DE_NULL, 1u,
                                  &dstInitImageBarrier);

            beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, renderSize);

            // Descriptor update is safe here because the set is not yet in use by pending work
            // (the previous iteration's submit has completed).
            DescriptorSetUpdateBuilder()
                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
                             VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &descriptorSrcImage)
                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u),
                             VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImage)
                .update(vk, device);

            vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
                                     &descriptorSet.get(), 0u, DE_NULL);
            vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &m_vertexBuffer->get(), &m_vertexBufferOffset);

            vk.cmdSetViewport(*cmdBuffer, 0u, 1u, &viewport);
            vk.cmdSetScissor(*cmdBuffer, 0u, 1u, &scissor);

            vk.cmdDraw(*cmdBuffer, (uint32_t)m_vertexCount, 1, 0, 0);

            endRenderPass(vk, *cmdBuffer);

            // Make fragment-shader imageStore writes visible to the transfer readback.
            const VkImageMemoryBarrier prepareForTransferBarrier =
                makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                                       VK_IMAGE_LAYOUT_GENERAL, dstImage->get(), dstSubresourceRange);

            // Make the transfer write visible to the host read below.
            const VkBufferMemoryBarrier copyBarrier =
                makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, dstImageBuffer->get(),
                                        0ull, dstImageSizeInBytes);

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                                  (const VkBufferMemoryBarrier *)DE_NULL, 1, &prepareForTransferBarrier);
            vk.cmdCopyImageToBuffer(*cmdBuffer, dstImage->get(), VK_IMAGE_LAYOUT_GENERAL, dstImageBuffer->get(), 1u,
                                    &dstCopyRegion);
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                                  (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &copyBarrier, 0,
                                  (const VkImageMemoryBarrier *)DE_NULL);

            endCommandBuffer(vk, *cmdBuffer);

            submitCommandsAndWait(vk, device, queue, *cmdBuffer);
            m_context.resetCommandPoolForVKSC(device, cmdPool);

            // Read back the transcoded destination subresource for verification.
            const Allocation &dstImageBufferAlloc = dstImageBuffer->getAllocation();
            invalidateAlloc(vk, device, dstImageBufferAlloc);
            deMemcpy(&m_dstData[levelNdx][layerNdx]->at(0), dstImageBufferAlloc.getHostPtr(), dstImageSizeInBytes);
        }
    }

    // Keep the fully written compressed image for the verification pass.
    m_compressedImage = dstImage;
}
3152
// Test case exercising texel-view-compatible image views (requires VK_KHR_maintenance2,
// see checkSupport). Builds the GLSL programs for the configured shader type/operation
// in initPrograms and instantiates the matching TestInstance via createInstance.
class TexelViewCompatibleCase : public TestCase
{
public:
    TexelViewCompatibleCase(TestContext &testCtx, const std::string &name, const TestParameters &parameters);
    // Generates compute or vertex/fragment shader sources according to m_parameters.
    void initPrograms(SourceCollections &programCollection) const;
    TestInstance *createInstance(Context &context) const;
    // Throws NotSupportedError when required extensions/format support are missing.
    virtual void checkSupport(Context &context) const;

protected:
    const TestParameters m_parameters; // Immutable configuration for this test case.
};
3164
// Stores the test parameters; all real work happens in initPrograms/createInstance.
TexelViewCompatibleCase::TexelViewCompatibleCase(TestContext &testCtx, const std::string &name,
                                                 const TestParameters &parameters)
    : TestCase(testCtx, name)
    , m_parameters(parameters)
{
}
3171
// Generates the GLSL programs for this case. For SHADER_TYPE_COMPUTE it emits a "comp"
// shader performing the configured operation plus a "decompress" shader used for
// verification; for SHADER_TYPE_FRAGMENT it emits "vert", "frag" and "frag_verify".
// Per-image-type GLSL snippets are selected via imageTypeIndex (0 = 1D, 1 = 2D, 2 = 3D).
void TexelViewCompatibleCase::initPrograms(vk::SourceCollections &programCollection) const
{
    DE_ASSERT(m_parameters.size.x() > 0);
    DE_ASSERT(m_parameters.size.y() > 0);

    // 0 for 1D (and anything else), 1 for 2D, 2 for 3D; indexes the snippet arrays below.
    const unsigned int imageTypeIndex =
        (m_parameters.imageType == IMAGE_TYPE_2D) + (m_parameters.imageType == IMAGE_TYPE_3D) * 2;

    switch (m_parameters.shader)
    {
    case SHADER_TYPE_COMPUTE:
    {
        // The Array suffix is normally handled by the getShader* and getGlsl* functions below, but in these tests we never use
        // IMAGE_TYPE_*_ARRAY, so we add the Array suffix manually for the multilayer view case only.
        const std::string imageTypeSuffix = (m_parameters.useMultiLayerViews() ? "Array" : "");
        const std::string imageTypeStr =
            getShaderImageType(mapVkFormat(m_parameters.formatUncompressed), m_parameters.imageType) + imageTypeSuffix;
        const std::string samplerTypeStr =
            getGlslSamplerType(mapVkFormat(m_parameters.formatUncompressed), mapImageViewType(m_parameters.imageType)) +
            imageTypeSuffix;
        const std::string formatQualifierStr =
            getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatUncompressed));
        std::ostringstream src;
        std::ostringstream src_decompress;

        // Common preamble shared by the operation shader and the decompress shader.
        src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
            << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n\n";
        src_decompress << src.str();

        switch (m_parameters.operation)
        {
        case OPERATION_IMAGE_LOAD:
        {
            // Per-image-type invocation-position declarations.
            const char *posDefinitions[3] = {
                // IMAGE_TYPE_1D
                "    highp int pos = int(gl_GlobalInvocationID.x);\n",
                // IMAGE_TYPE_2D
                (m_parameters.useMultiLayerViews() ?
                     "    ivec3 pos = ivec3(gl_GlobalInvocationID);\n" // Z dimension handles layers.
                     :
                     "    ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"),
                // IMAGE_TYPE_3D
                "    ivec3 pos = ivec3(gl_GlobalInvocationID);\n",
            };

            // Copy image 0 to image 1 texel-by-texel via imageLoad/imageStore.
            src << "layout (binding = 0, " << formatQualifierStr << ") readonly uniform " << imageTypeStr
                << " u_image0;\n"
                << "layout (binding = 1, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr
                << " u_image1;\n\n"
                << "void main (void)\n"
                << "{\n"
                << posDefinitions[imageTypeIndex] << "    imageStore(u_image1, pos, imageLoad(u_image0, pos));\n"
                << "}\n";

            break;
        }

        case OPERATION_TEXEL_FETCH:
        {
            // Per-image-type texelFetch + imageStore statements.
            const char *storeDefinitions[3] = {
                // IMAGE_TYPE_1D
                "    imageStore(u_image1, pos.x, texelFetch(u_image0, pos.x, pos.z));\n",
                // IMAGE_TYPE_2D
                (m_parameters.useMultiLayerViews() ?
                     "    imageStore(u_image1, pos, texelFetch(u_image0, pos, 0));\n" // Z dimension handles layers
                     :
                     "    imageStore(u_image1, pos.xy, texelFetch(u_image0, pos.xy, pos.z));\n"),
                // IMAGE_TYPE_3D
                "    imageStore(u_image1, pos, texelFetch(u_image0, pos, pos.z));\n",
            };

            // Read through a sampler with texelFetch, write with imageStore.
            src << "layout (binding = 0) uniform " << samplerTypeStr << " u_image0;\n"
                << "layout (binding = 1, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr
                << " u_image1;\n\n"
                << "void main (void)\n"
                << "{\n"
                << "    ivec3 pos = ivec3(gl_GlobalInvocationID.xyz);\n"
                << storeDefinitions[imageTypeIndex] << "}\n";

            break;
        }

        case OPERATION_TEXTURE:
        {
            // Per-image-type normalized-coordinate declarations for texture().
            const char *coordDefinitions[3] = {
                // IMAGE_TYPE_1D
                "    const int pos = int(gl_GlobalInvocationID.x);\n"
                "    const float coord = (float(gl_GlobalInvocationID.x) + 0.5) / pixels_resolution.x;\n",
                // IMAGE_TYPE_2D
                (m_parameters.useMultiLayerViews() // Z dimension handles layers in the multilayer view case.
                     ?
                     "    const ivec3 pos = ivec3(gl_GlobalInvocationID.xyz);\n"
                     "    const vec2 v2 = (vec2(gl_GlobalInvocationID.xy) + 0.5) / vec2(pixels_resolution);\n"
                     "    const vec3 coord = vec3(v2, pos.z);\n" :
                     "    const ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
                     "    const vec2 coord = (vec2(gl_GlobalInvocationID.xy) + 0.5) / vec2(pixels_resolution);\n"),
                // IMAGE_TYPE_3D
                "    const ivec3 pos = ivec3(gl_GlobalInvocationID.xy, 0);\n"
                "    const vec2 v2 = (vec2(gl_GlobalInvocationID.xy) + 0.5) / vec2(pixels_resolution);\n"
                "    const vec3 coord = vec3(v2, 0.0);\n",
            };

            // Sample at texel centers with texture(), write with imageStore.
            src << "layout (binding = 0) uniform " << samplerTypeStr << " u_image0;\n"
                << "layout (binding = 1, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr
                << " u_image1;\n\n"
                << "void main (void)\n"
                << "{\n"
                << "    const vec2 pixels_resolution = vec2(gl_NumWorkGroups.x, gl_NumWorkGroups.y);\n"
                << coordDefinitions[imageTypeIndex] << "    imageStore(u_image1, pos, texture(u_image0, coord));\n"
                << "}\n";

            break;
        }

        case OPERATION_IMAGE_STORE:
        {
            // Per-image-type invocation-position declarations.
            const char *posDefinitions[3] = {
                // IMAGE_TYPE_1D
                "    highp int pos = int(gl_GlobalInvocationID.x);\n",
                // IMAGE_TYPE_2D
                (m_parameters.useMultiLayerViews() ?
                     "    ivec3 pos = ivec3(gl_GlobalInvocationID);\n" // Z dimension handles layers.
                     :
                     "    ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"),
                // IMAGE_TYPE_3D
                "    ivec3 pos = ivec3(gl_GlobalInvocationID);\n",
            };

            // Store into image 0 (read/write), then copy image 0 into image 2, so the
            // store to the view under test is itself exercised and observable.
            src << "layout (binding = 0, " << formatQualifierStr << ") uniform " << imageTypeStr
                << " u_image0;\n"
                << "layout (binding = 1, " << formatQualifierStr << ") readonly uniform " << imageTypeStr
                << " u_image1;\n"
                << "layout (binding = 2, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr
                << " u_image2;\n\n"
                << "void main (void)\n"
                << "{\n"
                << posDefinitions[imageTypeIndex] << "    imageStore(u_image0, pos, imageLoad(u_image1, pos));\n"
                << "    imageStore(u_image2, pos, imageLoad(u_image0, pos));\n"
                << "}\n";

            break;
        }

        default:
            DE_ASSERT(false);
        }

        // Reference image is a 2D array when the case uses multiple layers of a 2D image.
        const ImageType compressedReferenceImageType =
            (m_parameters.imageType == IMAGE_TYPE_2D && m_parameters.layers > 1u) ? IMAGE_TYPE_2D_ARRAY :
                                                                                    m_parameters.imageType;
        // Coordinate declarations for the decompress (verification) shader.
        const char *coordDefinitions[3] = {
            // IMAGE_TYPE_1D
            "    const highp float coord = float(gl_GlobalInvocationID.x) / pixels_resolution.x;\n"
            "    const highp int pos = int(gl_GlobalInvocationID.x); \n",
            // IMAGE_TYPE_2D
            (m_parameters.useMultiLayerViews() // Z dimension handles layers in the multilayer view case.
                 ?
                 "    const vec2 v2 = vec2(gl_GlobalInvocationID.xy) / vec2(pixels_resolution);\n"
                 "    const vec3 coord = vec3(v2, gl_GlobalInvocationID.z);\n"
                 "    const ivec3 pos = ivec3(gl_GlobalInvocationID); \n" :
                 "    const vec2 coord = vec2(gl_GlobalInvocationID.xy) / vec2(pixels_resolution);\n"
                 "    const ivec2 pos = ivec2(gl_GlobalInvocationID.xy); \n"),
            // IMAGE_TYPE_3D
            "    const vec2 v2 = vec2(gl_GlobalInvocationID.xy) / vec2(pixels_resolution);\n"
            "    const vec3 coord = vec3(v2, 0.0);\n"
            "    const ivec3 pos = ivec3(gl_GlobalInvocationID); \n",
        };

        const auto compressedResSamplerType =
            getGlslSamplerType(mapVkFormat(m_parameters.formatForVerify), mapImageViewType(m_parameters.imageType)) +
            imageTypeSuffix;
        const auto compressedRefSamplerType = getGlslSamplerType(mapVkFormat(m_parameters.formatForVerify),
                                                                 mapImageViewType(compressedReferenceImageType));
        const auto decompressedImageType =
            getShaderImageType(mapVkFormat(m_parameters.formatForVerify), m_parameters.imageType) + imageTypeSuffix;

        // Decompress shader: samples both the test result and the reference compressed
        // images and stores them to plain images for host-side comparison.
        src_decompress << "layout (binding = 0) uniform " << compressedResSamplerType << " compressed_result;\n"
                       << "layout (binding = 1) uniform " << compressedRefSamplerType << " compressed_reference;\n"
                       << "layout (binding = 2, "
                       << getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatForVerify))
                       << ") writeonly uniform " << decompressedImageType << " decompressed_result;\n"
                       << "layout (binding = 3, "
                       << getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatForVerify))
                       << ") writeonly uniform " << decompressedImageType << " decompressed_reference;\n"
                       << "layout (push_constant, std430) uniform PushConstants { uint layer; uint level; };\n\n"
                       << "void main (void)\n"
                       << "{\n"
                       << "    const vec2 pixels_resolution = vec2(gl_NumWorkGroups.xy);\n"
                       << coordDefinitions[imageTypeIndex]
                       << "    imageStore(decompressed_result, pos, texture(compressed_result, coord));\n";
        // When the reference is a 2D array but the result view is not multi-layer, select
        // the layer/level explicitly via push constants.
        if (compressedReferenceImageType == IMAGE_TYPE_2D_ARRAY && !m_parameters.useMultiLayerViews())
            src_decompress << "    imageStore(decompressed_reference, pos, textureLod(compressed_reference, "
                              "vec3(coord, layer), level));\n";
        else
            src_decompress << "    imageStore(decompressed_reference, pos, texture(compressed_reference, coord));\n";
        src_decompress << "}\n";

        programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
        programCollection.glslSources.add("decompress") << glu::ComputeSource(src_decompress.str());

        break;
    }

    case SHADER_TYPE_FRAGMENT:
    {
        // Fragment shaders address one layer at a time, so 2D-array collapses to 2D here.
        ImageType imageTypeForFS =
            (m_parameters.imageType == IMAGE_TYPE_2D_ARRAY) ? IMAGE_TYPE_2D : m_parameters.imageType;

        // Vertex shader
        {
            // Pass-through vertex shader; positions come straight from the vertex buffer.
            std::ostringstream src;
            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n"
                << "layout(location = 0) in vec4 v_in_position;\n"
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << "    gl_Position = v_in_position;\n"
                << "}\n";

            programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
        }

        // Fragment shader
        {
            switch (m_parameters.operation)
            {
            case OPERATION_ATTACHMENT_READ:
            case OPERATION_ATTACHMENT_WRITE:
            {
                // Read an input attachment and forward it to the color output.
                std::ostringstream src;

                const std::string dstTypeStr = getGlslFormatType(m_parameters.formatUncompressed);
                const std::string srcTypeStr = getGlslInputFormatType(m_parameters.formatUncompressed);

                src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n"
                    << "precision highp int;\n"
                    << "precision highp float;\n"
                    << "\n"
                    << "layout (location = 0) out highp " << dstTypeStr << " o_color;\n"
                    << "layout (input_attachment_index = 0, set = 0, binding = 0) uniform highp " << srcTypeStr
                    << " inputImage1;\n"
                    << "\n"
                    << "void main (void)\n"
                    << "{\n"
                    << "    o_color = " << dstTypeStr << "(subpassLoad(inputImage1));\n"
                    << "}\n";

                programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());

                break;
            }

            case OPERATION_TEXTURE_READ:
            case OPERATION_TEXTURE_WRITE:
            {
                // Sample the input image at the fragment position and imageStore the result.
                std::ostringstream src;

                const std::string srcSamplerTypeStr =
                    getGlslSamplerType(mapVkFormat(m_parameters.formatUncompressed), mapImageViewType(imageTypeForFS));
                const std::string dstImageTypeStr =
                    getShaderImageType(mapVkFormat(m_parameters.formatUncompressed), imageTypeForFS);
                const std::string dstFormatQualifierStr =
                    getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatUncompressed));

                // Per-image-type input/output position declarations.
                const char *inDefinitions[3] = {
                    // IMAGE_TYPE_1D
                    "    const highp int out_pos = int(gl_FragCoord.x);\n"
                    "    const highp float pixels_resolution = textureSize(u_imageIn, 0);\n"
                    "    const highp float in_pos = gl_FragCoord.x / pixels_resolution;\n",
                    // IMAGE_TYPE_2D
                    "    const ivec2 out_pos = ivec2(gl_FragCoord.xy);\n"
                    "    const vec2 pixels_resolution = vec2(textureSize(u_imageIn, 0));\n"
                    "    const vec2 in_pos = vec2(gl_FragCoord.xy) / vec2(pixels_resolution);\n",
                    // IMAGE_TYPE_3D
                    "    const ivec3 out_pos = ivec3(gl_FragCoord.xy, 0);\n"
                    "    const vec3 pixels_resolution = vec3(textureSize(u_imageIn, 0));\n"
                    "    const vec3 in_pos = vec3(gl_FragCoord.xy, 0) / vec3(pixels_resolution.xy, 1.0);\n",
                };

                src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n"
                    << "layout (binding = 0) uniform " << srcSamplerTypeStr << " u_imageIn;\n"
                    << "layout (binding = 1, " << dstFormatQualifierStr << ") writeonly uniform " << dstImageTypeStr
                    << " u_imageOut;\n"
                    << "\n"
                    << "void main (void)\n"
                    << "{\n"
                    << inDefinitions[imageTypeIndex]
                    << "    imageStore(u_imageOut, out_pos, texture(u_imageIn, in_pos));\n"
                    << "}\n";

                programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());

                break;
            }

            default:
                DE_ASSERT(false);
            }
        }

        // Verification fragment shader
        {
            // Samples both images under test and stores them to plain output images for
            // host-side comparison.
            std::ostringstream src;

            const std::string samplerType =
                getGlslSamplerType(mapVkFormat(m_parameters.formatForVerify), mapImageViewType(imageTypeForFS));
            const std::string imageTypeStr =
                getShaderImageType(mapVkFormat(m_parameters.formatForVerify), imageTypeForFS);
            const std::string formatQualifierStr =
                getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatForVerify));

            // Position/coordinate declarations for input 0 (also defines out_pos).
            const char *pos0Definitions[3] = {
                // IMAGE_TYPE_1D
                "    const highp int out_pos = int(gl_FragCoord.x);\n"
                "    const highp float pixels_resolution0 = textureSize(u_imageIn0, 0);\n"
                "    const highp float in_pos0 = gl_FragCoord.x / pixels_resolution0;\n",
                // IMAGE_TYPE_2D
                "    const ivec2 out_pos = ivec2(gl_FragCoord.xy);\n"
                "    const vec2 pixels_resolution0 = vec2(textureSize(u_imageIn0, 0));\n"
                "    const vec2 in_pos0 = vec2(gl_FragCoord.xy) / vec2(pixels_resolution0);\n",
                // IMAGE_TYPE_3D
                "    const ivec3 out_pos = ivec3(ivec2(gl_FragCoord.xy), 0);\n"
                "    const vec3 pixels_resolution0 = vec3(textureSize(u_imageIn0, 0));\n"
                "    const vec3 in_pos0 = vec3(gl_FragCoord.xy, 0) / vec3(pixels_resolution0.xy, 1.0);\n",
            };
            // Coordinate declarations for input 1 (reuses out_pos from above).
            const char *pos1Definitions[3] = {
                // IMAGE_TYPE_1D
                "    const highp float pixels_resolution1 = textureSize(u_imageIn1, 0);\n"
                "    const highp float in_pos1 = gl_FragCoord.x / pixels_resolution1;\n",
                // IMAGE_TYPE_2D
                "    const vec2 pixels_resolution1 = vec2(textureSize(u_imageIn1, 0));\n"
                "    const vec2 in_pos1 = vec2(gl_FragCoord.xy) / vec2(pixels_resolution1);\n",
                // IMAGE_TYPE_3D
                "    const vec3 pixels_resolution1 = vec3(textureSize(u_imageIn1, 0));\n"
                "    const vec3 in_pos1 = vec3(gl_FragCoord.xy, 0) / vec3(pixels_resolution1.xy, 1.0);\n",
            };

            src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n"
                << "layout (binding = 0) uniform "
                << getGlslSamplerType(mapVkFormat(m_parameters.formatForVerify),
                                      mapImageViewType(m_parameters.imageType))
                << " u_imageIn0;\n"
                << "layout (binding = 1) uniform "
                << getGlslSamplerType(mapVkFormat(m_parameters.formatForVerify),
                                      mapImageViewType(m_parameters.imageType))
                << " u_imageIn1;\n"
                << "layout (binding = 2, " << getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatForVerify))
                << ") writeonly uniform "
                << getShaderImageType(mapVkFormat(m_parameters.formatForVerify), m_parameters.imageType)
                << " u_imageOut0;\n"
                << "layout (binding = 3, " << getShaderImageFormatQualifier(mapVkFormat(m_parameters.formatForVerify))
                << ") writeonly uniform "
                << getShaderImageType(mapVkFormat(m_parameters.formatForVerify), m_parameters.imageType)
                << " u_imageOut1;\n"
                << "\n"
                << "void main (void)\n"
                << "{\n"
                << pos0Definitions[imageTypeIndex]
                << "    imageStore(u_imageOut0, out_pos, texture(u_imageIn0, in_pos0));\n"
                << "\n"
                << pos1Definitions[imageTypeIndex]
                << "    imageStore(u_imageOut1, out_pos, texture(u_imageIn1, in_pos1));\n"
                << "}\n";

            programCollection.glslSources.add("frag_verify") << glu::FragmentSource(src.str());
        }

        break;
    }

    default:
        DE_ASSERT(false);
    }
}
3546
checkSupport(Context & context) const3547 void TexelViewCompatibleCase::checkSupport(Context &context) const
3548 {
3549 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
3550 const InstanceInterface &vk = context.getInstanceInterface();
3551
3552 context.requireDeviceFunctionality("VK_KHR_maintenance2");
3553
3554 {
3555 VkImageFormatProperties imageFormatProperties;
3556
3557 if (vk.getPhysicalDeviceImageFormatProperties(physicalDevice, m_parameters.formatUncompressed,
3558 mapImageType(m_parameters.imageType), VK_IMAGE_TILING_OPTIMAL,
3559 m_parameters.uncompressedImageUsage, 0u,
3560 &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
3561 TCU_THROW(NotSupportedError, "Operation not supported with this image format");
3562
3563 if (VK_ERROR_FORMAT_NOT_SUPPORTED ==
3564 vk.getPhysicalDeviceImageFormatProperties(
3565 physicalDevice, m_parameters.formatCompressed, mapImageType(m_parameters.imageType),
3566 VK_IMAGE_TILING_OPTIMAL, m_parameters.compressedImageUsage,
3567 VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT | VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
3568 VK_IMAGE_CREATE_EXTENDED_USAGE_BIT,
3569 &imageFormatProperties))
3570 TCU_THROW(NotSupportedError, "Operation not supported with this image format");
3571 }
3572
3573 {
3574 const VkPhysicalDeviceFeatures physicalDeviceFeatures = getPhysicalDeviceFeatures(vk, physicalDevice);
3575
3576 if (deInRange32(m_parameters.formatCompressed, VK_FORMAT_BC1_RGB_UNORM_BLOCK, VK_FORMAT_BC7_SRGB_BLOCK) &&
3577 !physicalDeviceFeatures.textureCompressionBC)
3578 TCU_THROW(NotSupportedError, "textureCompressionBC not supported");
3579
3580 if (deInRange32(m_parameters.formatCompressed, VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK,
3581 VK_FORMAT_EAC_R11G11_SNORM_BLOCK) &&
3582 !physicalDeviceFeatures.textureCompressionETC2)
3583 TCU_THROW(NotSupportedError, "textureCompressionETC2 not supported");
3584
3585 if (m_parameters.formatIsASTC() && !physicalDeviceFeatures.textureCompressionASTC_LDR)
3586 TCU_THROW(NotSupportedError, "textureCompressionASTC_LDR not supported");
3587
3588 if (m_parameters.uncompressedImageUsage & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
3589 {
3590 const VkFormatProperties p =
3591 getPhysicalDeviceFormatProperties(vk, physicalDevice, m_parameters.formatUncompressed);
3592 if ((p.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) == 0)
3593 TCU_THROW(NotSupportedError, "Storage view format not supported");
3594 }
3595 }
3596
3597 #ifndef CTS_USES_VULKANSC
3598 if (m_parameters.useMultiLayerViews())
3599 {
3600 const auto &maint6Properties = context.getMaintenance6Properties();
3601 if (!maint6Properties.blockTexelViewCompatibleMultipleLayers)
3602 TCU_THROW(NotSupportedError, "blockTexelViewCompatibleMultipleLayers not supported");
3603 }
3604 #endif // CTS_USES_VULKANSC
3605 }
3606
createInstance(Context & context) const3607 TestInstance *TexelViewCompatibleCase::createInstance(Context &context) const
3608 {
3609 if (!m_parameters.useMipmaps)
3610 DE_ASSERT(getNumLayers(m_parameters.imageType, m_parameters.size) == 1u);
3611
3612 DE_ASSERT(getLayerSize(m_parameters.imageType, m_parameters.size).x() > 0u);
3613 DE_ASSERT(getLayerSize(m_parameters.imageType, m_parameters.size).y() > 0u);
3614
3615 switch (m_parameters.shader)
3616 {
3617 case SHADER_TYPE_COMPUTE:
3618 {
3619 switch (m_parameters.operation)
3620 {
3621 case OPERATION_IMAGE_LOAD:
3622 case OPERATION_TEXEL_FETCH:
3623 case OPERATION_TEXTURE:
3624 return new BasicComputeTestInstance(context, m_parameters);
3625 case OPERATION_IMAGE_STORE:
3626 return new ImageStoreComputeTestInstance(context, m_parameters);
3627 default:
3628 TCU_THROW(InternalError, "Impossible");
3629 }
3630 }
3631
3632 case SHADER_TYPE_FRAGMENT:
3633 {
3634 switch (m_parameters.operation)
3635 {
3636 case OPERATION_ATTACHMENT_READ:
3637 case OPERATION_ATTACHMENT_WRITE:
3638 return new GraphicsAttachmentsTestInstance(context, m_parameters);
3639
3640 case OPERATION_TEXTURE_READ:
3641 case OPERATION_TEXTURE_WRITE:
3642 return new GraphicsTextureTestInstance(context, m_parameters);
3643
3644 default:
3645 TCU_THROW(InternalError, "Impossible");
3646 }
3647 }
3648
3649 default:
3650 TCU_THROW(InternalError, "Impossible");
3651 }
3652 }
3653
3654 } // namespace
3655
getUnniceResolution(const VkFormat format,const uint32_t layers)3656 static tcu::UVec3 getUnniceResolution(const VkFormat format, const uint32_t layers)
3657 {
3658 const uint32_t unniceMipmapTextureSize[] = {1, 1, 1, 8, 22, 48, 117, 275, 604, 208, 611, 274, 1211};
3659 const uint32_t baseTextureWidth = unniceMipmapTextureSize[getBlockWidth(format)];
3660 const uint32_t baseTextureHeight = unniceMipmapTextureSize[getBlockHeight(format)];
3661 const uint32_t baseTextureWidthLevels = deLog2Floor32(baseTextureWidth);
3662 const uint32_t baseTextureHeightLevels = deLog2Floor32(baseTextureHeight);
3663 const uint32_t widthMultiplier = (baseTextureHeightLevels > baseTextureWidthLevels) ?
3664 1u << (baseTextureHeightLevels - baseTextureWidthLevels) :
3665 1u;
3666 const uint32_t heightMultiplier = (baseTextureWidthLevels > baseTextureHeightLevels) ?
3667 1u << (baseTextureWidthLevels - baseTextureHeightLevels) :
3668 1u;
3669 const uint32_t width = baseTextureWidth * widthMultiplier;
3670 const uint32_t height = baseTextureHeight * heightMultiplier;
3671
3672 // Number of levels should be same on both axises
3673 DE_ASSERT(deLog2Floor32(width) == deLog2Floor32(height));
3674
3675 return tcu::UVec3(width, height, layers);
3676 }
3677
createImageCompressionTranscodingTests(tcu::TestContext & testCtx)3678 tcu::TestCaseGroup *createImageCompressionTranscodingTests(tcu::TestContext &testCtx)
3679 {
3680 struct FormatsArray
3681 {
3682 const VkFormat *formats;
3683 uint32_t count;
3684 };
3685
3686 const bool mipmapness[] = {
3687 false,
3688 true,
3689 };
3690
3691 const std::string pipelineName[SHADER_TYPE_LAST] = {
3692 "compute",
3693 "graphic",
3694 };
3695
3696 const std::string mipmanpnessName[DE_LENGTH_OF_ARRAY(mipmapness)] = {
3697 "basic",
3698 "extended",
3699 };
3700
3701 const std::string operationName[OPERATION_LAST] = {
3702 "image_load", "texel_fetch", "texture", "image_store",
3703 "attachment_read", "attachment_write", "texture_read", "texture_write",
3704 };
3705
3706 struct ImageTypeName
3707 {
3708 ImageType type;
3709 std::string name;
3710 };
3711 ImageTypeName imageTypes[] = {
3712 {IMAGE_TYPE_1D, "1d_image"},
3713 {IMAGE_TYPE_2D, "2d_image"},
3714 {IMAGE_TYPE_3D, "3d_image"},
3715 };
3716
3717 const VkImageUsageFlags baseImageUsageFlagSet = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
3718 const VkImageUsageFlags compressedImageUsageFlags[OPERATION_LAST] = {
3719 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_STORAGE_BIT), // "image_load"
3720 baseImageUsageFlagSet |
3721 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), // "texel_fetch"
3722 baseImageUsageFlagSet |
3723 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), // "texture"
3724 baseImageUsageFlagSet |
3725 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), // "image_store"
3726 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
3727 VK_IMAGE_USAGE_SAMPLED_BIT), // "attachment_read"
3728 baseImageUsageFlagSet |
3729 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT |
3730 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT), // "attachment_write"
3731 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT), // "texture_read"
3732 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT |
3733 VK_IMAGE_USAGE_STORAGE_BIT), // "texture_write"
3734 };
3735
3736 const VkImageUsageFlags compressedImageViewUsageFlags[OPERATION_LAST] = {
3737 compressedImageUsageFlags[0], //"image_load"
3738 compressedImageUsageFlags[1], //"texel_fetch"
3739 compressedImageUsageFlags[2], //"texture"
3740 compressedImageUsageFlags[3], //"image_store"
3741 compressedImageUsageFlags[4], //"attachment_read"
3742 compressedImageUsageFlags[5] | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, //"attachment_write"
3743 compressedImageUsageFlags[6], //"texture_read"
3744 compressedImageUsageFlags[7], //"texture_write"
3745 };
3746
3747 const VkImageUsageFlags uncompressedImageUsageFlags[OPERATION_LAST] = {
3748 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_STORAGE_BIT), //"image_load"
3749 baseImageUsageFlagSet |
3750 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), //"texel_fetch"
3751 baseImageUsageFlagSet |
3752 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), //"texture"
3753 baseImageUsageFlagSet |
3754 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT), //"image_store"
3755 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
3756 VK_IMAGE_USAGE_SAMPLED_BIT), //"attachment_read"
3757 baseImageUsageFlagSet |
3758 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT), //"attachment_write"
3759 baseImageUsageFlagSet |
3760 static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT), //"texture_read"
3761 baseImageUsageFlagSet | static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_SAMPLED_BIT), //"texture_write"
3762 };
3763
3764 const VkFormat compressedFormats64bit[] = {
3765 VK_FORMAT_BC1_RGB_UNORM_BLOCK, VK_FORMAT_BC1_RGB_SRGB_BLOCK, VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
3766 VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_FORMAT_BC4_UNORM_BLOCK, VK_FORMAT_BC4_SNORM_BLOCK,
3767 VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK,
3768 VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, VK_FORMAT_EAC_R11_UNORM_BLOCK, VK_FORMAT_EAC_R11_SNORM_BLOCK,
3769 };
3770
3771 const VkFormat compressedFormats128bit[] = {
3772 VK_FORMAT_BC2_UNORM_BLOCK, VK_FORMAT_BC2_SRGB_BLOCK,
3773 VK_FORMAT_BC3_UNORM_BLOCK, VK_FORMAT_BC3_SRGB_BLOCK,
3774 VK_FORMAT_BC5_UNORM_BLOCK, VK_FORMAT_BC5_SNORM_BLOCK,
3775 VK_FORMAT_BC6H_UFLOAT_BLOCK, VK_FORMAT_BC6H_SFLOAT_BLOCK,
3776 VK_FORMAT_BC7_UNORM_BLOCK, VK_FORMAT_BC7_SRGB_BLOCK,
3777 VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK,
3778 VK_FORMAT_EAC_R11G11_UNORM_BLOCK, VK_FORMAT_EAC_R11G11_SNORM_BLOCK,
3779 VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
3780 VK_FORMAT_ASTC_5x4_UNORM_BLOCK, VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
3781 VK_FORMAT_ASTC_5x5_UNORM_BLOCK, VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
3782 VK_FORMAT_ASTC_6x5_UNORM_BLOCK, VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
3783 VK_FORMAT_ASTC_6x6_UNORM_BLOCK, VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
3784 VK_FORMAT_ASTC_8x5_UNORM_BLOCK, VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
3785 VK_FORMAT_ASTC_8x6_UNORM_BLOCK, VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
3786 VK_FORMAT_ASTC_8x8_UNORM_BLOCK, VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
3787 VK_FORMAT_ASTC_10x5_UNORM_BLOCK, VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
3788 VK_FORMAT_ASTC_10x6_UNORM_BLOCK, VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
3789 VK_FORMAT_ASTC_10x8_UNORM_BLOCK, VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
3790 VK_FORMAT_ASTC_10x10_UNORM_BLOCK, VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
3791 VK_FORMAT_ASTC_12x10_UNORM_BLOCK, VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
3792 VK_FORMAT_ASTC_12x12_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
3793 };
3794
3795 const VkFormat uncompressedFormats64bit[] = {
3796 VK_FORMAT_R16G16B16A16_UNORM, VK_FORMAT_R16G16B16A16_SNORM, VK_FORMAT_R16G16B16A16_USCALED,
3797 VK_FORMAT_R16G16B16A16_SSCALED, VK_FORMAT_R16G16B16A16_UINT, VK_FORMAT_R16G16B16A16_SINT,
3798 //VK_FORMAT_R16G16B16A16_SFLOAT, removed as float views can't preserve NAN/INF/Denorm values
3799 VK_FORMAT_R32G32_UINT, VK_FORMAT_R32G32_SINT,
3800 //VK_FORMAT_R32G32_SFLOAT, removed as float views can't preserve NAN/INF/Denorm values
3801 //VK_FORMAT_R64_UINT, remove from the test it couldn't be used
3802 //VK_FORMAT_R64_SINT, remove from the test it couldn't be used
3803 //VK_FORMAT_R64_SFLOAT, remove from the test it couldn't be used
3804 };
3805
3806 const VkFormat uncompressedFormats128bit[] = {
3807 VK_FORMAT_R32G32B32A32_UINT, VK_FORMAT_R32G32B32A32_SINT,
3808 //VK_FORMAT_R32G32B32A32_SFLOAT, removed as float views can't preserve NAN/INF/Denorm values
3809 //VK_FORMAT_R64G64_UINT, remove from the test it couldn't be used
3810 //VK_FORMAT_R64G64_SINT, remove from the test it couldn't be used
3811 //VK_FORMAT_R64G64_SFLOAT, remove from the test it couldn't be used
3812 };
3813
3814 const FormatsArray formatsCompressedSets[] = {
3815 {compressedFormats64bit, DE_LENGTH_OF_ARRAY(compressedFormats64bit)},
3816 {compressedFormats128bit, DE_LENGTH_OF_ARRAY(compressedFormats128bit)},
3817 };
3818
3819 // Uncompressed formats - floating point formats should not be used in these
3820 // tests as they cannot be relied upon to preserve all possible values in the
3821 // underlying texture data. Refer to the note under the 'VkImageViewCreateInfo'
3822 // section of the specification.
3823 const FormatsArray formatsUncompressedSets[] = {
3824 {uncompressedFormats64bit, DE_LENGTH_OF_ARRAY(uncompressedFormats64bit)},
3825 {uncompressedFormats128bit, DE_LENGTH_OF_ARRAY(uncompressedFormats128bit)},
3826 };
3827
3828 DE_ASSERT(DE_LENGTH_OF_ARRAY(formatsCompressedSets) == DE_LENGTH_OF_ARRAY(formatsUncompressedSets));
3829
3830 MovePtr<tcu::TestCaseGroup> texelViewCompatibleTests(new tcu::TestCaseGroup(testCtx, "texel_view_compatible"));
3831
3832 for (int shaderType = SHADER_TYPE_COMPUTE; shaderType < SHADER_TYPE_LAST; ++shaderType)
3833 {
3834 MovePtr<tcu::TestCaseGroup> pipelineTypeGroup(
3835 new tcu::TestCaseGroup(testCtx, pipelineName[shaderType].c_str()));
3836
3837 for (int mipmapTestNdx = 0; mipmapTestNdx < DE_LENGTH_OF_ARRAY(mipmapness); mipmapTestNdx++)
3838 {
3839 const bool mipmapTest = mipmapness[mipmapTestNdx];
3840
3841 MovePtr<tcu::TestCaseGroup> mipmapTypeGroup(
3842 new tcu::TestCaseGroup(testCtx, mipmanpnessName[mipmapTestNdx].c_str()));
3843
3844 for (int imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageTypes); imageTypeNdx++)
3845 {
3846 MovePtr<tcu::TestCaseGroup> imageTypeGroup(
3847 new tcu::TestCaseGroup(testCtx, imageTypes[imageTypeNdx].name.c_str()));
3848 ImageType imageType = imageTypes[imageTypeNdx].type;
3849
3850 for (int operationNdx = OPERATION_IMAGE_LOAD; operationNdx < OPERATION_LAST; ++operationNdx)
3851 {
3852 if (shaderType != SHADER_TYPE_FRAGMENT &&
3853 deInRange32(operationNdx, OPERATION_ATTACHMENT_READ, OPERATION_TEXTURE_WRITE))
3854 continue;
3855
3856 if (shaderType != SHADER_TYPE_COMPUTE &&
3857 deInRange32(operationNdx, OPERATION_IMAGE_LOAD, OPERATION_IMAGE_STORE))
3858 continue;
3859
3860 if (imageType == IMAGE_TYPE_3D &&
3861 (operationNdx == OPERATION_ATTACHMENT_READ || operationNdx == OPERATION_ATTACHMENT_WRITE))
3862 continue;
3863
3864 MovePtr<tcu::TestCaseGroup> imageOperationGroup(
3865 new tcu::TestCaseGroup(testCtx, operationName[operationNdx].c_str()));
3866
3867 uint32_t depth = 1u + 2 * (imageType == IMAGE_TYPE_3D);
3868 uint32_t imageCount = 2u + (operationNdx == OPERATION_IMAGE_STORE);
3869
3870 // Iterate through bitness groups (64 bit, 128 bit, etc)
3871 for (uint32_t formatBitnessGroup = 0;
3872 formatBitnessGroup < DE_LENGTH_OF_ARRAY(formatsCompressedSets); ++formatBitnessGroup)
3873 {
3874 for (uint32_t formatCompressedNdx = 0;
3875 formatCompressedNdx < formatsCompressedSets[formatBitnessGroup].count;
3876 ++formatCompressedNdx)
3877 {
3878 const VkFormat formatCompressed =
3879 formatsCompressedSets[formatBitnessGroup].formats[formatCompressedNdx];
3880 const std::string compressedFormatGroupName = getFormatShortString(formatCompressed);
3881 MovePtr<tcu::TestCaseGroup> compressedFormatGroup(
3882 new tcu::TestCaseGroup(testCtx, compressedFormatGroupName.c_str()));
3883
3884 for (uint32_t formatUncompressedNdx = 0;
3885 formatUncompressedNdx < formatsUncompressedSets[formatBitnessGroup].count;
3886 ++formatUncompressedNdx)
3887 {
3888 const VkFormat formatUncompressed =
3889 formatsUncompressedSets[formatBitnessGroup].formats[formatUncompressedNdx];
3890 const std::string uncompressedFormatGroupName =
3891 getFormatShortString(formatUncompressed);
3892
3893 const TestParameters parameters = {
3894 static_cast<Operation>(operationNdx),
3895 static_cast<ShaderType>(shaderType),
3896 mipmapTest ? getUnniceResolution(formatCompressed, 1u) : UVec3(64u, 64u, depth),
3897 1u + 2u * mipmapTest *
3898 (imageType !=
3899 IMAGE_TYPE_3D), // 1 or 3 if mipmapTest is true but image is not 3d
3900 imageType,
3901 formatCompressed,
3902 formatUncompressed,
3903 imageCount,
3904 compressedImageUsageFlags[operationNdx],
3905 compressedImageViewUsageFlags[operationNdx],
3906 uncompressedImageUsageFlags[operationNdx],
3907 mipmapTest,
3908 VK_FORMAT_R8G8B8A8_UNORM,
3909 false,
3910 };
3911
3912 compressedFormatGroup->addChild(
3913 new TexelViewCompatibleCase(testCtx, uncompressedFormatGroupName, parameters));
3914 }
3915
3916 imageOperationGroup->addChild(compressedFormatGroup.release());
3917 }
3918 }
3919
3920 imageTypeGroup->addChild(imageOperationGroup.release());
3921 }
3922
3923 mipmapTypeGroup->addChild(imageTypeGroup.release());
3924 }
3925
3926 pipelineTypeGroup->addChild(mipmapTypeGroup.release());
3927 }
3928
3929 texelViewCompatibleTests->addChild(pipelineTypeGroup.release());
3930 }
3931
3932 #ifndef CTS_USES_VULKANSC
3933 {
3934 MovePtr<tcu::TestCaseGroup> multiLayerGroup(new tcu::TestCaseGroup(
3935 testCtx, "multi_layer_views", "Texel view compatible cases using multi-layer image views"));
3936
3937 const int shaderType = SHADER_TYPE_COMPUTE;
3938 const bool mipmapTest = false;
3939 const ImageType imageType = IMAGE_TYPE_2D;
3940
3941 for (int operationNdx = OPERATION_IMAGE_LOAD; operationNdx <= OPERATION_IMAGE_STORE; ++operationNdx)
3942 {
3943 MovePtr<tcu::TestCaseGroup> imageOperationGroup(
3944 new tcu::TestCaseGroup(testCtx, operationName[operationNdx].c_str(), ""));
3945
3946 const uint32_t imageCount = 2u + (operationNdx == OPERATION_IMAGE_STORE);
3947
3948 // Iterate through bitness groups (64 bit, 128 bit, etc)
3949 for (uint32_t formatBitnessGroup = 0; formatBitnessGroup < DE_LENGTH_OF_ARRAY(formatsCompressedSets);
3950 ++formatBitnessGroup)
3951 {
3952 for (uint32_t formatCompressedNdx = 0;
3953 formatCompressedNdx < formatsCompressedSets[formatBitnessGroup].count; ++formatCompressedNdx)
3954 {
3955 const VkFormat formatCompressed =
3956 formatsCompressedSets[formatBitnessGroup].formats[formatCompressedNdx];
3957 const std::string compressedFormatGroupName = getFormatShortString(formatCompressed);
3958 MovePtr<tcu::TestCaseGroup> compressedFormatGroup(
3959 new tcu::TestCaseGroup(testCtx, compressedFormatGroupName.c_str(), ""));
3960
3961 for (uint32_t formatUncompressedNdx = 0;
3962 formatUncompressedNdx < formatsUncompressedSets[formatBitnessGroup].count;
3963 ++formatUncompressedNdx)
3964 {
3965 const VkFormat formatUncompressed =
3966 formatsUncompressedSets[formatBitnessGroup].formats[formatUncompressedNdx];
3967 const std::string uncompressedFormatGroupName = getFormatShortString(formatUncompressed);
3968
3969 const TestParameters parameters = {
3970 static_cast<Operation>(operationNdx),
3971 static_cast<ShaderType>(shaderType),
3972 UVec3(64u, 64u, 1u),
3973 3u,
3974 imageType,
3975 formatCompressed,
3976 formatUncompressed,
3977 imageCount,
3978 compressedImageUsageFlags[operationNdx],
3979 compressedImageViewUsageFlags[operationNdx],
3980 uncompressedImageUsageFlags[operationNdx],
3981 mipmapTest,
3982 VK_FORMAT_R8G8B8A8_UNORM,
3983 true,
3984 };
3985
3986 compressedFormatGroup->addChild(
3987 new TexelViewCompatibleCase(testCtx, uncompressedFormatGroupName, parameters));
3988 }
3989
3990 imageOperationGroup->addChild(compressedFormatGroup.release());
3991 }
3992 }
3993
3994 multiLayerGroup->addChild(imageOperationGroup.release());
3995 }
3996
3997 texelViewCompatibleTests->addChild(multiLayerGroup.release());
3998 }
3999 #endif // CTS_USES_VULKANSC
4000
4001 return texelViewCompatibleTests.release();
4002 }
4003
4004 } // namespace image
4005 } // namespace vkt
4006