1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2022 The Khronos Group Inc.
6 * Copyright (c) 2022 Valve Corporation.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
*//*!
 * \file
 * \brief Tests for sliced image views of 3D images (VK_EXT_image_sliced_view_of_3d).
 *//*--------------------------------------------------------------------*/
24
25 #include "vktPipelineImageSlicedViewOf3DTests.hpp"
26 #include "vktTestCase.hpp"
27
28 #include "vkImageUtil.hpp"
29 #include "vkTypeUtil.hpp"
30 #include "vkObjUtil.hpp"
31 #include "vkCmdUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkImageWithMemory.hpp"
34 #include "vkBufferWithMemory.hpp"
35 #include "vkBarrierUtil.hpp"
36
37 #include "tcuTexture.hpp"
38 #include "tcuImageCompare.hpp"
39 #include "tcuTextureUtil.hpp"
40
41 #include "deRandom.hpp"
42
43 #include <sstream>
44 #include <vector>
45 #include <tuple>
46 #include <set>
47 #include <limits>
48 #include <string>
49 #include <algorithm>
50
51 namespace vkt
52 {
53 namespace pipeline
54 {
55
56 using namespace vk;
57
58 namespace
59 {
60
// Default framebuffer/image dimensions for the tests.
constexpr uint32_t kWidth = 8u;
constexpr uint32_t kHeight = 8u;
// Integer color format so results can be compared exactly (no filtering tolerance needed).
constexpr VkFormat kFormat = VK_FORMAT_R8G8B8A8_UINT;
// A single full-screen triangle is used for the graphics variants.
constexpr uint32_t kVertexCount = 3u;
// Layout used while the images are accessed from shaders and transfer operations.
constexpr auto kUsageLayout = VK_IMAGE_LAYOUT_GENERAL;

// Whether the sliced view is used as the load source or the store destination.
enum class TestType
{
    LOAD = 0,
    STORE,
};
72
73 struct TestParams
74 {
75 TestType testType;
76 VkShaderStageFlagBits stage;
77 uint32_t width;
78 uint32_t height;
79 uint32_t depth;
80 uint32_t offset;
81
82 private:
83 // We want to test both normal ranges and VK_REMAINING_3D_SLICES_EXT, but in the latter case we cannot blindly use the range
84 // value for some operations. See getActualRange() and getSlicedViewRange().
85 uint32_t range;
86
87 public:
88 tcu::Maybe<uint32_t> mipLevel;
89 bool sampleImg;
90
TestParamsvkt::pipeline::__anondc5fc5560111::TestParams91 TestParams(TestType testType_, VkShaderStageFlagBits stage_, uint32_t width_, uint32_t height_, uint32_t depth_,
92 uint32_t offset_, uint32_t range_, const tcu::Maybe<uint32_t> &mipLevel_, bool sampleImg_)
93 : testType(testType_)
94 , stage(stage_)
95 , width(width_)
96 , height(height_)
97 , depth(depth_)
98 , offset(offset_)
99 , range(range_)
100 , mipLevel(mipLevel_)
101 , sampleImg(sampleImg_)
102 {
103 DE_ASSERT(stage == VK_SHADER_STAGE_COMPUTE_BIT || stage == VK_SHADER_STAGE_FRAGMENT_BIT);
104 DE_ASSERT(range > 0u);
105
106 const auto selectedLevel = getSelectedLevel();
107
108 if (useMipMaps())
109 {
110 // To simplify things.
111 DE_ASSERT(width == height && width == depth);
112
113 const auto maxMipLevelCount = getMaxMipLevelCount();
114 DE_ASSERT(selectedLevel < maxMipLevelCount);
115 DE_UNREF(maxMipLevelCount); // For release builds.
116 }
117
118 const uint32_t selectedLevelDepth = (depth >> selectedLevel);
119 DE_UNREF(selectedLevelDepth); // For release builds.
120
121 if (!useRemainingSlices())
122 DE_ASSERT(offset + range <= selectedLevelDepth);
123 else
124 DE_ASSERT(offset < selectedLevelDepth);
125 }
126
getSelectedLevelvkt::pipeline::__anondc5fc5560111::TestParams127 uint32_t getSelectedLevel(void) const
128 {
129 return (useMipMaps() ? mipLevel.get() : 0u);
130 }
131
getFullImageLevelsvkt::pipeline::__anondc5fc5560111::TestParams132 uint32_t getFullImageLevels(void) const
133 {
134 return (useMipMaps() ? getMaxMipLevelCount() : 1u);
135 }
136
getActualRangevkt::pipeline::__anondc5fc5560111::TestParams137 uint32_t getActualRange(void) const
138 {
139 const auto levelDepth = (depth >> getSelectedLevel());
140 DE_ASSERT(levelDepth > 0u);
141
142 return (useRemainingSlices() ? (levelDepth - offset) : range);
143 }
144
getSlicedViewRangevkt::pipeline::__anondc5fc5560111::TestParams145 uint32_t getSlicedViewRange(void) const
146 {
147 return range;
148 }
149
getSliceExtentvkt::pipeline::__anondc5fc5560111::TestParams150 VkExtent3D getSliceExtent(void) const
151 {
152 const auto selectedLevel = getSelectedLevel();
153 const auto extent = makeExtent3D((width >> selectedLevel), (height >> selectedLevel), getActualRange());
154
155 DE_ASSERT(extent.width > 0u);
156 DE_ASSERT(extent.height > 0u);
157 DE_ASSERT(extent.depth > 0u);
158 return extent;
159 }
160
getFullLevelExtentvkt::pipeline::__anondc5fc5560111::TestParams161 VkExtent3D getFullLevelExtent(void) const
162 {
163 const auto selectedLevel = getSelectedLevel();
164 const auto extent = makeExtent3D((width >> selectedLevel), (height >> selectedLevel), (depth >> selectedLevel));
165
166 DE_ASSERT(extent.width > 0u);
167 DE_ASSERT(extent.height > 0u);
168 DE_ASSERT(extent.depth > 0u);
169 return extent;
170 }
171
getMaxMipLevelCountForSizevkt::pipeline::__anondc5fc5560111::TestParams172 static uint32_t getMaxMipLevelCountForSize(uint32_t size)
173 {
174 DE_ASSERT(size <= static_cast<uint32_t>(std::numeric_limits<int32_t>::max()));
175 return static_cast<uint32_t>(deLog2Floor32(static_cast<int32_t>(size)) + 1);
176 }
177
178 private:
getMaxMipLevelCountvkt::pipeline::__anondc5fc5560111::TestParams179 uint32_t getMaxMipLevelCount(void) const
180 {
181 return getMaxMipLevelCountForSize(depth);
182 }
183
useMipMapsvkt::pipeline::__anondc5fc5560111::TestParams184 bool useMipMaps(void) const
185 {
186 return static_cast<bool>(mipLevel);
187 }
188
useRemainingSlicesvkt::pipeline::__anondc5fc5560111::TestParams189 bool useRemainingSlices(void) const
190 {
191 return (range == VK_REMAINING_3D_SLICES_EXT);
192 }
193 };
194
195 class SlicedViewTestCase : public vkt::TestCase
196 {
197 public:
SlicedViewTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams & params)198 SlicedViewTestCase(tcu::TestContext &testCtx, const std::string &name, const TestParams ¶ms)
199 : vkt::TestCase(testCtx, name)
200 , m_params(params)
201 {
202 }
~SlicedViewTestCase(void)203 virtual ~SlicedViewTestCase(void)
204 {
205 }
206
207 void initPrograms(vk::SourceCollections &programCollection) const override;
208 TestInstance *createInstance(Context &context) const override;
209 void checkSupport(Context &context) const override;
210
211 protected:
212 const TestParams m_params;
213 };
214
215 class SlicedViewTestInstance : public vkt::TestInstance
216 {
217 public:
SlicedViewTestInstance(Context & context,const TestParams & params)218 SlicedViewTestInstance(Context &context, const TestParams ¶ms) : vkt::TestInstance(context), m_params(params)
219 {
220 }
~SlicedViewTestInstance(void)221 virtual ~SlicedViewTestInstance(void)
222 {
223 }
224
225 protected:
226 virtual void runPipeline(const DeviceInterface &vkd, const VkDevice device, const VkCommandBuffer cmdBuffer,
227 const VkImageView slicedImage, const VkImageView auxiliarImage);
228 virtual void runGraphicsPipeline(const DeviceInterface &vkd, const VkDevice device,
229 const VkCommandBuffer cmdBuffer);
230 virtual void runComputePipeline(const DeviceInterface &vkd, const VkDevice device, const VkCommandBuffer cmdBuffer);
231 bool runSamplingPipeline(const VkImage fullImage, const VkImageView slicedView, const VkExtent3D &levelExtent);
232
233 const TestParams m_params;
234
235 Move<VkDescriptorSetLayout> m_setLayout;
236 Move<VkDescriptorPool> m_descriptorPool;
237 Move<VkDescriptorSet> m_descriptorSet;
238 Move<VkPipelineLayout> m_pipelineLayout;
239
240 // Only for graphics pipelines.
241 Move<VkRenderPass> m_renderPass;
242 Move<VkFramebuffer> m_framebuffer;
243
244 Move<VkPipeline> m_pipeline;
245 };
246
247 class SlicedViewLoadTestInstance : public SlicedViewTestInstance
248 {
249 public:
SlicedViewLoadTestInstance(Context & context,const TestParams & params)250 SlicedViewLoadTestInstance(Context &context, const TestParams ¶ms) : SlicedViewTestInstance(context, params)
251 {
252 }
~SlicedViewLoadTestInstance(void)253 virtual ~SlicedViewLoadTestInstance(void)
254 {
255 }
256
257 tcu::TestStatus iterate(void);
258 };
259
260 class SlicedViewStoreTestInstance : public SlicedViewTestInstance
261 {
262 public:
SlicedViewStoreTestInstance(Context & context,const TestParams & params)263 SlicedViewStoreTestInstance(Context &context, const TestParams ¶ms) : SlicedViewTestInstance(context, params)
264 {
265 }
~SlicedViewStoreTestInstance(void)266 virtual ~SlicedViewStoreTestInstance(void)
267 {
268 }
269
270 tcu::TestStatus iterate(void);
271 };
272
checkSupport(Context & context) const273 void SlicedViewTestCase::checkSupport(Context &context) const
274 {
275 context.requireDeviceFunctionality(VK_EXT_IMAGE_SLICED_VIEW_OF_3D_EXTENSION_NAME);
276
277 if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
278 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_FRAGMENT_STORES_AND_ATOMICS);
279 }
280
initPrograms(vk::SourceCollections & programCollection) const281 void SlicedViewTestCase::initPrograms(vk::SourceCollections &programCollection) const
282 {
283 const std::string bindings = "layout (rgba8ui, set=0, binding=0) uniform uimage3D slicedImage;\n"
284 "layout (rgba8ui, set=0, binding=1) uniform uimage3D auxiliarImage;\n";
285
286 std::string loadFrom;
287 std::string storeTo;
288
289 // We may need to load stuff from the sliced image into an auxiliary image if we're testing load, or we may need to store stuff
290 // to the sliced image, read from the auxiliary image if we're testing stores.
291 if (m_params.testType == TestType::LOAD)
292 {
293 loadFrom = "slicedImage";
294 storeTo = "auxiliarImage";
295 }
296 else if (m_params.testType == TestType::STORE)
297 {
298 loadFrom = "auxiliarImage";
299 storeTo = "slicedImage";
300 }
301 else
302 DE_ASSERT(false);
303
304 std::ostringstream mainOperation;
305
306 // Note: "coords" will vary depending on the shader stage.
307 mainOperation << " const ivec3 size = imageSize(slicedImage);\n"
308 << " const uvec4 badColor = uvec4(0, 0, 0, 0);\n"
309 << " const uvec4 goodColor = imageLoad(" << loadFrom << ", coords);\n"
310 << " const uvec4 storedColor = ((size.z == " << m_params.getActualRange()
311 << ") ? goodColor : badColor);\n"
312 << " imageStore(" << storeTo << ", coords, storedColor);\n";
313
314 if (m_params.stage == VK_SHADER_STAGE_COMPUTE_BIT)
315 {
316 // For compute, we'll launch as many workgroups as slices, and each invocation will handle one pixel.
317 const auto sliceExtent = m_params.getSliceExtent();
318 std::ostringstream comp;
319 comp << "#version 460\n"
320 << "layout (local_size_x=" << sliceExtent.width << ", local_size_y=" << sliceExtent.height
321 << ", local_size_z=1) in;\n"
322 << bindings << "void main (void) {\n"
323 << " const ivec3 coords = ivec3(ivec2(gl_LocalInvocationID.xy), int(gl_WorkGroupID.x));\n"
324 << mainOperation.str() << "}\n";
325 programCollection.glslSources.add("comp") << glu::ComputeSource(comp.str());
326 }
327 else if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
328 {
329 // For fragment, we'll draw as many instances as slices, and each draw will use a full-screen triangle to generate as many
330 // fragment shader invocations as pixels in the image (the framebuffer needs to have the same size as the storage images).
331 std::ostringstream frag;
332 frag << "#version 460\n"
333 << "layout (location=0) in flat int zCoord;\n"
334 << bindings << "void main (void) {\n"
335 << " const ivec3 coords = ivec3(ivec2(gl_FragCoord.xy), zCoord);\n"
336 << mainOperation.str() << "}\n";
337
338 std::ostringstream vert;
339 vert << "#version 460\n"
340 << "layout (location=0) out flat int zCoord;\n"
341 << "vec2 positions[3] = vec2[](\n"
342 << " vec2(-1.0, -1.0),\n"
343 << " vec2( 3.0, -1.0),\n"
344 << " vec2(-1.0, 3.0)\n"
345 << ");\n"
346 << "void main() {\n"
347 << " gl_Position = vec4(positions[gl_VertexIndex % 3], 0.0, 1.0);\n"
348 << " zCoord = int(gl_InstanceIndex);\n"
349 << "}\n";
350
351 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
352 programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
353 }
354 else
355 {
356 DE_ASSERT(false);
357 }
358
359 if (m_params.sampleImg)
360 {
361 // Prepare a compute shader that will sample the whole level to verify it's available.
362 const auto levelExtent = m_params.getFullLevelExtent();
363
364 std::ostringstream comp;
365 comp << "#version 460\n"
366 << "layout (local_size_x=" << levelExtent.width << ", local_size_y=" << levelExtent.height
367 << ", local_size_z=" << levelExtent.depth << ") in;\n"
368 << "layout (set=0, binding=0) uniform usampler3D combinedSampler;\n" // The image being tested.
369 << "layout (set=0, binding=1, rgba8ui) uniform uimage3D auxiliarImage;\n" // Verification storage image.
370 << "void main() {\n"
371 << " const vec3 levelExtent = vec3(" << levelExtent.width << ", " << levelExtent.height << ", "
372 << levelExtent.depth << ");\n"
373 << " const vec3 sampleCoords = vec3(\n"
374 << " (float(gl_LocalInvocationID.x) + 0.5) / levelExtent.x,\n"
375 << " (float(gl_LocalInvocationID.y) + 0.5) / levelExtent.y,\n"
376 << " (float(gl_LocalInvocationID.z) + 0.5) / levelExtent.z);\n"
377 << " const ivec3 storeCoords = ivec3(int(gl_LocalInvocationID.x), int(gl_LocalInvocationID.y), "
378 "int(gl_LocalInvocationID.z));\n"
379 << " const uvec4 sampledColor = texture(combinedSampler, sampleCoords);\n"
380 << " imageStore(auxiliarImage, storeCoords, sampledColor);\n"
381 << "}\n";
382 programCollection.glslSources.add("compSample") << glu::ComputeSource(comp.str());
383 }
384 }
385
createInstance(Context & context) const386 TestInstance *SlicedViewTestCase::createInstance(Context &context) const
387 {
388 if (m_params.testType == TestType::LOAD)
389 return new SlicedViewLoadTestInstance(context, m_params);
390 if (m_params.testType == TestType::STORE)
391 return new SlicedViewStoreTestInstance(context, m_params);
392
393 DE_ASSERT(false);
394 return nullptr;
395 }
396
makeIVec3(uint32_t width,uint32_t height,uint32_t depth)397 tcu::IVec3 makeIVec3(uint32_t width, uint32_t height, uint32_t depth)
398 {
399 return tcu::IVec3(static_cast<int>(width), static_cast<int>(height), static_cast<int>(depth));
400 }
401
makePixelBufferAccess(const BufferWithMemory & buffer,const tcu::IVec3 & size,const tcu::TextureFormat & format)402 de::MovePtr<tcu::PixelBufferAccess> makePixelBufferAccess(const BufferWithMemory &buffer, const tcu::IVec3 &size,
403 const tcu::TextureFormat &format)
404 {
405 de::MovePtr<tcu::PixelBufferAccess> bufferImage(
406 new tcu::PixelBufferAccess(format, size, buffer.getAllocation().getHostPtr()));
407 return bufferImage;
408 }
409
makeTransferBuffer(const VkExtent3D & extent,const tcu::TextureFormat & format,const DeviceInterface & vkd,const VkDevice device,Allocator & alloc)410 de::MovePtr<BufferWithMemory> makeTransferBuffer(const VkExtent3D &extent, const tcu::TextureFormat &format,
411 const DeviceInterface &vkd, const VkDevice device, Allocator &alloc)
412 {
413 DE_ASSERT(extent.width > 0u);
414 DE_ASSERT(extent.height > 0u);
415 DE_ASSERT(extent.depth > 0u);
416
417 const auto pixelSizeBytes = tcu::getPixelSize(format);
418 const auto pixelCount = extent.width * extent.height * extent.depth;
419 const auto bufferSize = static_cast<VkDeviceSize>(pixelCount) * static_cast<VkDeviceSize>(pixelSizeBytes);
420 const auto bufferUsage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
421 const auto bufferCreateInfo = makeBufferCreateInfo(bufferSize, bufferUsage);
422
423 de::MovePtr<BufferWithMemory> buffer(
424 new BufferWithMemory(vkd, device, alloc, bufferCreateInfo, MemoryRequirement::HostVisible));
425 return buffer;
426 }
427
makeAndFillTransferBuffer(const VkExtent3D & extent,const tcu::TextureFormat & format,const DeviceInterface & vkd,const VkDevice device,Allocator & alloc)428 de::MovePtr<BufferWithMemory> makeAndFillTransferBuffer(const VkExtent3D &extent, const tcu::TextureFormat &format,
429 const DeviceInterface &vkd, const VkDevice device,
430 Allocator &alloc)
431 {
432 DE_ASSERT(tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER);
433
434 auto buffer = makeTransferBuffer(extent, format, vkd, device, alloc);
435 const auto size = makeIVec3(extent.width, extent.height, extent.depth);
436 auto bufferImg = makePixelBufferAccess(*buffer, size, format);
437
438 // Fill image with predefined pattern.
439 for (int z = 0; z < size.z(); ++z)
440 for (int y = 0; y < size.y(); ++y)
441 for (int x = 0; x < size.x(); ++x)
442 {
443 const tcu::UVec4 color(static_cast<uint32_t>(0x80 | x), static_cast<uint32_t>(0x80 | y),
444 static_cast<uint32_t>(0x80 | z), 1u);
445 bufferImg->setPixel(color, x, y, z);
446 }
447
448 return buffer;
449 }
450
make3DImage(const DeviceInterface & vkd,const VkDevice device,Allocator & alloc,const VkFormat format,const VkExtent3D & extent,uint32_t mipLevels,const bool sampling)451 de::MovePtr<ImageWithMemory> make3DImage(const DeviceInterface &vkd, const VkDevice device, Allocator &alloc,
452 const VkFormat format, const VkExtent3D &extent, uint32_t mipLevels,
453 const bool sampling)
454 {
455 const VkImageUsageFlags imageUsage =
456 (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
457 (sampling ? VK_IMAGE_USAGE_SAMPLED_BIT : static_cast<VkImageUsageFlagBits>(0)));
458
459 const VkImageCreateInfo imageCreateInfo = {
460 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
461 nullptr, // const void* pNext;
462 0u, // VkImageCreateFlags flags;
463 VK_IMAGE_TYPE_3D, // VkImageType imageType;
464 format, // VkFormat format;
465 extent, // VkExtent3D extent;
466 mipLevels, // uint32_t mipLevels;
467 1u, // uint32_t arrayLayers;
468 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
469 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
470 imageUsage, // VkImageUsageFlags usage;
471 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
472 0u, // uint32_t queueFamilyIndexCount;
473 nullptr, // const uint32_t* pQueueFamilyIndices;
474 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
475 };
476
477 de::MovePtr<ImageWithMemory> image(
478 new ImageWithMemory(vkd, device, alloc, imageCreateInfo, MemoryRequirement::Any));
479 return image;
480 }
481
makeCommonImageSubresourceRange(uint32_t baseLevel,uint32_t levelCount)482 VkImageSubresourceRange makeCommonImageSubresourceRange(uint32_t baseLevel, uint32_t levelCount)
483 {
484 return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, baseLevel, levelCount, 0u, 1u);
485 }
486
makeCommonImageSubresourceLayers(uint32_t mipLevel)487 VkImageSubresourceLayers makeCommonImageSubresourceLayers(uint32_t mipLevel)
488 {
489 return makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0u, 1u);
490 }
491
make3DImageView(const DeviceInterface & vkd,const VkDevice device,const VkImage image,const VkFormat format,const tcu::Maybe<tcu::UVec2> & slices,uint32_t mipLevel,uint32_t levelCount)492 Move<VkImageView> make3DImageView(const DeviceInterface &vkd, const VkDevice device, const VkImage image,
493 const VkFormat format, const tcu::Maybe<tcu::UVec2> &slices /*x=offset, y=range)*/,
494 uint32_t mipLevel, uint32_t levelCount)
495 {
496 const bool subSlice = static_cast<bool>(slices);
497
498 VkImageViewSlicedCreateInfoEXT sliceCreateInfo = initVulkanStructure();
499
500 if (subSlice)
501 {
502 sliceCreateInfo.sliceOffset = slices->x();
503 sliceCreateInfo.sliceCount = slices->y();
504 }
505
506 const VkImageViewCreateInfo viewCreateInfo = {
507 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
508 (subSlice ? &sliceCreateInfo : nullptr), // const void* pNext;
509 0u, // VkImageViewCreateFlags flags;
510 image, // VkImage image;
511 VK_IMAGE_VIEW_TYPE_3D, // VkImageViewType viewType;
512 format, // VkFormat format;
513 makeComponentMappingRGBA(), // VkComponentMapping components;
514 makeCommonImageSubresourceRange(mipLevel, levelCount), // VkImageSubresourceRange subresourceRange;
515 };
516
517 return createImageView(vkd, device, &viewCreateInfo);
518 }
519
makePipelineStage(VkShaderStageFlagBits shaderStage)520 VkPipelineStageFlagBits makePipelineStage(VkShaderStageFlagBits shaderStage)
521 {
522 if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
523 return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
524 if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
525 return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
526
527 DE_ASSERT(false);
528 return VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM;
529 }
530
runPipeline(const DeviceInterface & vkd,const VkDevice device,const VkCommandBuffer cmdBuffer,const VkImageView slicedImage,const VkImageView auxiliarImage)531 void SlicedViewTestInstance::runPipeline(const DeviceInterface &vkd, const VkDevice device,
532 const VkCommandBuffer cmdBuffer, const VkImageView slicedImage,
533 const VkImageView auxiliarImage)
534 {
535 // The layouts created and used here must match the shaders.
536 const auto descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
537
538 DescriptorSetLayoutBuilder layoutBuilder;
539 layoutBuilder.addSingleBinding(descriptorType, m_params.stage);
540 layoutBuilder.addSingleBinding(descriptorType, m_params.stage);
541 m_setLayout = layoutBuilder.build(vkd, device);
542
543 DescriptorPoolBuilder poolBuilder;
544 poolBuilder.addType(descriptorType, 2u);
545 m_descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
546
547 m_descriptorSet = makeDescriptorSet(vkd, device, m_descriptorPool.get(), m_setLayout.get());
548 m_pipelineLayout = makePipelineLayout(vkd, device, m_setLayout.get());
549
550 DescriptorSetUpdateBuilder updateBuilder;
551 const auto slicedImageDescInfo = makeDescriptorImageInfo(DE_NULL, slicedImage, kUsageLayout);
552 const auto auxiliarImageDescInfo = makeDescriptorImageInfo(DE_NULL, auxiliarImage, kUsageLayout);
553 updateBuilder.writeSingle(m_descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType,
554 &slicedImageDescInfo);
555 updateBuilder.writeSingle(m_descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType,
556 &auxiliarImageDescInfo);
557 updateBuilder.update(vkd, device);
558
559 if (m_params.stage == VK_SHADER_STAGE_FRAGMENT_BIT)
560 runGraphicsPipeline(vkd, device, cmdBuffer);
561 else if (m_params.stage == VK_SHADER_STAGE_COMPUTE_BIT)
562 runComputePipeline(vkd, device, cmdBuffer);
563 else
564 DE_ASSERT(false);
565 }
566
runGraphicsPipeline(const DeviceInterface & vkd,const VkDevice device,const VkCommandBuffer cmdBuffer)567 void SlicedViewTestInstance::runGraphicsPipeline(const DeviceInterface &vkd, const VkDevice device,
568 const VkCommandBuffer cmdBuffer)
569 {
570 const auto sliceExtent = m_params.getSliceExtent();
571 const auto &binaries = m_context.getBinaryCollection();
572 const auto vertShader = createShaderModule(vkd, device, binaries.get("vert"));
573 const auto fragShader = createShaderModule(vkd, device, binaries.get("frag"));
574 const auto extent = makeExtent3D(sliceExtent.width, sliceExtent.height, 1u);
575 const auto bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
576
577 m_renderPass = makeRenderPass(vkd, device);
578 m_framebuffer =
579 makeFramebuffer(vkd, device, m_renderPass.get(), 0u, nullptr, sliceExtent.width, sliceExtent.height);
580
581 const std::vector<VkViewport> viewports(1u, makeViewport(extent));
582 const std::vector<VkRect2D> scissors(1u, makeRect2D(extent));
583
584 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = initVulkanStructure();
585
586 m_pipeline = makeGraphicsPipeline(vkd, device, m_pipelineLayout.get(), vertShader.get(), DE_NULL, DE_NULL, DE_NULL,
587 fragShader.get(), m_renderPass.get(), viewports, scissors,
588 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, 0u, &vertexInputStateCreateInfo);
589
590 beginRenderPass(vkd, cmdBuffer, m_renderPass.get(), m_framebuffer.get(), scissors.at(0u));
591 vkd.cmdBindPipeline(cmdBuffer, bindPoint, m_pipeline.get());
592 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, m_pipelineLayout.get(), 0u, 1u, &m_descriptorSet.get(), 0u,
593 nullptr);
594 vkd.cmdDraw(cmdBuffer, kVertexCount, sliceExtent.depth, 0u, 0u);
595 endRenderPass(vkd, cmdBuffer);
596 }
597
runComputePipeline(const DeviceInterface & vkd,const VkDevice device,const VkCommandBuffer cmdBuffer)598 void SlicedViewTestInstance::runComputePipeline(const DeviceInterface &vkd, const VkDevice device,
599 const VkCommandBuffer cmdBuffer)
600 {
601 const auto bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
602 const auto compShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("comp"));
603
604 m_pipeline = makeComputePipeline(vkd, device, m_pipelineLayout.get(), compShader.get());
605
606 vkd.cmdBindPipeline(cmdBuffer, bindPoint, m_pipeline.get());
607 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, m_pipelineLayout.get(), 0u, 1u, &m_descriptorSet.get(), 0u,
608 nullptr);
609 vkd.cmdDispatch(cmdBuffer, m_params.getActualRange(), 1u, 1u);
610 }
611
// Samples every texel of the selected level through the sliced view ("compSample" shader),
// stores the results into an auxiliary storage image, and compares that image against the
// corresponding level of the full image copied via transfer. Returns true when both match
// exactly. Assumes both images are already in kUsageLayout (VK_IMAGE_LAYOUT_GENERAL).
bool SlicedViewTestInstance::runSamplingPipeline(const VkImage fullImage, const VkImageView slicedView,
                                                 const VkExtent3D &levelExtent)
{
    const auto &vkd = m_context.getDeviceInterface();
    const auto device = m_context.getDevice();
    const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
    const auto queue = m_context.getUniversalQueue();
    auto &alloc = m_context.getDefaultAllocator();

    const auto bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
    const auto shaderStage = VK_SHADER_STAGE_COMPUTE_BIT;
    const auto pipelineStage = makePipelineStage(shaderStage);

    // Command pool and buffer.
    const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
    const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const auto cmdBuffer = cmdBufferPtr.get();

    // Descriptor set layout and pipeline layout.
    // binding=0: sampled sliced view; binding=1: verification storage image (must match "compSample").
    DescriptorSetLayoutBuilder setLayoutBuilder;
    setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, shaderStage);
    setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, shaderStage);
    const auto setLayout = setLayoutBuilder.build(vkd, device);
    const auto pipelineLayout = makePipelineLayout(vkd, device, setLayout.get());

    // Pipeline.
    const auto compShader = createShaderModule(vkd, device, m_context.getBinaryCollection().get("compSample"));
    const auto pipeline = makeComputePipeline(vkd, device, pipelineLayout.get(), compShader.get());

    // Descriptor pool and set.
    DescriptorPoolBuilder poolBuilder;
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
    const auto descriptorPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    const auto descriptorSet = makeDescriptorSet(vkd, device, descriptorPool.get(), setLayout.get());

    // Update descriptor set.
    // Nearest filtering and unnormalized-free lookups at texel centers give exact results
    // for the integer format used by these tests.
    const VkSamplerCreateInfo samplerCreateInfo = {
        VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
        nullptr, // const void* pNext;
        0u, // VkSamplerCreateFlags flags;
        VK_FILTER_NEAREST, // VkFilter magFilter;
        VK_FILTER_NEAREST, // VkFilter minFilter;
        VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
        VK_SAMPLER_ADDRESS_MODE_REPEAT, // VkSamplerAddressMode addressModeU;
        VK_SAMPLER_ADDRESS_MODE_REPEAT, // VkSamplerAddressMode addressModeV;
        VK_SAMPLER_ADDRESS_MODE_REPEAT, // VkSamplerAddressMode addressModeW;
        0.0f, // float mipLodBias;
        VK_FALSE, // VkBool32 anisotropyEnable;
        1.0f, // float maxAnisotropy;
        VK_FALSE, // VkBool32 compareEnable;
        VK_COMPARE_OP_NEVER, // VkCompareOp compareOp;
        0.0f, // float minLod;
        0.0f, // float maxLod;
        VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
        VK_FALSE, // VkBool32 unnormalizedCoordinates;
    };
    const auto sampler = createSampler(vkd, device, &samplerCreateInfo);

    // This will be used as a storage image to verify the sampling results.
    // It has the same size as the full level extent, but only a single level and not sliced.
    const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, levelExtent, 1u, false /*sampling*/);
    const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);

    DescriptorSetUpdateBuilder updateBuilder;
    const auto sampledImageInfo = makeDescriptorImageInfo(sampler.get(), slicedView, kUsageLayout);
    const auto storageImageInfo = makeDescriptorImageInfo(DE_NULL, auxiliarView.get(), kUsageLayout);
    updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(0u),
                              VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &sampledImageInfo);
    updateBuilder.writeSingle(descriptorSet.get(), DescriptorSetUpdateBuilder::Location::binding(1u),
                              VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &storageImageInfo);
    updateBuilder.update(vkd, device);

    // Host-visible buffers: one for the shader-sampled result, one for the transfer-copied reference.
    const auto tcuFormat = mapVkFormat(kFormat);
    const auto verifBuffer = makeTransferBuffer(levelExtent, tcuFormat, vkd, device, alloc);
    const auto refBuffer = makeTransferBuffer(levelExtent, tcuFormat, vkd, device, alloc);

    beginCommandBuffer(vkd, cmdBuffer);

    // Move auxiliar image to the proper layout.
    const auto shaderAccess = (VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT);
    const auto colorSRR = makeCommonImageSubresourceRange(0u, 1u);
    const auto preDispatchBarrier = makeImageMemoryBarrier(0u, shaderAccess, VK_IMAGE_LAYOUT_UNDEFINED,
                                                           VK_IMAGE_LAYOUT_GENERAL, auxiliarImage->get(), colorSRR);
    cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, pipelineStage,
                                  &preDispatchBarrier);

    // A single workgroup covers the whole level (see "compSample" local sizes).
    vkd.cmdBindPipeline(cmdBuffer, bindPoint, pipeline.get());
    vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, pipelineLayout.get(), 0u, 1u, &descriptorSet.get(), 0u, nullptr);
    vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);

    // Sync shader writes before copying to verification buffer.
    const auto preCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preCopyBarrier);

    // Copy storage image to verification buffer.
    const auto colorSRL = makeCommonImageSubresourceLayers(0u);
    const auto copyRegion = makeBufferImageCopy(levelExtent, colorSRL);
    vkd.cmdCopyImageToBuffer(cmdBuffer, auxiliarImage->get(), kUsageLayout, verifBuffer->get(), 1u, &copyRegion);

    // Copy full level from the original full image to the reference buffer to compare them.
    const auto refSRL = makeCommonImageSubresourceLayers(m_params.getSelectedLevel());
    const auto refCopy = makeBufferImageCopy(levelExtent, refSRL);
    vkd.cmdCopyImageToBuffer(cmdBuffer, fullImage, kUsageLayout, refBuffer->get(), 1u, &refCopy);

    // Sync copies to host.
    const auto postCopyBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
    cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                             &postCopyBarrier);

    endCommandBuffer(vkd, cmdBuffer);
    submitCommandsAndWait(vkd, device, queue, cmdBuffer);

    // Compare both buffers.
    auto &verifBufferAlloc = verifBuffer->getAllocation();
    auto &refBufferAlloc = refBuffer->getAllocation();
    invalidateAlloc(vkd, device, verifBufferAlloc);
    invalidateAlloc(vkd, device, refBufferAlloc);

    const auto iExtent = makeIVec3(levelExtent.width, levelExtent.height, levelExtent.depth);
    const tcu::ConstPixelBufferAccess verifAcces(tcuFormat, iExtent, verifBufferAlloc.getHostPtr());
    const tcu::ConstPixelBufferAccess refAccess(tcuFormat, iExtent, refBufferAlloc.getHostPtr());

    // Integer format: require an exact (zero-threshold) match.
    auto &log = m_context.getTestContext().getLog();
    const tcu::UVec4 threshold(0u, 0u, 0u, 0u);
    return tcu::intThresholdCompare(log, "SamplingResult", "", refAccess, verifAcces, threshold,
                                    tcu::COMPARE_LOG_ON_ERROR);
}
740
iterate(void)741 tcu::TestStatus SlicedViewLoadTestInstance::iterate(void)
742 {
743 const auto &vkd = m_context.getDeviceInterface();
744 const auto device = m_context.getDevice();
745 auto &alloc = m_context.getDefaultAllocator();
746 const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
747 const auto queue = m_context.getUniversalQueue();
748
749 const auto mipLevel = m_params.getSelectedLevel();
750 const auto fullExtent = makeExtent3D(m_params.width, m_params.height, m_params.depth);
751 const auto sliceExtent = m_params.getSliceExtent();
752 const auto tcuFormat = mapVkFormat(kFormat);
753 const auto auxiliarBuffer = makeAndFillTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
754 const auto verifBuffer = makeTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
755 const auto fullImage =
756 make3DImage(vkd, device, alloc, kFormat, fullExtent, m_params.getFullImageLevels(), m_params.sampleImg);
757 const auto fullSRR = makeCommonImageSubresourceRange(0u, VK_REMAINING_MIP_LEVELS);
758 const auto singleSRR = makeCommonImageSubresourceRange(0u, 1u);
759 const auto targetLevelSRL = makeCommonImageSubresourceLayers(mipLevel);
760 const auto baseLevelSRL = makeCommonImageSubresourceLayers(0u);
761 const auto clearColor = makeClearValueColorU32(0u, 0u, 0u, 0u);
762 const auto pipelineStage = makePipelineStage(m_params.stage);
763
764 const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
765 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
766 const auto cmdBuffer = cmdBufferPtr.get();
767
768 beginCommandBuffer(vkd, cmdBuffer);
769
770 // Zero-out full image.
771 const auto preClearBarrier =
772 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
773 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
774 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preClearBarrier);
775 vkd.cmdClearColorImage(cmdBuffer, fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColor.color, 1u,
776 &fullSRR);
777
778 // Copy reference buffer to full image at the right offset.
779 const auto preCopyBarrier = makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
780 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
781 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
782 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
783 &preCopyBarrier);
784
785 const VkBufferImageCopy sliceCopy = {
786 0ull, // VkDeviceSize bufferOffset;
787 0u, // uint32_t bufferRowLength;
788 0u, // uint32_t bufferImageHeight;
789 targetLevelSRL, // VkImageSubresourceLayers imageSubresource;
790 makeOffset3D(0, 0, static_cast<int32_t>(m_params.offset)), // VkOffset3D imageOffset;
791 sliceExtent, // VkExtent3D imageExtent;
792 };
793 vkd.cmdCopyBufferToImage(cmdBuffer, auxiliarBuffer->get(), fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
794 1u, &sliceCopy);
795
796 // Move full image to the general layout to be able to read from or write to it from the shader.
797 // Note: read-only optimal is not a valid layout for this.
798 const auto postCopyBarrier =
799 makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
800 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, fullImage->get(), fullSRR);
801 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &postCopyBarrier);
802
803 // Create sliced view of the full image.
804 const auto slicedView =
805 make3DImageView(vkd, device, fullImage->get(), kFormat,
806 tcu::just(tcu::UVec2(m_params.offset, m_params.getSlicedViewRange())), mipLevel, 1u);
807
808 // Create storage image and view with reduced size (this will be the destination image in the shader).
809 const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, sliceExtent, 1u, false /*sampling*/);
810 const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);
811
812 // Move the auxiliar image to the general layout for writing.
813 const auto preWriteBarrier = makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
814 kUsageLayout, auxiliarImage->get(), singleSRR);
815 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, pipelineStage, &preWriteBarrier);
816
817 // Run load operation.
818 runPipeline(vkd, device, cmdBuffer, slicedView.get(), auxiliarView.get());
819
820 // Copy auxiliar image (result) to verification buffer.
821 const auto preVerifCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
822 cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preVerifCopyBarrier);
823 const auto verifCopyRegion = makeBufferImageCopy(sliceExtent, baseLevelSRL);
824 vkd.cmdCopyImageToBuffer(cmdBuffer, auxiliarImage->get(), kUsageLayout, verifBuffer->get(), 1u, &verifCopyRegion);
825
826 // Sync verification buffer with host reads.
827 const auto preHostBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
828 cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
829 &preHostBarrier);
830
831 endCommandBuffer(vkd, cmdBuffer);
832 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
833
834 const auto sliceExtentIV3 = makeIVec3(sliceExtent.width, sliceExtent.height, sliceExtent.depth);
835 auto &auxiliarBufferAlloc = auxiliarBuffer->getAllocation();
836 auto &verifBufferAlloc = verifBuffer->getAllocation();
837
838 // Invalidate verification buffer allocation.
839 invalidateAlloc(vkd, device, verifBufferAlloc);
840
841 // Compare auxiliar buffer and verification buffer.
842 const tcu::ConstPixelBufferAccess initialImage(tcuFormat, sliceExtentIV3, auxiliarBufferAlloc.getHostPtr());
843 const tcu::ConstPixelBufferAccess finalImage(tcuFormat, sliceExtentIV3, verifBufferAlloc.getHostPtr());
844
845 auto &log = m_context.getTestContext().getLog();
846 const tcu::UVec4 threshold(0u, 0u, 0u, 0u);
847
848 if (!tcu::intThresholdCompare(log, "Comparison", "Comparison of reference and result", initialImage, finalImage,
849 threshold, tcu::COMPARE_LOG_ON_ERROR))
850 return tcu::TestStatus::fail("Image comparison failed; check log for details");
851
852 if (m_params.sampleImg && !runSamplingPipeline(fullImage->get(), slicedView.get(), m_params.getFullLevelExtent()))
853 return tcu::TestStatus::fail("Sampling full level failed; check log for details");
854
855 return tcu::TestStatus::pass("Pass");
856 }
857
iterate(void)858 tcu::TestStatus SlicedViewStoreTestInstance::iterate(void)
859 {
860 const auto &vkd = m_context.getDeviceInterface();
861 const auto device = m_context.getDevice();
862 auto &alloc = m_context.getDefaultAllocator();
863 const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
864 const auto queue = m_context.getUniversalQueue();
865
866 const auto mipLevel = m_params.getSelectedLevel();
867 const auto fullExtent = makeExtent3D(m_params.width, m_params.height, m_params.depth);
868 const auto sliceExtent = m_params.getSliceExtent();
869 const auto tcuFormat = mapVkFormat(kFormat);
870 const auto auxiliarBuffer = makeAndFillTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
871 const auto verifBuffer = makeTransferBuffer(sliceExtent, tcuFormat, vkd, device, alloc);
872 const auto fullImage =
873 make3DImage(vkd, device, alloc, kFormat, fullExtent, m_params.getFullImageLevels(), m_params.sampleImg);
874 const auto fullSRR = makeCommonImageSubresourceRange(0u, VK_REMAINING_MIP_LEVELS);
875 const auto singleSRR = makeCommonImageSubresourceRange(0u, 1u);
876 const auto targetLevelSRL = makeCommonImageSubresourceLayers(mipLevel);
877 const auto baseLevelSRL = makeCommonImageSubresourceLayers(0u);
878 const auto clearColor = makeClearValueColorU32(0u, 0u, 0u, 0u);
879 const auto pipelineStage = makePipelineStage(m_params.stage);
880
881 const auto cmdPool = makeCommandPool(vkd, device, qfIndex);
882 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
883 const auto cmdBuffer = cmdBufferPtr.get();
884
885 beginCommandBuffer(vkd, cmdBuffer);
886
887 // Zero-out full image.
888 const auto preClearBarrier =
889 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
890 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, fullImage->get(), fullSRR);
891 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preClearBarrier);
892 vkd.cmdClearColorImage(cmdBuffer, fullImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColor.color, 1u,
893 &fullSRR);
894
895 // Create sliced view of the full image.
896 const auto slicedView =
897 make3DImageView(vkd, device, fullImage->get(), kFormat,
898 tcu::just(tcu::UVec2(m_params.offset, m_params.getSlicedViewRange())), mipLevel, 1u);
899
900 // Create storage image and view with reduced size (this will be the source image in the shader).
901 const auto auxiliarImage = make3DImage(vkd, device, alloc, kFormat, sliceExtent, 1u, false /*sampling*/);
902 const auto auxiliarView = make3DImageView(vkd, device, auxiliarImage->get(), kFormat, tcu::Nothing, 0u, 1u);
903
904 // Copy reference buffer into auxiliar image.
905 const auto preCopyBarrier =
906 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
907 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, auxiliarImage->get(), singleSRR);
908 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, 0u, VK_PIPELINE_STAGE_TRANSFER_BIT, &preCopyBarrier);
909 const auto sliceCopy = makeBufferImageCopy(sliceExtent, baseLevelSRL);
910 vkd.cmdCopyBufferToImage(cmdBuffer, auxiliarBuffer->get(), auxiliarImage->get(),
911 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &sliceCopy);
912
913 // Move both images to the general layout for reading and writing.
914 // Note: read-only optimal is not a valid layout for the read image.
915 const auto preShaderBarrierAux =
916 makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
917 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, auxiliarImage->get(), singleSRR);
918 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &preShaderBarrierAux);
919 const auto preShaderBarrierFull =
920 makeImageMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT,
921 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, kUsageLayout, fullImage->get(), fullSRR);
922 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, pipelineStage, &preShaderBarrierFull);
923
924 // Run store operation.
925 runPipeline(vkd, device, cmdBuffer, slicedView.get(), auxiliarView.get());
926
927 // Copy the right section of the full image (result) to verification buffer.
928 const auto preVerifCopyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);
929 cmdPipelineMemoryBarrier(vkd, cmdBuffer, pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, &preVerifCopyBarrier);
930
931 const VkBufferImageCopy verifCopy = {
932 0ull, // VkDeviceSize bufferOffset;
933 0u, // uint32_t bufferRowLength;
934 0u, // uint32_t bufferImageHeight;
935 targetLevelSRL, // VkImageSubresourceLayers imageSubresource;
936 makeOffset3D(0, 0, static_cast<int32_t>(m_params.offset)), // VkOffset3D imageOffset;
937 sliceExtent, // VkExtent3D imageExtent;
938 };
939 vkd.cmdCopyImageToBuffer(cmdBuffer, fullImage->get(), kUsageLayout, verifBuffer->get(), 1u, &verifCopy);
940
941 // Sync verification buffer with host reads.
942 const auto preHostBarrier = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
943 cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
944 &preHostBarrier);
945
946 endCommandBuffer(vkd, cmdBuffer);
947 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
948
949 const auto sliceExtentIV3 = makeIVec3(sliceExtent.width, sliceExtent.height, sliceExtent.depth);
950 auto &auxiliarBufferAlloc = auxiliarBuffer->getAllocation();
951 auto &verifBufferAlloc = verifBuffer->getAllocation();
952
953 // Invalidate verification buffer allocation.
954 invalidateAlloc(vkd, device, verifBufferAlloc);
955
956 // Compare auxiliar buffer and verification buffer.
957 const tcu::ConstPixelBufferAccess initialImage(tcuFormat, sliceExtentIV3, auxiliarBufferAlloc.getHostPtr());
958 const tcu::ConstPixelBufferAccess finalImage(tcuFormat, sliceExtentIV3, verifBufferAlloc.getHostPtr());
959
960 auto &log = m_context.getTestContext().getLog();
961 const tcu::UVec4 threshold(0u, 0u, 0u, 0u);
962
963 if (!tcu::intThresholdCompare(log, "Comparison", "Comparison of reference and result", initialImage, finalImage,
964 threshold, tcu::COMPARE_LOG_ON_ERROR))
965 return tcu::TestStatus::fail("Image comparison failed; check log for details");
966
967 if (m_params.sampleImg && !runSamplingPipeline(fullImage->get(), slicedView.get(), m_params.getFullLevelExtent()))
968 return tcu::TestStatus::fail("Sampling full level failed; check log for details");
969
970 return tcu::TestStatus::pass("Pass");
971 }
972
973 using TestCaseGroupPtr = de::MovePtr<tcu::TestCaseGroup>;
974
975 } // namespace
976
createImageSlicedViewOf3DTests(tcu::TestContext & testCtx)977 tcu::TestCaseGroup *createImageSlicedViewOf3DTests(tcu::TestContext &testCtx)
978 {
979 TestCaseGroupPtr imageTests(new tcu::TestCaseGroup(testCtx, "sliced_view_of_3d_image"));
980
981 const struct
982 {
983 VkShaderStageFlagBits stage;
984 const char *name;
985 } stageCases[] = {
986 {VK_SHADER_STAGE_COMPUTE_BIT, "comp"},
987 {VK_SHADER_STAGE_FRAGMENT_BIT, "frag"},
988 };
989
990 const struct
991 {
992 TestType testType;
993 const char *name;
994 } testTypeCases[] = {
995 {TestType::LOAD, "load"},
996 {TestType::STORE, "store"},
997 };
998
999 const struct
1000 {
1001 bool sampleImg;
1002 const char *suffix;
1003 } samplingCases[] = {
1004 {false, ""},
1005 {true, "_with_sampling"},
1006 };
1007
1008 const uint32_t seed = 1667817299u;
1009 de::Random rnd(seed);
1010
1011 // Basic tests with 2 slices and a view of the first or second slice.
1012 {
1013 const uint32_t basicDepth = 2u;
1014 const uint32_t basicRange = 1u;
1015
1016 TestCaseGroupPtr basicTests(new tcu::TestCaseGroup(testCtx, "basic"));
1017
1018 for (const auto &testTypeCase : testTypeCases)
1019 {
1020 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1021
1022 for (const auto &stageCase : stageCases)
1023 {
1024 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1025
1026 for (uint32_t offset = 0u; offset < basicDepth; ++offset)
1027 {
1028 for (const auto &samplingCase : samplingCases)
1029 {
1030 const auto testName = "offset_" + std::to_string(offset) + samplingCase.suffix;
1031 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, basicDepth, offset,
1032 basicRange, tcu::Nothing, samplingCase.sampleImg);
1033
1034 stageGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1035 }
1036 }
1037
1038 testTypeGroup->addChild(stageGroup.release());
1039 }
1040
1041 basicTests->addChild(testTypeGroup.release());
1042 }
1043
1044 imageTests->addChild(basicTests.release());
1045 }
1046
1047 // Full slice tests.
1048 {
1049 const uint32_t fullDepth = 4u;
1050
1051 TestCaseGroupPtr fullSliceTests(new tcu::TestCaseGroup(testCtx, "full_slice"));
1052
1053 for (const auto &testTypeCase : testTypeCases)
1054 {
1055 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1056
1057 for (const auto &stageCase : stageCases)
1058 {
1059 for (const auto &samplingCase : samplingCases)
1060 {
1061 const auto testName = std::string(stageCase.name) + samplingCase.suffix;
1062 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, fullDepth, 0u, fullDepth,
1063 tcu::Nothing, samplingCase.sampleImg);
1064 testTypeGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1065 }
1066 }
1067
1068 fullSliceTests->addChild(testTypeGroup.release());
1069 }
1070
1071 imageTests->addChild(fullSliceTests.release());
1072 }
1073
1074 // Pseudorandom test cases.
1075 {
1076 using CaseId = std::tuple<uint32_t, uint32_t, uint32_t>; // depth, offset, range
1077 using CaseIdSet = std::set<CaseId>;
1078
1079 const uint32_t depthCases = 5u;
1080 const uint32_t rangeCases = 5u;
1081 const int minDepth = 10u;
1082 const int maxDepth = 32u;
1083
1084 TestCaseGroupPtr randomTests(new tcu::TestCaseGroup(testCtx, "random"));
1085
1086 for (const auto &testTypeCase : testTypeCases)
1087 {
1088 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1089
1090 for (const auto &stageCase : stageCases)
1091 {
1092 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1093
1094 CaseIdSet generatedCases;
1095
1096 for (uint32_t i = 0u; i < depthCases; ++i)
1097 {
1098 const uint32_t depth = static_cast<uint32_t>(rnd.getInt(minDepth, maxDepth));
1099
1100 for (uint32_t j = 0u; j < rangeCases; ++j)
1101 {
1102 uint32_t offset = 0u;
1103 uint32_t range = 0u;
1104
1105 for (;;)
1106 {
1107 DE_ASSERT(depth > 0u);
1108 offset = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(depth - 1u)));
1109
1110 DE_ASSERT(offset < depth);
1111 range = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(depth - offset)));
1112
1113 // 0 is interpreted as VK_REMAINING_3D_SLICES_EXT.
1114 if (range == 0u)
1115 range = VK_REMAINING_3D_SLICES_EXT;
1116
1117 // The current seed may generate duplicate cases with non-unique names, so we filter those out.
1118 const CaseId currentCase(depth, offset, range);
1119 if (de::contains(begin(generatedCases), end(generatedCases), currentCase))
1120 continue;
1121
1122 generatedCases.insert(currentCase);
1123 break;
1124 }
1125
1126 const auto rangeStr =
1127 ((range == VK_REMAINING_3D_SLICES_EXT) ? "remaining_3d_slices" : std::to_string(range));
1128 const auto testName = "depth_" + std::to_string(depth) + "_offset_" + std::to_string(offset) +
1129 "_range_" + rangeStr;
1130 TestParams params(testTypeCase.testType, stageCase.stage, kWidth, kHeight, depth, offset, range,
1131 tcu::Nothing, false);
1132
1133 stageGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1134 }
1135 }
1136
1137 testTypeGroup->addChild(stageGroup.release());
1138 }
1139
1140 randomTests->addChild(testTypeGroup.release());
1141 }
1142
1143 imageTests->addChild(randomTests.release());
1144 }
1145
1146 // Mip level test cases.
1147 {
1148 using CaseId = std::tuple<uint32_t, uint32_t>; // depth, offset, range
1149 using CaseIdSet = std::set<CaseId>;
1150
1151 const uint32_t casesPerLevel = 2u;
1152 const uint32_t width = kWidth;
1153 const uint32_t height = kWidth;
1154 const uint32_t depth = kWidth;
1155 const uint32_t maxLevels = TestParams::getMaxMipLevelCountForSize(kWidth);
1156
1157 TestCaseGroupPtr mipLevelTests(new tcu::TestCaseGroup(testCtx, "mip_level"));
1158
1159 for (const auto &testTypeCase : testTypeCases)
1160 {
1161 TestCaseGroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testTypeCase.name));
1162
1163 for (const auto &stageCase : stageCases)
1164 {
1165 TestCaseGroupPtr stageGroup(new tcu::TestCaseGroup(testCtx, stageCase.name));
1166
1167 for (uint32_t level = 0u; level < maxLevels; ++level)
1168 {
1169 const auto levelSize = (depth >> level);
1170 const auto groupName = "level_" + std::to_string(level);
1171 CaseIdSet generatedCases;
1172
1173 DE_ASSERT(levelSize > 0u);
1174
1175 TestCaseGroupPtr levelGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
1176
1177 // Generate a few pseudorandom cases per mip level.
1178 for (uint32_t i = 0u; i < casesPerLevel; ++i)
1179 {
1180 uint32_t offset = 0u;
1181 uint32_t range = 0u;
1182
1183 for (;;)
1184 {
1185 offset = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(levelSize - 1u)));
1186 DE_ASSERT(offset < levelSize);
1187
1188 range = static_cast<uint32_t>(rnd.getInt(0, static_cast<int>(levelSize - offset)));
1189
1190 // 0 is interpreted as VK_REMAINING_3D_SLICES_EXT.
1191 if (range == 0u)
1192 range = VK_REMAINING_3D_SLICES_EXT;
1193
1194 const CaseId currentCase(offset, range);
1195 if (de::contains(begin(generatedCases), end(generatedCases), currentCase))
1196 continue;
1197
1198 generatedCases.insert(currentCase);
1199 break;
1200 }
1201
1202 const auto rangeStr =
1203 ((range == VK_REMAINING_3D_SLICES_EXT) ? "remaining_3d_slices" : std::to_string(range));
1204 const auto testName = "offset_" + std::to_string(offset) + "_range_" + rangeStr;
1205 TestParams params(testTypeCase.testType, stageCase.stage, width, height, depth, offset, range,
1206 tcu::just(level), false);
1207
1208 levelGroup->addChild(new SlicedViewTestCase(testCtx, testName, params));
1209 }
1210
1211 stageGroup->addChild(levelGroup.release());
1212 }
1213
1214 testTypeGroup->addChild(stageGroup.release());
1215 }
1216
1217 mipLevelTests->addChild(testTypeGroup.release());
1218 }
1219
1220 imageTests->addChild(mipLevelTests.release());
1221 }
1222
1223 return imageTests.release();
1224 }
1225
1226 } // namespace pipeline
1227 } // namespace vkt
1228