1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2022 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file vktGlobalPriorityQueueTests.cpp
21 * \brief Global Priority Queue Tests
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktGlobalPriorityQueueTests.hpp"
25 #include "vktGlobalPriorityQueueUtils.hpp"
26
27 #include "vkBarrierUtil.hpp"
28 #include "vkQueryUtil.hpp"
29 #include "vkBuilderUtil.hpp"
30 #include "vkCmdUtil.hpp"
31 #include "vkImageUtil.hpp"
32 #include "../image/vktImageTestsUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkObjUtil.hpp"
35 #include "vkStrUtil.hpp"
36 #include "vkRefUtil.hpp"
37
38 #include "vktTestGroupUtil.hpp"
39 #include "vktTestCase.hpp"
40
41 #include "deDefs.h"
42 #include "deMath.h"
43 #include "deRandom.h"
44 #include "deRandom.hpp"
45 #include "deSharedPtr.hpp"
46 #include "deString.h"
47 #include "deMemory.h"
48
49 #include "tcuStringTemplate.hpp"
50
51 #include <string>
52 #include <sstream>
53 #include <map>
54 #include <iostream>
55
56 using namespace vk;
57
58 namespace vkt
59 {
60 namespace synchronization
61 {
62 namespace
63 {
64
// How the producer and consumer queue submissions are ordered with respect
// to each other (see GPQInstanceBase::submitCommands).
enum class SyncType
{
    None,     // each submission is run and waited to completion independently
    Semaphore // producer signals a semaphore the consumer submission waits on
};
70
// Parameters describing a single global-priority-queue test variant.
struct TestConfig
{
    VkQueueFlagBits transitionFrom;        // capability of the producer queue family
    VkQueueFlagBits transitionTo;          // capability of the consumer queue family
    VkQueueGlobalPriorityKHR priorityFrom; // global priority requested for the producer queue
    VkQueueGlobalPriorityKHR priorityTo;   // global priority requested for the consumer queue
    bool enableProtected;                  // use protected queues/resources
    bool enableSparseBinding;              // use sparse-binding queues/resources
    SyncType syncType;                     // how the two submissions are synchronized
    uint32_t width;                        // image/framebuffer width in pixels
    uint32_t height;                       // image/framebuffer height in pixels
    VkFormat format;                       // color format; filled in by selectFormat()
    // Picks the first entry of 'formats' whose channel order is a single R and
    // whose optimal tiling supports transfer-src and color-attachment usage.
    // Stores the winner in 'format' and returns true; returns false otherwise.
    bool selectFormat(const InstanceInterface &vk, VkPhysicalDevice dev, std::initializer_list<VkFormat> formats);
};
85
selectFormat(const InstanceInterface & vk,VkPhysicalDevice dev,std::initializer_list<VkFormat> formats)86 bool TestConfig::selectFormat(const InstanceInterface &vk, VkPhysicalDevice dev,
87 std::initializer_list<VkFormat> formats)
88 {
89 auto doesFormatMatch = [](const VkFormat fmt) -> bool
90 {
91 const auto tcuFmt = mapVkFormat(fmt);
92 return tcuFmt.order == tcu::TextureFormat::ChannelOrder::R;
93 };
94 VkFormatProperties2 props{};
95 const VkFormatFeatureFlags flags = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
96 for (auto i = formats.begin(); i != formats.end(); ++i)
97 {
98 props.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
99 props.pNext = nullptr;
100 props.formatProperties = {};
101 const VkFormat fmt = *i;
102 vk.getPhysicalDeviceFormatProperties2(dev, fmt, &props);
103 if (doesFormatMatch(fmt) && ((props.formatProperties.optimalTilingFeatures & flags) == flags))
104 {
105 this->format = fmt;
106 return true;
107 }
108 }
109 return false;
110 }
111
// Reinterpret a type-erased pointer as a pointer to (an array of) T and return
// an iterator — a plain T* — to its first element. Used to iterate over the
// host-mapped memory of a buffer whose getHostPtr() yields void*.
template <class T, class P = T (*)[1]>
auto begin(void *p) -> decltype(std::begin(*std::declval<P>()))
{
    const P asArray = static_cast<P>(p);
    return std::begin(*asArray);
}
117
// Shared plumbing for the global-priority-queue test instances: owns the
// SpecialDevice built for the requested from/to queue transition and lazily
// created shader modules, and provides helpers for the pipelines, images and
// the final producer/consumer submission.
class GPQInstanceBase : public TestInstance
{
public:
    typedef std::initializer_list<VkDescriptorSetLayout> DSLayouts;
    typedef tcu::ConstPixelBufferAccess BufferAccess;

    GPQInstanceBase(Context &ctx, const TestConfig &cfg);
    // Layout with a push-constant range sized for PushConstant; the <void>
    // specialization produces a layout without push constants.
    template <class PushConstant = void>
    auto createPipelineLayout(DSLayouts setLayouts) const -> Move<VkPipelineLayout>;
    auto makeCommandPool(uint32_t qFamilyIndex) const -> Move<VkCommandPool>;
    auto createGraphicsPipeline(VkPipelineLayout pipelineLayout, VkRenderPass renderPass) -> Move<VkPipeline>;
    // producer=true builds the buffer-copy pipeline ("cpyb"), otherwise the
    // image-readback pipeline ("cpyi").
    auto createComputePipeline(VkPipelineLayout pipelineLayout, bool producer) -> Move<VkPipeline>;
    auto createImage(VkImageUsageFlags usage, uint32_t queueFamilyIdx, VkQueue queue) const
        -> de::MovePtr<ImageWithMemory>;
    // Fills 'range' with the single-mip/single-layer color range the view covers.
    auto createView(VkImage image, VkImageSubresourceRange &range) const -> Move<VkImageView>;
    // Submits producerCmd on queueFrom and consumerCmd on queueTo, synchronized
    // according to m_config.syncType.
    void submitCommands(VkCommandBuffer producerCmd, VkCommandBuffer consumerCmd) const;

protected:
    auto createPipelineLayout(const VkPushConstantRange *pRange, DSLayouts setLayouts) const -> Move<VkPipelineLayout>;
    const TestConfig m_config;
    const SpecialDevice m_device;
    // Shader modules are created on first use; 'name' doubles as the key into
    // the binary collection ("vert", "frag", "cpyb", "cpyi").
    struct NamedShader
    {
        std::string name;
        Move<VkShaderModule> handle;
    } m_shaders[4];
};
GPQInstanceBase(Context & ctx,const TestConfig & cfg)145 GPQInstanceBase::GPQInstanceBase(Context &ctx, const TestConfig &cfg)
146 : TestInstance(ctx)
147 , m_config(cfg)
148 , m_device(ctx, cfg.transitionFrom, cfg.transitionTo, cfg.priorityFrom, cfg.priorityTo, cfg.enableProtected,
149 cfg.enableSparseBinding)
150 , m_shaders()
151 {
152 m_shaders[0].name = "vert"; // vertex
153 m_shaders[1].name = "frag"; // fragment
154 m_shaders[2].name = "cpyb"; // compute
155 m_shaders[3].name = "cpyi"; // compute
156 }
157
createImage(VkImageUsageFlags usage,uint32_t queueFamilyIdx,VkQueue queue) const158 de::MovePtr<ImageWithMemory> GPQInstanceBase::createImage(VkImageUsageFlags usage, uint32_t queueFamilyIdx,
159 VkQueue queue) const
160 {
161 const InstanceInterface &vki = m_context.getInstanceInterface();
162 const DeviceInterface &vkd = m_context.getDeviceInterface();
163 const VkPhysicalDevice phys = m_context.getPhysicalDevice();
164 const VkDevice dev = m_device.handle;
165 Allocator &alloc = m_device.getAllocator();
166 VkImageCreateFlags flags = 0;
167
168 if (m_config.enableProtected)
169 flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
170 if (m_config.enableSparseBinding)
171 flags |= (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT);
172 const MemoryRequirement memReqs = m_config.enableProtected ? MemoryRequirement::Protected : MemoryRequirement::Any;
173
174 VkImageCreateInfo imageInfo{};
175 imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
176 imageInfo.pNext = nullptr;
177 imageInfo.flags = flags;
178 imageInfo.imageType = VK_IMAGE_TYPE_2D;
179 imageInfo.format = m_config.format;
180 imageInfo.extent.width = m_config.width;
181 imageInfo.extent.height = m_config.height;
182 imageInfo.extent.depth = 1;
183 imageInfo.mipLevels = 1;
184 imageInfo.arrayLayers = 1;
185 imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
186 imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
187 imageInfo.usage = usage;
188 imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
189 imageInfo.queueFamilyIndexCount = 1;
190 imageInfo.pQueueFamilyIndices = &queueFamilyIdx;
191 imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
192
193 return de::MovePtr<ImageWithMemory>(new ImageWithMemory(vki, vkd, phys, dev, alloc, imageInfo, queue, memReqs));
194 }
195
createView(VkImage image,VkImageSubresourceRange & range) const196 Move<VkImageView> GPQInstanceBase::createView(VkImage image, VkImageSubresourceRange &range) const
197 {
198 const DeviceInterface &vkd = m_context.getDeviceInterface();
199 const VkDevice dev = m_device.handle;
200
201 range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1);
202 return makeImageView(vkd, dev, image, VK_IMAGE_VIEW_TYPE_2D, m_config.format, range);
203 }
204
createPipelineLayout(const VkPushConstantRange * pRange,DSLayouts setLayouts) const205 Move<VkPipelineLayout> GPQInstanceBase::createPipelineLayout(const VkPushConstantRange *pRange,
206 DSLayouts setLayouts) const
207 {
208 std::vector<VkDescriptorSetLayout> layouts(setLayouts.size());
209 auto ii = setLayouts.begin();
210 for (auto i = ii; i != setLayouts.end(); ++i)
211 layouts[std::distance(ii, i)] = *i;
212
213 VkPipelineLayoutCreateInfo info{};
214 info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
215 info.pNext = nullptr;
216 info.flags = VkPipelineLayoutCreateFlags(0);
217 info.setLayoutCount = static_cast<uint32_t>(layouts.size());
218 info.pSetLayouts = layouts.size() ? layouts.data() : nullptr;
219 info.pushConstantRangeCount = (pRange != nullptr && pRange->size > 0) ? 1 : 0;
220 info.pPushConstantRanges = (pRange != nullptr && pRange->size > 0) ? pRange : nullptr;
221
222 return ::vk::createPipelineLayout(m_context.getDeviceInterface(), m_device.handle, &info);
223 }
224
// Specialization for "no push constants": forwards with a null range.
template <>
Move<VkPipelineLayout> DE_UNUSED_FUNCTION GPQInstanceBase::createPipelineLayout<void>(DSLayouts setLayouts) const
{
    return createPipelineLayout(nullptr, setLayouts);
}
230
231 template <class PushConstant>
createPipelineLayout(DSLayouts setLayouts) const232 Move<VkPipelineLayout> GPQInstanceBase::createPipelineLayout(DSLayouts setLayouts) const
233 {
234 VkPushConstantRange range{};
235 range.stageFlags = VK_SHADER_STAGE_ALL;
236 range.offset = 0;
237 range.size = static_cast<uint32_t>(sizeof(PushConstant));
238 return createPipelineLayout(&range, setLayouts);
239 }
240
makeCommandPool(uint32_t qFamilyIndex) const241 Move<VkCommandPool> GPQInstanceBase::makeCommandPool(uint32_t qFamilyIndex) const
242 {
243 const VkCommandPoolCreateFlags flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT |
244 (m_config.enableProtected ? VK_COMMAND_POOL_CREATE_PROTECTED_BIT : 0);
245 const VkCommandPoolCreateInfo commandPoolParams = {
246 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
247 DE_NULL, // const void* pNext;
248 flags, // VkCommandPoolCreateFlags flags;
249 qFamilyIndex, // uint32_t queueFamilyIndex;
250 };
251
252 return createCommandPool(m_context.getDeviceInterface(), m_device.handle, &commandPoolParams);
253 }
254
createGraphicsPipeline(VkPipelineLayout pipelineLayout,VkRenderPass renderPass)255 Move<VkPipeline> GPQInstanceBase::createGraphicsPipeline(VkPipelineLayout pipelineLayout, VkRenderPass renderPass)
256 {
257 const DeviceInterface &vkd = m_context.getDeviceInterface();
258 const VkDevice dev = m_device.handle;
259
260 auto sh = std::find_if(std::begin(m_shaders), std::end(m_shaders),
261 [](const NamedShader &ns) { return ns.name == "vert"; });
262 if (*sh->handle == DE_NULL)
263 sh->handle = createShaderModule(vkd, dev, m_context.getBinaryCollection().get("vert"));
264 VkShaderModule vertex = *sh->handle;
265
266 sh = std::find_if(std::begin(m_shaders), std::end(m_shaders),
267 [](const NamedShader &ns) { return ns.name == "frag"; });
268 if (*sh->handle == DE_NULL)
269 sh->handle = createShaderModule(vkd, dev, m_context.getBinaryCollection().get("frag"));
270 VkShaderModule fragment = *sh->handle;
271
272 const std::vector<VkViewport> viewports{makeViewport(m_config.width, m_config.height)};
273 const std::vector<VkRect2D> scissors{makeRect2D(m_config.width, m_config.height)};
274 const auto vertexBinding =
275 makeVertexInputBindingDescription(0u, static_cast<uint32_t>(2 * sizeof(float)), VK_VERTEX_INPUT_RATE_VERTEX);
276 const auto vertexAttrib = makeVertexInputAttributeDescription(0u, 0u, VK_FORMAT_R32G32_SFLOAT, 0u);
277 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo{
278 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
279 nullptr, // const void* pNext;
280 0u, // VkPipelineVertexInputStateCreateFlags flags;
281 1u, // uint32_t vertexBindingDescriptionCount;
282 &vertexBinding, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
283 1u, // uint32_t vertexAttributeDescriptionCount;
284 &vertexAttrib // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
285 };
286
287 return makeGraphicsPipeline(vkd, dev, pipelineLayout, vertex, VkShaderModule(0), VkShaderModule(0),
288 VkShaderModule(0), fragment, renderPass, viewports, scissors,
289 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0, 0, &vertexInputStateCreateInfo);
290 }
291
createComputePipeline(VkPipelineLayout pipelineLayout,bool producer)292 Move<VkPipeline> GPQInstanceBase::createComputePipeline(VkPipelineLayout pipelineLayout, bool producer)
293 {
294 const DeviceInterface &vk = m_context.getDeviceInterface();
295 const VkDevice dev = m_device.handle;
296
297 const std::string compName = producer ? "cpyb" : "cpyi";
298 auto comp = std::find_if(std::begin(m_shaders), std::end(m_shaders),
299 [&](const NamedShader &ns) { return ns.name == compName; });
300 if (*comp->handle == DE_NULL)
301 comp->handle = createShaderModule(vk, dev, m_context.getBinaryCollection().get(compName));
302 VkShaderModule compute = *comp->handle;
303
304 VkPipelineShaderStageCreateInfo sci{};
305 sci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
306 sci.pNext = nullptr;
307 sci.flags = VkPipelineShaderStageCreateFlags(0);
308 sci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
309 sci.module = compute;
310 sci.pName = "main";
311 sci.pSpecializationInfo = nullptr;
312
313 VkComputePipelineCreateInfo ci{};
314 ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
315 ci.pNext = nullptr;
316 ci.flags = VkPipelineCreateFlags(0);
317 ci.stage = sci;
318 ci.layout = pipelineLayout;
319 ci.basePipelineHandle = VkPipeline(0);
320 ci.basePipelineIndex = 0;
321
322 return vk::createComputePipeline(vk, dev, VkPipelineCache(0), &ci, nullptr);
323 }
324
// Defined below (after GPQCase); declared here for use in submitCommands().
VkPipelineStageFlags queueFlagBitToPipelineStage(VkQueueFlagBits bit);

// Submit producerCmd on queueFrom and consumerCmd on queueTo.
//  - SyncType::None: each submission is run and fully waited on in turn; the
//    semaphore/fence and the submit infos built below go unused in this path.
//  - SyncType::Semaphore: the producer signals 'sem', the consumer waits on it
//    (at transfer + its own execution stages) and completion is observed via
//    'fence'.
void GPQInstanceBase::submitCommands(VkCommandBuffer producerCmd, VkCommandBuffer consumerCmd) const
{
    const DeviceInterface &vkd = m_context.getDeviceInterface();
    const VkDevice dev = m_device.handle;

    Move<VkSemaphore> sem = createSemaphore(vkd, dev);
    Move<VkFence> fence = createFence(vkd, dev);

    // Chained into both submit infos when the device runs on protected queues.
    VkProtectedSubmitInfo protectedSubmitInfo{
        VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO, // VkStructureType sType;
        nullptr,                                 // void* pNext;
        VK_TRUE                                  // VkBool32 protectedSubmit;
    };

    const VkSubmitInfo producerSubmitInfo{
        VK_STRUCTURE_TYPE_SUBMIT_INFO,                             // VkStructureType sType;
        m_config.enableProtected ? &protectedSubmitInfo : nullptr, // const void* pNext;
        0,                                                         // uint32_t waitSemaphoreCount;
        nullptr,                                                   // const VkSemaphore* pWaitSemaphores;
        nullptr,                                                   // const VkPipelineStageFlags* pWaitDstStageMask;
        1u,                                                        // uint32_t commandBufferCount;
        &producerCmd,                                              // const VkCommandBuffer* pCommandBuffers;
        1u,                                                        // uint32_t signalSemaphoreCount;
        &sem.get(),                                                // const VkSemaphore* pSignalSemaphores;
    };

    // The consumer may not start transfer work or its own (graphics/compute)
    // shader stages before the producer's semaphore is signaled.
    const VkPipelineStageFlags dstWaitStages =
        VK_PIPELINE_STAGE_TRANSFER_BIT | queueFlagBitToPipelineStage(m_config.transitionTo);
    const VkSubmitInfo consumerSubmitInfo{
        VK_STRUCTURE_TYPE_SUBMIT_INFO,                             // VkStructureType sType;
        m_config.enableProtected ? &protectedSubmitInfo : nullptr, // const void* pNext;
        1u,                                                        // uint32_t waitSemaphoreCount;
        &sem.get(),                                                // const VkSemaphore* pWaitSemaphores;
        &dstWaitStages,                                            // const VkPipelineStageFlags* pWaitDstStageMask;
        1u,                                                        // uint32_t commandBufferCount;
        &consumerCmd,                                              // const VkCommandBuffer* pCommandBuffers;
        0,                                                         // uint32_t signalSemaphoreCount;
        nullptr,                                                   // const VkSemaphore* pSignalSemaphores;
    };

    switch (m_config.syncType)
    {
    case SyncType::None:
        submitCommandsAndWait(vkd, dev, m_device.queueFrom, producerCmd);
        submitCommandsAndWait(vkd, dev, m_device.queueTo, consumerCmd);
        break;
    case SyncType::Semaphore:
        VK_CHECK(vkd.queueSubmit(m_device.queueFrom, 1u, &producerSubmitInfo, VkFence(0)));
        VK_CHECK(vkd.queueSubmit(m_device.queueTo, 1u, &consumerSubmitInfo, *fence));
        VK_CHECK(vkd.waitForFences(dev, 1u, &fence.get(), true, ~0ull));
        break;
    }
}
379
// The primary template is only declared; each supported from→to queue
// transition gets an explicit specialization with its own iterate().
template <VkQueueFlagBits, VkQueueFlagBits>
class GPQInstance;
// Declares the specialization GPQInstance<flagsFrom_, flagsTo_>; the matching
// iterate() body is defined separately for each pair.
#define DECLARE_INSTANCE(flagsFrom_, flagsTo_) \
    template <> \
    class GPQInstance<flagsFrom_, flagsTo_> : public GPQInstanceBase \
    { \
    public: \
        GPQInstance(Context &ctx, const TestConfig &cfg) : GPQInstanceBase(ctx, cfg) \
        { \
        } \
        virtual tcu::TestStatus iterate(void) override; \
    }

// The two transitions exercised by these tests.
DECLARE_INSTANCE(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
DECLARE_INSTANCE(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT);
395
class GPQCase;
// Member-function pointer used to dispatch GPQCase::createInstance() by the
// (transitionFrom, transitionTo) pair.
typedef TestInstance *(GPQCase::*CreateInstanceProc)(Context &) const;
typedef std::pair<VkQueueFlagBits, VkQueueFlagBits> CreateInstanceKey;
typedef std::map<CreateInstanceKey, CreateInstanceProc> CreateInstanceMap;
// Registers the createInstance<from_, to_> instantiation under its key.
#define MAPENTRY(from_, to_) m_createInstanceMap[{from_, to_}] = &GPQCase::createInstance<from_, to_>
401
// Test case wrapper: compiles the shaders, verifies device support and creates
// the GPQInstance specialization matching the configured queue transition.
class GPQCase : public TestCase
{
public:
    GPQCase(tcu::TestContext &ctx, const std::string &name, const TestConfig &cfg);
    void initPrograms(SourceCollections &programs) const override;
    TestInstance *createInstance(Context &context) const override;
    void checkSupport(Context &context) const override;
    // Value the fragment shader writes and the consumer compute shader checks
    // each texel against (spliced into the GLSL as ${TEST_VALUE}).
    static uint32_t testValue;

private:
    template <VkQueueFlagBits, VkQueueFlagBits>
    TestInstance *createInstance(Context &context) const;
    // mutable: checkSupport() stores the selected format into m_config.format.
    mutable TestConfig m_config;
    CreateInstanceMap m_createInstanceMap;
};
uint32_t GPQCase::testValue = 113;
418
// Populate the dispatch map with the two supported queue transitions.
GPQCase::GPQCase(tcu::TestContext &ctx, const std::string &name, const TestConfig &cfg)
    : TestCase(ctx, name)
    , m_config(cfg)
    , m_createInstanceMap()
{
    MAPENTRY(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
    MAPENTRY(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT);
}
427
queueFlagBitToPipelineStage(VkQueueFlagBits bit)428 VkPipelineStageFlags queueFlagBitToPipelineStage(VkQueueFlagBits bit)
429 {
430 switch (bit)
431 {
432 case VK_QUEUE_COMPUTE_BIT:
433 return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
434 case VK_QUEUE_GRAPHICS_BIT:
435 return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
436 default:
437 DE_ASSERT(VK_FALSE);
438 }
439 return VK_QUEUE_FLAG_BITS_MAX_ENUM;
440 }
441
// Factory for the concrete GPQInstance specialization for this queue-flag
// pair; registered in m_createInstanceMap via MAPENTRY.
template <VkQueueFlagBits flagsFrom, VkQueueFlagBits flagsTo>
TestInstance *GPQCase::createInstance(Context &context) const
{
    return new GPQInstance<flagsFrom, flagsTo>(context, m_config);
}
447
createInstance(Context & context) const448 TestInstance *GPQCase::createInstance(Context &context) const
449 {
450 const CreateInstanceKey key(m_config.transitionFrom, m_config.transitionTo);
451 return (this->*(m_createInstanceMap.at(key)))(context);
452 }
453
operator <<(std::ostream & str,const VkQueueFlagBits & bit)454 std::ostream &operator<<(std::ostream &str, const VkQueueFlagBits &bit)
455 {
456 const char *s = nullptr;
457 const auto d = std::to_string(bit);
458 switch (bit)
459 {
460 case VK_QUEUE_GRAPHICS_BIT:
461 s = "VK_QUEUE_GRAPHICS_BIT";
462 break;
463 case VK_QUEUE_COMPUTE_BIT:
464 s = "VK_QUEUE_COMPUTE_BIT";
465 break;
466 case VK_QUEUE_TRANSFER_BIT:
467 s = "VK_QUEUE_TRANSFER_BIT";
468 break;
469 case VK_QUEUE_SPARSE_BINDING_BIT:
470 s = "VK_QUEUE_SPARSE_BINDING_BIT";
471 break;
472 case VK_QUEUE_PROTECTED_BIT:
473 s = "VK_QUEUE_PROTECTED_BIT";
474 break;
475 default:
476 s = d.c_str();
477 break;
478 }
479 return (str << s);
480 }
481
// Throws NotSupportedError unless the device offers the required extensions,
// a usable single-channel format, the optional protected/sparse features, and
// two distinct queue families matching the requested capabilities/priorities.
void GPQCase::checkSupport(Context &context) const
{
    const InstanceInterface &vki = context.getInstanceInterface();
    const VkPhysicalDevice dev = context.getPhysicalDevice();

    context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
    context.requireDeviceFunctionality("VK_EXT_global_priority_query");
    context.requireDeviceFunctionality("VK_EXT_global_priority");

    // Side effect: stores the chosen format into m_config.format (m_config is mutable).
    if (!m_config.selectFormat(vki, dev,
                               {VK_FORMAT_R32_SINT, VK_FORMAT_R32_UINT, VK_FORMAT_R8_SINT, VK_FORMAT_R8_UINT}))
    {
        TCU_THROW(NotSupportedError, "Unable to find a proper format");
    }

    VkPhysicalDeviceProtectedMemoryFeatures memFeatures{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES,
                                                        nullptr, VK_FALSE};
    VkPhysicalDeviceFeatures2 devFeatures{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, &memFeatures, {}};
    vki.getPhysicalDeviceFeatures2(dev, &devFeatures);

    if (m_config.enableProtected && (VK_FALSE == memFeatures.protectedMemory))
    {
        TCU_THROW(NotSupportedError, "Queue families with VK_QUEUE_PROTECTED_BIT not supported");
    }

    // All three sparse features must be present for the buffers/images used here.
    const VkBool32 sparseEnabled = devFeatures.features.sparseBinding & devFeatures.features.sparseResidencyBuffer &
                                   devFeatures.features.sparseResidencyImage2D;
    if (m_config.enableSparseBinding && (VK_FALSE == sparseEnabled))
    {
        TCU_THROW(NotSupportedError, "Queue families with VK_QUEUE_SPARSE_BINDING_BIT not supported");
    }

    // Throws a descriptive NotSupportedError when a queue lookup came back empty.
    auto assertUnavailableQueue = [](const uint32_t qIdx, VkQueueFlagBits qfb, VkQueueGlobalPriorityKHR qgp)
    {
        if (qIdx == INVALID_UINT32)
        {
            std::ostringstream buf;
            buf << "Unable to find queue " << qfb << " with priority " << qgp;
            buf.flush();
            TCU_THROW(NotSupportedError, buf.str());
        }
    };

    // Protected/sparse capabilities are required of both queue families.
    VkQueueFlags flagsFrom = m_config.transitionFrom;
    VkQueueFlags flagsTo = m_config.transitionTo;
    if (m_config.enableProtected)
    {
        flagsFrom |= VK_QUEUE_PROTECTED_BIT;
        flagsTo |= VK_QUEUE_PROTECTED_BIT;
    }
    if (m_config.enableSparseBinding)
    {
        flagsFrom |= VK_QUEUE_SPARSE_BINDING_BIT;
        flagsTo |= VK_QUEUE_SPARSE_BINDING_BIT;
    }

    const uint32_t queueFromIndex = findQueueFamilyIndex(vki, dev, m_config.priorityFrom, flagsFrom,
                                                         SpecialDevice::getColissionFlags(flagsFrom), INVALID_UINT32);
    assertUnavailableQueue(queueFromIndex, m_config.transitionFrom, m_config.priorityFrom);

    // The producer family is excluded so the consumer comes from a different one.
    const uint32_t queueToIndex = findQueueFamilyIndex(vki, dev, m_config.priorityTo, flagsTo,
                                                       SpecialDevice::getColissionFlags(flagsTo), queueFromIndex);
    assertUnavailableQueue(queueToIndex, m_config.transitionTo, m_config.priorityTo);

    if (queueFromIndex == queueToIndex)
    {
        std::ostringstream buf;
        buf << "Unable to find separate queues " << m_config.transitionFrom << " and " << m_config.transitionTo;
        buf.flush();
        TCU_THROW(NotSupportedError, buf.str());
    }
}
554
// Registers the four shaders:
//  - "cpyb": compute; copies the staging float buffer into the vertex buffer.
//  - "cpyi": compute; compares each image texel with TEST_VALUE and writes 1/0
//            into the result buffer.
//  - "vert"/"frag": trivial pipeline painting the covered area with TEST_VALUE.
// ${...} placeholders are filled from 'abbreviations' below.
void GPQCase::initPrograms(SourceCollections &programs) const
{
    const std::string producerComp(R"glsl(
#version 450
layout(binding=0) buffer S { float src[]; };
layout(binding=1) buffer D { float dst[]; };
layout(local_size_x=1,local_size_y=1) in;
void main() {
dst[gl_GlobalInvocationID.x] = src[gl_GlobalInvocationID.x];
}
)glsl");

    const tcu::StringTemplate consumerComp(R"glsl(
#version 450
layout(local_size_x=1,local_size_y=1) in;
layout(${IMAGE_FORMAT}, binding=0) readonly uniform ${IMAGE_TYPE} srcImage;
layout(binding=1) writeonly coherent buffer Pixels { uint data[]; } dstBuffer;
void main()
{
ivec2 srcIdx = ivec2(gl_GlobalInvocationID.xy);
int width = imageSize(srcImage).x;
int dstIdx = int(gl_GlobalInvocationID.y * width + gl_GlobalInvocationID.x);
dstBuffer.data[dstIdx] = uint(imageLoad(srcImage, srcIdx).r) == ${TEST_VALUE} ? 1 : 0;
}
)glsl");

    // NOTE(review): the w component is 1.01 rather than 1.0, which slightly
    // scales positions toward the center after perspective divide — looks
    // deliberate (avoids edge-exact coverage); confirm before changing.
    const std::string vert(R"glsl(
#version 450
layout(location = 0) in vec2 pos;
void main()
{
gl_Position = vec4(pos, 0.0, 1.01);
}
)glsl");

    const tcu::StringTemplate frag(R"glsl(
#version 450
layout(location = 0) out ${COLOR_TYPE} color;
void main()
{
color = ${COLOR_TYPE}(${TEST_VALUE},0,0,1);
}
)glsl");

    const auto format = mapVkFormat(m_config.format);
    const auto imageFormat = image::getShaderImageFormatQualifier(format);
    const auto imageType = image::getShaderImageType(format, image::ImageType::IMAGE_TYPE_2D, false);
    const auto colorType = image::getGlslAttachmentType(m_config.format); // ivec4

    // Substitutions applied to the ${...} placeholders above.
    const std::map<std::string, std::string> abbreviations{
        {std::string("TEST_VALUE"), std::to_string(testValue)},
        {std::string("IMAGE_FORMAT"), std::string(imageFormat)},
        {std::string("IMAGE_TYPE"), std::string(imageType)},
        {std::string("COLOR_TYPE"), std::string(colorType)},
    };

    programs.glslSources.add("cpyb") << glu::ComputeSource(producerComp);
    programs.glslSources.add("cpyi") << glu::ComputeSource(consumerComp.specialize(abbreviations));
    programs.glslSources.add("vert") << glu::VertexSource(vert);
    programs.glslSources.add("frag") << glu::FragmentSource(frag.specialize(abbreviations));
}
616
iterate(void)617 tcu::TestStatus GPQInstance<VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT>::iterate(void)
618 {
619 if (VK_SUCCESS != m_device.createResult)
620 {
621 if (VK_ERROR_NOT_PERMITTED_KHR == m_device.createResult)
622 return tcu::TestStatus(QP_TEST_RESULT_QUALITY_WARNING,
623 "Custom device creation returned " +
624 std::string(getResultName(m_device.createResult)));
625 throw NotSupportedError(m_device.createResult, getResultName(m_device.createResult), m_device.createExpression,
626 m_device.createFileName, m_device.createFileLine);
627 }
628
629 const InstanceInterface &vki = m_context.getInstanceInterface();
630 const DeviceInterface &vkd = m_context.getDeviceInterface();
631 const VkPhysicalDevice phys = m_context.getPhysicalDevice();
632 const VkDevice device = m_device.handle;
633 Allocator &allocator = m_device.getAllocator();
634 const uint32_t producerIndex = m_device.queueFamilyIndexFrom;
635 const uint32_t consumerIndex = m_device.queueFamilyIndexTo;
636 const std::vector<uint32_t> producerIndices{producerIndex};
637 const std::vector<uint32_t> consumerIndices{consumerIndex};
638 const VkQueue producerQueue = m_device.queueFrom;
639 const VkQueue consumerQueue = m_device.queueTo;
640
641 // stagging buffer for vertices
642 const std::vector<float> positions{+1.f, -1.f, -1.f, -1.f, 0.f, +1.f};
643 const VkBufferCreateInfo posBuffInfo =
644 makeBufferCreateInfo(positions.size() * sizeof(float), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, producerIndices);
645 BufferWithMemory positionsBuffer(vki, vkd, phys, device, allocator, posBuffInfo, MemoryRequirement::HostVisible);
646 std::copy_n(positions.data(), positions.size(), begin<float>(positionsBuffer.getHostPtr()));
647 const VkDescriptorBufferInfo posDsBuffInfo =
648 makeDescriptorBufferInfo(positionsBuffer.get(), 0, positionsBuffer.getSize());
649
650 // vertex buffer
651 VkBufferCreateFlags vertCreateFlags = 0;
652 if (m_config.enableProtected)
653 vertCreateFlags |= VK_BUFFER_CREATE_PROTECTED_BIT;
654 if (m_config.enableSparseBinding)
655 vertCreateFlags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
656 const VkBufferUsageFlags vertBuffUsage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
657 const MemoryRequirement vertMemReqs =
658 (m_config.enableProtected ? MemoryRequirement::Protected : MemoryRequirement::Any);
659 const VkBufferCreateInfo vertBuffInfo =
660 makeBufferCreateInfo(positionsBuffer.getSize(), vertBuffUsage, producerIndices, vertCreateFlags);
661 const BufferWithMemory vertexBuffer(vki, vkd, phys, device, allocator, vertBuffInfo, vertMemReqs, producerQueue);
662 const VkDescriptorBufferInfo vertDsBuffInfo =
663 makeDescriptorBufferInfo(vertexBuffer.get(), 0ull, vertexBuffer.getSize());
664
665 // descriptor set for stagging and vertex buffers
666 Move<VkDescriptorPool> producerDsPool =
667 DescriptorPoolBuilder()
668 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
669 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
670 .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
671 Move<VkDescriptorSetLayout> producerDsLayout =
672 DescriptorSetLayoutBuilder()
673 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
674 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
675 .build(vkd, device);
676 Move<VkDescriptorSet> producerDs = makeDescriptorSet(vkd, device, *producerDsPool, *producerDsLayout);
677 DescriptorSetUpdateBuilder()
678 .writeSingle(*producerDs, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
679 &posDsBuffInfo)
680 .writeSingle(*producerDs, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
681 &vertDsBuffInfo)
682 .update(vkd, device);
683
684 // consumer image
685 const uint32_t clearComp = 97;
686 const VkClearValue clearColor = makeClearValueColorU32(clearComp, clearComp, clearComp, clearComp);
687 VkImageSubresourceRange imageResourceRange{};
688 const VkImageUsageFlags imageUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_STORAGE_BIT);
689 de::MovePtr<ImageWithMemory> image = createImage(imageUsage, consumerIndex, consumerQueue);
690 Move<VkImageView> view = createView(**image, imageResourceRange);
691 Move<VkRenderPass> renderPass = makeRenderPass(vkd, device, m_config.format);
692 Move<VkFramebuffer> framebuffer = makeFramebuffer(vkd, device, *renderPass, *view, m_config.width, m_config.height);
693 const VkDescriptorImageInfo imageDsInfo = makeDescriptorImageInfo(VkSampler(0), *view, VK_IMAGE_LAYOUT_GENERAL);
694 const VkImageMemoryBarrier imageReadyBarrier = makeImageMemoryBarrier(
695 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
696 VK_IMAGE_LAYOUT_GENERAL, **image, imageResourceRange, consumerIndex, consumerIndex);
697 // stagging buffer for result
698 const VkDeviceSize resultBuffSize =
699 (m_config.width * m_config.height * mapVkFormat(m_config.format).getPixelSize());
700 const VkBufferCreateInfo resultBuffInfo =
701 makeBufferCreateInfo(resultBuffSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, consumerIndices);
702 BufferWithMemory resultBuffer(vki, vkd, phys, device, allocator, resultBuffInfo, MemoryRequirement::HostVisible);
703 const VkDescriptorBufferInfo resultDsBuffInfo = makeDescriptorBufferInfo(resultBuffer.get(), 0ull, resultBuffSize);
704 const VkMemoryBarrier resultReadyBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
705
706 // descriptor set for consumer image and result buffer
707 Move<VkDescriptorPool> consumerDsPool =
708 DescriptorPoolBuilder()
709 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
710 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
711 .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
712 Move<VkDescriptorSetLayout> consumerDsLayout =
713 DescriptorSetLayoutBuilder()
714 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_ALL)
715 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
716 .build(vkd, device);
717 Move<VkDescriptorSet> consumerDs = makeDescriptorSet(vkd, device, *consumerDsPool, *consumerDsLayout);
718
719 DescriptorSetUpdateBuilder()
720 .writeSingle(*consumerDs, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
721 &imageDsInfo)
722 .writeSingle(*consumerDs, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
723 &resultDsBuffInfo)
724 .update(vkd, device);
725
726 Move<VkPipelineLayout> producerLayout = createPipelineLayout<>({*producerDsLayout});
727 Move<VkPipeline> producerPipeline = createComputePipeline(*producerLayout, true);
728
729 Move<VkPipelineLayout> consumerLayout = createPipelineLayout<>({*consumerDsLayout});
730 Move<VkPipeline> consumerPipeline = createGraphicsPipeline(*consumerLayout, *renderPass);
731
732 Move<VkPipelineLayout> resultLayout = createPipelineLayout<>({*consumerDsLayout});
733 Move<VkCommandPool> resultPool = makeCommandPool(consumerIndex);
734 Move<VkPipeline> resultPipeline = createComputePipeline(*resultLayout, false);
735
736 Move<VkCommandPool> producerPool = makeCommandPool(producerIndex);
737 Move<VkCommandPool> consumerPool = makeCommandPool(consumerIndex);
738 Move<VkCommandBuffer> producerCmd =
739 allocateCommandBuffer(vkd, device, *producerPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
740 Move<VkCommandBuffer> consumerCmd =
741 allocateCommandBuffer(vkd, device, *consumerPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
742
743 beginCommandBuffer(vkd, *producerCmd);
744 vkd.cmdBindPipeline(*producerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *producerPipeline);
745 vkd.cmdBindDescriptorSets(*producerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *producerLayout, 0, 1, &(*producerDs), 0,
746 nullptr);
747 vkd.cmdDispatch(*producerCmd, uint32_t(positions.size()), 1, 1);
748 endCommandBuffer(vkd, *producerCmd);
749
750 beginCommandBuffer(vkd, *consumerCmd);
751 vkd.cmdBindPipeline(*consumerCmd, VK_PIPELINE_BIND_POINT_GRAPHICS, *consumerPipeline);
752 vkd.cmdBindPipeline(*consumerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *resultPipeline);
753 vkd.cmdBindDescriptorSets(*consumerCmd, VK_PIPELINE_BIND_POINT_GRAPHICS, *consumerLayout, 0, 1, &(*consumerDs), 0,
754 nullptr);
755 vkd.cmdBindDescriptorSets(*consumerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *resultLayout, 0, 1, &(*consumerDs), 0,
756 nullptr);
757 vkd.cmdBindVertexBuffers(*consumerCmd, 0, 1, vertexBuffer.getPtr(), &static_cast<const VkDeviceSize &>(0));
758
759 beginRenderPass(vkd, *consumerCmd, *renderPass, *framebuffer, makeRect2D(m_config.width, m_config.height),
760 clearColor);
761 vkd.cmdDraw(*consumerCmd, uint32_t(positions.size()), 1, 0, 0);
762 endRenderPass(vkd, *consumerCmd);
763 vkd.cmdPipelineBarrier(*consumerCmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
764 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, nullptr, 0u, nullptr, 1u, &imageReadyBarrier);
765
766 vkd.cmdDispatch(*consumerCmd, m_config.width, m_config.height, 1);
767 vkd.cmdPipelineBarrier(*consumerCmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
768 &resultReadyBarrier, 0u, nullptr, 0u, nullptr);
769 endCommandBuffer(vkd, *consumerCmd);
770
771 submitCommands(*producerCmd, *consumerCmd);
772
773 resultBuffer.invalidateAlloc(vkd, device);
774 const tcu::ConstPixelBufferAccess resultBufferAccess(mapVkFormat(m_config.format), m_config.width, m_config.height,
775 1, resultBuffer.getHostPtr());
776 const uint32_t resultValue = resultBufferAccess.getPixelUint(0, 0).x();
777 const uint32_t expectedValue = 1;
778 const bool ok = (resultValue == expectedValue);
779 if (!ok)
780 {
781 m_context.getTestContext().getLog() << tcu::TestLog::Message << "Expected value: " << expectedValue << ", got "
782 << resultValue << tcu::TestLog::EndMessage;
783 }
784
785 return ok ? tcu::TestStatus::pass("") : tcu::TestStatus::fail("");
786 }
787
// Graphics -> compute transition case: a graphics-capable "producer" queue
// first runs a compute dispatch that expands the staged positions into the
// vertex buffer, then rasterizes them into a storage image; a "consumer"
// queue reads that image with a compute dispatch and writes a verification
// value into a host-visible result buffer, which is checked on the host.
tcu::TestStatus GPQInstance<VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT>::iterate(void)
{
    // The custom device with global-priority queues is created up front;
    // report any creation failure before touching its handles.
    if (VK_SUCCESS != m_device.createResult)
    {
        // Being denied the requested global priority is a quality warning,
        // not a test failure.
        if (VK_ERROR_NOT_PERMITTED_KHR == m_device.createResult)
            return tcu::TestStatus(QP_TEST_RESULT_QUALITY_WARNING,
                                   "Custom device creation returned " +
                                       std::string(getResultName(m_device.createResult)));
        throw NotSupportedError(m_device.createResult, getResultName(m_device.createResult), m_device.createExpression,
                                m_device.createFileName, m_device.createFileLine);
    }

    const InstanceInterface &vki = m_context.getInstanceInterface();
    const DeviceInterface &vkd = m_context.getDeviceInterface();
    const VkPhysicalDevice phys = m_context.getPhysicalDevice();
    const VkDevice device = m_device.handle;
    Allocator &allocator = m_device.getAllocator();
    // Producer family is the transition's "from" side, consumer the "to" side.
    const uint32_t producerIndex = m_device.queueFamilyIndexFrom;
    const uint32_t consumerIndex = m_device.queueFamilyIndexTo;
    const std::vector<uint32_t> producerIndices{producerIndex};
    const std::vector<uint32_t> consumerIndices{consumerIndex};
    const VkQueue producerQueue = m_device.queueFrom;

    // staging buffer for vertices
    const std::vector<float> positions{+1.f, -1.f, -1.f, -1.f, 0.f, +1.f};
    const VkBufferCreateInfo positionBuffInfo =
        makeBufferCreateInfo(positions.size() * sizeof(float), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, producerIndices);
    BufferWithMemory positionsBuffer(vki, vkd, phys, device, allocator, positionBuffInfo,
                                     MemoryRequirement::HostVisible);
    std::copy_n(positions.data(), positions.size(), begin<float>(positionsBuffer.getHostPtr()));
    const VkDescriptorBufferInfo posDsBuffInfo =
        makeDescriptorBufferInfo(positionsBuffer.get(), 0, positionsBuffer.getSize());

    // vertex buffer
    VkBufferCreateFlags vertCreateFlags = 0;
    if (m_config.enableProtected)
        vertCreateFlags |= VK_BUFFER_CREATE_PROTECTED_BIT;
    if (m_config.enableSparseBinding)
        vertCreateFlags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
    // Used both as a compute-shader write target and as the draw's vertex input.
    const VkBufferUsageFlags vertBuffUsage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    const MemoryRequirement vertMemReqs =
        (m_config.enableProtected ? MemoryRequirement::Protected : MemoryRequirement::Any);
    const VkBufferCreateInfo vertBuffInfo =
        makeBufferCreateInfo(positionsBuffer.getSize(), vertBuffUsage, producerIndices, vertCreateFlags);
    const BufferWithMemory vertexBuffer(vki, vkd, phys, device, allocator, vertBuffInfo, vertMemReqs, producerQueue);
    const VkDescriptorBufferInfo vertDsBuffInfo =
        makeDescriptorBufferInfo(vertexBuffer.get(), 0ull, vertexBuffer.getSize());
    // Compute write -> vertex read dependency, recorded later between the
    // producer dispatch and the draw (same family on both sides, so no QFOT).
    const VkBufferMemoryBarrier producerReadyBarrier =
        makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, vertexBuffer.get(), 0,
                                vertexBuffer.getSize(), producerIndex, producerIndex);

    // descriptor set for staging and vertex buffers
    Move<VkDescriptorPool> producerDsPool =
        DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
            .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    Move<VkDescriptorSetLayout> producerDsLayout =
        DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
            .build(vkd, device);
    Move<VkDescriptorSet> producerDs = makeDescriptorSet(vkd, device, *producerDsPool, *producerDsLayout);
    // binding 0: staged positions (source), binding 1: vertex buffer (destination)
    DescriptorSetUpdateBuilder()
        .writeSingle(*producerDs, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                     &posDsBuffInfo)
        .writeSingle(*producerDs, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                     &vertDsBuffInfo)
        .update(vkd, device);

    // producer image
    // Clear color with every component set to 97, distinct from the expected
    // result value so stale clear data cannot masquerade as success.
    const uint32_t clearComp = 97;
    const VkClearValue clearColor = makeClearValueColorU32(clearComp, clearComp, clearComp, clearComp);
    // NOTE(review): value-initialized (all-zero) range passed to createView()
    // and the image barrier — presumably the helper normalizes it; confirm.
    VkImageSubresourceRange imageResourceRange{};
    const VkImageUsageFlags imageUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_STORAGE_BIT);
    de::MovePtr<ImageWithMemory> image = createImage(imageUsage, producerIndex, producerQueue);
    Move<VkImageView> view = createView(**image, imageResourceRange);
    Move<VkRenderPass> renderPass = makeRenderPass(vkd, device, m_config.format);
    Move<VkFramebuffer> framebuffer = makeFramebuffer(vkd, device, *renderPass, *view, m_config.width, m_config.height);
    // The consumer samples the image as a storage image in GENERAL layout.
    const VkDescriptorImageInfo imageDsInfo = makeDescriptorImageInfo(VkSampler(0), *view, VK_IMAGE_LAYOUT_GENERAL);
    // Color-attachment write -> shader read, with layout transition
    // COLOR_ATTACHMENT_OPTIMAL -> GENERAL (same family, no ownership transfer).
    const VkImageMemoryBarrier imageReadyBarrier = makeImageMemoryBarrier(
        VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        VK_IMAGE_LAYOUT_GENERAL, **image, imageResourceRange, producerIndex, producerIndex);

    // staging buffer for result
    const VkDeviceSize resultBufferSize =
        (m_config.width * m_config.height * mapVkFormat(m_config.format).getPixelSize());
    const VkBufferCreateInfo resultBufferInfo =
        makeBufferCreateInfo(resultBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, consumerIndices);
    BufferWithMemory resultBuffer(vki, vkd, phys, device, allocator, resultBufferInfo, MemoryRequirement::HostVisible);
    const VkDescriptorBufferInfo resultDsBuffInfo =
        makeDescriptorBufferInfo(resultBuffer.get(), 0ull, resultBufferSize);
    // Compute write -> host read dependency for the readback at the end.
    const VkBufferMemoryBarrier resultReadyBarrier =
        makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, resultBuffer.get(), 0,
                                resultBufferSize, consumerIndex, consumerIndex);

    // descriptor set for consumer image and result buffer
    Move<VkDescriptorPool> consumerDsPool =
        DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
            .build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    Move<VkDescriptorSetLayout> consumerDsLayout =
        DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_ALL)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL)
            .build(vkd, device);
    Move<VkDescriptorSet> consumerDs = makeDescriptorSet(vkd, device, *consumerDsPool, *consumerDsLayout);

    // binding 0: rendered image (input), binding 1: result buffer (output)
    DescriptorSetUpdateBuilder()
        .writeSingle(*consumerDs, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                     &imageDsInfo)
        .writeSingle(*consumerDs, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                     &resultDsBuffInfo)
        .update(vkd, device);

    // Producer needs two pipelines: the vertex-producing compute pass and the
    // graphics pass (whose layout carries no descriptor sets).
    Move<VkPipelineLayout> producer1Layout = createPipelineLayout<>({*producerDsLayout});
    Move<VkPipeline> producer1Pipeline = createComputePipeline(*producer1Layout, true);
    Move<VkPipelineLayout> producer2Layout = createPipelineLayout<>({});
    Move<VkPipeline> producer2Pipeline = createGraphicsPipeline(*producer2Layout, *renderPass);

    // Consumer runs a single compute pipeline that verifies the image.
    Move<VkPipelineLayout> consumerLayout = createPipelineLayout<>({*consumerDsLayout});
    Move<VkPipeline> consumerPipeline = createComputePipeline(*consumerLayout, false);

    // One command pool/buffer per queue family.
    Move<VkCommandPool> producerPool = makeCommandPool(producerIndex);
    Move<VkCommandPool> consumerPool = makeCommandPool(consumerIndex);
    Move<VkCommandBuffer> producerCmd =
        allocateCommandBuffer(vkd, device, *producerPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    Move<VkCommandBuffer> consumerCmd =
        allocateCommandBuffer(vkd, device, *consumerPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

    // Producer: dispatch fills the vertex buffer, barrier makes it visible to
    // vertex input, then the draw renders into the image; the final barrier
    // publishes the image (and transitions it to GENERAL) for the consumer.
    beginCommandBuffer(vkd, *producerCmd);
    vkd.cmdBindPipeline(*producerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *producer1Pipeline);
    vkd.cmdBindPipeline(*producerCmd, VK_PIPELINE_BIND_POINT_GRAPHICS, *producer2Pipeline);
    vkd.cmdBindVertexBuffers(*producerCmd, 0, 1, vertexBuffer.getPtr(), &static_cast<const VkDeviceSize &>(0));
    vkd.cmdBindDescriptorSets(*producerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *producer1Layout, 0, 1, &producerDs.get(),
                              0, nullptr);
    vkd.cmdDispatch(*producerCmd, uint32_t(positions.size()), 1, 1);
    vkd.cmdPipelineBarrier(*producerCmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0,
                           nullptr, 1, &producerReadyBarrier, 0, nullptr);
    beginRenderPass(vkd, *producerCmd, *renderPass, *framebuffer, makeRect2D(m_config.width, m_config.height),
                    clearColor);
    // NOTE(review): draw count is the float count of `positions` (6), not the
    // vec2 vertex count — confirm against the vertex layout used by the shader.
    vkd.cmdDraw(*producerCmd, uint32_t(positions.size()), 1, 0, 0);
    endRenderPass(vkd, *producerCmd);
    vkd.cmdPipelineBarrier(*producerCmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                           VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0u, nullptr, 0u, nullptr, 1u, &imageReadyBarrier);
    endCommandBuffer(vkd, *producerCmd);

    // Consumer: one thread per pixel inspects the image; the barrier flushes
    // the verification result to the host before readback.
    beginCommandBuffer(vkd, *consumerCmd);
    vkd.cmdBindPipeline(*consumerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *consumerPipeline);
    vkd.cmdBindDescriptorSets(*consumerCmd, VK_PIPELINE_BIND_POINT_COMPUTE, *consumerLayout, 0, 1, &consumerDs.get(), 0,
                              nullptr);
    vkd.cmdDispatch(*consumerCmd, m_config.width, m_config.height, 1);
    vkd.cmdPipelineBarrier(*consumerCmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0,
                           nullptr, 1, &resultReadyBarrier, 0, nullptr);
    endCommandBuffer(vkd, *consumerCmd);

    // Submits both buffers on their respective queues (synchronization policy
    // between them depends on m_config.syncType, handled inside the helper).
    submitCommands(*producerCmd, *consumerCmd);

    // Read back pixel (0,0): the consumer shader is expected to have written 1
    // there on success (shader source not visible in this file section).
    resultBuffer.invalidateAlloc(vkd, device);
    const tcu::ConstPixelBufferAccess resultBufferAccess(mapVkFormat(m_config.format), m_config.width, m_config.height,
                                                         1, resultBuffer.getHostPtr());
    const uint32_t resultValue = resultBufferAccess.getPixelUint(0, 0).x();
    const uint32_t expectedValue = 1;
    const bool ok = (resultValue == expectedValue);
    if (!ok)
    {
        m_context.getTestContext().getLog() << tcu::TestLog::Message << "Expected value: " << expectedValue << ", got "
                                            << resultValue << tcu::TestLog::EndMessage;
    }

    return ok ? tcu::TestStatus::pass("") : tcu::TestStatus::fail("");
}
961
962 } // namespace
963
createGlobalPriorityQueueTests(tcu::TestContext & testCtx)964 tcu::TestCaseGroup *createGlobalPriorityQueueTests(tcu::TestContext &testCtx)
965 {
966 typedef std::pair<VkQueueFlagBits, const char *> TransitionItem;
967 TransitionItem const transitions[]{
968 {VK_QUEUE_GRAPHICS_BIT, "graphics"},
969 {VK_QUEUE_COMPUTE_BIT, "compute"},
970 };
971
972 auto mkGroupName = [](const TransitionItem &from, const TransitionItem &to) -> std::string
973 { return std::string("from_") + from.second + std::string("_to_") + to.second; };
974
975 std::pair<VkQueueFlags, const char *> const modifiers[]{
976 {0, "no_modifiers"}, {VK_QUEUE_SPARSE_BINDING_BIT, "sparse"}, {VK_QUEUE_PROTECTED_BIT, "protected"}};
977
978 std::pair<VkQueueGlobalPriorityKHR, const char *> const prios[]{
979 {VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR, "low"},
980 {VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR, "medium"},
981 {VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR, "high"},
982 {VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR, "realtime"},
983 };
984
985 std::pair<SyncType, const char *> const syncs[]{
986 {SyncType::None, "no_sync"},
987 {SyncType::Semaphore, "semaphore"},
988 };
989
990 const uint32_t dim0 = 34;
991 const uint32_t dim1 = 25;
992 bool swap = true;
993
994 auto rootGroup = new tcu::TestCaseGroup(testCtx, "global_priority_transition");
995
996 for (const auto &prio : prios)
997 {
998 auto prioGroup = new tcu::TestCaseGroup(testCtx, prio.second);
999
1000 for (const auto &sync : syncs)
1001 {
1002 auto syncGroup = new tcu::TestCaseGroup(testCtx, sync.second);
1003
1004 for (const auto &mod : modifiers)
1005 {
1006 auto modGroup = new tcu::TestCaseGroup(testCtx, mod.second);
1007
1008 for (const auto &transitionFrom : transitions)
1009 {
1010 for (const auto &transitionTo : transitions)
1011 {
1012 if (transitionFrom != transitionTo)
1013 {
1014 TestConfig cfg{};
1015 cfg.transitionFrom = transitionFrom.first;
1016 cfg.transitionTo = transitionTo.first;
1017 cfg.priorityFrom = prio.first;
1018 cfg.priorityTo = prio.first;
1019 cfg.syncType = sync.first;
1020 cfg.enableProtected = (mod.first & VK_QUEUE_PROTECTED_BIT) != 0;
1021 cfg.enableSparseBinding = (mod.first & VK_QUEUE_SPARSE_BINDING_BIT) != 0;
1022 // Note that format is changing in GPQCase::checkSupport(...)
1023 cfg.format = VK_FORMAT_R32G32B32A32_SFLOAT;
1024 cfg.width = swap ? dim0 : dim1;
1025 cfg.height = swap ? dim1 : dim0;
1026
1027 swap ^= true;
1028
1029 modGroup->addChild(new GPQCase(testCtx, mkGroupName(transitionFrom, transitionTo), cfg));
1030 }
1031 }
1032 }
1033 syncGroup->addChild(modGroup);
1034 }
1035 prioGroup->addChild(syncGroup);
1036 }
1037 rootGroup->addChild(prioGroup);
1038 }
1039
1040 return rootGroup;
1041 }
1042
1043 } // namespace synchronization
1044 } // namespace vkt
1045