1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Synchronization tests for resources shared between instances.
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktSynchronizationCrossInstanceSharingTests.hpp"
25
26 #include "vkDeviceUtil.hpp"
27 #include "vkPlatform.hpp"
28 #include "vkBarrierUtil.hpp"
29 #include "vkCmdUtil.hpp"
30 #include "vktTestCaseUtil.hpp"
31 #include "deSharedPtr.hpp"
32
33 #include "vktSynchronizationUtil.hpp"
34 #include "vktSynchronizationOperation.hpp"
35 #include "vktSynchronizationOperationTestData.hpp"
36 #include "vktSynchronizationOperationResources.hpp"
37 #include "vktExternalMemoryUtil.hpp"
38 #include "vktTestGroupUtil.hpp"
39 #include "vktCustomInstancesDevices.hpp"
40
41 #include "deRandom.hpp"
42
43 #include "tcuResultCollector.hpp"
44 #include "tcuTestLog.hpp"
45 #include "tcuCommandLine.hpp"
46
47 using tcu::TestLog;
48 using namespace vkt::ExternalMemoryUtil;
49
50 namespace vkt
51 {
52 namespace synchronization
53 {
54 namespace
55 {
56 using namespace vk;
57 using de::SharedPtr;
58
struct TestConfig
{
    // Bundles every parameter that identifies one cross-instance sharing test
    // case: synchronization API flavour, the shared resource description, the
    // semaphore kind, the write/read operation pair, the external handle types
    // used to export/import the memory and the semaphore, and whether the
    // memory uses a dedicated allocation.
    TestConfig(SynchronizationType type_, const ResourceDescription &resource_, vk::VkSemaphoreType semaphoreType_,
               OperationName writeOp_, OperationName readOp_, vk::VkExternalMemoryHandleTypeFlagBits memoryHandleType_,
               vk::VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType_, bool dedicated_)
        : type(type_)
        , resource(resource_)
        , semaphoreType(semaphoreType_)
        , writeOp(writeOp_)
        , readOp(readOp_)
        , memoryHandleType(memoryHandleType_)
        , semaphoreHandleType(semaphoreHandleType_)
        , dedicated(dedicated_)
    {
    }

    const SynchronizationType type;
    const ResourceDescription resource;
    const vk::VkSemaphoreType semaphoreType;
    const OperationName writeOp;
    const OperationName readOp;
    const vk::VkExternalMemoryHandleTypeFlagBits memoryHandleType;
    const vk::VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType;
    const bool dedicated;
};
84
85 // A helper class to test for extensions upfront and throw not supported to speed up test runtimes compared to failing only
86 // after creating unnecessary vkInstances. A common example of this is win32 platforms taking a long time to run _fd tests.
class NotSupportedChecker
{
public:
    // Performs all support queries for 'config' up front and throws
    // NotSupportedError (via requireInstanceFunctionality /
    // requireDeviceFunctionality / TCU_THROW) as soon as anything required is
    // missing, before any extra VkInstance is created.
    NotSupportedChecker(const Context &context, TestConfig config, const OperationSupport &writeOp,
                        const OperationSupport &readOp)
        : m_context(context)
    {
        // Check instance support
        m_context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");

        m_context.requireInstanceFunctionality("VK_KHR_external_semaphore_capabilities");
        m_context.requireInstanceFunctionality("VK_KHR_external_memory_capabilities");

        // Check device support
        if (config.dedicated)
            m_context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");

        m_context.requireDeviceFunctionality("VK_KHR_external_semaphore");
        m_context.requireDeviceFunctionality("VK_KHR_external_memory");

        if (config.semaphoreType == vk::VK_SEMAPHORE_TYPE_TIMELINE)
            m_context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");

        if (config.type == SynchronizationType::SYNCHRONIZATION2)
            m_context.requireDeviceFunctionality("VK_KHR_synchronization2");

        // POSIX fd based memory/semaphore handles need the *_fd extensions.
        if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
            config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR ||
            config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR)
        {
            m_context.requireDeviceFunctionality("VK_KHR_external_semaphore_fd");
            m_context.requireDeviceFunctionality("VK_KHR_external_memory_fd");
        }

        if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
        {
            m_context.requireDeviceFunctionality("VK_EXT_external_memory_dma_buf");
        }

        // Win32 handle types (both NT and KMT variants).
        if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
            config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT ||
            config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
            config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT)
        {
            m_context.requireDeviceFunctionality("VK_KHR_external_semaphore_win32");
            m_context.requireDeviceFunctionality("VK_KHR_external_memory_win32");
        }

        // Fuchsia zircon handle types.
        if (config.memoryHandleType == vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA ||
            config.semaphoreHandleType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)
        {
            m_context.requireDeviceFunctionality("VK_FUCHSIA_external_semaphore");
            m_context.requireDeviceFunctionality("VK_FUCHSIA_external_memory");
        }

        TestLog &log = context.getTestContext().getLog();
        const vk::InstanceInterface &vki = context.getInstanceInterface();
        const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();

        // Check resource support
        if (config.resource.type == RESOURCE_TYPE_IMAGE)
        {
            // Query external image format properties for the exact image
            // parameters (format/type/tiling/usage) the test will create.
            const vk::VkPhysicalDeviceExternalImageFormatInfo externalInfo = {
                vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, DE_NULL, config.memoryHandleType};
            const vk::VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {
                vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
                &externalInfo,
                config.resource.imageFormat,
                config.resource.imageType,
                vk::VK_IMAGE_TILING_OPTIMAL,
                readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
                0u};
            vk::VkExternalImageFormatProperties externalProperties = {
                vk::VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, DE_NULL, {0u, 0u, 0u}};
            vk::VkImageFormatProperties2 formatProperties = {vk::VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
                                                             &externalProperties,
                                                             {
                                                                 {0u, 0u, 0u},
                                                                 0u,
                                                                 0u,
                                                                 0u,
                                                                 0u,
                                                             }};

            {
                const vk::VkResult res =
                    vki.getPhysicalDeviceImageFormatProperties2(physicalDevice, &imageFormatInfo, &formatProperties);

                if (res == vk::VK_ERROR_FORMAT_NOT_SUPPORTED)
                    TCU_THROW(NotSupportedError, "Image format not supported");

                VK_CHECK(res); // Check other errors
            }

            log << TestLog::Message << "External image format properties: " << imageFormatInfo << "\n"
                << externalProperties << TestLog::EndMessage;

            // Both export and import must be supported, since the test shares
            // the image between two instances.
            if ((externalProperties.externalMemoryProperties.externalMemoryFeatures &
                 vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT) == 0)
                TCU_THROW(NotSupportedError, "Exporting image resource not supported");

            if ((externalProperties.externalMemoryProperties.externalMemoryFeatures &
                 vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT) == 0)
                TCU_THROW(NotSupportedError, "Importing image resource not supported");

            if (!config.dedicated && (externalProperties.externalMemoryProperties.externalMemoryFeatures &
                                      vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT) != 0)
            {
                TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
            }

            if (!(formatProperties.imageFormatProperties.sampleCounts & config.resource.imageSamples))
            {
                TCU_THROW(NotSupportedError, "Specified sample count for format not supported");
            }
        }
        else
        {
            // Buffer resource: query external buffer properties instead.
            const vk::VkPhysicalDeviceExternalBufferInfo info = {
                vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, DE_NULL,

                0u, readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(), config.memoryHandleType};
            vk::VkExternalBufferProperties properties = {
                vk::VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, DE_NULL, {0u, 0u, 0u}};
            vki.getPhysicalDeviceExternalBufferProperties(physicalDevice, &info, &properties);

            log << TestLog::Message << "External buffer properties: " << info << "\n"
                << properties << TestLog::EndMessage;

            if ((properties.externalMemoryProperties.externalMemoryFeatures &
                 vk::VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT) == 0 ||
                (properties.externalMemoryProperties.externalMemoryFeatures &
                 vk::VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT) == 0)
                TCU_THROW(NotSupportedError, "Exporting and importing memory type not supported");

            if (!config.dedicated && (properties.externalMemoryProperties.externalMemoryFeatures &
                                      vk::VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT) != 0)
            {
                TCU_THROW(NotSupportedError, "Handle requires dedicated allocation, but test uses suballocated memory");
            }
        }

        // Check semaphore support
        {
            const vk::VkSemaphoreTypeCreateInfo semaphoreTypeInfo = {
                vk::VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
                DE_NULL,
                config.semaphoreType,
                0,
            };
            const vk::VkPhysicalDeviceExternalSemaphoreInfo info = {
                vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, &semaphoreTypeInfo,
                config.semaphoreHandleType};

            vk::VkExternalSemaphoreProperties properties = {vk::VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
                                                            DE_NULL, 0u, 0u, 0u};

            vki.getPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &info, &properties);

            log << TestLog::Message << info << "\n" << properties << TestLog::EndMessage;

            // The semaphore must be both exportable and importable for the
            // chosen handle type.
            if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT) == 0 ||
                (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT) == 0)
                TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
        }
    }

private:
    const Context &m_context;
};
257
checkQueueFlags(vk::VkQueueFlags availableFlags,const vk::VkQueueFlags neededFlags)258 bool checkQueueFlags(vk::VkQueueFlags availableFlags, const vk::VkQueueFlags neededFlags)
259 {
260 if ((availableFlags & (vk::VK_QUEUE_GRAPHICS_BIT | vk::VK_QUEUE_COMPUTE_BIT)) != 0)
261 availableFlags |= vk::VK_QUEUE_TRANSFER_BIT;
262
263 return (availableFlags & neededFlags) != 0;
264 }
265
// Minimal vk::Allocation implementation that takes ownership of a raw
// VkDeviceMemory handle (offset 0, no host pointer) and frees it on
// destruction.
class SimpleAllocation : public vk::Allocation
{
public:
    SimpleAllocation(const vk::DeviceInterface &vkd, vk::VkDevice device, const vk::VkDeviceMemory memory);
    ~SimpleAllocation(void);

private:
    const vk::DeviceInterface &m_vkd;
    const vk::VkDevice m_device;
};

SimpleAllocation::SimpleAllocation(const vk::DeviceInterface &vkd, vk::VkDevice device, const vk::VkDeviceMemory memory)
    : Allocation(memory, 0, DE_NULL)
    , m_vkd(vkd)
    , m_device(device)
{
}

SimpleAllocation::~SimpleAllocation(void)
{
    // Owns the memory handle: release it when the allocation goes away.
    m_vkd.freeMemory(m_device, getMemory(), DE_NULL);
}
288
createTestInstance(Context & context)289 CustomInstance createTestInstance(Context &context)
290 {
291 std::vector<std::string> extensions;
292 extensions.push_back("VK_KHR_get_physical_device_properties2");
293 extensions.push_back("VK_KHR_external_semaphore_capabilities");
294 extensions.push_back("VK_KHR_external_memory_capabilities");
295
296 return createCustomInstanceWithExtensions(context, extensions);
297 }
298
// Creates a logical device for 'physicalDevice' with one queue from every
// queue family, enabling each external memory/semaphore related extension the
// implementation reports as supported. Feature structs for timeline semaphores
// and synchronization2 are chained into VkPhysicalDeviceFeatures2 only when
// the matching extension is available. Throws NotSupportedError if device
// creation fails with VK_ERROR_EXTENSION_NOT_PRESENT.
vk::Move<vk::VkDevice> createTestDevice(const Context &context, const vk::PlatformInterface &vkp,
                                        vk::VkInstance instance, const vk::InstanceInterface &vki,
                                        const vk::VkPhysicalDevice physicalDevice)
{
    const bool validationEnabled = context.getTestContext().getCommandLine().isValidationEnabled();
    const float priority = 0.0f;
    const std::vector<vk::VkQueueFamilyProperties> queueFamilyProperties =
        vk::getPhysicalDeviceQueueFamilyProperties(vki, physicalDevice);
    std::vector<uint32_t> queueFamilyIndices(queueFamilyProperties.size(), 0xFFFFFFFFu);

    // Base feature struct seeded from the default context's device features;
    // extension feature structs are appended to its pNext chain below.
    VkPhysicalDeviceFeatures2 createPhysicalFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL,
                                                    context.getDeviceFeatures()};
    VkPhysicalDeviceTimelineSemaphoreFeatures timelineSemaphoreFeatures{
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, true};
    VkPhysicalDeviceSynchronization2FeaturesKHR synchronization2Features{
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, true};
    void **nextPtr = &createPhysicalFeature.pNext;
    std::vector<const char *> extensions;

    if (context.isDeviceFunctionalitySupported("VK_KHR_dedicated_allocation"))
        extensions.push_back("VK_KHR_dedicated_allocation");

    if (context.isDeviceFunctionalitySupported("VK_KHR_get_memory_requirements2"))
        extensions.push_back("VK_KHR_get_memory_requirements2");

    if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore"))
        extensions.push_back("VK_KHR_external_semaphore");
    if (context.isDeviceFunctionalitySupported("VK_KHR_external_memory"))
        extensions.push_back("VK_KHR_external_memory");

    if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
        extensions.push_back("VK_KHR_external_semaphore_fd");
    if (context.isDeviceFunctionalitySupported("VK_KHR_external_memory_fd"))
        extensions.push_back("VK_KHR_external_memory_fd");

    if (context.isDeviceFunctionalitySupported("VK_EXT_external_memory_dma_buf"))
        extensions.push_back("VK_EXT_external_memory_dma_buf");

    if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
        extensions.push_back("VK_KHR_external_semaphore_win32");
    if (context.isDeviceFunctionalitySupported("VK_KHR_external_memory_win32"))
        extensions.push_back("VK_KHR_external_memory_win32");

    if (context.isDeviceFunctionalitySupported("VK_FUCHSIA_external_semaphore"))
        extensions.push_back("VK_FUCHSIA_external_semaphore");
    if (context.isDeviceFunctionalitySupported("VK_FUCHSIA_external_memory"))
        extensions.push_back("VK_FUCHSIA_external_memory");

    if (context.isDeviceFunctionalitySupported("VK_KHR_timeline_semaphore"))
    {
        extensions.push_back("VK_KHR_timeline_semaphore");
        addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
    }
    if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
    {
        extensions.push_back("VK_KHR_synchronization2");
        addToChainVulkanStructure(&nextPtr, synchronization2Features);
    }

    try
    {
        // One queue (priority 0.0) from every available queue family.
        std::vector<vk::VkDeviceQueueCreateInfo> queues;

        for (size_t ndx = 0; ndx < queueFamilyProperties.size(); ndx++)
        {
            const vk::VkDeviceQueueCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
                                                            DE_NULL,
                                                            0u,

                                                            (uint32_t)ndx,
                                                            1u,
                                                            &priority};

            queues.push_back(createInfo);
        }

        const vk::VkDeviceCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
                                                   &createPhysicalFeature,
                                                   0u,

                                                   (uint32_t)queues.size(),
                                                   &queues[0],

                                                   0u,
                                                   DE_NULL,

                                                   (uint32_t)extensions.size(),
                                                   extensions.empty() ? DE_NULL : &extensions[0],
                                                   0u};

        return vkt::createCustomDevice(validationEnabled, vkp, instance, vki, physicalDevice, &createInfo);
    }
    catch (const vk::Error &error)
    {
        // Map a missing-extension failure to a test "not supported" result;
        // re-throw anything else as a real error.
        if (error.getError() == vk::VK_ERROR_EXTENSION_NOT_PRESENT)
            TCU_THROW(NotSupportedError, "Required extensions not supported");
        else
            throw;
    }
}
399
400 // Class to wrap a singleton instance and device
class InstanceAndDevice
{
    // Private: instances are created lazily only through getInstanceA/B().
    InstanceAndDevice(Context &context)
        : m_instance(createTestInstance(context))
        , m_vki(m_instance.getDriver())
        , m_physicalDevice(vk::chooseDevice(m_vki, m_instance, context.getTestContext().getCommandLine()))
        , m_logicalDevice(
              createTestDevice(context, context.getPlatformInterface(), m_instance, m_vki, m_physicalDevice))
    {
    }

public:
    // Lazily creates and returns the first shared instance ("A").
    static vk::VkInstance getInstanceA(Context &context)
    {
        if (!m_instanceA)
            m_instanceA = SharedPtr<InstanceAndDevice>(new InstanceAndDevice(context));

        return m_instanceA->m_instance;
    }
    // Lazily creates and returns the second shared instance ("B").
    static vk::VkInstance getInstanceB(Context &context)
    {
        if (!m_instanceB)
            m_instanceB = SharedPtr<InstanceAndDevice>(new InstanceAndDevice(context));

        return m_instanceB->m_instance;
    }
    // Accessors below assert that the corresponding instance has already been
    // created via getInstanceA/B().
    static const vk::InstanceDriver &getDriverA()
    {
        DE_ASSERT(m_instanceA);
        return m_instanceA->m_instance.getDriver();
    }
    static const vk::InstanceDriver &getDriverB()
    {
        DE_ASSERT(m_instanceB);
        return m_instanceB->m_instance.getDriver();
    }
    static vk::VkPhysicalDevice getPhysicalDeviceA()
    {
        DE_ASSERT(m_instanceA);
        return m_instanceA->m_physicalDevice;
    }
    static vk::VkPhysicalDevice getPhysicalDeviceB()
    {
        DE_ASSERT(m_instanceB);
        return m_instanceB->m_physicalDevice;
    }
    static const Unique<vk::VkDevice> &getDeviceA()
    {
        DE_ASSERT(m_instanceA);
        return m_instanceA->m_logicalDevice;
    }
    static const Unique<vk::VkDevice> &getDeviceB()
    {
        DE_ASSERT(m_instanceB);
        return m_instanceB->m_logicalDevice;
    }
    // Forward any pending debug/validation messages from the instances.
    static void collectMessagesA()
    {
        DE_ASSERT(m_instanceA);
        m_instanceA->m_instance.collectMessages();
    }
    static void collectMessagesB()
    {
        DE_ASSERT(m_instanceB);
        m_instanceB->m_instance.collectMessages();
    }
    // Releases both singletons (device before instance via member order).
    static void destroy()
    {
        m_instanceA.clear();
        m_instanceB.clear();
    }

private:
    CustomInstance m_instance;
    const vk::InstanceDriver &m_vki;
    const vk::VkPhysicalDevice m_physicalDevice;
    const Unique<vk::VkDevice> m_logicalDevice;

    static SharedPtr<InstanceAndDevice> m_instanceA;
    static SharedPtr<InstanceAndDevice> m_instanceB;
};
SharedPtr<InstanceAndDevice> InstanceAndDevice::m_instanceA;
SharedPtr<InstanceAndDevice> InstanceAndDevice::m_instanceB;
484
// Fetches queue 0 of the given queue family from 'device'.
vk::VkQueue getQueue(const vk::DeviceInterface &vkd, const vk::VkDevice device, uint32_t familyIndex)
{
    vk::VkQueue result;
    vkd.getDeviceQueue(device, familyIndex, 0u, &result);
    return result;
}
493
// Creates a command pool (no flags) for the given queue family.
vk::Move<vk::VkCommandPool> createCommandPool(const vk::DeviceInterface &vkd, vk::VkDevice device,
                                              uint32_t queueFamilyIndex)
{
    const vk::VkCommandPoolCreateInfo poolInfo = {
        vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        DE_NULL,                                        // pNext
        0u,                                             // flags
        queueFamilyIndex,                               // queueFamilyIndex
    };

    return vk::createCommandPool(vkd, device, &poolInfo);
}
503
// Allocates a single primary command buffer from 'commandPool'.
vk::Move<vk::VkCommandBuffer> createCommandBuffer(const vk::DeviceInterface &vkd, vk::VkDevice device,
                                                  vk::VkCommandPool commandPool)
{
    const vk::VkCommandBufferAllocateInfo allocInfo = {
        vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
        DE_NULL,                                            // pNext
        commandPool,                                        // commandPool
        vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,                // level
        1u,                                                 // commandBufferCount
    };

    return vk::allocateCommandBuffer(vkd, device, &allocInfo);
}
514
// Returns the memory requirements for 'image'. When
// VK_KHR_get_memory_requirements2 is available the extended query is used so
// dedicated-allocation requirements can be checked: if the implementation
// requires a dedicated allocation but the test asked for suballocation,
// NotSupportedError is thrown.
vk::VkMemoryRequirements getMemoryRequirements(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkImage image,
                                               bool dedicated, bool getMemReq2Supported)
{
    vk::VkMemoryRequirements memoryRequirements = {
        0u,
        0u,
        0u,
    };

    if (getMemReq2Supported)
    {
        const vk::VkImageMemoryRequirementsInfo2 requirementInfo = {
            vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, DE_NULL, image};
        vk::VkMemoryDedicatedRequirements dedicatedRequirements = {vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
                                                                   DE_NULL, VK_FALSE, VK_FALSE};
        vk::VkMemoryRequirements2 requirements = {vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
                                                  &dedicatedRequirements,
                                                  {
                                                      0u,
                                                      0u,
                                                      0u,
                                                  }};
        vkd.getImageMemoryRequirements2(device, &requirementInfo, &requirements);

        if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
            TCU_THROW(NotSupportedError, "Memory requires dedicated allocation");

        memoryRequirements = requirements.memoryRequirements;
    }
    else
    {
        // Fall back to the core query when the extension is unavailable.
        vkd.getImageMemoryRequirements(device, image, &memoryRequirements);
    }

    return memoryRequirements;
}
551
// Buffer counterpart of getMemoryRequirements() above: uses the extended
// query when supported so a required-dedicated-allocation condition can be
// detected, and throws NotSupportedError if the test wants suballocated
// memory but the implementation demands a dedicated allocation.
vk::VkMemoryRequirements getMemoryRequirements(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkBuffer buffer,
                                               bool dedicated, bool getMemReq2Supported)
{
    vk::VkMemoryRequirements memoryRequirements = {
        0u,
        0u,
        0u,
    };

    if (getMemReq2Supported)
    {
        const vk::VkBufferMemoryRequirementsInfo2 requirementInfo = {
            vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, DE_NULL, buffer};
        vk::VkMemoryDedicatedRequirements dedicatedRequirements = {vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
                                                                   DE_NULL, VK_FALSE, VK_FALSE};
        vk::VkMemoryRequirements2 requirements = {vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
                                                  &dedicatedRequirements,
                                                  {
                                                      0u,
                                                      0u,
                                                      0u,
                                                  }};
        vkd.getBufferMemoryRequirements2(device, &requirementInfo, &requirements);

        if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
            TCU_THROW(NotSupportedError, "Memory requires dedicated allocation");

        memoryRequirements = requirements.memoryRequirements;
    }
    else
    {
        // Fall back to the core query when the extension is unavailable.
        vkd.getBufferMemoryRequirements(device, buffer, &memoryRequirements);
    }

    return memoryRequirements;
}
588
// Creates an exportable image: a single-mip, single-layer, optimally tiled
// image whose usage covers both the write operation's output and the read
// operation's input, with VkExternalMemoryImageCreateInfo chained in so its
// memory can be exported with 'externalType'.
Move<VkImage> createImage(const vk::DeviceInterface &vkd, vk::VkDevice device, const ResourceDescription &resourceDesc,
                          const vk::VkExtent3D extent, const std::vector<uint32_t> &queueFamilyIndices,
                          const OperationSupport &readOp, const OperationSupport &writeOp,
                          vk::VkExternalMemoryHandleTypeFlagBits externalType)
{
    const vk::VkExternalMemoryImageCreateInfo externalInfo = {vk::VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
                                                              DE_NULL,
                                                              (vk::VkExternalMemoryHandleTypeFlags)externalType};
    const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                              &externalInfo,
                                              0u,

                                              resourceDesc.imageType,
                                              resourceDesc.imageFormat,
                                              extent,
                                              1u,
                                              1u,
                                              resourceDesc.imageSamples,
                                              vk::VK_IMAGE_TILING_OPTIMAL,
                                              readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
                                              vk::VK_SHARING_MODE_EXCLUSIVE,

                                              (uint32_t)queueFamilyIndices.size(),
                                              &queueFamilyIndices[0],
                                              vk::VK_IMAGE_LAYOUT_UNDEFINED};

    return vk::createImage(vkd, device, &createInfo);
}
617
// Creates an exportable buffer of 'size' bytes with the given usage;
// VkExternalMemoryBufferCreateInfo is chained in so the backing memory can be
// exported with 'memoryHandleType'.
Move<VkBuffer> createBuffer(const vk::DeviceInterface &vkd, vk::VkDevice device, const vk::VkDeviceSize size,
                            const vk::VkBufferUsageFlags usage,
                            const vk::VkExternalMemoryHandleTypeFlagBits memoryHandleType,
                            const std::vector<uint32_t> &queueFamilyIndices)
{
    const vk::VkExternalMemoryBufferCreateInfo externalInfo = {vk::VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
                                                               DE_NULL,
                                                               (vk::VkExternalMemoryHandleTypeFlags)memoryHandleType};
    const vk::VkBufferCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                                               &externalInfo,
                                               0u,

                                               size,
                                               usage,
                                               vk::VK_SHARING_MODE_EXCLUSIVE,
                                               (uint32_t)queueFamilyIndices.size(),
                                               &queueFamilyIndices[0]};
    return vk::createBuffer(vkd, device, &createInfo);
}
637
// Imports external memory from 'nativeHandle' (dedicated or regular import as
// requested), binds it to 'buffer', and returns a SimpleAllocation that owns
// the imported VkDeviceMemory and frees it on destruction.
de::MovePtr<vk::Allocation> importAndBindMemory(const vk::DeviceInterface &vkd, vk::VkDevice device,
                                                vk::VkBuffer buffer, NativeHandle &nativeHandle,
                                                vk::VkExternalMemoryHandleTypeFlagBits externalType,
                                                uint32_t exportedMemoryTypeIndex, bool dedicated)
{
    const vk::VkMemoryRequirements requirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
    vk::Move<vk::VkDeviceMemory> memory =
        dedicated ? importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex,
                                          nativeHandle) :
                    importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);

    VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));

    // disown() transfers ownership of the handle to the SimpleAllocation.
    return de::MovePtr<vk::Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
}
653
// Image counterpart of importAndBindMemory() above: imports the external
// memory (dedicated or regular), binds it to 'image', and wraps the handle in
// a SimpleAllocation that frees it on destruction.
de::MovePtr<vk::Allocation> importAndBindMemory(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkImage image,
                                                NativeHandle &nativeHandle,
                                                vk::VkExternalMemoryHandleTypeFlagBits externalType,
                                                uint32_t exportedMemoryTypeIndex, bool dedicated)
{
    const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, image);
    vk::Move<vk::VkDeviceMemory> memory =
        dedicated ? importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex,
                                          nativeHandle) :
                    importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
    VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));

    // disown() transfers ownership of the handle to the SimpleAllocation.
    return de::MovePtr<vk::Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
}
668
// Recreates the shared resource on the importing device: builds an image or
// buffer with the same parameters the exporter used (external-memory create
// info chained in), imports and binds the memory from 'nativeHandle', and
// returns a Resource wrapping the object plus its allocation.
de::MovePtr<Resource> importResource(const vk::DeviceInterface &vkd, vk::VkDevice device,
                                     const ResourceDescription &resourceDesc,
                                     const std::vector<uint32_t> &queueFamilyIndices, const OperationSupport &readOp,
                                     const OperationSupport &writeOp, NativeHandle &nativeHandle,
                                     vk::VkExternalMemoryHandleTypeFlagBits externalType,
                                     uint32_t exportedMemoryTypeIndex, bool dedicated)
{
    if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
    {
        // Clamp y/z to at least 1 so 1D/2D sizes yield a valid 3D extent.
        const vk::VkExtent3D extent = {(uint32_t)resourceDesc.size.x(), de::max(1u, (uint32_t)resourceDesc.size.y()),
                                       de::max(1u, (uint32_t)resourceDesc.size.z())};
        const vk::VkImageSubresourceRange subresourceRange = {resourceDesc.imageAspect, 0u, 1u, 0u, 1u};
        const vk::VkImageSubresourceLayers subresourceLayers = {resourceDesc.imageAspect, 0u, 0u, 1u};
        const vk::VkExternalMemoryImageCreateInfo externalInfo = {
            vk::VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, DE_NULL,
            (vk::VkExternalMemoryHandleTypeFlags)externalType};
        const vk::VkImageTiling tiling = vk::VK_IMAGE_TILING_OPTIMAL;
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  &externalInfo,
                                                  0u,

                                                  resourceDesc.imageType,
                                                  resourceDesc.imageFormat,
                                                  extent,
                                                  1u,
                                                  1u,
                                                  resourceDesc.imageSamples,
                                                  tiling,
                                                  readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilyIndices.size(),
                                                  &queueFamilyIndices[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        vk::Move<vk::VkImage> image = vk::createImage(vkd, device, &createInfo);
        de::MovePtr<vk::Allocation> allocation =
            importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex, dedicated);

        return de::MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType,
                                                  resourceDesc.imageFormat, subresourceRange, subresourceLayers,
                                                  tiling));
    }
    else
    {
        // Buffer resource: size.x() carries the byte size.
        const vk::VkDeviceSize offset = 0u;
        const vk::VkDeviceSize size = static_cast<vk::VkDeviceSize>(resourceDesc.size.x());
        const vk::VkBufferUsageFlags usage = readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags();
        const vk::VkExternalMemoryBufferCreateInfo externalInfo = {
            vk::VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, DE_NULL,
            (vk::VkExternalMemoryHandleTypeFlags)externalType};
        const vk::VkBufferCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                                                   &externalInfo,
                                                   0u,

                                                   size,
                                                   usage,
                                                   vk::VK_SHARING_MODE_EXCLUSIVE,
                                                   (uint32_t)queueFamilyIndices.size(),
                                                   &queueFamilyIndices[0]};
        vk::Move<vk::VkBuffer> buffer = vk::createBuffer(vkd, device, &createInfo);
        de::MovePtr<vk::Allocation> allocation =
            importAndBindMemory(vkd, device, *buffer, nativeHandle, externalType, exportedMemoryTypeIndex, dedicated);

        return de::MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
    }
}
736
// Records the release half of a queue-family ownership transfer after the
// write operation: a barrier from the writer's stage/access (and, for images,
// the writer's layout to the reader's layout) whose ownership moves from
// 'writeQueueFamilyIndex' to VK_QUEUE_FAMILY_EXTERNAL.
void recordWriteBarrier(SynchronizationWrapperPtr synchronizationWrapper, vk::VkCommandBuffer commandBuffer,
                        const Resource &resource, const SyncInfo &writeSync, uint32_t writeQueueFamilyIndex,
                        const SyncInfo &readSync)
{
    const vk::VkPipelineStageFlags2KHR srcStageMask = writeSync.stageMask;
    const vk::VkAccessFlags2KHR srcAccessMask = writeSync.accessMask;

    const vk::VkPipelineStageFlags2KHR dstStageMask = readSync.stageMask;
    const vk::VkAccessFlags2KHR dstAccessMask = readSync.accessMask;

    if (resource.getType() == RESOURCE_TYPE_IMAGE)
    {
        const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
            srcStageMask,                         // VkPipelineStageFlags2KHR srcStageMask
            srcAccessMask,                        // VkAccessFlags2KHR srcAccessMask
            dstStageMask,                         // VkPipelineStageFlags2KHR dstStageMask
            dstAccessMask,                        // VkAccessFlags2KHR dstAccessMask
            writeSync.imageLayout,                // VkImageLayout oldLayout
            readSync.imageLayout,                 // VkImageLayout newLayout
            resource.getImage().handle,           // VkImage image
            resource.getImage().subresourceRange, // VkImageSubresourceRange subresourceRange
            writeQueueFamilyIndex,                // uint32_t srcQueueFamilyIndex
            VK_QUEUE_FAMILY_EXTERNAL              // uint32_t dstQueueFamilyIndex
        );
        VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
        synchronizationWrapper->cmdPipelineBarrier(commandBuffer, &dependencyInfo);
    }
    else
    {
        // Buffers: release the whole buffer range (offset 0, VK_WHOLE_SIZE).
        const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 =
            makeBufferMemoryBarrier2(srcStageMask,               // VkPipelineStageFlags2KHR srcStageMask
                                     srcAccessMask,              // VkAccessFlags2KHR srcAccessMask
                                     dstStageMask,               // VkPipelineStageFlags2KHR dstStageMask
                                     dstAccessMask,              // VkAccessFlags2KHR dstAccessMask
                                     resource.getBuffer().handle, // VkBuffer buffer
                                     0,                          // VkDeviceSize offset
                                     VK_WHOLE_SIZE,              // VkDeviceSize size
                                     writeQueueFamilyIndex,      // uint32_t srcQueueFamilyIndex
                                     VK_QUEUE_FAMILY_EXTERNAL    // uint32_t dstQueueFamilyIndex
            );
        VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
        synchronizationWrapper->cmdPipelineBarrier(commandBuffer, &dependencyInfo);
    }
}
781
recordReadBarrier(SynchronizationWrapperPtr synchronizationWrapper,vk::VkCommandBuffer commandBuffer,const Resource & resource,const SyncInfo & writeSync,const SyncInfo & readSync,uint32_t readQueueFamilyIndex)782 void recordReadBarrier(SynchronizationWrapperPtr synchronizationWrapper, vk::VkCommandBuffer commandBuffer,
783 const Resource &resource, const SyncInfo &writeSync, const SyncInfo &readSync,
784 uint32_t readQueueFamilyIndex)
785 {
786 const vk::VkPipelineStageFlags2KHR srcStageMask = readSync.stageMask;
787 const vk::VkAccessFlags2KHR srcAccessMask = readSync.accessMask;
788
789 const vk::VkPipelineStageFlags2KHR dstStageMask = readSync.stageMask;
790 const vk::VkAccessFlags2KHR dstAccessMask = readSync.accessMask;
791
792 if (resource.getType() == RESOURCE_TYPE_IMAGE)
793 {
794 const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
795 srcStageMask, // VkPipelineStageFlags2KHR srcStageMask
796 srcAccessMask, // VkAccessFlags2KHR srcAccessMask
797 dstStageMask, // VkPipelineStageFlags2KHR dstStageMask
798 dstAccessMask, // VkAccessFlags2KHR dstAccessMask
799 writeSync.imageLayout, // VkImageLayout oldLayout
800 readSync.imageLayout, // VkImageLayout newLayout
801 resource.getImage().handle, // VkImage image
802 resource.getImage().subresourceRange, // VkImageSubresourceRange subresourceRange
803 VK_QUEUE_FAMILY_EXTERNAL, // uint32_t srcQueueFamilyIndex
804 readQueueFamilyIndex // uint32_t dstQueueFamilyIndex
805 );
806 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
807 synchronizationWrapper->cmdPipelineBarrier(commandBuffer, &dependencyInfo);
808 }
809 else
810 {
811 const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
812 srcStageMask, // VkPipelineStageFlags2KHR srcStageMask
813 srcAccessMask, // VkAccessFlags2KHR srcAccessMask
814 dstStageMask, // VkPipelineStageFlags2KHR dstStageMask
815 dstAccessMask, // VkAccessFlags2KHR dstAccessMask
816 resource.getBuffer().handle, // VkBuffer buffer
817 0, // VkDeviceSize offset
818 VK_WHOLE_SIZE, // VkDeviceSize size
819 VK_QUEUE_FAMILY_EXTERNAL, // uint32_t srcQueueFamilyIndex
820 readQueueFamilyIndex // uint32_t dstQueueFamilyIndex
821 );
822 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
823 synchronizationWrapper->cmdPipelineBarrier(commandBuffer, &dependencyInfo);
824 }
825 }
826
getFamilyIndices(const std::vector<vk::VkQueueFamilyProperties> & properties)827 std::vector<uint32_t> getFamilyIndices(const std::vector<vk::VkQueueFamilyProperties> &properties)
828 {
829 std::vector<uint32_t> indices(properties.size(), 0);
830
831 for (uint32_t ndx = 0; ndx < properties.size(); ndx++)
832 indices[ndx] = ndx;
833
834 return indices;
835 }
836
// Test instance that shares a memory-backed resource and a semaphore between
// two separate Vulkan instances: device A performs the write operation and
// signals an exported semaphore; device B imports the memory and semaphore,
// waits, performs the read operation, and the results are compared.
// NOTE: member declaration order below deliberately matches the constructor's
// initializer list and must not be reordered.
class SharingTestInstance : public TestInstance
{
public:
    SharingTestInstance(Context &context, TestConfig config);

    // Runs one (write-queue-family, read-queue-family) pairing per call and
    // returns incomplete() until all combinations have been exercised.
    virtual tcu::TestStatus iterate(void);

private:
    const TestConfig m_config;

    // Operation support objects for the producing (write) and consuming (read) sides.
    const de::UniquePtr<OperationSupport> m_supportWriteOp;
    const de::UniquePtr<OperationSupport> m_supportReadOp;
    const NotSupportedChecker m_notSupportedChecker; // Must declare before VkInstance to effectively reduce runtimes!

    // Whether VK_KHR_get_memory_requirements2 is available (affects dedicated-allocation queries).
    const bool m_getMemReq2Supported;

    // Instance/device "A": the exporting (writing) side.
    const vk::VkInstance m_instanceA;
    const vk::InstanceDriver &m_vkiA;
    const vk::VkPhysicalDevice m_physicalDeviceA;
    const std::vector<vk::VkQueueFamilyProperties> m_queueFamiliesA;
    const std::vector<uint32_t> m_queueFamilyIndicesA;
    const vk::Unique<vk::VkDevice> &m_deviceA;
    const vk::DeviceDriver m_vkdA;

    // Instance/device "B": the importing (reading) side.
    const vk::VkInstance m_instanceB;
    const vk::InstanceDriver &m_vkiB;
    const vk::VkPhysicalDevice m_physicalDeviceB;
    const std::vector<vk::VkQueueFamilyProperties> m_queueFamiliesB;
    const std::vector<uint32_t> m_queueFamilyIndicesB;
    const vk::Unique<vk::VkDevice> &m_deviceB;
    const vk::DeviceDriver m_vkdB;

    // External handle types used for the semaphore and memory exports.
    const vk::VkExternalSemaphoreHandleTypeFlagBits m_semaphoreHandleType;
    const vk::VkExternalMemoryHandleTypeFlagBits m_memoryHandleType;

    // \todo Should this be moved to the group same way as in the other tests?
    PipelineCacheData m_pipelineCacheData;
    tcu::ResultCollector m_resultCollector;
    // Current queue-family indices being iterated on devices A and B.
    size_t m_queueANdx;
    size_t m_queueBNdx;
};
878
// Builds per-test state: the operation support objects, the shared
// instance/device singletons for sides A and B, their queue family lists, and
// the driver tables.  The initializer list order must match the member
// declaration order in the class; in particular m_notSupportedChecker runs
// before any instance-related members so unsupported configurations bail out
// early and cheaply.
SharingTestInstance::SharingTestInstance(Context &context, TestConfig config)
    : TestInstance(context)
    , m_config(config)
    , m_supportWriteOp(makeOperationSupport(config.writeOp, config.resource))
    , m_supportReadOp(makeOperationSupport(config.readOp, config.resource))
    , m_notSupportedChecker(context, m_config, *m_supportWriteOp, *m_supportReadOp)
    , m_getMemReq2Supported(context.isDeviceFunctionalitySupported("VK_KHR_get_memory_requirements2"))

    , m_instanceA(InstanceAndDevice::getInstanceA(context))
    , m_vkiA(InstanceAndDevice::getDriverA())
    , m_physicalDeviceA(InstanceAndDevice::getPhysicalDeviceA())
    , m_queueFamiliesA(vk::getPhysicalDeviceQueueFamilyProperties(m_vkiA, m_physicalDeviceA))
    , m_queueFamilyIndicesA(getFamilyIndices(m_queueFamiliesA))
    , m_deviceA(InstanceAndDevice::getDeviceA())
    , m_vkdA(context.getPlatformInterface(), m_instanceA, *m_deviceA, context.getUsedApiVersion(),
             context.getTestContext().getCommandLine())

    , m_instanceB(InstanceAndDevice::getInstanceB(context))
    , m_vkiB(InstanceAndDevice::getDriverB())
    , m_physicalDeviceB(InstanceAndDevice::getPhysicalDeviceB())
    , m_queueFamiliesB(vk::getPhysicalDeviceQueueFamilyProperties(m_vkiB, m_physicalDeviceB))
    , m_queueFamilyIndicesB(getFamilyIndices(m_queueFamiliesB))
    , m_deviceB(InstanceAndDevice::getDeviceB())
    , m_vkdB(context.getPlatformInterface(), m_instanceB, *m_deviceB, context.getUsedApiVersion(),
             context.getTestContext().getCommandLine())

    , m_semaphoreHandleType(m_config.semaphoreHandleType)
    , m_memoryHandleType(m_config.memoryHandleType)

    , m_resultCollector(context.getTestContext().getLog())
    // Start iterating from the first queue family on both devices.
    , m_queueANdx(0)
    , m_queueBNdx(0)
{
}
913
iterate(void)914 tcu::TestStatus SharingTestInstance::iterate(void)
915 {
916 TestLog &log(m_context.getTestContext().getLog());
917 bool isTimelineSemaphore(m_config.semaphoreType == vk::VK_SEMAPHORE_TYPE_TIMELINE_KHR);
918 try
919 {
920 const uint32_t queueFamilyA = (uint32_t)m_queueANdx;
921 const uint32_t queueFamilyB = (uint32_t)m_queueBNdx;
922
923 const tcu::ScopedLogSection queuePairSection(
924 log, "WriteQueue-" + de::toString(queueFamilyA) + "-ReadQueue-" + de::toString(queueFamilyB),
925 "WriteQueue-" + de::toString(queueFamilyA) + "-ReadQueue-" + de::toString(queueFamilyB));
926
927 const vk::Unique<vk::VkSemaphore> semaphoreA(
928 createExportableSemaphoreType(m_vkdA, *m_deviceA, m_config.semaphoreType, m_semaphoreHandleType));
929 const vk::Unique<vk::VkSemaphore> semaphoreB(createSemaphoreType(m_vkdB, *m_deviceB, m_config.semaphoreType));
930
931 const ResourceDescription &resourceDesc = m_config.resource;
932 de::MovePtr<Resource> resourceA;
933
934 uint32_t exportedMemoryTypeIndex = ~0U;
935 if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
936 {
937 const vk::VkExtent3D extent = {(uint32_t)resourceDesc.size.x(),
938 de::max(1u, (uint32_t)resourceDesc.size.y()),
939 de::max(1u, (uint32_t)resourceDesc.size.z())};
940 const vk::VkImageSubresourceRange subresourceRange = {resourceDesc.imageAspect, 0u, 1u, 0u, 1u};
941 const vk::VkImageSubresourceLayers subresourceLayers = {resourceDesc.imageAspect, 0u, 0u, 1u};
942
943 if ((resourceDesc.imageSamples != VK_SAMPLE_COUNT_1_BIT) &&
944 ((m_supportReadOp->getInResourceUsageFlags() | m_supportWriteOp->getOutResourceUsageFlags()) &
945 VK_IMAGE_USAGE_STORAGE_BIT) &&
946 !m_context.getDeviceFeatures().shaderStorageImageMultisample)
947 TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
948
949 vk::Move<vk::VkImage> image = createImage(m_vkdA, *m_deviceA, resourceDesc, extent, m_queueFamilyIndicesA,
950 *m_supportReadOp, *m_supportWriteOp, m_memoryHandleType);
951 const vk::VkImageTiling tiling = vk::VK_IMAGE_TILING_OPTIMAL;
952 const vk::VkMemoryRequirements requirements =
953 getMemoryRequirements(m_vkdA, *m_deviceA, *image, m_config.dedicated, m_getMemReq2Supported);
954 exportedMemoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
955 vk::Move<vk::VkDeviceMemory> memory =
956 allocateExportableMemory(m_vkdA, *m_deviceA, requirements.size, exportedMemoryTypeIndex,
957 m_memoryHandleType, m_config.dedicated ? *image : (vk::VkImage)0);
958
959 VK_CHECK(m_vkdA.bindImageMemory(*m_deviceA, *image, *memory, 0u));
960
961 de::MovePtr<vk::Allocation> allocation =
962 de::MovePtr<vk::Allocation>(new SimpleAllocation(m_vkdA, *m_deviceA, memory.disown()));
963 resourceA = de::MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType,
964 resourceDesc.imageFormat, subresourceRange,
965 subresourceLayers, tiling));
966 }
967 else
968 {
969 const vk::VkDeviceSize offset = 0u;
970 const vk::VkDeviceSize size = static_cast<vk::VkDeviceSize>(resourceDesc.size.x());
971 const vk::VkBufferUsageFlags usage =
972 m_supportReadOp->getInResourceUsageFlags() | m_supportWriteOp->getOutResourceUsageFlags();
973 vk::Move<vk::VkBuffer> buffer =
974 createBuffer(m_vkdA, *m_deviceA, size, usage, m_memoryHandleType, m_queueFamilyIndicesA);
975 const vk::VkMemoryRequirements requirements =
976 getMemoryRequirements(m_vkdA, *m_deviceA, *buffer, m_config.dedicated, m_getMemReq2Supported);
977 exportedMemoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
978 vk::Move<vk::VkDeviceMemory> memory =
979 allocateExportableMemory(m_vkdA, *m_deviceA, requirements.size, exportedMemoryTypeIndex,
980 m_memoryHandleType, m_config.dedicated ? *buffer : (vk::VkBuffer)0);
981
982 VK_CHECK(m_vkdA.bindBufferMemory(*m_deviceA, *buffer, *memory, 0u));
983
984 de::MovePtr<vk::Allocation> allocation =
985 de::MovePtr<vk::Allocation>(new SimpleAllocation(m_vkdA, *m_deviceA, memory.disown()));
986 resourceA = de::MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
987 }
988
989 NativeHandle nativeMemoryHandle;
990 getMemoryNative(m_vkdA, *m_deviceA, resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
991
992 const de::UniquePtr<Resource> resourceB(
993 importResource(m_vkdB, *m_deviceB, resourceDesc, m_queueFamilyIndicesB, *m_supportReadOp, *m_supportWriteOp,
994 nativeMemoryHandle, m_memoryHandleType, exportedMemoryTypeIndex, m_config.dedicated));
995 const vk::VkQueue queueA(getQueue(m_vkdA, *m_deviceA, queueFamilyA));
996 const vk::Unique<vk::VkCommandPool> commandPoolA(createCommandPool(m_vkdA, *m_deviceA, queueFamilyA));
997 const vk::Unique<vk::VkCommandBuffer> commandBufferA(createCommandBuffer(m_vkdA, *m_deviceA, *commandPoolA));
998 vk::SimpleAllocator allocatorA(m_vkdA, *m_deviceA,
999 vk::getPhysicalDeviceMemoryProperties(m_vkiA, m_physicalDeviceA));
1000 OperationContext operationContextA(m_context, m_config.type, m_vkiA, m_vkdA, m_physicalDeviceA, *m_deviceA,
1001 allocatorA, m_context.getBinaryCollection(), m_pipelineCacheData);
1002
1003 if (!checkQueueFlags(m_queueFamiliesA[m_queueANdx].queueFlags,
1004 m_supportWriteOp->getQueueFlags(operationContextA)))
1005 TCU_THROW(NotSupportedError, "Operation not supported by the source queue");
1006
1007 const vk::VkQueue queueB(getQueue(m_vkdB, *m_deviceB, queueFamilyB));
1008 const vk::Unique<vk::VkCommandPool> commandPoolB(createCommandPool(m_vkdB, *m_deviceB, queueFamilyB));
1009 const vk::Unique<vk::VkCommandBuffer> commandBufferB(createCommandBuffer(m_vkdB, *m_deviceB, *commandPoolB));
1010 vk::SimpleAllocator allocatorB(m_vkdB, *m_deviceB,
1011 vk::getPhysicalDeviceMemoryProperties(m_vkiB, m_physicalDeviceB));
1012 OperationContext operationContextB(m_context, m_config.type, m_vkiB, m_vkdB, m_physicalDeviceB, *m_deviceB,
1013 allocatorB, m_context.getBinaryCollection(), m_pipelineCacheData);
1014
1015 if (!checkQueueFlags(m_queueFamiliesB[m_queueBNdx].queueFlags,
1016 m_supportReadOp->getQueueFlags(operationContextB)))
1017 TCU_THROW(NotSupportedError, "Operation not supported by the destination queue");
1018
1019 const de::UniquePtr<Operation> writeOp(m_supportWriteOp->build(operationContextA, *resourceA));
1020 const de::UniquePtr<Operation> readOp(m_supportReadOp->build(operationContextB, *resourceB));
1021
1022 const SyncInfo writeSync = writeOp->getOutSyncInfo();
1023 const SyncInfo readSync = readOp->getInSyncInfo();
1024 SynchronizationWrapperPtr synchronizationWrapperA =
1025 getSynchronizationWrapper(m_config.type, m_vkdA, isTimelineSemaphore);
1026 SynchronizationWrapperPtr synchronizationWrapperB =
1027 getSynchronizationWrapper(m_config.type, m_vkdB, isTimelineSemaphore);
1028
1029 const vk::VkPipelineStageFlags2 graphicsFlags =
1030 vk::VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT | vk::VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT |
1031 vk::VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT | vk::VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT |
1032 vk::VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
1033
1034 if ((writeSync.stageMask & graphicsFlags) != 0 || (readSync.stageMask) != 0)
1035 {
1036 if (!checkQueueFlags(m_queueFamiliesA[m_queueANdx].queueFlags, VK_QUEUE_GRAPHICS_BIT))
1037 TCU_THROW(NotSupportedError, "Operation not supported by the source queue");
1038
1039 if (!checkQueueFlags(m_queueFamiliesB[m_queueBNdx].queueFlags, VK_QUEUE_GRAPHICS_BIT))
1040 TCU_THROW(NotSupportedError, "Operation not supported by the destination queue");
1041 }
1042
1043 beginCommandBuffer(m_vkdA, *commandBufferA);
1044 writeOp->recordCommands(*commandBufferA);
1045 recordWriteBarrier(synchronizationWrapperA, *commandBufferA, *resourceA, writeSync, queueFamilyA, readSync);
1046 endCommandBuffer(m_vkdA, *commandBufferA);
1047
1048 beginCommandBuffer(m_vkdB, *commandBufferB);
1049 recordReadBarrier(synchronizationWrapperB, *commandBufferB, *resourceB, writeSync, readSync, queueFamilyB);
1050 readOp->recordCommands(*commandBufferB);
1051 endCommandBuffer(m_vkdB, *commandBufferB);
1052
1053 {
1054 de::Random rng(1234);
1055 vk::VkCommandBufferSubmitInfoKHR cmdBufferInfos = makeCommonCommandBufferSubmitInfo(*commandBufferA);
1056 VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(
1057 *semaphoreA, rng.getInt(1, deIntMaxValue32(32)), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
1058
1059 synchronizationWrapperA->addSubmitInfo(0u, DE_NULL, 1u, &cmdBufferInfos, 1u, &signalSemaphoreSubmitInfo,
1060 false, isTimelineSemaphore);
1061
1062 VK_CHECK(synchronizationWrapperA->queueSubmit(queueA, DE_NULL));
1063
1064 {
1065 NativeHandle nativeSemaphoreHandle;
1066 const vk::VkSemaphoreImportFlags flags =
1067 isSupportedPermanence(m_semaphoreHandleType, PERMANENCE_PERMANENT) ?
1068 (vk::VkSemaphoreImportFlagBits)0u :
1069 vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
1070
1071 getSemaphoreNative(m_vkdA, *m_deviceA, *semaphoreA, m_semaphoreHandleType, nativeSemaphoreHandle);
1072 importSemaphore(m_vkdB, *m_deviceB, *semaphoreB, m_semaphoreHandleType, nativeSemaphoreHandle, flags);
1073 }
1074 }
1075 {
1076 vk::VkCommandBufferSubmitInfoKHR cmdBufferInfos = makeCommonCommandBufferSubmitInfo(*commandBufferB);
1077 VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo =
1078 makeCommonSemaphoreSubmitInfo(*semaphoreB, 1u, readSync.stageMask);
1079
1080 synchronizationWrapperB->addSubmitInfo(1u, &waitSemaphoreSubmitInfo, 1u, &cmdBufferInfos, 0u, DE_NULL,
1081 isTimelineSemaphore);
1082
1083 VK_CHECK(synchronizationWrapperB->queueSubmit(queueB, DE_NULL));
1084 }
1085
1086 VK_CHECK(m_vkdA.queueWaitIdle(queueA));
1087 VK_CHECK(m_vkdB.queueWaitIdle(queueB));
1088
1089 if (m_config.semaphoreType == vk::VK_SEMAPHORE_TYPE_TIMELINE)
1090 {
1091 uint64_t valueA;
1092 uint64_t valueB;
1093
1094 VK_CHECK(m_vkdA.getSemaphoreCounterValue(*m_deviceA, *semaphoreA, &valueA));
1095 VK_CHECK(m_vkdB.getSemaphoreCounterValue(*m_deviceB, *semaphoreB, &valueB));
1096
1097 if (valueA != valueB)
1098 return tcu::TestStatus::fail("Inconsistent values between shared semaphores");
1099 }
1100
1101 {
1102 const Data expected = writeOp->getData();
1103 const Data actual = readOp->getData();
1104
1105 DE_ASSERT(expected.size == actual.size);
1106
1107 if (!isIndirectBuffer(m_config.resource.type))
1108 {
1109 if (0 != deMemCmp(expected.data, actual.data, expected.size))
1110 {
1111 const size_t maxBytesLogged = 256;
1112 std::ostringstream expectedData;
1113 std::ostringstream actualData;
1114 size_t byteNdx = 0;
1115
1116 // Find first byte difference
1117 for (; actual.data[byteNdx] == expected.data[byteNdx]; byteNdx++)
1118 {
1119 // Nothing
1120 }
1121
1122 log << TestLog::Message << "First different byte at offset: " << byteNdx << TestLog::EndMessage;
1123
1124 // Log 8 previous bytes before the first incorrect byte
1125 if (byteNdx > 8)
1126 {
1127 expectedData << "... ";
1128 actualData << "... ";
1129
1130 byteNdx -= 8;
1131 }
1132 else
1133 byteNdx = 0;
1134
1135 for (size_t i = 0; i < maxBytesLogged && byteNdx < expected.size; i++, byteNdx++)
1136 {
1137 expectedData << (i > 0 ? ", " : "") << (uint32_t)expected.data[byteNdx];
1138 actualData << (i > 0 ? ", " : "") << (uint32_t)actual.data[byteNdx];
1139 }
1140
1141 if (expected.size > byteNdx)
1142 {
1143 expectedData << "...";
1144 actualData << "...";
1145 }
1146
1147 log << TestLog::Message << "Expected data: (" << expectedData.str() << ")" << TestLog::EndMessage;
1148 log << TestLog::Message << "Actual data: (" << actualData.str() << ")" << TestLog::EndMessage;
1149
1150 m_resultCollector.fail("Memory contents don't match");
1151 }
1152 }
1153 else
1154 {
1155 const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
1156 const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];
1157
1158 if (actualValue < expectedValue)
1159 {
1160 log << TestLog::Message << "Expected counter value: (" << expectedValue << ")"
1161 << TestLog::EndMessage;
1162 log << TestLog::Message << "Actual counter value: (" << actualValue << ")" << TestLog::EndMessage;
1163
1164 m_resultCollector.fail("Counter value is smaller than expected");
1165 }
1166 }
1167 }
1168 }
1169 catch (const tcu::NotSupportedError &error)
1170 {
1171 log << TestLog::Message << "Not supported: " << error.getMessage() << TestLog::EndMessage;
1172 }
1173 catch (const tcu::TestError &error)
1174 {
1175 m_resultCollector.fail(std::string("Exception: ") + error.getMessage());
1176 }
1177
1178 // Collect possible validation errors.
1179 InstanceAndDevice::collectMessagesA();
1180 InstanceAndDevice::collectMessagesB();
1181
1182 // Move to next queue
1183 {
1184 m_queueBNdx++;
1185
1186 if (m_queueBNdx >= m_queueFamiliesB.size())
1187 {
1188 m_queueANdx++;
1189
1190 if (m_queueANdx >= m_queueFamiliesA.size())
1191 {
1192 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
1193 }
1194 else
1195 {
1196 m_queueBNdx = 0;
1197
1198 return tcu::TestStatus::incomplete();
1199 }
1200 }
1201 else
1202 return tcu::TestStatus::incomplete();
1203 }
1204 }
1205
1206 struct Progs
1207 {
initvkt::synchronization::__anon41c17ebb0111::Progs1208 void init(vk::SourceCollections &dst, TestConfig config) const
1209 {
1210 const de::UniquePtr<OperationSupport> readOp(makeOperationSupport(config.readOp, config.resource));
1211 const de::UniquePtr<OperationSupport> writeOp(makeOperationSupport(config.writeOp, config.resource));
1212
1213 readOp->initPrograms(dst);
1214 writeOp->initPrograms(dst);
1215 }
1216 };
1217
1218 } // namespace
1219
// Populates 'group' with every cross-instance sharing test case for the given
// synchronization type. The hierarchy is:
//   dedicated|suballocated / <writeOp>_<readOp> / <resource><semaphore><handle>
// NOTE: loop nesting order determines the registration order of the generated
// test cases and must be preserved.
static void createTests(tcu::TestCaseGroup *group, SynchronizationType type)
{
    tcu::TestContext &testCtx = group->getTestContext();
    // External memory/semaphore handle-type pairings, each with the test-name
    // suffix that identifies it.
    const struct
    {
        vk::VkExternalMemoryHandleTypeFlagBits memoryType;
        vk::VkExternalSemaphoreHandleTypeFlagBits semaphoreType;
        const char *nameSuffix;
    } cases[] = {
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, "_fd"},
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
         "_fence_fd"},
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
         vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, "_win32_kmt"},
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
         "_win32"},
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
         "_dma_buf"},
        {vk::VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
         vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA, "_zircon_handle"},
    };

    // Name suffixes indexed by vk::VkSemaphoreType (binary = 0, timeline = 1).
    const std::string semaphoreNames[vk::VK_SEMAPHORE_TYPE_LAST] = {
        "_binary_semaphore",
        "_timeline_semaphore",
    };

    for (size_t dedicatedNdx = 0; dedicatedNdx < 2; dedicatedNdx++)
    {
        const bool dedicated(dedicatedNdx == 1);
        de::MovePtr<tcu::TestCaseGroup> dedicatedGroup(
            new tcu::TestCaseGroup(testCtx, dedicated ? "dedicated" : "suballocated"));

        for (size_t writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
            for (size_t readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
            {
                const OperationName writeOp = s_writeOps[writeOpNdx];
                const OperationName readOp = s_readOps[readOpNdx];
                const std::string opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
                // Tracks whether any case was added, so empty groups are discarded.
                bool empty = true;

                de::MovePtr<tcu::TestCaseGroup> opGroup(new tcu::TestCaseGroup(testCtx, opGroupName.c_str()));

                for (size_t resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
                {
                    const ResourceDescription &resource = s_resources[resourceNdx];

                    for (size_t caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(cases); caseNdx++)
                    {
                        for (int semaphoreType = 0; semaphoreType < vk::VK_SEMAPHORE_TYPE_LAST; semaphoreType++)
                        {
                            // Sync-fd semaphores cannot carry a timeline payload; skip that combination.
                            if (cases[caseNdx].semaphoreType == vk::VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT &&
                                (vk::VkSemaphoreType)semaphoreType == vk::VK_SEMAPHORE_TYPE_TIMELINE)
                            {
                                continue;
                            }

                            if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
                            {
                                const TestConfig config(type, resource, (vk::VkSemaphoreType)semaphoreType, writeOp,
                                                        readOp, cases[caseNdx].memoryType, cases[caseNdx].semaphoreType,
                                                        dedicated);
                                std::string name = getResourceName(resource) + semaphoreNames[semaphoreType] +
                                                   cases[caseNdx].nameSuffix;

                                opGroup->addChild(new InstanceFactory1<SharingTestInstance, TestConfig, Progs>(
                                    testCtx, name, Progs(), config));
                                empty = false;
                            }
                        }
                    }
                }

                if (!empty)
                    dedicatedGroup->addChild(opGroup.release());
            }

        group->addChild(dedicatedGroup.release());
    }
}
1300
cleanupGroup(tcu::TestCaseGroup * group,SynchronizationType type)1301 static void cleanupGroup(tcu::TestCaseGroup *group, SynchronizationType type)
1302 {
1303 DE_UNREF(group);
1304 DE_UNREF(type);
1305 // Destroy singleton object
1306 InstanceAndDevice::destroy();
1307 }
1308
createCrossInstanceSharingTest(tcu::TestContext & testCtx,SynchronizationType type)1309 tcu::TestCaseGroup *createCrossInstanceSharingTest(tcu::TestContext &testCtx, SynchronizationType type)
1310 {
1311 return createTestGroup(testCtx, "cross_instance", createTests, type, cleanupGroup);
1312 }
1313
1314 } // namespace synchronization
1315 } // namespace vkt
1316