1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Synchronization primitive tests with single queue
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktSynchronizationOperationSingleQueueTests.hpp"
25 #include "vkDefs.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vktTestGroupUtil.hpp"
29 #include "vkRef.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkMemUtil.hpp"
32 #include "vkBarrierUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkCmdUtil.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
37 #include "deRandom.hpp"
38 #include "deUniquePtr.hpp"
39 #include "tcuTestLog.hpp"
40 #include "vktSynchronizationUtil.hpp"
41 #include "vktSynchronizationOperation.hpp"
42 #include "vktSynchronizationOperationTestData.hpp"
43 #include "vktSynchronizationOperationResources.hpp"
44
45 namespace vkt
46 {
47 namespace synchronization
48 {
49 namespace
50 {
51 using namespace vk;
52 using tcu::TestLog;
53
// Common state for all single-queue write->read synchronization tests: builds
// the write and read operations up front and allocates the single resource
// (image or buffer) that the write produces and the read consumes.
class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                     const OperationSupport &writeOp, const OperationSupport &readOp,
                     PipelineCacheData &pipelineCacheData)
        : TestInstance(context)
        , m_type(type)
        , m_opContext(context, type, pipelineCacheData)
        // The resource must advertise the usage flags required by both operations.
        , m_resource(new Resource(m_opContext, resourceDesc,
                                  writeOp.getOutResourceUsageFlags() | readOp.getInResourceUsageFlags()))
        , m_writeOp(writeOp.build(m_opContext, *m_resource))
        , m_readOp(readOp.build(m_opContext, *m_resource))
    {
    }

protected:
    SynchronizationType m_type; // selects legacy synchronization vs. VK_KHR_synchronization2 code paths (see SyncTestCase::checkSupport)
    OperationContext m_opContext;
    const de::UniquePtr<Resource> m_resource;  // written by m_writeOp, read by m_readOp
    const de::UniquePtr<Operation> m_writeOp;
    const de::UniquePtr<Operation> m_readOp;
};
77
// Synchronizes the write and the read with a VkEvent: the event is set after
// the write and waited on before the read, both with the same dependency info,
// all recorded into a single command buffer and submitted once.
class EventTestInstance : public BaseTestInstance
{
public:
    EventTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                      const OperationSupport &writeOp, const OperationSupport &readOp,
                      PipelineCacheData &pipelineCacheData)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
    {
    }

    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        const VkQueue queue = m_context.getUniversalQueue();
        const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
        const Unique<VkCommandPool> cmdPool(
            createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
        const Unique<VkEvent> event(createEvent(vk, device));
        // Stage/access/layout requirements published by the two operations;
        // these become the src (write) and dst (read) halves of the dependency.
        const SyncInfo writeSync = m_writeOp->getOutSyncInfo();
        const SyncInfo readSync = m_readOp->getInSyncInfo();
        // Wrapper abstracts legacy vs. synchronization2 entry points.
        SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, false);

        beginCommandBuffer(vk, *cmdBuffer);

        m_writeOp->recordCommands(*cmdBuffer);

        if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
        {
            // Image resources need a layout transition in addition to the
            // execution/memory dependency.
            const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
                writeSync.stageMask,                    // VkPipelineStageFlags2KHR srcStageMask
                writeSync.accessMask,                   // VkAccessFlags2KHR srcAccessMask
                readSync.stageMask,                     // VkPipelineStageFlags2KHR dstStageMask
                readSync.accessMask,                    // VkAccessFlags2KHR dstAccessMask
                writeSync.imageLayout,                  // VkImageLayout oldLayout
                readSync.imageLayout,                   // VkImageLayout newLayout
                m_resource->getImage().handle,          // VkImage image
                m_resource->getImage().subresourceRange // VkImageSubresourceRange subresourceRange
            );
            // NOTE(review): the trailing 'true' presumably marks the dependency
            // info as event-related -- confirm against makeCommonDependencyInfo.
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2, true);
            // Same dependency info is used for both set and wait, per sync2 events.
            synchronizationWrapper->cmdSetEvent(*cmdBuffer, *event, &dependencyInfo);
            synchronizationWrapper->cmdWaitEvents(*cmdBuffer, 1u, &event.get(), &dependencyInfo);
        }
        else
        {
            const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 =
                makeBufferMemoryBarrier2(writeSync.stageMask,            // VkPipelineStageFlags2KHR srcStageMask
                                         writeSync.accessMask,           // VkAccessFlags2KHR srcAccessMask
                                         readSync.stageMask,             // VkPipelineStageFlags2KHR dstStageMask
                                         readSync.accessMask,            // VkAccessFlags2KHR dstAccessMask
                                         m_resource->getBuffer().handle, // VkBuffer buffer
                                         m_resource->getBuffer().offset, // VkDeviceSize offset
                                         m_resource->getBuffer().size    // VkDeviceSize size
                );
            VkDependencyInfoKHR dependencyInfo =
                makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2, DE_NULL, true);
            synchronizationWrapper->cmdSetEvent(*cmdBuffer, *event, &dependencyInfo);
            synchronizationWrapper->cmdWaitEvents(*cmdBuffer, 1u, &event.get(), &dependencyInfo);
        }

        m_readOp->recordCommands(*cmdBuffer);

        endCommandBuffer(vk, *cmdBuffer);
        submitCommandsAndWait(synchronizationWrapper, vk, device, queue, *cmdBuffer);

        // Compare what the write produced against what the read observed.
        {
            const Data expected = m_writeOp->getData();
            const Data actual = m_readOp->getData();

            if (isIndirectBuffer(m_resource->getType()))
            {
                // Indirect-buffer operations only guarantee a monotonically
                // increasing counter, so compare the first uint32 with >=.
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        return tcu::TestStatus::pass("OK");
    }
};
166
// Synchronizes the write and the read with a single pipeline barrier recorded
// between them in one command buffer, submitted once.
class BarrierTestInstance : public BaseTestInstance
{
public:
    BarrierTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                        const OperationSupport &writeOp, const OperationSupport &readOp,
                        PipelineCacheData &pipelineCacheData)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
    {
    }

    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        const VkQueue queue = m_context.getUniversalQueue();
        const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
        const Unique<VkCommandPool> cmdPool(
            createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Move<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
        // src half of the dependency comes from the write op, dst half from the read op.
        const SyncInfo writeSync = m_writeOp->getOutSyncInfo();
        const SyncInfo readSync = m_readOp->getInSyncInfo();
        // Wrapper abstracts legacy vs. synchronization2 entry points.
        SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, false);

        beginCommandBuffer(vk, *cmdBuffer);

        m_writeOp->recordCommands(*cmdBuffer);

        if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
        {
            // Image barrier also performs the layout transition between the
            // write-time and read-time layouts.
            const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
                writeSync.stageMask,                    // VkPipelineStageFlags2KHR srcStageMask
                writeSync.accessMask,                   // VkAccessFlags2KHR srcAccessMask
                readSync.stageMask,                     // VkPipelineStageFlags2KHR dstStageMask
                readSync.accessMask,                    // VkAccessFlags2KHR dstAccessMask
                writeSync.imageLayout,                  // VkImageLayout oldLayout
                readSync.imageLayout,                   // VkImageLayout newLayout
                m_resource->getImage().handle,          // VkImage image
                m_resource->getImage().subresourceRange // VkImageSubresourceRange subresourceRange
            );
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(*cmdBuffer, &dependencyInfo);
        }
        else
        {
            const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 =
                makeBufferMemoryBarrier2(writeSync.stageMask,            // VkPipelineStageFlags2KHR srcStageMask
                                         writeSync.accessMask,           // VkAccessFlags2KHR srcAccessMask
                                         readSync.stageMask,             // VkPipelineStageFlags2KHR dstStageMask
                                         readSync.accessMask,            // VkAccessFlags2KHR dstAccessMask
                                         m_resource->getBuffer().handle, // VkBuffer buffer
                                         m_resource->getBuffer().offset, // VkDeviceSize offset
                                         m_resource->getBuffer().size    // VkDeviceSize size
                );
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(*cmdBuffer, &dependencyInfo);
        }

        m_readOp->recordCommands(*cmdBuffer);

        endCommandBuffer(vk, *cmdBuffer);

        submitCommandsAndWait(synchronizationWrapper, vk, device, queue, *cmdBuffer);

        // Compare what the write produced against what the read observed.
        {
            const Data expected = m_writeOp->getData();
            const Data actual = m_readOp->getData();

            if (isIndirectBuffer(m_resource->getType()))
            {
                // Indirect-buffer operations only guarantee a monotonically
                // increasing counter, so compare the first uint32 with >=.
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        return tcu::TestStatus::pass("OK");
    }
};
252
// Records the write and the read into separate command buffers submitted as
// two batches in one vkQueueSubmit* call, ordered by a binary semaphore that
// the write batch signals and the read batch waits on.
class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
    BinarySemaphoreTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                                const OperationSupport &writeOp, const OperationSupport &readOp,
                                PipelineCacheData &pipelineCacheData)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
    {
    }

    tcu::TestStatus iterate(void)
    {
        // Indices into the per-submission arrays below.
        enum
        {
            WRITE = 0,
            READ,
            COUNT
        };
        const DeviceInterface &vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        const VkQueue queue = m_context.getUniversalQueue();
        const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
        const Unique<VkSemaphore> semaphore(createSemaphore(vk, device));
        const Unique<VkCommandPool> cmdPool(
            createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Move<VkCommandBuffer> ptrCmdBuffer[COUNT] = {makeCommandBuffer(vk, device, *cmdPool),
                                                           makeCommandBuffer(vk, device, *cmdPool)};
        VkCommandBuffer cmdBuffers[COUNT] = {*ptrCmdBuffer[WRITE], *ptrCmdBuffer[READ]};
        // One wrapper configured for two submit batches.
        SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, false, 2u);
        const SyncInfo writeSync = m_writeOp->getOutSyncInfo();
        const SyncInfo readSync = m_readOp->getInSyncInfo();
        // Binary semaphore: value 0 is unused; signal at bottom-of-pipe after
        // the write, wait at top-of-pipe before the read.
        VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo =
            makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
        VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo =
            makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
        VkCommandBufferSubmitInfoKHR commandBufferSubmitInfo[]{makeCommonCommandBufferSubmitInfo(cmdBuffers[WRITE]),
                                                               makeCommonCommandBufferSubmitInfo(cmdBuffers[READ])};

        // Batch 0: write, signals the semaphore. Batch 1: read, waits on it.
        synchronizationWrapper->addSubmitInfo(0u, DE_NULL, 1u, &commandBufferSubmitInfo[WRITE], 1u,
                                              &signalSemaphoreSubmitInfo);
        synchronizationWrapper->addSubmitInfo(1u, &waitSemaphoreSubmitInfo, 1u, &commandBufferSubmitInfo[READ], 0u,
                                              DE_NULL);

        beginCommandBuffer(vk, cmdBuffers[WRITE]);

        m_writeOp->recordCommands(cmdBuffers[WRITE]);

        // The semaphore provides execution ordering; a barrier in the write
        // command buffer still supplies the memory dependency (and, for
        // images, the layout transition).
        if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
        {
            const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
                writeSync.stageMask,                    // VkPipelineStageFlags2KHR srcStageMask
                writeSync.accessMask,                   // VkAccessFlags2KHR srcAccessMask
                readSync.stageMask,                     // VkPipelineStageFlags2KHR dstStageMask
                readSync.accessMask,                    // VkAccessFlags2KHR dstAccessMask
                writeSync.imageLayout,                  // VkImageLayout oldLayout
                readSync.imageLayout,                   // VkImageLayout newLayout
                m_resource->getImage().handle,          // VkImage image
                m_resource->getImage().subresourceRange // VkImageSubresourceRange subresourceRange
            );
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
        }
        else
        {
            // Whole buffer is covered here (0 / VK_WHOLE_SIZE), unlike the
            // barrier/event variants which use the resource's own range.
            const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 =
                makeBufferMemoryBarrier2(writeSync.stageMask,            // VkPipelineStageFlags2KHR srcStageMask
                                         writeSync.accessMask,           // VkAccessFlags2KHR srcAccessMask
                                         readSync.stageMask,             // VkPipelineStageFlags2KHR dstStageMask
                                         readSync.accessMask,            // VkAccessFlags2KHR dstAccessMask
                                         m_resource->getBuffer().handle, // VkBuffer buffer
                                         0,                              // VkDeviceSize offset
                                         VK_WHOLE_SIZE                   // VkDeviceSize size
                );
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
        }

        endCommandBuffer(vk, cmdBuffers[WRITE]);

        beginCommandBuffer(vk, cmdBuffers[READ]);

        m_readOp->recordCommands(cmdBuffers[READ]);

        endCommandBuffer(vk, cmdBuffers[READ]);

        VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
        VK_CHECK(vk.queueWaitIdle(queue));

        // Compare what the write produced against what the read observed.
        {
            const Data expected = m_writeOp->getData();
            const Data actual = m_readOp->getData();

            if (isIndirectBuffer(m_resource->getType()))
            {
                // Indirect-buffer operations only guarantee a monotonically
                // increasing counter, so compare the first uint32 with >=.
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        return tcu::TestStatus::pass("OK");
    }
};
363
364 template <typename T>
makeVkSharedPtr(Move<T> move)365 inline de::SharedPtr<Move<T>> makeVkSharedPtr(Move<T> move)
366 {
367 return de::SharedPtr<Move<T>>(new Move<T>(move));
368 }
369
// Builds a chain write -> [supported copy ops] -> read, one submission per
// operation, each ordered after the previous one by a single timeline
// semaphore whose value strictly increases per link.
class TimelineSemaphoreTestInstance : public TestInstance
{
public:
    TimelineSemaphoreTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                                  const de::SharedPtr<OperationSupport> &writeOp,
                                  const de::SharedPtr<OperationSupport> &readOp, PipelineCacheData &pipelineCacheData)
        : TestInstance(context)
        , m_type(type)
        , m_opContext(context, type, pipelineCacheData)
    {

        // Create a chain operation copying data from one resource to
        // another, each of the operation will be executing with a
        // dependency on the previous using timeline points.
        m_opSupports.push_back(writeOp);
        for (uint32_t copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
        {
            if (isResourceSupported(s_copyOps[copyOpNdx], resourceDesc))
                m_opSupports.push_back(de::SharedPtr<OperationSupport>(
                    makeOperationSupport(s_copyOps[copyOpNdx], resourceDesc, false).release()));
        }
        m_opSupports.push_back(readOp);

        // One intermediate resource per adjacent pair of operations; its usage
        // flags must satisfy both the producer and the consumer.
        for (uint32_t opNdx = 0; opNdx < (m_opSupports.size() - 1); opNdx++)
        {
            uint32_t usage =
                m_opSupports[opNdx]->getOutResourceUsageFlags() | m_opSupports[opNdx + 1]->getInResourceUsageFlags();

            m_resources.push_back(de::SharedPtr<Resource>(new Resource(m_opContext, resourceDesc, usage)));
        }

        // First op only writes resource 0, middle ops copy resource N-1 to N,
        // last op only reads the final resource.
        m_ops.push_back(de::SharedPtr<Operation>(m_opSupports[0]->build(m_opContext, *m_resources[0]).release()));
        for (uint32_t opNdx = 1; opNdx < (m_opSupports.size() - 1); opNdx++)
            m_ops.push_back(de::SharedPtr<Operation>(
                m_opSupports[opNdx]->build(m_opContext, *m_resources[opNdx - 1], *m_resources[opNdx]).release()));
        m_ops.push_back(de::SharedPtr<Operation>(
            m_opSupports[m_opSupports.size() - 1]->build(m_opContext, *m_resources.back()).release()));
    }

    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        const VkQueue queue = m_context.getUniversalQueue();
        const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
        // Fixed seed keeps the chosen timeline increments reproducible.
        de::Random rng(1234);
        const Unique<VkSemaphore> semaphore(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
        const Unique<VkCommandPool> cmdPool(
            createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        std::vector<de::SharedPtr<Move<VkCommandBuffer>>> ptrCmdBuffers;
        // One command buffer and one wait/signal info per operation; the
        // semaphore values are filled in below.
        std::vector<VkCommandBufferSubmitInfoKHR> cmdBuffersInfo(m_ops.size(), makeCommonCommandBufferSubmitInfo(0u));
        std::vector<VkSemaphoreSubmitInfoKHR> waitSemaphoreSubmitInfos(
            m_ops.size(), makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
        std::vector<VkSemaphoreSubmitInfoKHR> signalSemaphoreSubmitInfos(
            m_ops.size(), makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
        SynchronizationWrapperPtr synchronizationWrapper =
            getSynchronizationWrapper(m_type, vk, true, static_cast<uint32_t>(m_ops.size()));
        uint64_t increment = 0u;

        for (uint32_t opNdx = 0; opNdx < m_ops.size(); opNdx++)
        {
            ptrCmdBuffers.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPool)));
            cmdBuffersInfo[opNdx].commandBuffer = **(ptrCmdBuffers.back());
        }

        for (uint32_t opNdx = 0; opNdx < m_ops.size(); opNdx++)
        {
            // Random but strictly increasing timeline values (+1 guarantees a
            // non-zero step even when getUint8() returns 0).
            increment += (1 + rng.getUint8());
            signalSemaphoreSubmitInfos[opNdx].value = increment;
            waitSemaphoreSubmitInfos[opNdx].value = increment;

            // Batch 0 waits on nothing; batch N waits on the value batch N-1
            // signals. NOTE(review): the two trailing bools presumably flag
            // timeline wait/signal usage -- confirm against addSubmitInfo.
            synchronizationWrapper->addSubmitInfo(
                opNdx == 0 ? 0u : 1u, opNdx == 0 ? DE_NULL : &waitSemaphoreSubmitInfos[opNdx - 1], 1u,
                &cmdBuffersInfo[opNdx], 1u, &signalSemaphoreSubmitInfos[opNdx], opNdx == 0 ? false : true, true);

            VkCommandBuffer cmdBuffer = cmdBuffersInfo[opNdx].commandBuffer;
            beginCommandBuffer(vk, cmdBuffer);

            // Every op after the first needs a memory dependency (and, for
            // images, a layout transition) on the previous op's output.
            if (opNdx > 0)
            {
                const SyncInfo lastSync = m_ops[opNdx - 1]->getOutSyncInfo();
                const SyncInfo currentSync = m_ops[opNdx]->getInSyncInfo();
                const Resource &resource = *m_resources[opNdx - 1].get();

                if (resource.getType() == RESOURCE_TYPE_IMAGE)
                {
                    DE_ASSERT(lastSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
                    DE_ASSERT(currentSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);

                    const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
                        lastSync.stageMask,                  // VkPipelineStageFlags2KHR srcStageMask
                        lastSync.accessMask,                 // VkAccessFlags2KHR srcAccessMask
                        currentSync.stageMask,               // VkPipelineStageFlags2KHR dstStageMask
                        currentSync.accessMask,              // VkAccessFlags2KHR dstAccessMask
                        lastSync.imageLayout,                // VkImageLayout oldLayout
                        currentSync.imageLayout,             // VkImageLayout newLayout
                        resource.getImage().handle,          // VkImage image
                        resource.getImage().subresourceRange // VkImageSubresourceRange subresourceRange
                    );
                    VkDependencyInfoKHR dependencyInfo =
                        makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
                    synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
                }
                else
                {
                    const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
                        lastSync.stageMask,          // VkPipelineStageFlags2KHR srcStageMask
                        lastSync.accessMask,         // VkAccessFlags2KHR srcAccessMask
                        currentSync.stageMask,       // VkPipelineStageFlags2KHR dstStageMask
                        currentSync.accessMask,      // VkAccessFlags2KHR dstAccessMask
                        resource.getBuffer().handle, // VkBuffer buffer
                        0,                           // VkDeviceSize offset
                        VK_WHOLE_SIZE                // VkDeviceSize size
                    );
                    VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
                    synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
                }
            }

            m_ops[opNdx]->recordCommands(cmdBuffer);

            endCommandBuffer(vk, cmdBuffer);
        }

        // Single submit of all batches, then wait for the whole chain.
        VK_CHECK(synchronizationWrapper->queueSubmit(queue, DE_NULL));
        VK_CHECK(vk.queueWaitIdle(queue));

        // The data must have survived the whole copy chain: compare the first
        // op's output with the last op's input.
        {
            const Data expected = m_ops.front()->getData();
            const Data actual = m_ops.back()->getData();

            if (isIndirectBuffer(m_resources[0]->getType()))
            {
                // Indirect-buffer operations only guarantee a monotonically
                // increasing counter, so compare the first uint32 with >=.
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        return tcu::TestStatus::pass("OK");
    }

protected:
    SynchronizationType m_type;
    OperationContext m_opContext;
    std::vector<de::SharedPtr<OperationSupport>> m_opSupports; // write, copies, read (in chain order)
    std::vector<de::SharedPtr<Operation>> m_ops;               // built operations, same order
    std::vector<de::SharedPtr<Resource>> m_resources;          // m_resources[i] links m_ops[i] to m_ops[i+1]
};
526
// Records write and read into separate command buffers and submits each with
// submitCommandsAndWait(), i.e. the host waits on a fence between the two
// submissions, which orders them.
class FenceTestInstance : public BaseTestInstance
{
public:
    FenceTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                      const OperationSupport &writeOp, const OperationSupport &readOp,
                      PipelineCacheData &pipelineCacheData)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData)
    {
    }

    tcu::TestStatus iterate(void)
    {
        // Indices into the per-submission arrays below.
        enum
        {
            WRITE = 0,
            READ,
            COUNT
        };
        const DeviceInterface &vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        const VkQueue queue = m_context.getUniversalQueue();
        const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
        const Unique<VkCommandPool> cmdPool(
            createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
        const Move<VkCommandBuffer> ptrCmdBuffer[COUNT] = {makeCommandBuffer(vk, device, *cmdPool),
                                                           makeCommandBuffer(vk, device, *cmdPool)};
        VkCommandBuffer cmdBuffers[COUNT] = {*ptrCmdBuffer[WRITE], *ptrCmdBuffer[READ]};
        const SyncInfo writeSync = m_writeOp->getOutSyncInfo();
        const SyncInfo readSync = m_readOp->getInSyncInfo();
        // One wrapper per submission, each submitted independently.
        SynchronizationWrapperPtr synchronizationWrapper[COUNT]{getSynchronizationWrapper(m_type, vk, false),
                                                                getSynchronizationWrapper(m_type, vk, false)};

        beginCommandBuffer(vk, cmdBuffers[WRITE]);

        m_writeOp->recordCommands(cmdBuffers[WRITE]);

        // Unlike the barrier/event/semaphore variants, only images get a
        // barrier here: it is needed for the layout transition from the write
        // layout to the read layout. No buffer barrier is recorded -- the
        // inter-submission ordering comes from the fence wait inside
        // submitCommandsAndWait() below.
        if (m_resource->getType() == RESOURCE_TYPE_IMAGE)
        {
            const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
                writeSync.stageMask,                    // VkPipelineStageFlags2KHR srcStageMask
                writeSync.accessMask,                   // VkAccessFlags2KHR srcAccessMask
                readSync.stageMask,                     // VkPipelineStageFlags2KHR dstStageMask
                readSync.accessMask,                    // VkAccessFlags2KHR dstAccessMask
                writeSync.imageLayout,                  // VkImageLayout oldLayout
                readSync.imageLayout,                   // VkImageLayout newLayout
                m_resource->getImage().handle,          // VkImage image
                m_resource->getImage().subresourceRange // VkImageSubresourceRange subresourceRange
            );
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper[WRITE]->cmdPipelineBarrier(cmdBuffers[WRITE], &dependencyInfo);
        }

        endCommandBuffer(vk, cmdBuffers[WRITE]);

        // First submission: write. submitCommandsAndWait blocks until done.
        submitCommandsAndWait(synchronizationWrapper[WRITE], vk, device, queue, cmdBuffers[WRITE]);

        beginCommandBuffer(vk, cmdBuffers[READ]);

        m_readOp->recordCommands(cmdBuffers[READ]);

        endCommandBuffer(vk, cmdBuffers[READ]);

        // Second submission: read, after the write has fully completed.
        submitCommandsAndWait(synchronizationWrapper[READ], vk, device, queue, cmdBuffers[READ]);

        // Compare what the write produced against what the read observed.
        {
            const Data expected = m_writeOp->getData();
            const Data actual = m_readOp->getData();

            if (isIndirectBuffer(m_resource->getType()))
            {
                // Indirect-buffer operations only guarantee a monotonically
                // increasing counter, so compare the first uint32 with >=.
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        return tcu::TestStatus::pass("OK");
    }
};
613
// Test case: owns the operation descriptions for one write/read/resource
// combination and instantiates the test matching the requested primitive.
class SyncTestCase : public TestCase
{
public:
    SyncTestCase(tcu::TestContext &testCtx, const std::string &name, SynchronizationType type,
                 const SyncPrimitive syncPrimitive, const ResourceDescription resourceDesc, const OperationName writeOp,
                 const OperationName readOp, const bool specializedAccess, PipelineCacheData &pipelineCacheData)
        : TestCase(testCtx, name)
        , m_type(type)
        , m_resourceDesc(resourceDesc)
        , m_writeOp(makeOperationSupport(writeOp, resourceDesc, specializedAccess).release())
        , m_readOp(makeOperationSupport(readOp, resourceDesc, specializedAccess).release())
        , m_syncPrimitive(syncPrimitive)
        , m_pipelineCacheData(pipelineCacheData)
    {
    }

    // Compile the shader programs needed by the write and read operations; the
    // timeline-semaphore variant additionally needs programs for every copy
    // operation it may insert into its chain.
    void initPrograms(SourceCollections &programCollection) const
    {
        m_writeOp->initPrograms(programCollection);
        m_readOp->initPrograms(programCollection);

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
        {
            for (uint32_t copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
            {
                if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
                    makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc, false)->initPrograms(programCollection);
            }
        }
    }

    // Throw NotSupportedError for any feature/extension/format this
    // combination requires but the implementation lacks.
    void checkSupport(Context &context) const
    {
        if (m_type == SynchronizationType::SYNCHRONIZATION2)
            context.requireDeviceFunctionality("VK_KHR_synchronization2");

#ifndef CTS_USES_VULKANSC
        // Portability-subset implementations may legally lack VkEvent support.
        if (SYNC_PRIMITIVE_EVENT == m_syncPrimitive &&
            context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
            !context.getPortabilitySubsetFeatures().events)
        {
            TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Events are not supported by this implementation");
        }
#endif // CTS_USES_VULKANSC

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
            !context.getTimelineSemaphoreFeatures().timelineSemaphore)
            TCU_THROW(NotSupportedError, "Timeline semaphore not supported");

        if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
        {
            // Verify the image format supports the combined usage of both
            // operations, and the requested sample count.
            VkImageFormatProperties imageFormatProperties;
            const uint32_t usage = m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
            const InstanceInterface &instance = context.getInstanceInterface();
            const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
            const VkResult formatResult = instance.getPhysicalDeviceImageFormatProperties(
                physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage,
                (VkImageCreateFlags)0, &imageFormatProperties);

            if (formatResult != VK_SUCCESS)
                TCU_THROW(NotSupportedError, "Image format is not supported");

            if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
                TCU_THROW(NotSupportedError, "Requested sample count is not supported");
        }
    }

    // Factory: one instance class per synchronization primitive.
    TestInstance *createInstance(Context &context) const
    {
        switch (m_syncPrimitive)
        {
        case SYNC_PRIMITIVE_FENCE:
            return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
        case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
            return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp,
                                                   m_pipelineCacheData);
        case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
            // Timeline variant shares ownership of the operation supports.
            return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp,
                                                     m_pipelineCacheData);
        case SYNC_PRIMITIVE_BARRIER:
            return new BarrierTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
        case SYNC_PRIMITIVE_EVENT:
            return new EventTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData);
        }

        // Unreachable: every SyncPrimitive value is handled above.
        DE_ASSERT(0);
        return DE_NULL;
    }

private:
    SynchronizationType m_type;
    const ResourceDescription m_resourceDesc;
    const de::SharedPtr<OperationSupport> m_writeOp;
    const de::SharedPtr<OperationSupport> m_readOp;
    const SyncPrimitive m_syncPrimitive;
    PipelineCacheData &m_pipelineCacheData; // shared across cases, not owned
};
711
// Arguments forwarded through createTestGroup() into createTests().
struct TestData
{
    SynchronizationType type;             // which synchronization API variant to exercise
    PipelineCacheData *pipelineCacheData; // shared pipeline cache, not owned
};
717
createTests(tcu::TestCaseGroup * group,TestData data)718 void createTests(tcu::TestCaseGroup *group, TestData data)
719 {
720 tcu::TestContext &testCtx = group->getTestContext();
721
722 static const struct
723 {
724 const char *name;
725 SyncPrimitive syncPrimitive;
726 int numOptions;
727 } groups[] = {
728 {
729 "fence",
730 SYNC_PRIMITIVE_FENCE,
731 0,
732 },
733 {
734 "binary_semaphore",
735 SYNC_PRIMITIVE_BINARY_SEMAPHORE,
736 0,
737 },
738 {
739 "timeline_semaphore",
740 SYNC_PRIMITIVE_TIMELINE_SEMAPHORE,
741 0,
742 },
743 {
744 "barrier",
745 SYNC_PRIMITIVE_BARRIER,
746 1,
747 },
748 {
749 "event",
750 SYNC_PRIMITIVE_EVENT,
751 1,
752 },
753 };
754
755 for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
756 {
757 de::MovePtr<tcu::TestCaseGroup> synchGroup(new tcu::TestCaseGroup(testCtx, groups[groupNdx].name));
758
759 for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
760 for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
761 {
762 const OperationName writeOp = s_writeOps[writeOpNdx];
763 const OperationName readOp = s_readOps[readOpNdx];
764 const std::string opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
765 bool empty = true;
766
767 de::MovePtr<tcu::TestCaseGroup> opGroup(new tcu::TestCaseGroup(testCtx, opGroupName.c_str()));
768
769 for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
770 {
771 const ResourceDescription &resource = s_resources[resourceNdx];
772 std::string name = getResourceName(resource);
773
774 if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
775 {
776 if (data.type == SynchronizationType::SYNCHRONIZATION2)
777 {
778 if ((isSpecializedAccessFlagSupported(writeOp) || isSpecializedAccessFlagSupported(readOp)))
779 {
780 const std::string nameSp = name + "_specialized_access_flag";
781 opGroup->addChild(new SyncTestCase(testCtx, nameSp, data.type,
782 groups[groupNdx].syncPrimitive, resource, writeOp,
783 readOp, true, *data.pipelineCacheData));
784 }
785 }
786
787 opGroup->addChild(new SyncTestCase(testCtx, name, data.type, groups[groupNdx].syncPrimitive,
788 resource, writeOp, readOp, false, *data.pipelineCacheData));
789
790 empty = false;
791 }
792 }
793 if (!empty)
794 synchGroup->addChild(opGroup.release());
795 }
796
797 group->addChild(synchGroup.release());
798 }
799 }
800
801 } // namespace
802
createSynchronizedOperationSingleQueueTests(tcu::TestContext & testCtx,SynchronizationType type,PipelineCacheData & pipelineCacheData)803 tcu::TestCaseGroup *createSynchronizedOperationSingleQueueTests(tcu::TestContext &testCtx, SynchronizationType type,
804 PipelineCacheData &pipelineCacheData)
805 {
806 TestData data{type, &pipelineCacheData};
807
808 // Synchronization of a memory-modifying operation
809 return createTestGroup(testCtx, "single_queue", createTests, data);
810 }
811
812 } // namespace synchronization
813 } // namespace vkt
814