xref: /aosp_15_r20/external/deqp/external/vulkancts/modules/vulkan/api/vktApiFillBufferTests.cpp (revision 35238bce31c2a825756842865a792f8cf7f89930)
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan Fill Buffer Tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktApiFillBufferTests.hpp"
26 #include "vktApiBufferAndImageAllocationUtil.hpp"
27 #include "vktCustomInstancesDevices.hpp"
28 
29 #include "deStringUtil.hpp"
30 #include "deUniquePtr.hpp"
31 #include "vkImageUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vktTestCase.hpp"
35 #include "vktTestCaseUtil.hpp"
36 #include "vkQueryUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkSafetyCriticalUtil.hpp"
40 #include "vkDeviceUtil.hpp"
41 #include "tcuImageCompare.hpp"
42 #include "tcuCommandLine.hpp"
43 #include "tcuTexture.hpp"
44 #include "tcuTextureUtil.hpp"
45 #include "tcuVectorType.hpp"
46 #include "deSharedPtr.hpp"
47 #include <limits>
48 
49 namespace vkt
50 {
51 
52 namespace api
53 {
54 
55 using namespace vk;
56 
57 namespace
58 {
59 
// Parameters shared by the fill/update buffer test variants.
struct TestParams
{
    enum
    {
        TEST_DATA_SIZE = 256 // Element count of testData (uint32_t words).
    };

    VkDeviceSize dstSize;   // Size in bytes of the destination buffer.
    VkDeviceSize dstOffset; // Byte offset passed to vkCmdFillBuffer/vkCmdUpdateBuffer.
    VkDeviceSize size;      // Byte count to fill/update (some tests use VK_WHOLE_SIZE instead).
    // Source words: fill tests use testData[0] as the fill value, update tests copy the array.
    uint32_t testData[TEST_DATA_SIZE];
    // Strategy object that creates and binds the destination buffer (suballocated or dedicated).
    de::SharedPtr<IBufferAllocator> bufferAllocator;
    // When true, the test runs on a custom device using a transfer-only queue family.
    bool useTransferOnlyQueue;
};
74 
// Creates a device exposing a queue family that supports transfer operations
// but neither graphics nor compute. On return, queueFamilyIndex holds the
// index of that transfer-only family.
Move<VkDevice> createCustomDevice(Context &context,
#ifdef CTS_USES_VULKANSC
                                  const vkt::CustomInstance &customInstance,
#endif // CTS_USES_VULKANSC
                                  uint32_t &queueFamilyIndex)
{
#ifdef CTS_USES_VULKANSC
    const vk::InstanceInterface &instanceDriver = customInstance.getDriver();
    const vk::VkPhysicalDevice physicalDevice =
        chooseDevice(instanceDriver, customInstance, context.getTestContext().getCommandLine());
#else
    // Outside Vulkan SC the context's default instance is reused. The local is
    // deliberately named like the SC-only parameter so the vkt::createCustomDevice
    // call at the bottom compiles unchanged in both configurations.
    const vk::VkInstance customInstance         = context.getInstance();
    const vk::InstanceInterface &instanceDriver = context.getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice   = context.getPhysicalDevice();
#endif // CTS_USES_VULKANSC

    // Require TRANSFER while rejecting any family that also advertises
    // GRAPHICS or COMPUTE capabilities.
    queueFamilyIndex = findQueueFamilyIndexWithCaps(instanceDriver, physicalDevice, VK_QUEUE_TRANSFER_BIT,
                                                    VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT);

    const std::vector<VkQueueFamilyProperties> queueFamilies =
        getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

    // This must be found, findQueueFamilyIndexWithCaps would have
    // thrown a NotSupported exception if the requested queue type did
    // not exist. Similarly, this was written with the assumption the
    // "alternative" queue would be different to the universal queue.
    DE_ASSERT(queueFamilyIndex < queueFamilies.size() && queueFamilyIndex != context.getUniversalQueueFamilyIndex());
    const float queuePriority = 1.0f;
    const VkDeviceQueueCreateInfo deviceQueueCreateInfos{
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                    // const void* pNext;
        (VkDeviceQueueCreateFlags)0u,               // VkDeviceQueueCreateFlags flags;
        queueFamilyIndex,                           // uint32_t queueFamilyIndex;
        1u,                                         // uint32_t queueCount;
        &queuePriority,                             // const float* pQueuePriorities;
    };

    // Replicate default device extension list.
    const auto extensionNames     = context.getDeviceCreationExtensions();
    auto synchronization2Features = context.getSynchronization2Features();
    auto deviceFeatures2          = context.getDeviceFeatures2();
    const void *pNext             = &deviceFeatures2;

    if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
    {
        // For API versions >= 1.3 the sync2 feature is already part of
        // deviceFeatures2's chain; only prepend it for older versions.
        if (context.getUsedApiVersion() < VK_API_VERSION_1_3)
        {
            synchronization2Features.pNext = &deviceFeatures2;
            pNext                          = &synchronization2Features;
        }
    }

#ifdef CTS_USES_VULKANSC
    // Vulkan SC requires up-front object reservation; in a subprocess the
    // recorded maxima from the main process are replayed here.
    VkDeviceObjectReservationCreateInfo memReservationInfo = context.getTestContext().getCommandLine().isSubProcess() ?
                                                                 context.getResourceInterface()->getStatMax() :
                                                                 resetDeviceObjectReservationCreateInfo();
    memReservationInfo.pNext                               = pNext;
    pNext                                                  = &memReservationInfo;

    VkPipelineCacheCreateInfo pcCI;
    std::vector<VkPipelinePoolSize> poolSizes;
    if (context.getTestContext().getCommandLine().isSubProcess())
    {
        if (context.getResourceInterface()->getCacheDataSize() > 0)
        {
            pcCI = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                      // const void* pNext;
                VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
                    VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
                context.getResourceInterface()->getCacheDataSize(),       // uintptr_t initialDataSize;
                context.getResourceInterface()->getCacheData()            // const void* pInitialData;
            };
            memReservationInfo.pipelineCacheCreateInfoCount = 1;
            memReservationInfo.pPipelineCacheCreateInfos    = &pcCI;
        }
        poolSizes = context.getResourceInterface()->getPipelinePoolSizes();
        if (!poolSizes.empty())
        {
            memReservationInfo.pipelinePoolSizeCount = uint32_t(poolSizes.size());
            memReservationInfo.pPipelinePoolSizes    = poolSizes.data();
        }
    }
#endif // CTS_USES_VULKANSC

    const VkDeviceCreateInfo deviceCreateInfo{
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,         // VkStructureType sType;
        pNext,                                        // const void* pNext;
        (VkDeviceCreateFlags)0u,                      // VkDeviceCreateFlags flags;
        1u,                                           // uint32_t queueCreateInfoCount;
        &deviceQueueCreateInfos,                      // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u,                                           // uint32_t enabledLayerCount;
        DE_NULL,                                      // const char* const* ppEnabledLayerNames;
        static_cast<uint32_t>(extensionNames.size()), // uint32_t enabledExtensionCount;
        extensionNames.data(),                        // const char* const* ppEnabledExtensionNames;
        DE_NULL,                                      // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    return vkt::createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(),
                                   context.getPlatformInterface(), customInstance, instanceDriver, physicalDevice,
                                   &deviceCreateInfo);
}
178 
// Tests vkCmdFillBuffer with size == VK_WHOLE_SIZE, verifying the fill stops
// at the last 4-byte multiple and leaves any trailing bytes untouched.
class FillWholeBufferTestInstance : public vkt::TestInstance
{
public:
    FillWholeBufferTestInstance(Context &context, const TestParams &testParams);
    virtual tcu::TestStatus iterate(void) override;

protected:
    // dstSize will be used as the buffer size.
    // dstOffset will be used as the offset for vkCmdFillBuffer.
    // size in vkCmdFillBuffer will always be VK_WHOLE_SIZE.
    const TestParams m_params;

    // Only populated when m_params.useTransferOnlyQueue is set; otherwise the
    // context's default device/allocator are referenced through m_device/m_allocator.
    Move<VkDevice> m_customDevice;
    de::MovePtr<Allocator> m_customAllocator;

    // Device actually used by the test (custom or context default).
    VkDevice m_device;
#ifdef CTS_USES_VULKANSC
    const CustomInstance m_customInstance;
#endif // CTS_USES_VULKANSC
    Allocator *m_allocator;       // Non-owning; points at custom or default allocator.
    uint32_t m_queueFamilyIndex;  // Family the command pool and queue are taken from.

    Move<VkCommandPool> m_cmdPool;
    Move<VkCommandBuffer> m_cmdBuffer;

    Move<VkBuffer> m_destination;                    // Buffer being filled.
    de::MovePtr<Allocation> m_destinationBufferAlloc; // Host-visible backing memory.
};
207 
// Sets up device, allocator, command pool/buffer and the destination buffer.
// When useTransferOnlyQueue is requested, a custom device with a transfer-only
// queue family is created; otherwise the context's universal queue is used.
FillWholeBufferTestInstance::FillWholeBufferTestInstance(Context &context, const TestParams &testParams)
    : vkt::TestInstance(context)
    , m_params(testParams)
#ifdef CTS_USES_VULKANSC
    , m_customInstance(createCustomInstanceFromContext(context))
#endif // CTS_USES_VULKANSC
{
#ifdef CTS_USES_VULKANSC
    const vk::InstanceInterface &vki = m_customInstance.getDriver();
    const VkPhysicalDevice physDevice =
        vk::chooseDevice(vki, m_customInstance, m_context.getTestContext().getCommandLine());
#else
    const vk::InstanceInterface &vki            = m_context.getInstanceInterface();
    const VkPhysicalDevice physDevice           = m_context.getPhysicalDevice();
#endif // CTS_USES_VULKANSC
    const DeviceInterface &vk = m_context.getDeviceInterface();

    if (testParams.useTransferOnlyQueue)
    {
        // Transfer-only path: own device + allocator, queue family index is
        // filled in by createCustomDevice.
        m_customDevice = createCustomDevice(context,
#ifdef CTS_USES_VULKANSC
                                            m_customInstance,
#endif
                                            m_queueFamilyIndex);
        m_customAllocator = de::MovePtr<Allocator>(
            new SimpleAllocator(vk, *m_customDevice, getPhysicalDeviceMemoryProperties(vki, physDevice)));

        m_device    = *m_customDevice;
        m_allocator = &(*m_customAllocator);
    }
    else
    {
        // Default path: reuse the context's device, allocator and universal queue.
        m_device           = context.getDevice();
        m_allocator        = &context.getDefaultAllocator();
        m_queueFamilyIndex = context.getUniversalQueueFamilyIndex();
    }

    m_cmdPool   = createCommandPool(vk, m_device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, m_queueFamilyIndex);
    m_cmdBuffer = allocateCommandBuffer(vk, m_device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    // Host-visible so the result can be validated by direct mapping.
    testParams.bufferAllocator->createTestBuffer(vk, m_device, m_queueFamilyIndex, m_params.dstSize,
                                                 VK_BUFFER_USAGE_TRANSFER_DST_BIT, context, *m_allocator, m_destination,
                                                 MemoryRequirement::HostVisible, m_destinationBufferAlloc);
}
251 
// Fills the buffer with 0xff from the host, records a vkCmdFillBuffer with
// VK_WHOLE_SIZE, then verifies: bytes in [dstOffset, last 4-byte multiple)
// become 0x01, all other bytes stay 0xff. Uses synchronization2 barriers and
// submission when available on the transfer-only path, legacy ones otherwise.
tcu::TestStatus FillWholeBufferTestInstance::iterate(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkQueue queue       = getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);

    // If possible use synchronization2 when testing transfer only queue.
    const bool useSynchronization2 =
        m_context.isDeviceFunctionalitySupported("VK_KHR_synchronization2") && m_params.useTransferOnlyQueue;

    // Make sure some stuff below will work.
    DE_ASSERT(m_params.dstSize >= sizeof(uint32_t));
    DE_ASSERT(m_params.dstSize < static_cast<VkDeviceSize>(std::numeric_limits<size_t>::max()));
    DE_ASSERT(m_params.dstOffset < m_params.dstSize);

    // Fill buffer from the host and flush buffer memory.
    uint8_t *bytes = reinterpret_cast<uint8_t *>(m_destinationBufferAlloc->getHostPtr());
    deMemset(bytes, 0xff, static_cast<size_t>(m_params.dstSize));
    flushAlloc(vk, m_device, *m_destinationBufferAlloc);

    // Legacy (sync1) barrier: make the transfer write visible to host reads.
    const VkBufferMemoryBarrier gpuToHostBarrier{
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL,                                 // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT,            // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT,                 // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t dstQueueFamilyIndex;
        *m_destination,                          // VkBuffer buffer;
        0u,                                      // VkDeviceSize offset;
        VK_WHOLE_SIZE                            // VkDeviceSize size;
    };

    // Select core (Vulkan) or KHR-suffixed (Vulkan SC) sync2 types and entry
    // points so the code below is identical in both configurations.
#ifndef CTS_USES_VULKANSC
    using BufferMemoryBarrier2    = VkBufferMemoryBarrier2;
    using DependencyInfo          = VkDependencyInfo;
    using CommandBufferSubmitInfo = VkCommandBufferSubmitInfo;
    using SubmitInfo2             = VkSubmitInfo2;
    auto cmdPipelineBarrier2Fun   = &DeviceInterface::cmdPipelineBarrier2;
    auto queueSubmit2Fun          = &DeviceInterface::queueSubmit2;
#else
    using BufferMemoryBarrier2                  = VkBufferMemoryBarrier2KHR;
    using DependencyInfo                        = VkDependencyInfoKHR;
    using CommandBufferSubmitInfo               = VkCommandBufferSubmitInfoKHR;
    using SubmitInfo2                           = VkSubmitInfo2KHR;
    auto cmdPipelineBarrier2Fun                 = &DeviceInterface::cmdPipelineBarrier2KHR;
    auto queueSubmit2Fun                        = &DeviceInterface::queueSubmit2KHR;
#endif // CTS_USES_VULKANSC

    // Sync2 equivalent of gpuToHostBarrier above.
    BufferMemoryBarrier2 gpuToHostBarrier2 = initVulkanStructure();
    gpuToHostBarrier2.srcStageMask         = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR;
    gpuToHostBarrier2.srcAccessMask        = VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR;
    gpuToHostBarrier2.dstStageMask         = VK_PIPELINE_STAGE_2_HOST_BIT_KHR;
    gpuToHostBarrier2.dstAccessMask        = VK_ACCESS_2_HOST_READ_BIT_KHR;
    gpuToHostBarrier2.srcQueueFamilyIndex  = VK_QUEUE_FAMILY_IGNORED;
    gpuToHostBarrier2.dstQueueFamilyIndex  = VK_QUEUE_FAMILY_IGNORED;
    gpuToHostBarrier2.buffer               = *m_destination;
    gpuToHostBarrier2.size                 = VK_WHOLE_SIZE;

    DependencyInfo depInfo           = initVulkanStructure();
    depInfo.bufferMemoryBarrierCount = 1;
    depInfo.pBufferMemoryBarriers    = &gpuToHostBarrier2;

    // Fill buffer using VK_WHOLE_SIZE.
    beginCommandBuffer(vk, *m_cmdBuffer);
    vk.cmdFillBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, VK_WHOLE_SIZE, uint32_t{0x01010101});

    if (useSynchronization2)
        (vk.*(cmdPipelineBarrier2Fun))(*m_cmdBuffer, &depInfo);
    else
        vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, DE_NULL,
                              1, &gpuToHostBarrier, 0, DE_NULL);

    endCommandBuffer(vk, *m_cmdBuffer);

    // Submit with vkQueueSubmit2 or vkQueueSubmit depending on sync2 support.
    Move<VkFence> fence(createFence(vk, m_device));
    if (useSynchronization2)
    {
        CommandBufferSubmitInfo commandBufferInfos = initVulkanStructure();
        commandBufferInfos.commandBuffer           = *m_cmdBuffer;

        SubmitInfo2 submitInfo2            = initVulkanStructure();
        submitInfo2.commandBufferInfoCount = 1u;
        submitInfo2.pCommandBufferInfos    = &commandBufferInfos;

        (vk.*(queueSubmit2Fun))(queue, 1u, &submitInfo2, *fence);
    }
    else
    {
        VkSubmitInfo submitInfo       = initVulkanStructure();
        submitInfo.commandBufferCount = 1u;
        submitInfo.pCommandBuffers    = &m_cmdBuffer.get();

        VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
    }
    waitForFence(vk, m_device, *fence);

    // Invalidate buffer memory and check the buffer contains the expected results.
    invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);

    // VK_WHOLE_SIZE fills only whole 4-byte words: bytes at or past the last
    // multiple of 4 must retain the 0xff host pattern.
    const VkDeviceSize startOfExtra = (m_params.dstSize / sizeof(uint32_t)) * sizeof(uint32_t);
    for (VkDeviceSize i = 0; i < m_params.dstSize; ++i)
    {
        const uint8_t expectedByte = ((i >= m_params.dstOffset && i < startOfExtra) ? 0x01 : 0xff);
        if (bytes[i] != expectedByte)
        {
            std::ostringstream msg;
            msg << "Invalid byte at position " << i << " in the buffer (found 0x" << std::hex
                << static_cast<int>(bytes[i]) << " but expected 0x" << static_cast<int>(expectedByte) << ")";
            return tcu::TestStatus::fail(msg.str());
        }
    }

    return tcu::TestStatus::pass("Pass");
}
365 
366 class FillWholeBufferTestCase : public vkt::TestCase
367 {
368 public:
FillWholeBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)369     FillWholeBufferTestCase(tcu::TestContext &testCtx, const std::string &name, const TestParams params)
370         : vkt::TestCase(testCtx, name)
371         , m_params(params)
372     {
373     }
374 
createInstance(Context & context) const375     virtual TestInstance *createInstance(Context &context) const override
376     {
377         return static_cast<TestInstance *>(new FillWholeBufferTestInstance(context, m_params));
378     }
379 
380 private:
381     const TestParams m_params;
382 };
383 
// Tests vkCmdFillBuffer with an explicit size, comparing the result against a
// host-computed reference image. Also serves as the base class for the
// vkCmdUpdateBuffer variant.
class FillBufferTestInstance : public vkt::TestInstance
{
public:
    FillBufferTestInstance(Context &context, TestParams testParams);
    virtual tcu::TestStatus iterate(void);

protected:
    const TestParams m_params;

    // Only populated when m_params.useTransferOnlyQueue is set.
    Move<VkDevice> m_customDevice;
    de::MovePtr<Allocator> m_customAllocator;

    // Device actually used by the test (custom or context default).
    VkDevice m_device;
#ifdef CTS_USES_VULKANSC
    const CustomInstance m_customInstance;
#endif // CTS_USES_VULKANSC
    Allocator *m_allocator;      // Non-owning; custom or default allocator.
    uint32_t m_queueFamilyIndex; // Family the command pool and queue are taken from.

    Move<VkCommandPool> m_cmdPool;
    Move<VkCommandBuffer> m_cmdBuffer;
    de::MovePtr<tcu::TextureLevel> m_destinationTextureLevel; // Initial buffer contents as an image.
    de::MovePtr<tcu::TextureLevel> m_expectedTextureLevel;    // Reference result built on the host.

    VkCommandBufferBeginInfo m_cmdBufferBeginInfo;

    Move<VkBuffer> m_destination;
    de::MovePtr<Allocation> m_destinationBufferAlloc;

    // Writes a position-dependent pattern into every texel of the access.
    void generateBuffer(tcu::PixelBufferAccess buffer, int width, int height, int depth = 1);
    // Builds m_expectedTextureLevel; overridden by the update-buffer subclass.
    virtual void generateExpectedResult(void);
    // Copies pixel data into the host-visible allocation and flushes it.
    void uploadBuffer(tcu::ConstPixelBufferAccess bufferAccess, const Allocation &bufferAlloc);
    // Compares the read-back result against m_expectedTextureLevel.
    virtual tcu::TestStatus checkTestResult(tcu::ConstPixelBufferAccess result);
    // Total byte size of the pixel data referenced by src.
    uint32_t calculateSize(tcu::ConstPixelBufferAccess src) const
    {
        return src.getWidth() * src.getHeight() * src.getDepth() * tcu::getPixelSize(src.getFormat());
    }
};
422 
// Sets up device, allocator, command pool/buffer and the destination buffer,
// mirroring FillWholeBufferTestInstance's constructor.
FillBufferTestInstance::FillBufferTestInstance(Context &context, TestParams testParams)
    : vkt::TestInstance(context)
    , m_params(testParams)
#ifdef CTS_USES_VULKANSC
    , m_customInstance(createCustomInstanceFromContext(context))
#endif // CTS_USES_VULKANSC
{
    const InstanceInterface &vki      = m_context.getInstanceInterface();
    const DeviceInterface &vk         = m_context.getDeviceInterface();
    const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();

    if (testParams.useTransferOnlyQueue)
    {
        // Transfer-only path: own device + allocator, queue family index is
        // filled in by createCustomDevice.
        m_customDevice = createCustomDevice(context,
#ifdef CTS_USES_VULKANSC
                                            m_customInstance,
#endif // CTS_USES_VULKANSC
                                            m_queueFamilyIndex);
        m_customAllocator = de::MovePtr<Allocator>(
            new SimpleAllocator(vk, *m_customDevice, getPhysicalDeviceMemoryProperties(vki, physDevice)));

        m_device    = *m_customDevice;
        m_allocator = &(*m_customAllocator);
    }
    else
    {
        // Default path: reuse the context's device, allocator and universal queue.
        m_device           = context.getDevice();
        m_allocator        = &context.getDefaultAllocator();
        m_queueFamilyIndex = context.getUniversalQueueFamilyIndex();
    }

    // Create command pool
    m_cmdPool = createCommandPool(vk, m_device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, m_queueFamilyIndex);

    // Create command buffer
    m_cmdBuffer = allocateCommandBuffer(vk, m_device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

    // Host-visible so the result can be validated by direct mapping.
    testParams.bufferAllocator->createTestBuffer(vk, m_device, m_queueFamilyIndex, m_params.dstSize,
                                                 VK_BUFFER_USAGE_TRANSFER_DST_BIT, context, *m_allocator, m_destination,
                                                 MemoryRequirement::HostVisible, m_destinationBufferAlloc);
}
464 
// Uploads a known pattern, records vkCmdFillBuffer(dstOffset, size,
// testData[0]) plus a transfer-to-host barrier, submits and waits, then
// compares the read-back data against the host-computed expected image.
tcu::TestStatus FillBufferTestInstance::iterate(void)
{
    // The buffer is interpreted as a 1D RGBA8 image: 4 bytes per texel.
    const int dstLevelWidth = (int)(m_params.dstSize / 4);
    m_destinationTextureLevel =
        de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(mapVkFormat(VK_FORMAT_R8G8B8A8_UINT), dstLevelWidth, 1));

    generateBuffer(m_destinationTextureLevel->getAccess(), dstLevelWidth, 1, 1);

    generateExpectedResult();

    uploadBuffer(m_destinationTextureLevel->getAccess(), *m_destinationBufferAlloc);

    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkQueue queue       = getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);

    // Make the transfer write visible to host reads before mapping.
    const VkBufferMemoryBarrier dstBufferBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL,                                 // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT,            // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT,                 // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t dstQueueFamilyIndex;
        *m_destination,                          // VkBuffer buffer;
        m_params.dstOffset,                      // VkDeviceSize offset;
        VK_WHOLE_SIZE                            // VkDeviceSize size;
    };

    beginCommandBuffer(vk, *m_cmdBuffer);
    vk.cmdFillBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, m_params.size, m_params.testData[0]);
    vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &dstBufferBarrier, 0,
                          (const VkImageMemoryBarrier *)DE_NULL);
    endCommandBuffer(vk, *m_cmdBuffer);

    submitCommandsAndWait(vk, m_device, queue, m_cmdBuffer.get());

    // Read buffer data
    de::MovePtr<tcu::TextureLevel> resultLevel(
        new tcu::TextureLevel(m_destinationTextureLevel->getAccess().getFormat(), dstLevelWidth, 1));
    invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);
    tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(),
                                                        m_destinationBufferAlloc->getHostPtr()));

    return checkTestResult(resultLevel->getAccess());
}
510 
generateBuffer(tcu::PixelBufferAccess buffer,int width,int height,int depth)511 void FillBufferTestInstance::generateBuffer(tcu::PixelBufferAccess buffer, int width, int height, int depth)
512 {
513     for (int z = 0; z < depth; z++)
514     {
515         for (int y = 0; y < height; y++)
516         {
517             for (int x = 0; x < width; x++)
518                 buffer.setPixel(tcu::UVec4(x, y, z, 255), x, y, z);
519         }
520     }
521 }
522 
uploadBuffer(tcu::ConstPixelBufferAccess bufferAccess,const Allocation & bufferAlloc)523 void FillBufferTestInstance::uploadBuffer(tcu::ConstPixelBufferAccess bufferAccess, const Allocation &bufferAlloc)
524 {
525     const DeviceInterface &vk = m_context.getDeviceInterface();
526     const uint32_t bufferSize = calculateSize(bufferAccess);
527 
528     // Write buffer data
529     deMemcpy(bufferAlloc.getHostPtr(), bufferAccess.getDataPtr(), bufferSize);
530     flushAlloc(vk, m_device, bufferAlloc);
531 }
532 
checkTestResult(tcu::ConstPixelBufferAccess result)533 tcu::TestStatus FillBufferTestInstance::checkTestResult(tcu::ConstPixelBufferAccess result)
534 {
535     const tcu::ConstPixelBufferAccess expected = m_expectedTextureLevel->getAccess();
536     const tcu::UVec4 threshold(0, 0, 0, 0);
537 
538     if (!tcu::intThresholdCompare(m_context.getTestContext().getLog(), "Compare", "Result comparsion", expected, result,
539                                   threshold, tcu::COMPARE_LOG_RESULT))
540     {
541         return tcu::TestStatus::fail("Fill and Update Buffer test");
542     }
543 
544     return tcu::TestStatus::pass("Fill and Update Buffer test");
545 }
546 
generateExpectedResult(void)547 void FillBufferTestInstance::generateExpectedResult(void)
548 {
549     const tcu::ConstPixelBufferAccess dst = m_destinationTextureLevel->getAccess();
550 
551     m_expectedTextureLevel = de::MovePtr<tcu::TextureLevel>(
552         new tcu::TextureLevel(dst.getFormat(), dst.getWidth(), dst.getHeight(), dst.getDepth()));
553     tcu::copy(m_expectedTextureLevel->getAccess(), dst);
554 
555     uint32_t *currentPtr = (uint32_t *)m_expectedTextureLevel->getAccess().getDataPtr() + m_params.dstOffset / 4;
556     uint32_t *endPtr     = currentPtr + m_params.size / 4;
557 
558     while (currentPtr < endPtr)
559     {
560         *currentPtr = m_params.testData[0];
561         currentPtr++;
562     }
563 }
564 
565 class FillBufferTestCase : public vkt::TestCase
566 {
567 public:
FillBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)568     FillBufferTestCase(tcu::TestContext &testCtx, const std::string &name, const TestParams params)
569         : vkt::TestCase(testCtx, name)
570         , m_params(params)
571     {
572     }
573 
createInstance(Context & context) const574     virtual TestInstance *createInstance(Context &context) const
575     {
576         return static_cast<TestInstance *>(new FillBufferTestInstance(context, m_params));
577     }
578 
579 private:
580     const TestParams m_params;
581 };
582 
583 // Update Buffer
584 
// Tests vkCmdUpdateBuffer: reuses the fill-buffer setup but records an update
// command and expects a verbatim copy of testData in the target region.
class UpdateBufferTestInstance : public FillBufferTestInstance
{
public:
    UpdateBufferTestInstance(Context &context, TestParams testParams) : FillBufferTestInstance(context, testParams)
    {
    }
    virtual tcu::TestStatus iterate(void);

protected:
    // Expected region contains testData bytes instead of a repeated fill word.
    virtual void generateExpectedResult(void);
};
596 
// Same flow as FillBufferTestInstance::iterate, but records vkCmdUpdateBuffer
// with the testData array instead of a fill command.
tcu::TestStatus UpdateBufferTestInstance::iterate(void)
{
    // The buffer is interpreted as a 1D RGBA8 image: 4 bytes per texel.
    const int dstLevelWidth = (int)(m_params.dstSize / 4);
    m_destinationTextureLevel =
        de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(mapVkFormat(VK_FORMAT_R8G8B8A8_UINT), dstLevelWidth, 1));

    generateBuffer(m_destinationTextureLevel->getAccess(), dstLevelWidth, 1, 1);

    generateExpectedResult();

    uploadBuffer(m_destinationTextureLevel->getAccess(), *m_destinationBufferAlloc);

    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkQueue queue       = getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);

    // Make the transfer write visible to host reads before mapping.
    const VkBufferMemoryBarrier dstBufferBarrier = {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL,                                 // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT,            // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT,                 // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED,                 // uint32_t dstQueueFamilyIndex;
        *m_destination,                          // VkBuffer buffer;
        m_params.dstOffset,                      // VkDeviceSize offset;
        VK_WHOLE_SIZE                            // VkDeviceSize size;
    };

    beginCommandBuffer(vk, *m_cmdBuffer);
    vk.cmdUpdateBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, m_params.size, m_params.testData);
    vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &dstBufferBarrier, 0,
                          (const VkImageMemoryBarrier *)DE_NULL);
    endCommandBuffer(vk, *m_cmdBuffer);

    submitCommandsAndWait(vk, m_device, queue, m_cmdBuffer.get());

    // Read buffer data
    de::MovePtr<tcu::TextureLevel> resultLevel(
        new tcu::TextureLevel(m_destinationTextureLevel->getAccess().getFormat(), dstLevelWidth, 1));
    invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);
    tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(),
                                                        m_destinationBufferAlloc->getHostPtr()));

    return checkTestResult(resultLevel->getAccess());
}
642 
generateExpectedResult(void)643 void UpdateBufferTestInstance::generateExpectedResult(void)
644 {
645     const tcu::ConstPixelBufferAccess dst = m_destinationTextureLevel->getAccess();
646 
647     m_expectedTextureLevel = de::MovePtr<tcu::TextureLevel>(
648         new tcu::TextureLevel(dst.getFormat(), dst.getWidth(), dst.getHeight(), dst.getDepth()));
649     tcu::copy(m_expectedTextureLevel->getAccess(), dst);
650 
651     uint32_t *currentPtr = (uint32_t *)m_expectedTextureLevel->getAccess().getDataPtr() + m_params.dstOffset / 4;
652 
653     deMemcpy(currentPtr, m_params.testData, (size_t)m_params.size);
654 }
655 
656 class UpdateBufferTestCase : public vkt::TestCase
657 {
658 public:
UpdateBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)659     UpdateBufferTestCase(tcu::TestContext &testCtx, const std::string &name, const TestParams params)
660         : vkt::TestCase(testCtx, name)
661         , m_params(params)
662     {
663     }
664 
createInstance(Context & context) const665     virtual TestInstance *createInstance(Context &context) const
666     {
667         return (TestInstance *)new UpdateBufferTestInstance(context, m_params);
668     }
669 
670 private:
671     TestParams m_params;
672 };
673 
674 } // namespace
675 
createFillAndUpdateBufferTests(tcu::TestContext & testCtx)676 tcu::TestCaseGroup *createFillAndUpdateBufferTests(tcu::TestContext &testCtx)
677 {
678     const de::SharedPtr<IBufferAllocator> bufferAllocators[]{
679         de::SharedPtr<BufferSuballocation>(new BufferSuballocation()),
680         de::SharedPtr<BufferDedicatedAllocation>(new BufferDedicatedAllocation())};
681 
682     de::MovePtr<tcu::TestCaseGroup> fillAndUpdateBufferTests(new tcu::TestCaseGroup(testCtx, "fill_and_update_buffer"));
683 
684     struct TestGroupData
685     {
686         const char *name;
687         bool useDedicatedAllocation;
688         bool useTransferOnlyQueue;
689     };
690     const TestGroupData testGroupData[]{
691         // BufferView Fill and Update Tests for Suballocated Objects
692         {"suballocation", false, false},
693         // BufferView Fill and Update Tests for Suballocated Objects on transfer only queue
694         {"suballocation_transfer_queue", false, true},
695         // BufferView Fill and Update Tests for Dedicatedly Allocated Objects
696         {"dedicated_alloc", true, false},
697     };
698 
699     TestParams params;
700     for (const auto &groupData : testGroupData)
701     {
702         de::MovePtr<tcu::TestCaseGroup> currentTestsGroup(new tcu::TestCaseGroup(testCtx, groupData.name));
703 
704         params.dstSize              = TestParams::TEST_DATA_SIZE;
705         params.bufferAllocator      = bufferAllocators[groupData.useDedicatedAllocation];
706         params.useTransferOnlyQueue = groupData.useTransferOnlyQueue;
707 
708         uint8_t *data = (uint8_t *)params.testData;
709         for (uint32_t b = 0u; b < (params.dstSize * sizeof(params.testData[0])); b++)
710             data[b] = (uint8_t)(b % 255);
711 
712         {
713             const std::string testName("buffer_whole");
714 
715             params.dstOffset = 0;
716             params.size      = params.dstSize;
717 
718             currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
719             currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
720         }
721 
722         {
723             const std::string testName("buffer_first_one");
724 
725             params.dstOffset = 0;
726             params.size      = 4;
727 
728             currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
729             currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
730         }
731 
732         {
733             const std::string testName("buffer_second_one");
734 
735             params.dstOffset = 4;
736             params.size      = 4;
737 
738             currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
739             currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
740         }
741 
742         {
743             const std::string testName("buffer_second_part");
744 
745             params.dstOffset = params.dstSize / 2;
746             params.size      = params.dstSize / 2;
747 
748             currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
749             currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
750         }
751 
752         // VK_WHOLE_SIZE tests.
753         {
754             for (VkDeviceSize i = 0; i < sizeof(uint32_t); ++i)
755             {
756                 for (VkDeviceSize j = 0; j < sizeof(uint32_t); ++j)
757                 {
758                     params.dstSize   = TestParams::TEST_DATA_SIZE + i;
759                     params.dstOffset = j * sizeof(uint32_t);
760                     params.size      = VK_WHOLE_SIZE;
761 
762                     const VkDeviceSize extraBytes = params.dstSize % sizeof(uint32_t);
763                     const std::string name        = "fill_buffer_vk_whole_size_" + de::toString(extraBytes) +
764                                              "_extra_bytes_offset_" + de::toString(params.dstOffset);
765 
766                     currentTestsGroup->addChild(new FillWholeBufferTestCase{testCtx, name, params});
767                 }
768             }
769         }
770 
771         fillAndUpdateBufferTests->addChild(currentTestsGroup.release());
772     }
773 
774     return fillAndUpdateBufferTests.release();
775 }
776 
777 } // namespace api
778 } // namespace vkt
779