/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Draw Indexed Tests
 *//*--------------------------------------------------------------------*/

#include "vktDrawIndexedTest.hpp"

#include "vktTestCaseUtil.hpp"
#include "vktDrawTestCaseUtil.hpp"

#include "vktDrawBaseClass.hpp"

#include "tcuTestLog.hpp"
#include "tcuResource.hpp"
#include "tcuImageCompare.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuRGBA.hpp"

#include "vkDefs.hpp"
#include "vkCmdUtil.hpp"

#include "tcuTestCase.hpp"
#include "tcuVectorUtil.hpp"
#include "rrRenderer.hpp"

namespace vkt
{
namespace Draw
{
namespace
{

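// Vertex offsets exercised by the non-maintenance6 tests below. The negative
// values check that vkCmdDrawIndexed() handles vertexOffset values below zero.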
enum
{
    VERTEX_OFFSET_DEFAULT = 13,
    VERTEX_OFFSET_MINUS_ONE = -1,
    VERTEX_OFFSET_NEGATIVE = -13,
};

enum class IndexBindOffset
{
    DEFAULT = 0,
    POSITIVE = 16, // Must be aligned to the index data type size.
};

enum class MemoryBindOffset
{
    DEFAULT = 0,
    POSITIVE = 16, // Will be rounded up to the alignment requirement.
};

enum TestType
{
    TEST_TYPE_NON_MAINTENANCE_6 = 0,
    TEST_TYPE_MAINTENANCE6_INDEXED,
    TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT,
    // TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT_COUNT_KHR,
    TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT_COUNT,
#ifndef CTS_USES_VULKANSC
    TEST_TYPE_MAINTENANCE6_MULTI_INDEXED_EXT,
#endif
    TEST_TYPE_LAST
};

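// Parameters for one test instance: which draw entry point to use (testType)
// and which vertex, index-buffer-bind and memory-bind offsets to exercise.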
struct TestSpec2 : TestSpecBase
{
    const int32_t vertexOffset;
    const vk::VkDeviceSize bindIndexBufferOffset;
    const vk::VkDeviceSize memoryBindOffset;
    const TestType testType;
    bool useMaintenance5Ext;
    const bool nullDescriptor;
    const bool bindIndexBuffer2;

    TestSpec2(const ShaderMap &shaders_, vk::VkPrimitiveTopology topology_, SharedGroupParams groupParams_,
              int32_t vertexOffset_, vk::VkDeviceSize bindIndexBufferOffset_, vk::VkDeviceSize memoryBindOffset_,
              TestType testType_, bool useMaintenance5Ext_, bool nullDescriptor_, bool bindIndexBuffer2_)
        : TestSpecBase{shaders_, topology_, groupParams_}
        , vertexOffset(vertexOffset_)
        , bindIndexBufferOffset(bindIndexBufferOffset_)
        , memoryBindOffset(memoryBindOffset_)
        , testType(testType_)
        , useMaintenance5Ext(useMaintenance5Ext_)
        , nullDescriptor(nullDescriptor_)
        , bindIndexBuffer2(bindIndexBuffer2_)
    {
    }
};

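// Draws an indexed quad made of two triangles and compares the result against a
// procedurally generated reference image.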
class DrawIndexed : public DrawTestsBaseClass
{
public:
    typedef TestSpec2 TestSpec;

    DrawIndexed(Context &context, TestSpec testSpec);
    virtual tcu::TestStatus iterate(void);

protected:
    void cmdBindIndexBufferImpl(vk::VkCommandBuffer commandBuffer, vk::VkBuffer indexBuffer, vk::VkDeviceSize offset,
                                vk::VkDeviceSize size, vk::VkIndexType indexType);
    std::vector<uint32_t> m_indexes;
    de::SharedPtr<Buffer> m_indexBuffer;
    const TestSpec m_testSpec;
};

class DrawInstancedIndexed : public DrawIndexed
{
public:
    DrawInstancedIndexed(Context &context, TestSpec testSpec);
    virtual tcu::TestStatus iterate(void);
};

DrawIndexed::DrawIndexed(Context &context, TestSpec testSpec)
    : DrawTestsBaseClass(context, testSpec.shaders[glu::SHADERTYPE_VERTEX], testSpec.shaders[glu::SHADERTYPE_FRAGMENT],
                         testSpec.groupParams, testSpec.topology)
    , m_testSpec(testSpec)
{
    if (testSpec.testType == TEST_TYPE_NON_MAINTENANCE_6)
    {
        // When using a positive vertex offset, the strategy is:
        // - Store vertices at that offset in the vertex buffer.
        // - Use indices normally, as if the vertices were stored at the start of the buffer.
        //
        // When using a negative vertex offset, the strategy is:
        // - Store vertices at the start of the vertex buffer.
        // - Increase indices by abs(offset) so that subtracting the offset yields the regular positions.
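        //
        // For example, with vertexOffset == -13 the indices below are stored as
        // 13, 15, 19, ... and the draw is recorded with vertexOffset -13, so the
        // effective vertex index (index + vertexOffset) is 0, 2, 6, ... again.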

        const uint32_t indexOffset =
            (m_testSpec.vertexOffset < 0 ? static_cast<uint32_t>(-m_testSpec.vertexOffset) : 0u);

        switch (m_topology)
        {
        case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(2 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(6 + indexOffset);
            m_indexes.push_back(6 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(7 + indexOffset);
            break;
        case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(2 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(6 + indexOffset);
            m_indexes.push_back(5 + indexOffset);
            m_indexes.push_back(0 + indexOffset);
            m_indexes.push_back(7 + indexOffset);
            break;

        case vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
        case vk::VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
        case vk::VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
        case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
        case vk::VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
        case vk::VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
        case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
        case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
        case vk::VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
        case vk::VK_PRIMITIVE_TOPOLOGY_LAST:
            DE_FATAL("Topology not implemented");
            break;
        default:
            DE_FATAL("Unknown topology");
            break;
        }
    }

    // This works for both positive and negative vertex offsets.
    for (int unusedIdx = 0; unusedIdx < testSpec.vertexOffset; unusedIdx++)
    {
        m_data.push_back(VertexElementData(tcu::Vec4(-1.0f, 1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), -1));
    }

    int vertexIndex = (testSpec.vertexOffset >= 0 ? testSpec.vertexOffset : 0);

    m_data.push_back(VertexElementData(tcu::Vec4(-0.3f, 0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(-1.0f, 1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(-0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(1.0f, -1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(-0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(0.3f, 0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(0.3f, -0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));
    m_data.push_back(VertexElementData(tcu::Vec4(0.3f, 0.3f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), vertexIndex++));

    m_data.push_back(VertexElementData(tcu::Vec4(-1.0f, 1.0f, 1.0f, 1.0f), tcu::RGBA::blue().toVec(), -1));

    initialize();
}

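// Binds the index buffer with the classic vkCmdBindIndexBuffer(), or with
// vkCmdBindIndexBuffer2KHR() from VK_KHR_maintenance5, which additionally takes
// the size of the range that may be read from the buffer.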
void DrawIndexed::cmdBindIndexBufferImpl(vk::VkCommandBuffer commandBuffer, vk::VkBuffer indexBuffer,
                                         vk::VkDeviceSize offset, vk::VkDeviceSize dataSize, vk::VkIndexType indexType)
{
#ifndef CTS_USES_VULKANSC
    if (m_testSpec.useMaintenance5Ext)
        m_vk.cmdBindIndexBuffer2KHR(commandBuffer, indexBuffer, offset, dataSize, indexType);
    else
#endif
    {
        DE_UNREF(dataSize);
        m_vk.cmdBindIndexBuffer(commandBuffer, indexBuffer, offset, indexType);
    }
}

tcu::TestStatus DrawIndexed::iterate(void)
{
    tcu::TestLog &log = m_context.getTestContext().getLog();
    const auto &vki = m_context.getInstanceInterface();
    const auto physDev = m_context.getPhysicalDevice();
    const vk::VkQueue queue = m_context.getUniversalQueue();
    const vk::VkDevice device = m_context.getDevice();
    const auto memProps = vk::getPhysicalDeviceMemoryProperties(vki, physDev);
    const auto atomSize = m_context.getDeviceProperties().limits.nonCoherentAtomSize;
    const auto dataSize = static_cast<vk::VkDeviceSize>(de::dataSize(m_indexes));
    const auto bufferSize = dataSize + m_testSpec.bindIndexBufferOffset;
    vk::SimpleAllocator allocator(m_vk, device, memProps,
                                  vk::SimpleAllocator::OptionalOffsetParams({atomSize, m_testSpec.memoryBindOffset}));

    m_indexBuffer =
        Buffer::createAndAlloc(m_vk, device, BufferCreateInfo(bufferSize, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT),
                               allocator, vk::MemoryRequirement::HostVisible);

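    // Fill the bytes before the bind offset with 0xFF so any accidental read of
    // them would show up in the result, then copy the real indices at the offset.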
    uint8_t *ptr = reinterpret_cast<uint8_t *>(m_indexBuffer->getBoundMemory().getHostPtr());

    deMemset(ptr, 0xFF, static_cast<size_t>(m_testSpec.bindIndexBufferOffset));
    deMemcpy(ptr + m_testSpec.bindIndexBufferOffset, de::dataOrNull(m_indexes), de::dataSize(m_indexes));
    vk::flushAlloc(m_vk, device, m_indexBuffer->getBoundMemory());

    const vk::VkDeviceSize vertexBufferOffset = 0;
    const vk::VkBuffer vertexBuffer = m_vertexBuffer->object();
    const vk::VkBuffer indexBuffer = m_indexBuffer->object();

#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useSecondaryCmdBuffer)
    {
        // record secondary command buffer
        if (m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
        {
            beginSecondaryCmdBuffer(m_vk, vk::VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT);
            beginDynamicRender(*m_secCmdBuffer);
        }
        else
            beginSecondaryCmdBuffer(m_vk);

        m_vk.cmdBindVertexBuffers(*m_secCmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        cmdBindIndexBufferImpl(*m_secCmdBuffer, indexBuffer, m_testSpec.bindIndexBufferOffset, dataSize,
                               vk::VK_INDEX_TYPE_UINT32);
        m_vk.cmdBindPipeline(*m_secCmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDrawIndexed(*m_secCmdBuffer, 6, 1, 2, m_testSpec.vertexOffset, 0);

        if (m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            endDynamicRender(*m_secCmdBuffer);

        endCommandBuffer(m_vk, *m_secCmdBuffer);

        // record primary command buffer
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();

        if (!m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            beginDynamicRender(*m_cmdBuffer, vk::VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

        m_vk.cmdExecuteCommands(*m_cmdBuffer, 1u, &*m_secCmdBuffer);

        if (!m_groupParams->secondaryCmdBufferCompletelyContainsDynamicRenderpass)
            endDynamicRender(*m_cmdBuffer);

        endCommandBuffer(m_vk, *m_cmdBuffer);
    }
    else if (m_groupParams->useDynamicRendering)
    {
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();
        beginDynamicRender(*m_cmdBuffer);

        m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        cmdBindIndexBufferImpl(*m_cmdBuffer, indexBuffer, m_testSpec.bindIndexBufferOffset, dataSize,
                               vk::VK_INDEX_TYPE_UINT32);
        m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDrawIndexed(*m_cmdBuffer, 6, 1, 2, m_testSpec.vertexOffset, 0);

        endDynamicRender(*m_cmdBuffer);
        endCommandBuffer(m_vk, *m_cmdBuffer);
    }
#endif // CTS_USES_VULKANSC

    if (!m_groupParams->useDynamicRendering)
    {
        beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
        preRenderBarriers();
        beginLegacyRender(*m_cmdBuffer);

        m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
        cmdBindIndexBufferImpl(*m_cmdBuffer, indexBuffer, m_testSpec.bindIndexBufferOffset, dataSize,
                               vk::VK_INDEX_TYPE_UINT32);
        m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
        m_vk.cmdDrawIndexed(*m_cmdBuffer, 6, 1, 2, m_testSpec.vertexOffset, 0);

        endLegacyRender(*m_cmdBuffer);
        endCommandBuffer(m_vk, *m_cmdBuffer);
    }

    submitCommandsAndWait(m_vk, device, queue, m_cmdBuffer.get());

    // Validation
    tcu::Texture2D referenceFrame(vk::mapVkFormat(m_colorAttachmentFormat), (int)(0.5f + static_cast<float>(WIDTH)),
                                  (int)(0.5f + static_cast<float>(HEIGHT)));
    referenceFrame.allocLevel(0);

    const int32_t frameWidth = referenceFrame.getWidth();
    const int32_t frameHeight = referenceFrame.getHeight();

    tcu::clear(referenceFrame.getLevel(0), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));

    ReferenceImageCoordinates refCoords;

    for (int y = 0; y < frameHeight; y++)
    {
        const float yCoord = (float)(y / (0.5 * frameHeight)) - 1.0f;

        for (int x = 0; x < frameWidth; x++)
        {
            const float xCoord = (float)(x / (0.5 * frameWidth)) - 1.0f;

            if ((yCoord >= refCoords.bottom && yCoord <= refCoords.top && xCoord >= refCoords.left &&
                 xCoord <= refCoords.right))
                referenceFrame.getLevel(0).setPixel(tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f), x, y);
        }
    }

    const vk::VkOffset3D zeroOffset = {0, 0, 0};
    const tcu::ConstPixelBufferAccess renderedFrame =
        m_colorTargetImage->readSurface(queue, m_context.getDefaultAllocator(), vk::VK_IMAGE_LAYOUT_GENERAL, zeroOffset,
                                        WIDTH, HEIGHT, vk::VK_IMAGE_ASPECT_COLOR_BIT);

    qpTestResult res = QP_TEST_RESULT_PASS;

    if (!tcu::fuzzyCompare(log, "Result", "Image comparison result", referenceFrame.getLevel(0), renderedFrame, 0.05f,
                           tcu::COMPARE_LOG_RESULT))
    {
        res = QP_TEST_RESULT_FAIL;
    }

    return tcu::TestStatus(res, qpGetTestResultName(res));
}

DrawInstancedIndexed::DrawInstancedIndexed(Context &context, TestSpec testSpec) : DrawIndexed(context, testSpec)
{
}

tcu::TestStatus DrawInstancedIndexed::iterate(void)
{
    tcu::TestLog &log = m_context.getTestContext().getLog();
    const auto &vki = m_context.getInstanceInterface();
    const auto physDev = m_context.getPhysicalDevice();
    const vk::VkQueue queue = m_context.getUniversalQueue();
    const vk::VkDevice device = m_context.getDevice();
    const auto memProps = vk::getPhysicalDeviceMemoryProperties(vki, physDev);
    const auto dataSize = static_cast<vk::VkDeviceSize>(de::dataSize(m_indexes));
    const vk::VkDeviceSize bufferSize = dataSize + m_testSpec.bindIndexBufferOffset;
    const auto atomSize = m_context.getDeviceProperties().limits.nonCoherentAtomSize;
    vk::SimpleAllocator allocator(m_vk, device, memProps,
                                  vk::SimpleAllocator::OptionalOffsetParams({atomSize, m_testSpec.memoryBindOffset}));

    beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
    preRenderBarriers();

#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useDynamicRendering)
        beginDynamicRender(*m_cmdBuffer);
    else
        beginLegacyRender(*m_cmdBuffer);
#else
    beginLegacyRender(*m_cmdBuffer);
#endif // CTS_USES_VULKANSC

    m_indexBuffer =
        Buffer::createAndAlloc(m_vk, device, BufferCreateInfo(bufferSize, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT),
                               allocator, vk::MemoryRequirement::HostVisible);

    uint8_t *ptr = reinterpret_cast<uint8_t *>(m_indexBuffer->getBoundMemory().getHostPtr());

    deMemset(ptr, 0xFF, static_cast<size_t>(m_testSpec.bindIndexBufferOffset));
    deMemcpy(ptr + m_testSpec.bindIndexBufferOffset, de::dataOrNull(m_indexes), de::dataSize(m_indexes));
    vk::flushAlloc(m_vk, device, m_indexBuffer->getBoundMemory());

    const vk::VkDeviceSize vertexBufferOffset = 0;
    const vk::VkBuffer vertexBuffer = m_vertexBuffer->object();
    const vk::VkBuffer indexBuffer = m_indexBuffer->object();

    m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
    cmdBindIndexBufferImpl(*m_cmdBuffer, indexBuffer, m_testSpec.bindIndexBufferOffset, dataSize,
                           vk::VK_INDEX_TYPE_UINT32);
    m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);

    switch (m_topology)
    {
    case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
        m_vk.cmdDrawIndexed(*m_cmdBuffer, 6, 4, 2, m_testSpec.vertexOffset, 2);
        break;
    case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
        m_vk.cmdDrawIndexed(*m_cmdBuffer, 4, 4, 2, m_testSpec.vertexOffset, 2);
        break;
    case vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
    case vk::VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
    case vk::VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
    case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
    case vk::VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
    case vk::VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
    case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
    case vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
    case vk::VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
    case vk::VK_PRIMITIVE_TOPOLOGY_LAST:
        DE_FATAL("Topology not implemented");
        break;
    default:
        DE_FATAL("Unknown topology");
        break;
    }

#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useDynamicRendering)
        endDynamicRender(*m_cmdBuffer);
    else
        endLegacyRender(*m_cmdBuffer);
#else
    endLegacyRender(*m_cmdBuffer);
#endif // CTS_USES_VULKANSC

    endCommandBuffer(m_vk, *m_cmdBuffer);

    submitCommandsAndWait(m_vk, device, queue, m_cmdBuffer.get());

    // Validation
    VK_CHECK(m_vk.queueWaitIdle(queue));

    tcu::Texture2D referenceFrame(vk::mapVkFormat(m_colorAttachmentFormat), (int)(0.5f + static_cast<float>(WIDTH)),
                                  (int)(0.5f + static_cast<float>(HEIGHT)));
    referenceFrame.allocLevel(0);

    const int32_t frameWidth = referenceFrame.getWidth();
    const int32_t frameHeight = referenceFrame.getHeight();

    tcu::clear(referenceFrame.getLevel(0), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));

    ReferenceImageInstancedCoordinates refInstancedCoords;

    for (int y = 0; y < frameHeight; y++)
    {
        const float yCoord = (float)(y / (0.5 * frameHeight)) - 1.0f;

        for (int x = 0; x < frameWidth; x++)
        {
            const float xCoord = (float)(x / (0.5 * frameWidth)) - 1.0f;

            if ((yCoord >= refInstancedCoords.bottom && yCoord <= refInstancedCoords.top &&
                 xCoord >= refInstancedCoords.left && xCoord <= refInstancedCoords.right))
                referenceFrame.getLevel(0).setPixel(tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f), x, y);
        }
    }

    const vk::VkOffset3D zeroOffset = {0, 0, 0};
    const tcu::ConstPixelBufferAccess renderedFrame =
        m_colorTargetImage->readSurface(queue, m_context.getDefaultAllocator(), vk::VK_IMAGE_LAYOUT_GENERAL, zeroOffset,
                                        WIDTH, HEIGHT, vk::VK_IMAGE_ASPECT_COLOR_BIT);

    qpTestResult res = QP_TEST_RESULT_PASS;

    if (!tcu::fuzzyCompare(log, "Result", "Image comparison result", referenceFrame.getLevel(0), renderedFrame, 0.05f,
                           tcu::COMPARE_LOG_RESULT))
    {
        res = QP_TEST_RESULT_FAIL;
    }

    return tcu::TestStatus(res, qpGetTestResultName(res));
}

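// Exercises VK_KHR_maintenance6, which allows binding VK_NULL_HANDLE as the index
// buffer. Such draws produce no output unless the robustness2 nullDescriptor
// feature is enabled, in which case every fetched index reads as zero.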
class DrawIndexedMaintenance6 : public DrawIndexed
{
public:
    DrawIndexedMaintenance6(Context &context, TestSpec testSpec);
    virtual tcu::TestStatus iterate(void);
};

DrawIndexedMaintenance6::DrawIndexedMaintenance6(Context &context, TestSpec testSpec) : DrawIndexed(context, testSpec)
{
}

// Reference renderer shaders
class PassthruVertShader : public rr::VertexShader
{
public:
    PassthruVertShader(void) : rr::VertexShader(2, 1)
    {
        m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
        m_inputs[1].type = rr::GENERICVECTYPE_FLOAT;
        m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
    }

    virtual ~PassthruVertShader()
    {
    }

    void shadeVertices(const rr::VertexAttrib *inputs, rr::VertexPacket *const *packets, const int numPackets) const
    {
        for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
        {
            packets[packetNdx]->position =
                rr::readVertexAttribFloat(inputs[0], packets[packetNdx]->instanceNdx, packets[packetNdx]->vertexNdx);

            tcu::Vec4 color =
                rr::readVertexAttribFloat(inputs[1], packets[packetNdx]->instanceNdx, packets[packetNdx]->vertexNdx);

            packets[packetNdx]->outputs[0] = color;
        }
    }
};

class PassthruFragShader : public rr::FragmentShader
{
public:
    PassthruFragShader(void) : rr::FragmentShader(1, 1)
    {
        m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
        m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
    }

    virtual ~PassthruFragShader()
    {
    }

    void shadeFragments(rr::FragmentPacket *packets, const int numPackets,
                        const rr::FragmentShadingContext &context) const
    {
        for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
        {
            rr::FragmentPacket &packet = packets[packetNdx];
            for (uint32_t fragNdx = 0; fragNdx < rr::NUM_FRAGMENTS_PER_PACKET; ++fragNdx)
            {
                tcu::Vec4 color = rr::readVarying<float>(packet, context, 0, fragNdx);
                rr::writeFragmentOutput(context, packetNdx, fragNdx, 0, color);
            }
        }
    }
};

tcu::TestStatus DrawIndexedMaintenance6::iterate(void)
{
    tcu::TestLog &log = m_context.getTestContext().getLog();
    const auto &vki = m_context.getInstanceInterface();
    const auto physDev = m_context.getPhysicalDevice();
    const vk::VkQueue queue = m_context.getUniversalQueue();
    const vk::VkDevice device = m_context.getDevice();
    const auto memProps = vk::getPhysicalDeviceMemoryProperties(vki, physDev);
    const auto atomSize = m_context.getDeviceProperties().limits.nonCoherentAtomSize;
    vk::SimpleAllocator allocator(m_vk, device, memProps,
                                  vk::SimpleAllocator::OptionalOffsetParams({atomSize, m_testSpec.memoryBindOffset}));

    beginCommandBuffer(m_vk, *m_cmdBuffer, 0u);
    preRenderBarriers();

#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useDynamicRendering)
        beginDynamicRender(*m_cmdBuffer);
    else
        beginLegacyRender(*m_cmdBuffer);
#else
    beginLegacyRender(*m_cmdBuffer);
#endif // CTS_USES_VULKANSC

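    // With nullDescriptor support, indices fetched from the null index buffer read
    // as zero, so three indices all select vertex 0 and a single point is drawn.
    // Without it no indices may be fetched, so the draw uses an index count of zero.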
    const uint32_t indexCount = m_testSpec.nullDescriptor ? 3 : 0;

    const vk::VkDrawIndexedIndirectCommand drawParams = {indexCount, 1, 0, 0, 0};

    const auto drawParamsBuffer = Buffer::createAndAlloc(
        m_vk, device,
        BufferCreateInfo(sizeof(vk::VkDrawIndexedIndirectCommand), vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), allocator,
        vk::MemoryRequirement::HostVisible);

    uint8_t *ptr = reinterpret_cast<uint8_t *>(drawParamsBuffer->getBoundMemory().getHostPtr());

    deMemcpy(ptr, &drawParams, sizeof(vk::VkDrawIndexedIndirectCommand));
    vk::flushAlloc(m_vk, device, drawParamsBuffer->getBoundMemory());

    const auto countBuffer = Buffer::createAndAlloc(
        m_vk, device, BufferCreateInfo(sizeof(uint32_t), vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), allocator,
        vk::MemoryRequirement::HostVisible);

    ptr = reinterpret_cast<uint8_t *>(countBuffer->getBoundMemory().getHostPtr());

    deMemset(ptr, 1, 1);
    vk::flushAlloc(m_vk, device, countBuffer->getBoundMemory());

    const vk::VkBuffer vertexBuffer = m_vertexBuffer->object();
    const vk::VkDeviceSize vertexBufferOffset = 0;

    m_vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);

#ifndef CTS_USES_VULKANSC
    if (m_testSpec.bindIndexBuffer2)
    {
        m_vk.cmdBindIndexBuffer2KHR(*m_cmdBuffer, VK_NULL_HANDLE, 0, 0, vk::VK_INDEX_TYPE_UINT32);
    }
    else
#endif
    {
        m_vk.cmdBindIndexBuffer(*m_cmdBuffer, VK_NULL_HANDLE, 0, vk::VK_INDEX_TYPE_UINT32);
    }

    m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);

    switch (m_testSpec.testType)
    {
    case TEST_TYPE_MAINTENANCE6_INDEXED:
    {
        m_vk.cmdDrawIndexed(*m_cmdBuffer, indexCount, 1, 0, 0, 0);

        break;
    }
    case TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT:
    {
        m_vk.cmdDrawIndexedIndirect(*m_cmdBuffer, drawParamsBuffer->object(), 0, 1,
                                    sizeof(vk::VkDrawIndexedIndirectCommand));

        break;
    }
    case TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT_COUNT:
    {
        m_vk.cmdDrawIndexedIndirectCount(*m_cmdBuffer, drawParamsBuffer->object(), 0, countBuffer->object(), 0, 1,
                                         sizeof(vk::VkDrawIndexedIndirectCommand));

        break;
    }
#ifndef CTS_USES_VULKANSC
    case TEST_TYPE_MAINTENANCE6_MULTI_INDEXED_EXT:
    {
        const vk::VkMultiDrawIndexedInfoEXT indexInfo = {0, indexCount, 0};
        const int32_t vertexOffset = 0;

        m_vk.cmdDrawMultiIndexedEXT(*m_cmdBuffer, 1, &indexInfo, 1, 0, sizeof(vk::VkMultiDrawIndexedInfoEXT),
                                    &vertexOffset);

        break;
    }
#endif
    default:
    {
        DE_FATAL("Unknown test type");
        break;
    }
    }

#ifndef CTS_USES_VULKANSC
    if (m_groupParams->useDynamicRendering)
        endDynamicRender(*m_cmdBuffer);
    else
        endLegacyRender(*m_cmdBuffer);
#else
    endLegacyRender(*m_cmdBuffer);
#endif // CTS_USES_VULKANSC

    endCommandBuffer(m_vk, *m_cmdBuffer);

    submitCommandsAndWait(m_vk, device, queue, m_cmdBuffer.get());

    // Validation
    VK_CHECK(m_vk.queueWaitIdle(queue));

    tcu::TextureLevel refImage(vk::mapVkFormat(m_colorAttachmentFormat), (int)(0.5f + static_cast<float>(WIDTH)),
                               (int)(0.5f + static_cast<float>(HEIGHT)));
    tcu::clear(refImage.getAccess(), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));

    if (m_testSpec.nullDescriptor)
    {
        std::vector<tcu::Vec4> vertices;
        std::vector<tcu::Vec4> colors;

        // Draw just the first point
        vertices.push_back(m_data[0].position);
        colors.push_back(m_data[0].color);

        {
            const PassthruVertShader vertShader;
            const PassthruFragShader fragShader;
            const rr::Program program(&vertShader, &fragShader);
            const rr::MultisamplePixelBufferAccess colorBuffer =
                rr::MultisamplePixelBufferAccess::fromSinglesampleAccess(refImage.getAccess());
            const rr::RenderTarget renderTarget(colorBuffer);
            const rr::RenderState renderState((rr::ViewportState(colorBuffer)),
                                              m_context.getDeviceProperties().limits.subPixelPrecisionBits);
            const rr::Renderer renderer;

            const rr::VertexAttrib vertexAttribs[] = {
                rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, &vertices[0]),
                rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, &colors[0])};
            renderer.draw(rr::DrawCommand(renderState, renderTarget, program, DE_LENGTH_OF_ARRAY(vertexAttribs),
                                          &vertexAttribs[0],
                                          rr::PrimitiveList(rr::PRIMITIVETYPE_POINTS, (uint32_t)vertices.size(), 0)));
        }
    }

    const vk::VkOffset3D zeroOffset = {0, 0, 0};
    const tcu::ConstPixelBufferAccess renderedFrame =
        m_colorTargetImage->readSurface(queue, m_context.getDefaultAllocator(), vk::VK_IMAGE_LAYOUT_GENERAL, zeroOffset,
                                        WIDTH, HEIGHT, vk::VK_IMAGE_ASPECT_COLOR_BIT);

    qpTestResult res = QP_TEST_RESULT_PASS;

    if (!tcu::intThresholdPositionDeviationCompare(log, "Result", "Image comparison result", refImage.getAccess(),
                                                   renderedFrame,
                                                   tcu::UVec4(4u),      // color threshold
                                                   tcu::IVec3(1, 1, 0), // position deviation tolerance
                                                   true,                // don't check the pixels at the boundary
                                                   tcu::COMPARE_LOG_RESULT))
    {
        res = QP_TEST_RESULT_FAIL;
    }

    return tcu::TestStatus(res, qpGetTestResultName(res));
}

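// Checks the extensions and features each test variant requires.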
void checkSupport(Context &context, DrawIndexed::TestSpec testSpec)
{
    if (testSpec.groupParams->useDynamicRendering)
        context.requireDeviceFunctionality("VK_KHR_dynamic_rendering");

    if (testSpec.testType != TEST_TYPE_NON_MAINTENANCE_6)
    {
        context.requireDeviceFunctionality("VK_KHR_maintenance6");

        if (testSpec.nullDescriptor)
        {
            vk::VkPhysicalDeviceFeatures2 features2 = vk::initVulkanStructure();
            vk::VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = vk::initVulkanStructure();

            features2.pNext = &robustness2Features;

            context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);

            if (robustness2Features.nullDescriptor == VK_FALSE)
            {
                TCU_THROW(NotSupportedError, "robustness2 nullDescriptor is not supported");
            }

            DE_ASSERT(features2.features.robustBufferAccess);
        }

        if (testSpec.bindIndexBuffer2)
        {
            context.requireDeviceFunctionality("VK_KHR_maintenance5");
        }

#ifndef CTS_USES_VULKANSC
        if (testSpec.testType == TEST_TYPE_MAINTENANCE6_MULTI_INDEXED_EXT)
        {
            context.requireDeviceFunctionality("VK_EXT_multi_draw");
        }
#endif

        if (testSpec.testType == TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT_COUNT)
        {
            context.requireDeviceFunctionality("VK_KHR_draw_indirect_count");
        }
    }
#ifndef CTS_USES_VULKANSC
    if (testSpec.useMaintenance5Ext)
        context.requireDeviceFunctionality(VK_KHR_MAINTENANCE_5_EXTENSION_NAME);
#endif
}

} // namespace

DrawIndexedTests::DrawIndexedTests(tcu::TestContext &testCtx, const SharedGroupParams groupParams)
    : TestCaseGroup(testCtx, "indexed_draw")
    , m_groupParams(groupParams)
{
    /* Left blank on purpose */
}

DrawIndexedTests::~DrawIndexedTests(void)
{
}

void DrawIndexedTests::init(void)
{
    init(false);
#ifndef CTS_USES_VULKANSC
    init(true);
#endif
}

void DrawIndexedTests::init(bool useMaintenance5Ext)
{
    std::string maintenance5ExtNameSuffix = useMaintenance5Ext ? "_maintenance_5" : "";

    const struct
    {
        const vk::VkPrimitiveTopology topology;
        const char *nameSuffix;
    } TopologyCases[] = {
        // triangle list
        {vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, "triangle_list"},
        // triangle strip
        {vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, "triangle_strip"},
    };

    const struct
    {
        const int offset;
        const char *nameSuffix;
    } OffsetCases[] = {
        {VERTEX_OFFSET_DEFAULT, ""},
        // using -1 as the vertex offset
        {VERTEX_OFFSET_MINUS_ONE, "_offset_minus_one"},
        // using a large negative number as the vertex offset
        {VERTEX_OFFSET_NEGATIVE, "_offset_negative_large"},
    };

    const struct
    {
        IndexBindOffset bindOffset;
        const char *nameSuffix;
    } IndexBindOffsetCases[] = {
        {IndexBindOffset::DEFAULT, ""},
        // and applying an index buffer bind offset
        {IndexBindOffset::POSITIVE, "_with_bind_offset"},
    };

    const struct
    {
        MemoryBindOffset memoryBindOffset;
        const char *nameSuffix;
    } MemoryBindOffsetCases[] = {
        {MemoryBindOffset::DEFAULT, ""},
        // and applying an extra memory allocation offset
        {MemoryBindOffset::POSITIVE, "_with_alloc_offset"},
    };

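    // Build the cartesian product of vertex offset, index-buffer bind offset,
    // memory bind offset and topology cases, both plain and instanced.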
    for (const auto &offsetCase : OffsetCases)
    {
        for (const auto &indexBindOffsetCase : IndexBindOffsetCases)
        {
            const auto indexBindOffset = static_cast<vk::VkDeviceSize>(indexBindOffsetCase.bindOffset);

            for (const auto &memoryBindOffsetCase : MemoryBindOffsetCases)
            {
                const auto memoryBindOffset = static_cast<vk::VkDeviceSize>(memoryBindOffsetCase.memoryBindOffset);

                for (const auto &topologyCase : TopologyCases)
                {
                    {
                        DrawIndexed::TestSpec testSpec({{glu::SHADERTYPE_VERTEX, "vulkan/draw/VertexFetch.vert"},
                                                        {glu::SHADERTYPE_FRAGMENT, "vulkan/draw/VertexFetch.frag"}},
                                                       topologyCase.topology, m_groupParams, offsetCase.offset,
                                                       indexBindOffset, memoryBindOffset, TEST_TYPE_NON_MAINTENANCE_6,
                                                       useMaintenance5Ext, false, false);

                        const auto testName = std::string("draw_indexed_") + topologyCase.nameSuffix +
                                              offsetCase.nameSuffix + indexBindOffsetCase.nameSuffix +
                                              memoryBindOffsetCase.nameSuffix + maintenance5ExtNameSuffix;

                        addChild(new InstanceFactory<DrawIndexed, FunctionSupport1<DrawIndexed::TestSpec>>(
                            m_testCtx, testName, testSpec,
                            FunctionSupport1<DrawIndexed::TestSpec>::Args(checkSupport, testSpec)));
                    }
                    {
                        DrawInstancedIndexed::TestSpec testSpec(
                            {{glu::SHADERTYPE_VERTEX, "vulkan/draw/VertexFetchInstancedFirstInstance.vert"},
                             {glu::SHADERTYPE_FRAGMENT, "vulkan/draw/VertexFetch.frag"}},
                            topologyCase.topology, m_groupParams, offsetCase.offset, indexBindOffset, memoryBindOffset,
                            TEST_TYPE_NON_MAINTENANCE_6, useMaintenance5Ext, false, false);

                        const auto testName = std::string("draw_instanced_indexed_") + topologyCase.nameSuffix +
                                              offsetCase.nameSuffix + indexBindOffsetCase.nameSuffix +
                                              memoryBindOffsetCase.nameSuffix + maintenance5ExtNameSuffix;

                        addChild(
                            new InstanceFactory<DrawInstancedIndexed, FunctionSupport1<DrawInstancedIndexed::TestSpec>>(
                                m_testCtx, testName, testSpec,
                                FunctionSupport1<DrawInstancedIndexed::TestSpec>::Args(checkSupport, testSpec)));
                    }
                }
            }
        }
    }

    const struct
    {
        TestType testType;
        std::string nameSuffix;
    } Maintenance6Cases[] = {
        {TEST_TYPE_MAINTENANCE6_INDEXED, ""},
        {TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT, "_indirect"},
        {TEST_TYPE_MAINTENANCE6_INDEXED_INDIRECT_COUNT, "_indirect_count"},
#ifndef CTS_USES_VULKANSC
        {TEST_TYPE_MAINTENANCE6_MULTI_INDEXED_EXT, "_multi"},
#endif
    };

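    // Each maintenance6 draw entry point is combined with and without
    // vkCmdBindIndexBuffer2KHR and with and without a null descriptor.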
    for (const auto &maintenance6Case : Maintenance6Cases)
    {
        for (int m5 = 0; m5 < 2; m5++)
        {
            for (int null = 0; null < 2; null++)
            {
                DrawIndexedMaintenance6::TestSpec testSpec(
                    {{glu::SHADERTYPE_VERTEX, "vulkan/draw/VertexFetch.vert"},
                     {glu::SHADERTYPE_FRAGMENT, "vulkan/draw/VertexFetch.frag"}},
                    vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, m_groupParams, 0, 0, 0, maintenance6Case.testType,
                    useMaintenance5Ext, null == 1, m5 == 1);

                const char *m5Suffix = m5 == 0 ? "" : "_bindindexbuffer2";
                const char *nullSuffix = null == 0 ? "" : "_nulldescriptor";

                const auto testName = std::string("draw_indexed") + maintenance6Case.nameSuffix + m5Suffix +
                                      nullSuffix + maintenance5ExtNameSuffix + std::string("_maintenance6");

                addChild(new InstanceFactory<DrawIndexedMaintenance6, FunctionSupport1<DrawIndexed::TestSpec>>(
                    m_testCtx, testName, testSpec,
                    FunctionSupport1<DrawIndexedMaintenance6::TestSpec>::Args(checkSupport, testSpec)));
            }
        }
    }
}

} // namespace Draw
} // namespace vkt