1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Vulkan Occlusion Query Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktQueryPoolOcclusionTests.hpp"
26
27 #include "vktTestCase.hpp"
28
29 #include "vktDrawImageObjectUtil.hpp"
30 #include "vktDrawBufferObjectUtil.hpp"
31 #include "vktDrawCreateInfoUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
37
38 #include "tcuTestLog.hpp"
39 #include "tcuResource.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
42
43 namespace vkt
44 {
45
46 namespace QueryPool
47 {
48
49 using namespace Draw;
50
51 namespace
52 {
53
// Bundles the static Vulkan objects shared by the occlusion query test
// instances: attachments, render pass, framebuffer, graphics pipeline and a
// host-visible vertex buffer.
struct StateObjects
{
    // Builds all objects; when noColorAttachments is true the render pass and
    // framebuffer use only a depth attachment.
    StateObjects(const vk::DeviceInterface &vk, vkt::Context &context, const int numVertices,
                 vk::VkPrimitiveTopology primitive, const bool noColorAttachments);
    // Uploads the given vertices into the host-visible vertex buffer and
    // flushes the allocation so the device sees the data.
    void setVertices(const vk::DeviceInterface &vk, std::vector<tcu::Vec4> vertices);

    // Framebuffer dimensions in pixels.
    enum
    {
        WIDTH = 128,
        HEIGHT = 128
    };

    vkt::Context &m_context;

    vk::Move<vk::VkPipeline> m_pipeline;
    vk::Move<vk::VkPipelineLayout> m_pipelineLayout;

    // m_colorAttachmentImage stays null when the depth-only variant is used.
    de::SharedPtr<Image> m_colorAttachmentImage, m_DepthImage;
    vk::Move<vk::VkImageView> m_attachmentView;
    vk::Move<vk::VkImageView> m_depthView;

    vk::Move<vk::VkRenderPass> m_renderPass;
    vk::Move<vk::VkFramebuffer> m_framebuffer;

    de::SharedPtr<Buffer> m_vertexBuffer;

    vk::VkFormat m_colorAttachmentFormat;
};
82
// Creates the depth (and optionally color) attachments, render pass,
// framebuffer, graphics pipeline and vertex buffer used by the tests.
StateObjects::StateObjects(const vk::DeviceInterface &vk, vkt::Context &context, const int numVertices,
                           vk::VkPrimitiveTopology primitive, const bool noColorAttachments)
    : m_context(context)
    , m_colorAttachmentFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)

{
    vk::VkFormat depthFormat = vk::VK_FORMAT_D16_UNORM;
    const vk::VkDevice device = m_context.getDevice();

    vk::VkExtent3D imageExtent = {
        WIDTH,  // width;
        HEIGHT, // height;
        1       // depth;
    };

    ImageCreateInfo depthImageCreateInfo(vk::VK_IMAGE_TYPE_2D, depthFormat, imageExtent, 1, 1,
                                         vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
                                         vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);

    m_DepthImage = Image::createAndAlloc(vk, device, depthImageCreateInfo, m_context.getDefaultAllocator(),
                                         m_context.getUniversalQueueFamilyIndex());

    // Construct a depth view from depth image
    const ImageViewCreateInfo depthViewInfo(m_DepthImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, depthFormat);
    m_depthView = vk::createImageView(vk, device, &depthViewInfo);

    // Renderpass and Framebuffer
    if (noColorAttachments)
    {
        // Depth-only variant: a single depth attachment and no color outputs.
        RenderPassCreateInfo renderPassCreateInfo;

        renderPassCreateInfo.addAttachment(
            AttachmentDescription(depthFormat,                                            // format
                                  vk::VK_SAMPLE_COUNT_1_BIT,                              // samples
                                  vk::VK_ATTACHMENT_LOAD_OP_CLEAR,                        // loadOp
                                  vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,                   // storeOp
                                  vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,                    // stencilLoadOp
                                  vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,                   // stencilStoreOp
                                  vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,   // initialLayout
                                  vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)); // finalLayout

        const vk::VkAttachmentReference depthAttachmentReference = {
            0,                                                   // attachment
            vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // layout
        };

        renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
                                                           0,                        // flags
                                                           0,                        // inputCount
                                                           DE_NULL,                  // pInputAttachments
                                                           0,                        // colorCount
                                                           DE_NULL,                  // pColorAttachments
                                                           DE_NULL,                  // pResolveAttachments
                                                           depthAttachmentReference, // depthStencilAttachment
                                                           0,                        // preserveCount
                                                           DE_NULL));                // preserveAttachments

        m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);

        std::vector<vk::VkImageView> attachments(1);
        attachments[0] = *m_depthView;
        FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
        m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
    }
    else
    {
        // Color + depth variant; the color image is also transfer-src so the
        // rendered result can be read back.
        const ImageCreateInfo colorImageCreateInfo(
            vk::VK_IMAGE_TYPE_2D, m_colorAttachmentFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT,
            vk::VK_IMAGE_TILING_OPTIMAL, vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

        m_colorAttachmentImage =
            Image::createAndAlloc(vk, device, colorImageCreateInfo, m_context.getDefaultAllocator(),
                                  m_context.getUniversalQueueFamilyIndex());

        const ImageViewCreateInfo attachmentViewInfo(m_colorAttachmentImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D,
                                                     m_colorAttachmentFormat);

        m_attachmentView = vk::createImageView(vk, device, &attachmentViewInfo);

        RenderPassCreateInfo renderPassCreateInfo;
        renderPassCreateInfo.addAttachment(AttachmentDescription(m_colorAttachmentFormat,              // format
                                                                 vk::VK_SAMPLE_COUNT_1_BIT,            // samples
                                                                 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,      // loadOp
                                                                 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
                                                                 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,  // stencilLoadOp
                                                                 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
                                                                 vk::VK_IMAGE_LAYOUT_GENERAL,          // initialLayout
                                                                 vk::VK_IMAGE_LAYOUT_GENERAL));        // finalLayout

        renderPassCreateInfo.addAttachment(
            AttachmentDescription(depthFormat,                                            // format
                                  vk::VK_SAMPLE_COUNT_1_BIT,                              // samples
                                  vk::VK_ATTACHMENT_LOAD_OP_CLEAR,                        // loadOp
                                  vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,                   // storeOp
                                  vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,                    // stencilLoadOp
                                  vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,                   // stencilStoreOp
                                  vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,   // initialLayout
                                  vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)); // finalLayout

        const vk::VkAttachmentReference colorAttachmentReference = {
            0,                          // attachment
            vk::VK_IMAGE_LAYOUT_GENERAL // layout
        };

        const vk::VkAttachmentReference depthAttachmentReference = {
            1,                                                   // attachment
            vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // layout
        };

        renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
                                                           0,                         // flags
                                                           0,                         // inputCount
                                                           DE_NULL,                   // pInputAttachments
                                                           1,                         // colorCount
                                                           &colorAttachmentReference, // pColorAttachments
                                                           DE_NULL,                   // pResolveAttachments
                                                           depthAttachmentReference,  // depthStencilAttachment
                                                           0,                         // preserveCount
                                                           DE_NULL));                 // preserveAttachments

        m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);

        std::vector<vk::VkImageView> attachments(2);
        attachments[0] = *m_attachmentView;
        attachments[1] = *m_depthView;

        FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
        m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
    }

    {
        // Pipeline

        vk::Unique<vk::VkShaderModule> vs(
            vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
        vk::Unique<vk::VkShaderModule> fs(
            vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));

        const PipelineCreateInfo::ColorBlendState::Attachment attachmentState;

        const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
        m_pipelineLayout = vk::createPipelineLayout(vk, device, &pipelineLayoutCreateInfo);

        // One vec4 position per vertex, tightly packed.
        const vk::VkVertexInputBindingDescription vf_binding_desc = {
            0,                              // binding;
            4 * (uint32_t)sizeof(float),    // stride;
            vk::VK_VERTEX_INPUT_RATE_VERTEX // inputRate
        };

        const vk::VkVertexInputAttributeDescription vf_attribute_desc = {
            0,                                 // location;
            0,                                 // binding;
            vk::VK_FORMAT_R32G32B32A32_SFLOAT, // format;
            0                                  // offset;
        };

        const vk::VkPipelineVertexInputStateCreateInfo vf_info = {
            vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType;
            NULL,                                                          // pNext;
            0u,                                                            // flags;
            1,                                                             // vertexBindingDescriptionCount;
            &vf_binding_desc,                                              // pVertexBindingDescriptions;
            1,                                                             // vertexAttributeDescriptionCount;
            &vf_attribute_desc};                                           // pVertexAttributeDescriptions;

        PipelineCreateInfo pipelineCreateInfo(*m_pipelineLayout, *m_renderPass, 0, 0);
        pipelineCreateInfo.addShader(
            PipelineCreateInfo::PipelineShaderStage(*vs, "main", vk::VK_SHADER_STAGE_VERTEX_BIT));
        pipelineCreateInfo.addShader(
            PipelineCreateInfo::PipelineShaderStage(*fs, "main", vk::VK_SHADER_STAGE_FRAGMENT_BIT));
        pipelineCreateInfo.addState(PipelineCreateInfo::InputAssemblerState(primitive));
        // Blend state for one attachment (no color attachment may exist in the
        // depth-only variant, in which case the subpass declares colorCount 0).
        pipelineCreateInfo.addState(PipelineCreateInfo::ColorBlendState(1, &attachmentState));
        const vk::VkViewport viewport = vk::makeViewport(WIDTH, HEIGHT);
        const vk::VkRect2D scissor = vk::makeRect2D(WIDTH, HEIGHT);
        pipelineCreateInfo.addState(PipelineCreateInfo::ViewportState(1, std::vector<vk::VkViewport>(1, viewport),
                                                                      std::vector<vk::VkRect2D>(1, scissor)));
        // Depth test and write enabled with GREATER_OR_EQUAL comparison.
        pipelineCreateInfo.addState(
            PipelineCreateInfo::DepthStencilState(true, true, vk::VK_COMPARE_OP_GREATER_OR_EQUAL));
        pipelineCreateInfo.addState(PipelineCreateInfo::RasterizerState());
        pipelineCreateInfo.addState(PipelineCreateInfo::MultiSampleState());
        pipelineCreateInfo.addState(vf_info);
        m_pipeline = vk::createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
    }

    {
        // Vertex buffer
        const size_t kBufferSize = numVertices * sizeof(tcu::Vec4);
        m_vertexBuffer =
            Buffer::createAndAlloc(vk, device, BufferCreateInfo(kBufferSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
                                   m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
    }
}
276
setVertices(const vk::DeviceInterface & vk,std::vector<tcu::Vec4> vertices)277 void StateObjects::setVertices(const vk::DeviceInterface &vk, std::vector<tcu::Vec4> vertices)
278 {
279 const vk::VkDevice device = m_context.getDevice();
280
281 tcu::Vec4 *ptr = reinterpret_cast<tcu::Vec4 *>(m_vertexBuffer->getBoundMemory().getHostPtr());
282 std::copy(vertices.begin(), vertices.end(), ptr);
283
284 vk::flushAlloc(vk, device, m_vertexBuffer->getBoundMemory());
285 }
286
// Per-element size used when reading back query results.
enum OcclusionQueryResultSize
{
    RESULT_SIZE_64_BIT,
    RESULT_SIZE_32_BIT,
};

// How the test synchronizes before reading the query results.
enum OcclusionQueryWait
{
    WAIT_QUEUE, // wait for the queue to go idle first
    WAIT_QUERY, // rely on VK_QUERY_RESULT_WAIT_BIT
    WAIT_NONE   // no wait; results may legitimately be unavailable
};

// How results are retrieved and how the query pool is reset.
enum OcclusionQueryResultsMode
{
    RESULTS_MODE_GET,       // vkGetQueryPoolResults
    RESULTS_MODE_GET_RESET, // vkGetQueryPoolResults + host reset (VK_EXT_host_query_reset)
    RESULTS_MODE_COPY,      // vkCmdCopyQueryPoolResults into a buffer
    RESULTS_MODE_COPY_RESET // device copy combined with a host-side reset
};

// Optional clear command exercised by the test.
// NOTE(review): enum name keeps a historical misspelling ("Occulusion");
// renaming would require touching all users outside this chunk.
enum OcculusionQueryClearOp
{
    CLEAR_NOOP,
    CLEAR_COLOR,
    CLEAR_DEPTH
};

// Full configuration of a single occlusion query test case.
struct OcclusionQueryTestVector
{
    vk::VkQueryControlFlags queryControlFlags; // e.g. VK_QUERY_CONTROL_PRECISE_BIT
    OcclusionQueryResultSize queryResultSize;
    OcclusionQueryWait queryWait;
    OcclusionQueryResultsMode queryResultsMode;
    vk::VkDeviceSize queryResultsStride; // 0 selects a packed stride
    bool queryResultsAvailability;       // request VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
    vk::VkPrimitiveTopology primitiveTopology;
    bool discardHalf;
    bool queryResultsDstOffset; // copy results at a non-zero buffer offset
    OcculusionQueryClearOp clearOp;
    bool noColorAttachments; // render with a depth-only render pass
};
329
// Smoke test: one query capturing nothing and one capturing a single
// point-list draw call, read back with vkGetQueryPoolResults.
class BasicOcclusionQueryTestInstance : public vkt::TestInstance
{
public:
    BasicOcclusionQueryTestInstance(vkt::Context &context, const OcclusionQueryTestVector &testVector);
    ~BasicOcclusionQueryTestInstance(void);

private:
    tcu::TestStatus iterate(void);

    enum
    {
        NUM_QUERIES_IN_POOL = 2,
        QUERY_INDEX_CAPTURE_EMPTY = 0,    // begin/end with no draws in between
        QUERY_INDEX_CAPTURE_DRAWCALL = 1, // wraps the single draw call
        NUM_VERTICES_IN_DRAWCALL = 3
    };

    OcclusionQueryTestVector m_testVector;
    StateObjects *m_stateObjects; // owned; freed in the destructor
    vk::VkQueryPool m_queryPool;  // owned; destroyed in the destructor
};
351
// Creates the state objects and the two-query pool, and uploads the vertices
// for the single draw call.
BasicOcclusionQueryTestInstance::BasicOcclusionQueryTestInstance(vkt::Context &context,
                                                                 const OcclusionQueryTestVector &testVector)
    : TestInstance(context)
    , m_testVector(testVector)
{
    // The basic variant only supports this fixed configuration; other
    // combinations are covered by OcclusionQueryTestInstance.
    DE_ASSERT(
        testVector.queryResultSize == RESULT_SIZE_64_BIT && testVector.queryWait == WAIT_QUEUE &&
        (testVector.queryResultsMode == RESULTS_MODE_GET || testVector.queryResultsMode == RESULTS_MODE_GET_RESET) &&
        testVector.queryResultsStride == sizeof(uint64_t) && testVector.queryResultsAvailability == false &&
        testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST);

    if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) &&
        !m_context.getDeviceFeatures().occlusionQueryPrecise)
        throw tcu::NotSupportedError("Precise occlusion queries are not supported");

    m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL,
                                      m_testVector.primitiveTopology, m_testVector.noColorAttachments);

    const vk::VkDevice device = m_context.getDevice();
    const vk::DeviceInterface &vk = m_context.getDeviceInterface();

    const vk::VkQueryPoolCreateInfo queryPoolCreateInfo = {
        vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, DE_NULL, 0u, vk::VK_QUERY_TYPE_OCCLUSION, NUM_QUERIES_IN_POOL, 0};
    VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));

    // The three vertices used by the single point-list draw call.
    std::vector<tcu::Vec4> vertices(NUM_VERTICES_IN_DRAWCALL);
    vertices[0] = tcu::Vec4(0.5, 0.5, 0.0, 1.0);
    vertices[1] = tcu::Vec4(0.5, 0.0, 0.0, 1.0);
    vertices[2] = tcu::Vec4(0.0, 0.5, 0.0, 1.0);
    m_stateObjects->setVertices(vk, vertices);
}
383
~BasicOcclusionQueryTestInstance(void)384 BasicOcclusionQueryTestInstance::~BasicOcclusionQueryTestInstance(void)
385 {
386 if (m_stateObjects)
387 delete m_stateObjects;
388
389 if (m_queryPool != DE_NULL)
390 {
391 #ifndef CTS_USES_VULKANSC
392 const vk::VkDevice device = m_context.getDevice();
393 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
394 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
395 #endif
396 }
397 }
398
iterate(void)399 tcu::TestStatus BasicOcclusionQueryTestInstance::iterate(void)
400 {
401 tcu::TestLog &log = m_context.getTestContext().getLog();
402 const vk::VkDevice device = m_context.getDevice();
403 const vk::VkQueue queue = m_context.getUniversalQueue();
404 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
405
406 if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
407 {
408 // Check VK_EXT_host_query_reset is supported
409 m_context.requireDeviceFunctionality("VK_EXT_host_query_reset");
410 if (m_context.getHostQueryResetFeatures().hostQueryReset == VK_FALSE)
411 throw tcu::NotSupportedError(
412 std::string("Implementation doesn't support resetting queries from the host").c_str());
413 }
414
415 const CmdPoolCreateInfo cmdPoolCreateInfo(m_context.getUniversalQueueFamilyIndex());
416 vk::Move<vk::VkCommandPool> cmdPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
417
418 vk::Unique<vk::VkCommandBuffer> cmdBuffer(
419 vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
420
421 beginCommandBuffer(vk, *cmdBuffer);
422
423 if (!m_testVector.noColorAttachments)
424 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(),
425 vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
426 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
427 initialTransitionDepth2DImage(
428 vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
429 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
430 vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
431
432 std::vector<vk::VkClearValue> renderPassClearValues(2);
433 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
434
435 if (m_testVector.queryResultsMode != RESULTS_MODE_GET_RESET)
436 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
437
438 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer,
439 vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT),
440 (uint32_t)renderPassClearValues.size(), &renderPassClearValues[0]);
441
442 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
443
444 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
445 const vk::VkDeviceSize vertexBufferOffset = 0;
446 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
447
448 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY, m_testVector.queryControlFlags);
449 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY);
450
451 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL, m_testVector.queryControlFlags);
452 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, 0, 0);
453 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL);
454
455 endRenderPass(vk, *cmdBuffer);
456
457 if (!m_testVector.noColorAttachments)
458 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(),
459 vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
460 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
461 vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
462 vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
463
464 endCommandBuffer(vk, *cmdBuffer);
465
466 if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
467 vk.resetQueryPool(device, m_queryPool, 0, NUM_QUERIES_IN_POOL);
468
469 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
470
471 uint64_t queryResults[NUM_QUERIES_IN_POOL] = {0};
472 size_t queryResultsSize = sizeof(queryResults);
473
474 vk::VkResult queryResult =
475 vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, queryResultsSize, queryResults,
476 sizeof(queryResults[0]), vk::VK_QUERY_RESULT_64_BIT);
477
478 if (queryResult == vk::VK_NOT_READY)
479 {
480 TCU_FAIL("Query result not avaliable, but vkWaitIdle() was called.");
481 }
482
483 VK_CHECK(queryResult);
484
485 log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
486 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(queryResults); ++ndx)
487 {
488 log << tcu::TestLog::Message << "query[slot == " << ndx << "] result == " << queryResults[ndx]
489 << tcu::TestLog::EndMessage;
490 }
491
492 bool passed = true;
493
494 for (int queryNdx = 0; queryNdx < DE_LENGTH_OF_ARRAY(queryResults); ++queryNdx)
495 {
496
497 uint64_t expectedValue;
498
499 switch (queryNdx)
500 {
501 case QUERY_INDEX_CAPTURE_EMPTY:
502 expectedValue = 0;
503 break;
504 case QUERY_INDEX_CAPTURE_DRAWCALL:
505 expectedValue = NUM_VERTICES_IN_DRAWCALL;
506 break;
507 }
508
509 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || expectedValue == 0)
510 {
511 // require precise value
512 if (queryResults[queryNdx] != expectedValue)
513 {
514 log << tcu::TestLog::Message
515 << "vkGetQueryPoolResults returned "
516 "wrong value of query for index "
517 << queryNdx << ", expected " << expectedValue << ", got " << queryResults[0] << "."
518 << tcu::TestLog::EndMessage;
519 passed = false;
520 }
521 }
522 else
523 {
524 // require imprecize value > 0
525 if (queryResults[queryNdx] == 0)
526 {
527 log << tcu::TestLog::Message
528 << "vkGetQueryPoolResults returned "
529 "wrong value of query for index "
530 << queryNdx << ", expected any non-zero value, got " << queryResults[0] << "."
531 << tcu::TestLog::EndMessage;
532 passed = false;
533 }
534 }
535 }
536 log << tcu::TestLog::EndSection;
537
538 if (passed)
539 {
540 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
541 }
542 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
543 }
544
// Full-featured occlusion query test: three queries over a scene with an
// unoccluded, a partially occluded and a fully occluded draw, exercising the
// wait/copy/reset combinations described by OcclusionQueryTestVector.
class OcclusionQueryTestInstance : public vkt::TestInstance
{
public:
    OcclusionQueryTestInstance(vkt::Context &context, const OcclusionQueryTestVector &testVector);
    ~OcclusionQueryTestInstance(void);

private:
    tcu::TestStatus iterate(void);

    // Whether the reset / copy must live in its own command buffer (see the
    // definitions for the synchronization rationale).
    bool hasSeparateResetCmdBuf(void) const;
    bool hasSeparateCopyCmdBuf(void) const;
    void commandClearAttachment(const vk::DeviceInterface &vk, const vk::VkCommandBuffer commandBuffer);

    vk::Move<vk::VkCommandBuffer> recordQueryPoolReset(vk::VkCommandPool commandPool);
    vk::Move<vk::VkCommandBuffer> recordRender(vk::VkCommandPool commandPool);
    vk::Move<vk::VkCommandBuffer> recordCopyResults(vk::VkCommandPool commandPool);

    void captureResults(uint64_t *retResults, uint64_t *retAvailability, bool allowNotReady);
    void logResults(const uint64_t *results, const uint64_t *availability);
    bool validateResults(const uint64_t *results, const uint64_t *availability, bool allowUnavailable,
                         vk::VkPrimitiveTopology primitiveTopology);

    enum
    {
        NUM_QUERIES_IN_POOL = 3,
        QUERY_INDEX_CAPTURE_ALL = 0,                // the unoccluded draw
        QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED = 1, // the partially occluded draw
        QUERY_INDEX_CAPTURE_OCCLUDED = 2            // the fully occluded draw
    };
    enum
    {
        NUM_VERTICES_IN_DRAWCALL = 3,
        NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL = 3,
        NUM_VERTICES_IN_OCCLUDER_DRAWCALL = 3,
        NUM_VERTICES =
            NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL
    };
    // Offsets of each draw's vertices within the shared vertex buffer.
    enum
    {
        START_VERTEX = 0,
        START_VERTEX_PARTIALLY_OCCLUDED = START_VERTEX + NUM_VERTICES_IN_DRAWCALL,
        START_VERTEX_OCCLUDER = START_VERTEX_PARTIALLY_OCCLUDED + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL
    };

    OcclusionQueryTestVector m_testVector;

    // Flags passed to vkGetQueryPoolResults / vkCmdCopyQueryPoolResults.
    const vk::VkQueryResultFlags m_queryResultFlags;

    StateObjects *m_stateObjects; // owned; freed in the destructor
    vk::VkQueryPool m_queryPool;  // owned; destroyed in the destructor
    de::SharedPtr<Buffer> m_queryPoolResultsBuffer; // destination for RESULTS_MODE_COPY*

    vk::Move<vk::VkCommandPool> m_commandPool;
    vk::Move<vk::VkCommandBuffer> m_queryPoolResetCommandBuffer;
    vk::Move<vk::VkCommandBuffer> m_renderCommandBuffer;
    vk::Move<vk::VkCommandBuffer> m_copyResultsCommandBuffer;
};
602
// Creates state objects and the query pool, allocates the results buffer for
// the copy modes, and pre-records all command buffers used by iterate().
OcclusionQueryTestInstance::OcclusionQueryTestInstance(vkt::Context &context,
                                                       const OcclusionQueryTestVector &testVector)
    : vkt::TestInstance(context)
    , m_testVector(testVector)
    // WAIT_BIT only when the test waits on the query itself (and is not the
    // copy+host-reset combination); 64-bit and availability bits per vector.
    , m_queryResultFlags(
          ((m_testVector.queryWait == WAIT_QUERY && m_testVector.queryResultsMode != RESULTS_MODE_COPY_RESET) ?
               vk::VK_QUERY_RESULT_WAIT_BIT :
               0) |
          (m_testVector.queryResultSize == RESULT_SIZE_64_BIT ? vk::VK_QUERY_RESULT_64_BIT : 0) |
          (m_testVector.queryResultsAvailability ? vk::VK_QUERY_RESULT_WITH_AVAILABILITY_BIT : 0))
{
    const vk::VkDevice device = m_context.getDevice();
    const vk::DeviceInterface &vk = m_context.getDeviceInterface();

    if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) &&
        !m_context.getDeviceFeatures().occlusionQueryPrecise)
        throw tcu::NotSupportedError("Precise occlusion queries are not supported");

    m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context,
                                      NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL +
                                          NUM_VERTICES_IN_OCCLUDER_DRAWCALL,
                                      m_testVector.primitiveTopology, m_testVector.noColorAttachments);

    const vk::VkQueryPoolCreateInfo queryPoolCreateInfo = {
        vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, DE_NULL, 0u, vk::VK_QUERY_TYPE_OCCLUSION, NUM_QUERIES_IN_POOL, 0};

    VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));

    if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
    {
        // One extra slot is reserved when the copy uses a non-zero dst offset.
        uint32_t numQueriesinPool = NUM_QUERIES_IN_POOL + (m_testVector.queryResultsDstOffset ? 1 : 0);
        const vk::VkDeviceSize elementSize =
            m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
        // Stride 0 means "packed": one element per query, doubled when
        // availability values are interleaved with the results.
        const vk::VkDeviceSize resultsBufferSize =
            m_testVector.queryResultsStride == 0 ?
                (elementSize + elementSize * m_testVector.queryResultsAvailability) * numQueriesinPool :
                m_testVector.queryResultsStride * numQueriesinPool;
        m_queryPoolResultsBuffer = Buffer::createAndAlloc(
            vk, device, BufferCreateInfo(resultsBufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT),
            m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
    }

    const CmdPoolCreateInfo cmdPoolCreateInfo(m_context.getUniversalQueueFamilyIndex());
    m_commandPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
    m_renderCommandBuffer = recordRender(*m_commandPool);

    if (hasSeparateResetCmdBuf())
    {
        m_queryPoolResetCommandBuffer = recordQueryPoolReset(*m_commandPool);
    }

    if (hasSeparateCopyCmdBuf())
    {
        m_copyResultsCommandBuffer = recordCopyResults(*m_commandPool);
    }
}
659
~OcclusionQueryTestInstance(void)660 OcclusionQueryTestInstance::~OcclusionQueryTestInstance(void)
661 {
662
663 if (m_stateObjects)
664 delete m_stateObjects;
665
666 if (m_queryPool != DE_NULL)
667 {
668 #ifndef CTS_USES_VULKANSC
669 const vk::VkDevice device = m_context.getDevice();
670 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
671 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
672 #endif
673 }
674 }
675
iterate(void)676 tcu::TestStatus OcclusionQueryTestInstance::iterate(void)
677 {
678 const vk::VkQueue queue = m_context.getUniversalQueue();
679 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
680 tcu::TestLog &log = m_context.getTestContext().getLog();
681 std::vector<tcu::Vec4> vertices(NUM_VERTICES);
682
683 if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
684 {
685 // Check VK_EXT_host_query_reset is supported
686 m_context.requireDeviceFunctionality("VK_EXT_host_query_reset");
687 if (m_context.getHostQueryResetFeatures().hostQueryReset == VK_FALSE)
688 throw tcu::NotSupportedError(
689 std::string("Implementation doesn't support resetting queries from the host").c_str());
690 }
691
692 // 1st triangle
693 vertices[START_VERTEX + 0] = tcu::Vec4(0.5, 0.5, 0.5, 1.0);
694 vertices[START_VERTEX + 1] = tcu::Vec4(0.5, -0.5, 0.5, 1.0);
695 vertices[START_VERTEX + 2] = tcu::Vec4(-0.5, 0.5, 0.5, 1.0);
696 // 2nd triangle - partially occluding the scene
697 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 0] = tcu::Vec4(-0.5, -0.5, 1.0, 1.0);
698 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 1] = tcu::Vec4(0.5, -0.5, 1.0, 1.0);
699 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
700 // 3nd triangle - fully occluding the scene
701 vertices[START_VERTEX_OCCLUDER + 0] = tcu::Vec4(0.5, 0.5, 1.0, 1.0);
702 vertices[START_VERTEX_OCCLUDER + 1] = tcu::Vec4(0.5, -0.5, 1.0, 1.0);
703 vertices[START_VERTEX_OCCLUDER + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
704
705 m_stateObjects->setVertices(vk, vertices);
706
707 if (hasSeparateResetCmdBuf())
708 {
709 const vk::VkSubmitInfo submitInfoReset = {
710 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
711 DE_NULL, // const void* pNext;
712 0u, // uint32_t waitSemaphoreCount;
713 DE_NULL, // const VkSemaphore* pWaitSemaphores;
714 (const vk::VkPipelineStageFlags *)DE_NULL,
715 1u, // uint32_t commandBufferCount;
716 &m_queryPoolResetCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
717 0u, // uint32_t signalSemaphoreCount;
718 DE_NULL // const VkSemaphore* pSignalSemaphores;
719 };
720
721 vk.queueSubmit(queue, 1, &submitInfoReset, DE_NULL);
722
723 // Trivially wait for reset to complete. This is to ensure the query pool is in reset state before
724 // host accesses, so as to not insert any synchronization before capturing the results needed for WAIT_NONE
725 // variant of test.
726 VK_CHECK(vk.queueWaitIdle(queue));
727 }
728
729 {
730 const vk::VkSubmitInfo submitInfoRender = {
731 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
732 DE_NULL, // const void* pNext;
733 0, // uint32_t waitSemaphoreCount;
734 DE_NULL, // const VkSemaphore* pWaitSemaphores;
735 (const vk::VkPipelineStageFlags *)DE_NULL,
736 1, // uint32_t commandBufferCount;
737 &m_renderCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
738 0, // uint32_t signalSemaphoreCount;
739 DE_NULL // const VkSemaphore* pSignalSemaphores;
740 };
741
742 if (!hasSeparateResetCmdBuf() && m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
743 vk.resetQueryPool(m_context.getDevice(), m_queryPool, 0, NUM_QUERIES_IN_POOL);
744 vk.queueSubmit(queue, 1, &submitInfoRender, DE_NULL);
745 }
746
747 if (m_testVector.queryWait == WAIT_QUEUE)
748 {
749 VK_CHECK(vk.queueWaitIdle(queue));
750 }
751
752 if (hasSeparateCopyCmdBuf())
753 {
754 // In case of WAIT_QUEUE test variant, the previously submitted m_renderCommandBuffer did not
755 // contain vkCmdCopyQueryResults, so additional cmd buffer is needed.
756
757 // In the case of WAIT_NONE or WAIT_QUERY, vkCmdCopyQueryResults is stored in m_renderCommandBuffer.
758
759 const vk::VkSubmitInfo submitInfo = {
760 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
761 DE_NULL, // const void* pNext;
762 0, // uint32_t waitSemaphoreCount;
763 DE_NULL, // const VkSemaphore* pWaitSemaphores;
764 (const vk::VkPipelineStageFlags *)DE_NULL,
765 1, // uint32_t commandBufferCount;
766 &m_copyResultsCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
767 0, // uint32_t signalSemaphoreCount;
768 DE_NULL // const VkSemaphore* pSignalSemaphores;
769 };
770 vk.queueSubmit(queue, 1, &submitInfo, DE_NULL);
771 }
772
773 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
774 {
775 // In case of vkCmdCopyQueryResults is used, test must always wait for it
776 // to complete before we can read the result buffer.
777
778 VK_CHECK(vk.queueWaitIdle(queue));
779 }
780
781 uint64_t queryResults[NUM_QUERIES_IN_POOL];
782 uint64_t queryAvailability[NUM_QUERIES_IN_POOL];
783
784 // Allow not ready results only if nobody waited before getting the query results
785 const bool allowNotReady = (m_testVector.queryWait == WAIT_NONE);
786
787 captureResults(queryResults, queryAvailability, allowNotReady);
788
789 log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
790
791 logResults(queryResults, queryAvailability);
792 bool passed = validateResults(queryResults, queryAvailability, allowNotReady, m_testVector.primitiveTopology);
793
794 log << tcu::TestLog::EndSection;
795
796 if (m_testVector.queryResultsMode != RESULTS_MODE_COPY && m_testVector.queryResultsMode != RESULTS_MODE_COPY_RESET)
797 {
798 VK_CHECK(vk.queueWaitIdle(queue));
799 }
800
801 if (passed)
802 {
803 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
804 }
805 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
806 }
807
hasSeparateResetCmdBuf(void) const808 bool OcclusionQueryTestInstance::hasSeparateResetCmdBuf(void) const
809 {
810 // Determine if resetting query pool should be performed in separate command buffer
811 // to avoid race condition between host query access and device query reset.
812
813 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
814 {
815 // We copy query results on device, so there is no race condition between
816 // host and device
817 return false;
818 }
819 if (m_testVector.queryWait == WAIT_QUEUE)
820 {
821 // We wait for queue to be complete before accessing query results
822 return false;
823 }
824
825 // Separate command buffer with reset must be submitted & completed before
826 // host accesses the query results
827 return true;
828 }
829
hasSeparateCopyCmdBuf(void) const830 bool OcclusionQueryTestInstance::hasSeparateCopyCmdBuf(void) const
831 {
832 // Copy query results must go into separate command buffer, if we want to wait on queue before that
833 return ((m_testVector.queryResultsMode == RESULTS_MODE_COPY ||
834 m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET) &&
835 m_testVector.queryWait == WAIT_QUEUE);
836 }
837
recordQueryPoolReset(vk::VkCommandPool cmdPool)838 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordQueryPoolReset(vk::VkCommandPool cmdPool)
839 {
840 const vk::VkDevice device = m_context.getDevice();
841 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
842
843 DE_ASSERT(hasSeparateResetCmdBuf());
844
845 vk::Move<vk::VkCommandBuffer> cmdBuffer(
846 vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
847
848 beginCommandBuffer(vk, *cmdBuffer);
849 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
850 endCommandBuffer(vk, *cmdBuffer);
851
852 return cmdBuffer;
853 }
854
commandClearAttachment(const vk::DeviceInterface & vk,const vk::VkCommandBuffer commandBuffer)855 void OcclusionQueryTestInstance::commandClearAttachment(const vk::DeviceInterface &vk,
856 const vk::VkCommandBuffer commandBuffer)
857 {
858 if (m_testVector.clearOp == CLEAR_NOOP)
859 return;
860
861 const vk::VkOffset2D offset = vk::makeOffset2D(0, 0);
862 const vk::VkExtent2D extent = vk::makeExtent2D(StateObjects::WIDTH, StateObjects::HEIGHT);
863
864 const vk::VkClearAttachment attachment = {
865 m_testVector.clearOp == CLEAR_COLOR ?
866 (vk::VkImageAspectFlags)vk::VK_IMAGE_ASPECT_COLOR_BIT :
867 (vk::VkImageAspectFlags)vk::VK_IMAGE_ASPECT_DEPTH_BIT, // VkImageAspectFlags aspectMask;
868 m_testVector.clearOp == CLEAR_COLOR ? 0u : 1u, // uint32_t colorAttachment;
869 m_testVector.clearOp == CLEAR_COLOR ? vk::makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f)) :
870 vk::makeClearValueDepthStencil(0.0f, 0u) // VkClearValue clearValue;
871 };
872
873 const vk::VkClearRect rect = {
874 {offset, extent}, // VkRect2D rect;
875 0u, // uint32_t baseArrayLayer;
876 1u, // uint32_t layerCount;
877 };
878
879 vk.cmdClearAttachments(commandBuffer, 1u, &attachment, 1u, &rect);
880 }
881
// Record the main rendering command buffer: three render passes, each ending
// an occlusion query around the same reference draw, but with progressively
// more occluding geometry drawn first:
//   pass 1 -> QUERY_INDEX_CAPTURE_ALL                (nothing occludes)
//   pass 2 -> QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED (partial occluder drawn first)
//   pass 3 -> QUERY_INDEX_CAPTURE_OCCLUDED           (full occluder drawn first)
// Depending on the test vector it also resets the pool and/or copies the
// results to the results buffer inside this same command buffer.
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordRender(vk::VkCommandPool cmdPool)
{
    const vk::VkDevice device = m_context.getDevice();
    const vk::DeviceInterface &vk = m_context.getDeviceInterface();

    vk::Move<vk::VkCommandBuffer> cmdBuffer(
        vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer);

    // Put attachments into the layouts the render pass expects.
    if (!m_testVector.noColorAttachments)
        initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(),
                                      vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);

    initialTransitionDepth2DImage(
        vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
        vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);

    std::vector<vk::VkClearValue> renderPassClearValues(2);
    deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));

    // Inline reset, unless the reset lives in a separate command buffer or the
    // GET_RESET mode resets on the host instead.
    if (!hasSeparateResetCmdBuf() && m_testVector.queryResultsMode != RESULTS_MODE_GET_RESET)
    {
        vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
    }

    // --- Render pass 1: reference draw with no occluders -> CAPTURE_ALL ---
    beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer,
                    vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT),
                    (uint32_t)renderPassClearValues.size(), &renderPassClearValues[0]);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);

    vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
    const vk::VkDeviceSize vertexBufferOffset = 0;
    vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);

    // Draw un-occluded geometry
    vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL, m_testVector.queryControlFlags);
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
    // Optional clear inside the query scope; it must not affect the count.
    commandClearAttachment(vk, *cmdBuffer);
    vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL);

    endRenderPass(vk, *cmdBuffer);

    // --- Render pass 2: partial occluder first -> CAPTURE_PARTIALLY_OCCLUDED ---
    beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer,
                    vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT),
                    (uint32_t)renderPassClearValues.size(), &renderPassClearValues[0]);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);

    // Draw un-occluded geometry
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);

    // Partially occlude geometry
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);

    // Draw partially-occluded geometry
    vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED, m_testVector.queryControlFlags);
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
    commandClearAttachment(vk, *cmdBuffer);
    vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED);

    endRenderPass(vk, *cmdBuffer);

    // --- Render pass 3: full occluder first -> CAPTURE_OCCLUDED ---
    beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer,
                    vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT),
                    (uint32_t)renderPassClearValues.size(), &renderPassClearValues[0]);

    vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);

    // Draw un-occluded geometry
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);

    // Partially occlude geometry
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);

    // Occlude geometry
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_OCCLUDER_DRAWCALL, 1, START_VERTEX_OCCLUDER, 0);

    // Draw occluded geometry
    vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED, m_testVector.queryControlFlags);
    vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
    commandClearAttachment(vk, *cmdBuffer);
    vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED);

    endRenderPass(vk, *cmdBuffer);

    // In copy-reset mode the pool is reset *before* copying, so the copied
    // availability words are expected to read as unavailable.
    if (m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
    {
        vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
    }

    // Inline device-side copy of the results, unless the copy goes into its
    // own command buffer (WAIT_QUEUE + copy modes).
    if ((m_testVector.queryResultsMode == RESULTS_MODE_COPY ||
         m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET) &&
        !hasSeparateCopyCmdBuf())
    {
        vk::VkDeviceSize dstOffset = m_testVector.queryResultsDstOffset ? m_testVector.queryResultsStride : 0u;

        if (m_testVector.queryResultsStride != 0u)
        {
            vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL,
                                       m_queryPoolResultsBuffer->object(), dstOffset, m_testVector.queryResultsStride,
                                       m_queryResultFlags);
        }
        else
        {
            // Stride 0: copy each query individually, packing results tightly
            // at the natural element size (value + optional availability word).
            const vk::VkDeviceSize elementSize =
                m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
            const vk::VkDeviceSize strideSize = elementSize + elementSize * m_testVector.queryResultsAvailability;

            for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
            {
                vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, queryNdx, 1, m_queryPoolResultsBuffer->object(),
                                           strideSize * queryNdx, 0, m_queryResultFlags);
            }
        }

        // Make the transfer writes visible to host reads.
        bufferBarrier(vk, *cmdBuffer, m_queryPoolResultsBuffer->object(), vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                      vk::VK_ACCESS_HOST_READ_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT);
    }

    // Prepare the color attachment for a later copy-out (e.g. for logging).
    if (!m_testVector.noColorAttachments)
        transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(),
                          vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
                          vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                          vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                          vk::VK_PIPELINE_STAGE_TRANSFER_BIT);

    endCommandBuffer(vk, *cmdBuffer);

    return cmdBuffer;
}
1016
recordCopyResults(vk::VkCommandPool cmdPool)1017 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordCopyResults(vk::VkCommandPool cmdPool)
1018 {
1019 const vk::VkDevice device = m_context.getDevice();
1020 const vk::DeviceInterface &vk = m_context.getDeviceInterface();
1021
1022 vk::Move<vk::VkCommandBuffer> cmdBuffer(
1023 vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1024
1025 beginCommandBuffer(vk, *cmdBuffer);
1026
1027 vk::VkDeviceSize dstOffset = m_testVector.queryResultsDstOffset ? m_testVector.queryResultsStride : 0u;
1028
1029 if (m_testVector.queryResultsStride != 0u)
1030 {
1031 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(),
1032 dstOffset, m_testVector.queryResultsStride, m_queryResultFlags);
1033 }
1034 else
1035 {
1036 const vk::VkDeviceSize elementSize =
1037 m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
1038 const vk::VkDeviceSize strideSize = elementSize + elementSize * m_testVector.queryResultsAvailability;
1039
1040 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
1041 {
1042 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, queryNdx, 1, m_queryPoolResultsBuffer->object(),
1043 strideSize * queryNdx, 0, m_queryResultFlags);
1044 }
1045 }
1046
1047 bufferBarrier(vk, *cmdBuffer, m_queryPoolResultsBuffer->object(), vk::VK_ACCESS_TRANSFER_WRITE_BIT,
1048 vk::VK_ACCESS_HOST_READ_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT);
1049
1050 endCommandBuffer(vk, *cmdBuffer);
1051
1052 return cmdBuffer;
1053 }
1054
// Read back all query results (and optional availability words) into the
// caller-provided arrays, either via vkGetQueryPoolResults (GET modes) or by
// reading the host-visible buffer that the device copy wrote (COPY modes).
// In GET_RESET mode it additionally resets the pool on the host and verifies
// that a subsequent vkGetQueryPoolResults reports VK_NOT_READY without
// modifying previously returned values.
//
// retResults      - receives NUM_QUERIES_IN_POOL result values.
// retAvailAbility - receives availability words; written only when
//                   m_testVector.queryResultsAvailability is set.
// allowNotReady   - when true, a VK_NOT_READY return from
//                   vkGetQueryPoolResults is tolerated (no wait was issued).
void OcclusionQueryTestInstance::captureResults(uint64_t *retResults, uint64_t *retAvailAbility, bool allowNotReady)
{
    const vk::VkDevice device = m_context.getDevice();
    const vk::DeviceInterface &vk = m_context.getDeviceInterface();
    const vk::VkDeviceSize elementSize =
        m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
    // Stride 0 means tightly packed: one value plus (optionally) one
    // availability word per query.
    const vk::VkDeviceSize resultsSize = m_testVector.queryResultsStride == 0 ?
                                             elementSize + elementSize * m_testVector.queryResultsAvailability :
                                             m_testVector.queryResultsStride;
    std::vector<uint8_t> resultsBuffer(static_cast<size_t>(resultsSize * NUM_QUERIES_IN_POOL));

    if (m_testVector.queryResultsMode == RESULTS_MODE_GET || m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
    {
        vk::VkResult queryResult =
            vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0],
                                   m_testVector.queryResultsStride, m_queryResultFlags);
        if (queryResult == vk::VK_NOT_READY && !allowNotReady)
        {
            TCU_FAIL("getQueryPoolResults returned VK_NOT_READY, but results should be already available.");
        }
        else
        {
            VK_CHECK(queryResult);
        }
    }
    else if (m_testVector.queryResultsMode == RESULTS_MODE_COPY ||
             m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
    {
        // Results were written by vkCmdCopyQueryPoolResults; read them from
        // the mapped buffer, skipping the dstOffset used during the copy.
        const vk::Allocation &allocation = m_queryPoolResultsBuffer->getBoundMemory();
        const uint8_t *allocationData = static_cast<uint8_t *>(allocation.getHostPtr());
        const int32_t indexData = m_testVector.queryResultsDstOffset ? (int32_t)m_testVector.queryResultsStride : 0u;

        vk::invalidateAlloc(vk, device, allocation);

        deMemcpy(&resultsBuffer[0], &allocationData[indexData], resultsBuffer.size());
    }

    // Decode value (and availability word, stored right after the value) for
    // each query slot at the configured stride and element width.
    for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
    {
        const void *srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(resultsSize)];

        if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
        {
            const uint32_t *srcPtrTyped = static_cast<const uint32_t *>(srcPtr);
            retResults[queryNdx] = *srcPtrTyped;
            if (m_testVector.queryResultsAvailability)
            {
                retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
            }
        }
        else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
        {
            const uint64_t *srcPtrTyped = static_cast<const uint64_t *>(srcPtr);
            retResults[queryNdx] = *srcPtrTyped;

            if (m_testVector.queryResultsAvailability)
            {
                retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
            }
        }
        else
        {
            TCU_FAIL("Wrong m_testVector.queryResultSize");
        }
    }

    if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
    {
        // Reset on the host, then confirm the pool now reports unavailable
        // results while leaving the previously fetched values untouched.
        vk.resetQueryPool(device, m_queryPool, 0, NUM_QUERIES_IN_POOL);

        vk::VkResult queryResult =
            vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0],
                                   m_testVector.queryResultsStride, m_queryResultFlags);

        if (queryResult != vk::VK_NOT_READY)
        {
            TCU_FAIL("getQueryPoolResults did not return VK_NOT_READY");
        }

        /* From Vulkan spec:
         *
         * If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are both not set then no result values are written to pData
         * for queries that are in the unavailable state at the time of the call, and vkGetQueryPoolResults returns VK_NOT_READY.
         * However, availability state is still written to pData for those queries if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
         */
        for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
        {
            const void *srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(resultsSize)];
            if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
            {
                const uint32_t *srcPtrTyped = static_cast<const uint32_t *>(srcPtr);
                if (*srcPtrTyped != retResults[queryNdx])
                {
                    TCU_FAIL("getQueryPoolResults returned modified values");
                }

                if (m_testVector.queryResultsAvailability && *(srcPtrTyped + 1) != 0)
                {
                    TCU_FAIL("resetQueryPool did not disable availability bit");
                }
            }
            else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
            {
                const uint64_t *srcPtrTyped = static_cast<const uint64_t *>(srcPtr);
                if (*srcPtrTyped != retResults[queryNdx])
                {
                    TCU_FAIL("getQueryPoolResults returned modified values");
                }

                if (m_testVector.queryResultsAvailability && *(srcPtrTyped + 1) != 0)
                {
                    TCU_FAIL("resetQueryPool did not disable availability bit");
                }
            }
            else
            {
                TCU_FAIL("Wrong m_testVector.queryResultSize");
            }
        }
    }
}
1176
logResults(const uint64_t * results,const uint64_t * availability)1177 void OcclusionQueryTestInstance::logResults(const uint64_t *results, const uint64_t *availability)
1178 {
1179 tcu::TestLog &log = m_context.getTestContext().getLog();
1180
1181 for (int ndx = 0; ndx < NUM_QUERIES_IN_POOL; ++ndx)
1182 {
1183 if (!m_testVector.queryResultsAvailability)
1184 {
1185 log << tcu::TestLog::Message << "query[slot == " << ndx << "] result == " << results[ndx]
1186 << tcu::TestLog::EndMessage;
1187 }
1188 else
1189 {
1190 log << tcu::TestLog::Message << "query[slot == " << ndx << "] result == " << results[ndx]
1191 << ", availability == " << availability[ndx] << tcu::TestLog::EndMessage;
1192 }
1193 }
1194 }
1195
// Check every query slot against the expected occlusion counts.
//
// results          - NUM_QUERIES_IN_POOL values from captureResults().
// availability     - matching availability words; only read when the test
//                    vector requested availability.
// allowUnavailable - tolerate availability == 0 (no wait was issued).
// primitiveTopology- decides the expected sample counts (points vs triangles).
//
// Returns true when all slots pass. In COPY_RESET mode only the availability
// words are checked (they must read unavailable); actual counts are ignored.
bool OcclusionQueryTestInstance::validateResults(const uint64_t *results, const uint64_t *availability,
                                                 bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology)
{
    bool passed = true;
    tcu::TestLog &log = m_context.getTestContext().getLog();

    for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; ++queryNdx)
    {
        uint64_t expectedValueMin = 0;
        uint64_t expectedValueMax = 0;

        if (m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
        {
            DE_ASSERT(m_testVector.queryResultsAvailability);
            if (availability[queryNdx] != 0)
            {
                // In copy-reset mode results should always be unavailable due to the reset command issued before copying results.
                log << tcu::TestLog::Message << "query results availability was nonzero for index " << queryNdx
                    << " when resetting the query before copying results" << tcu::TestLog::EndMessage;
                passed = false;
            }

            // Not interested in the actual results.
            continue;
        }
        else if (m_testVector.queryResultsAvailability && availability[queryNdx] == 0)
        {
            // query result was not available
            if (!allowUnavailable)
            {
                log << tcu::TestLog::Message << "query results availability was 0 for index " << queryNdx
                    << ", expected any value greater than 0." << tcu::TestLog::EndMessage;
                passed = false;
                continue;
            }
        }
        else
        {
            // query is available, so expect proper result values
            if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
            {
                // Points: sample counts are exact (one sample per visible point).
                switch (queryNdx)
                {
                case QUERY_INDEX_CAPTURE_OCCLUDED:
                    expectedValueMin = 0;
                    expectedValueMax = 0;
                    break;
                case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
                    expectedValueMin = 1;
                    expectedValueMax = 1;
                    break;
                case QUERY_INDEX_CAPTURE_ALL:
                    expectedValueMin = NUM_VERTICES_IN_DRAWCALL;
                    expectedValueMax = NUM_VERTICES_IN_DRAWCALL;
                    break;
                }
            }
            else if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
            {
                // Triangles: allow a small rasterization tolerance around the
                // analytic triangle area (halved when the shader discards
                // every other pixel in a checkerboard).
                switch (queryNdx)
                {
                case QUERY_INDEX_CAPTURE_OCCLUDED:
                    expectedValueMin = 0;
                    expectedValueMax = 0;
                    break;
                case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
                case QUERY_INDEX_CAPTURE_ALL:
                {
                    const int primWidth = StateObjects::WIDTH / 2;
                    const int primHeight = StateObjects::HEIGHT / 2;
                    const int primArea = primWidth * primHeight / 2;

                    if (m_testVector.discardHalf)
                    {
                        expectedValueMin = (int)(0.95f * primArea * 0.5f);
                        expectedValueMax = (int)(1.05f * primArea * 0.5f);
                    }
                    else
                    {
                        expectedValueMin = (int)(0.97f * primArea);
                        expectedValueMax = (int)(1.03f * primArea);
                    }
                }
                }
            }
            else
            {
                TCU_FAIL("Unsupported primitive topology");
            }
        }

        if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) ||
            (expectedValueMin == 0 && expectedValueMax == 0))
        {
            // require precise value
            if (results[queryNdx] < expectedValueMin || results[queryNdx] > expectedValueMax)
            {
                log << tcu::TestLog::Message << "wrong value of query for index " << queryNdx
                    << ", expected the value minimum of " << expectedValueMin << ", maximum of " << expectedValueMax
                    << " got " << results[queryNdx] << "." << tcu::TestLog::EndMessage;
                passed = false;
            }
        }
        else
        {
            // require imprecise value greater than 0
            if (results[queryNdx] == 0)
            {
                log << tcu::TestLog::Message << "wrong value of query for index " << queryNdx
                    << ", expected any non-zero value, got " << results[queryNdx] << "." << tcu::TestLog::EndMessage;
                passed = false;
            }
        }
    }
    return passed;
}
1312
// Test case wrapper parameterized on the instance type (basic or full
// occlusion query instance). Supplies the shared vertex/fragment shaders;
// the optional checkerboard discard is appended to the fragment shader when
// the test vector requests discardHalf.
template <class Instance>
class QueryPoolOcclusionTest : public vkt::TestCase
{
public:
    QueryPoolOcclusionTest(tcu::TestContext &context, const char *name, const OcclusionQueryTestVector &testVector)
        : TestCase(context, name)
        , m_testVector(testVector)
    {
    }

private:
    // Create the templated instance with this case's test vector.
    vkt::TestInstance *createInstance(vkt::Context &context) const
    {
        return new Instance(context, m_testVector);
    }

    // Build the GLSL sources; the discard snippet kills every other pixel in
    // a checkerboard pattern to halve the passing sample count.
    void initPrograms(vk::SourceCollections &programCollection) const
    {
        const char *const discard = "    if ((int(gl_FragCoord.x) % 2) == (int(gl_FragCoord.y) % 2))\n"
                                    "        discard;\n";

        const std::string fragSrc = std::string("#version 400\n"
                                                "layout(location = 0) out vec4 out_FragColor;\n"
                                                "void main()\n"
                                                "{\n"
                                                "    out_FragColor = vec4(0.07, 0.48, 0.75, 1.0);\n") +
                                    std::string(m_testVector.discardHalf ? discard : "") + "}\n";

        programCollection.glslSources.add("frag") << glu::FragmentSource(fragSrc.c_str());

        programCollection.glslSources.add("vert")
            << glu::VertexSource("#version 430\n"
                                 "layout(location = 0) in vec4 in_Position;\n"
                                 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
                                 "void main() {\n"
                                 "    gl_Position = in_Position;\n"
                                 "    gl_PointSize = 1.0;\n"
                                 "}\n");
    }

    OcclusionQueryTestVector m_testVector;
};
1355
1356 } // namespace
1357
// Root group "occlusion_query"; child cases are created in init().
QueryPoolOcclusionTests::QueryPoolOcclusionTests(tcu::TestContext &testCtx) : TestCaseGroup(testCtx, "occlusion_query")
{
    /* Left blank on purpose */
}
1362
// Children are owned and destroyed by the TestCaseGroup base class.
QueryPoolOcclusionTests::~QueryPoolOcclusionTests(void)
{
    /* Left blank on purpose */
}
1367
// Populate the group: a pair of basic smoke tests, the full combinatorial
// matrix of functional tests (control flags x topology x result size x wait
// mode x results mode x availability x discard), clear-op and
// no-color-attachment variants, and a sweep over result strides/offsets.
void QueryPoolOcclusionTests::init(void)
{
    // Defaults shared by every generated test vector; individual loops below
    // override the fields they vary.
    OcclusionQueryTestVector baseTestVector;
    baseTestVector.queryControlFlags = 0;
    baseTestVector.queryResultSize = RESULT_SIZE_64_BIT;
    baseTestVector.queryWait = WAIT_QUEUE;
    baseTestVector.queryResultsMode = RESULTS_MODE_GET;
    baseTestVector.queryResultsStride = sizeof(uint64_t);
    baseTestVector.queryResultsAvailability = false;
    baseTestVector.primitiveTopology = vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    baseTestVector.discardHalf = false;
    baseTestVector.clearOp = CLEAR_NOOP;
    baseTestVector.noColorAttachments = false;

    //Basic tests
    {
        OcclusionQueryTestVector testVector = baseTestVector;
        testVector.queryControlFlags = 0;
        addChild(
            new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_conservative", testVector));
        testVector.queryControlFlags = vk::VK_QUERY_CONTROL_PRECISE_BIT;
        addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_precise", testVector));
    }

    // Functional test
    {
        const vk::VkQueryControlFlags controlFlags[] = {0, vk::VK_QUERY_CONTROL_PRECISE_BIT};
        const char *const controlFlagsStr[] = {"conservative", "precise"};

        for (int controlFlagIdx = 0; controlFlagIdx < DE_LENGTH_OF_ARRAY(controlFlags); ++controlFlagIdx)
        {

            const vk::VkPrimitiveTopology primitiveTopology[] = {vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
                                                                 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST};
            const char *const primitiveTopologyStr[] = {"points", "triangles"};
            for (int primitiveTopologyIdx = 0; primitiveTopologyIdx < DE_LENGTH_OF_ARRAY(primitiveTopology);
                 ++primitiveTopologyIdx)
            {

                const OcclusionQueryResultSize resultSize[] = {RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT};
                const char *const resultSizeStr[] = {"32", "64"};

                for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSize); ++resultSizeIdx)
                {

                    const OcclusionQueryWait wait[] = {WAIT_QUEUE, WAIT_QUERY};
                    const char *const waitStr[] = {"queue", "query"};

                    for (int waitIdx = 0; waitIdx < DE_LENGTH_OF_ARRAY(wait); ++waitIdx)
                    {
                        const OcclusionQueryResultsMode resultsMode[] = {RESULTS_MODE_GET, RESULTS_MODE_GET_RESET,
                                                                         RESULTS_MODE_COPY, RESULTS_MODE_COPY_RESET};
                        const char *const resultsModeStr[] = {"get", "get_reset", "copy", "copy_reset"};

                        for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
                        {
                            if (wait[waitIdx] == WAIT_QUERY && resultsMode[resultsModeIdx] == RESULTS_MODE_GET_RESET)
                            {
                                /* In RESULTS_MODE_GET_RESET we are going to reset the queries and get the query pool results again
                                 * without issuing them, in order to check the availability field. The Vulkan spec mentions that
                                 * vkGetQueryPoolResults may not return in finite time. Because of that, we skip those tests.
                                 */
                                continue;
                            }

                            const bool testAvailability[] = {false, true};
                            const char *const testAvailabilityStr[] = {"without", "with"};

                            for (int testAvailabilityIdx = 0;
                                 testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
                            {
                                if (resultsMode[resultsModeIdx] == RESULTS_MODE_COPY_RESET &&
                                    (!testAvailability[testAvailabilityIdx]))
                                {
                                    /* In RESULTS_MODE_COPY_RESET mode we will reset queries and make sure the availability flag is
                                     * set to zero. It does not make sense to run in this mode without obtaining the availability
                                     * flag.
                                     */
                                    continue;
                                }

                                const bool discardHalf[] = {false, true};
                                const char *const discardHalfStr[] = {"", "_discard"};

                                for (int discardHalfIdx = 0; discardHalfIdx < DE_LENGTH_OF_ARRAY(discardHalf);
                                     ++discardHalfIdx)
                                {
                                    OcclusionQueryTestVector testVector = baseTestVector;
                                    testVector.queryControlFlags = controlFlags[controlFlagIdx];
                                    testVector.queryResultSize = resultSize[resultSizeIdx];
                                    testVector.queryWait = wait[waitIdx];
                                    testVector.queryResultsMode = resultsMode[resultsModeIdx];
                                    testVector.queryResultsStride = testVector.queryResultSize == RESULT_SIZE_32_BIT ?
                                                                        sizeof(uint32_t) :
                                                                        sizeof(uint64_t);
                                    testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
                                    testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
                                    testVector.discardHalf = discardHalf[discardHalfIdx];

                                    if (testVector.discardHalf &&
                                        testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
                                        continue; // Discarding half of the pixels in fragment shader doesn't make sense with one-pixel-sized points.

                                    // Availability word doubles the per-query stride.
                                    if (testVector.queryResultsAvailability)
                                    {
                                        testVector.queryResultsStride *= 2;
                                    }

                                    std::ostringstream testName;

                                    testName << resultsModeStr[resultsModeIdx] << "_results"
                                             << "_" << controlFlagsStr[controlFlagIdx] << "_size_"
                                             << resultSizeStr[resultSizeIdx] << "_wait_" << waitStr[waitIdx] << "_"
                                             << testAvailabilityStr[testAvailabilityIdx] << "_availability"
                                             << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx]
                                             << discardHalfStr[discardHalfIdx];

                                    addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(
                                        m_testCtx, testName.str().c_str(), testVector));
                                }
                            }
                        }

                        /* Tests for clear operations recorded while an occlusion query is active.
                         * The query shouldn't count internal driver operations relevant to the clear operations.
                         */
                        const OcculusionQueryClearOp clearOp[] = {CLEAR_COLOR, CLEAR_DEPTH};
                        const char *const clearOpStr[] = {"clear_color", "clear_depth"};

                        for (int clearOpIdx = 0; clearOpIdx < DE_LENGTH_OF_ARRAY(clearOp); ++clearOpIdx)
                        {
                            OcclusionQueryTestVector testVector = baseTestVector;
                            testVector.queryControlFlags = controlFlags[controlFlagIdx];
                            testVector.queryResultSize = resultSize[resultSizeIdx];
                            testVector.queryWait = wait[waitIdx];
                            testVector.queryResultsMode = RESULTS_MODE_GET;
                            testVector.queryResultsStride =
                                testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
                            testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
                            testVector.clearOp = clearOp[clearOpIdx];

                            std::ostringstream testName;

                            testName << "get_results"
                                     << "_" << controlFlagsStr[controlFlagIdx] << "_size_"
                                     << resultSizeStr[resultSizeIdx] << "_wait_" << waitStr[waitIdx]
                                     << "_without_availability"
                                     << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx] << "_"
                                     << clearOpStr[clearOpIdx];

                            addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(
                                m_testCtx, testName.str().c_str(), testVector));
                        }

                        // Tests with no color attachments.
                        {
                            OcclusionQueryTestVector testVector = baseTestVector;
                            testVector.queryControlFlags = controlFlags[controlFlagIdx];
                            testVector.queryResultSize = resultSize[resultSizeIdx];
                            testVector.queryWait = wait[waitIdx];
                            testVector.queryResultsMode = RESULTS_MODE_GET;
                            testVector.queryResultsStride =
                                testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t);
                            testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
                            testVector.noColorAttachments = true;

                            std::ostringstream testName;

                            testName << "get_results"
                                     << "_" << controlFlagsStr[controlFlagIdx] << "_size_"
                                     << resultSizeStr[resultSizeIdx] << "_wait_" << waitStr[waitIdx]
                                     << "_without_availability"
                                     << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx]
                                     << "_no_color_attachments";

                            addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(
                                m_testCtx, testName.str().c_str(), testVector));
                        }
                    }
                }
            }
        }
    }
    // Test different strides
    {
        const OcclusionQueryResultsMode resultsMode[] = {RESULTS_MODE_GET, RESULTS_MODE_GET_RESET, RESULTS_MODE_COPY,
                                                         RESULTS_MODE_COPY_RESET};
        const char *const resultsModeStr[] = {"get", "get_reset", "copy", "copy_reset"};

        for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
        {
            const OcclusionQueryResultSize resultSizes[] = {RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT};
            const char *const resultSizeStr[] = {"32", "64"};

            const bool copyQueryDstOffset[] = {true, false};
            const char *const copyQueryDstOffsetStr[] = {"_dstoffset", ""};

            const bool testAvailability[] = {false, true};
            const char *const testAvailabilityStr[] = {"without", "with"};

            for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability);
                 ++testAvailabilityIdx)
            {
                if (resultsMode[resultsModeIdx] == RESULTS_MODE_COPY_RESET && (!testAvailability[testAvailabilityIdx]))
                {
                    /* In RESULTS_MODE_COPY_RESET mode we will reset queries and make sure the availability flag is set to zero. It
                     * does not make sense to run in this mode without obtaining the availability flag.
                     */
                    continue;
                }

                for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSizes); ++resultSizeIdx)
                {
                    const vk::VkDeviceSize resultSize =
                        (resultSizes[resultSizeIdx] == RESULT_SIZE_32_BIT ? sizeof(uint32_t) : sizeof(uint64_t));

                    // \todo [2015-12-18 scygan] Ensure only stride values aligned to resultSize are allowed. Otherwise test should be extended.
                    const vk::VkDeviceSize strides[] = {0u,
                                                        1 * resultSize,
                                                        2 * resultSize,
                                                        3 * resultSize,
                                                        4 * resultSize,
                                                        5 * resultSize,
                                                        13 * resultSize,
                                                        1024 * resultSize};

                    for (int dstOffsetIdx = 0; dstOffsetIdx < DE_LENGTH_OF_ARRAY(copyQueryDstOffset); dstOffsetIdx++)
                    {
                        for (int strideIdx = 0; strideIdx < DE_LENGTH_OF_ARRAY(strides); strideIdx++)
                        {
                            OcclusionQueryTestVector testVector = baseTestVector;
                            testVector.queryResultsMode = resultsMode[resultsModeIdx];
                            testVector.queryResultSize = resultSizes[resultSizeIdx];
                            testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
                            testVector.queryResultsStride = strides[strideIdx];
                            testVector.queryResultsDstOffset = copyQueryDstOffset[dstOffsetIdx];

                            const vk::VkDeviceSize elementSize =
                                (testVector.queryResultsAvailability ? resultSize * 2 : resultSize);

                            // Skip strides too small to hold value + availability.
                            if (elementSize > testVector.queryResultsStride && strides[strideIdx] != 0)
                            {
                                continue;
                            }

                            if (strides[strideIdx] == 0)
                            {
                                // Due to the nature of the test, the dstOffset is tested automatically when stride size is 0.
                                if (testVector.queryResultsDstOffset)
                                {
                                    continue;
                                }

                                // We are testing only VkCmdCopyQueryPoolResults with stride 0.
                                if (testVector.queryResultsMode != RESULTS_MODE_COPY)
                                {
                                    continue;
                                }
                            }

                            std::ostringstream testName;

                            testName << resultsModeStr[resultsModeIdx] << "_results_size_"
                                     << resultSizeStr[resultSizeIdx] << "_stride_" << strides[strideIdx] << "_"
                                     << testAvailabilityStr[testAvailabilityIdx] << "_availability"
                                     << copyQueryDstOffsetStr[dstOffsetIdx];

                            addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(
                                m_testCtx, testName.str().c_str(), testVector));
                        }
                    }
                }
            }
        }
    }
}
1644
1645 } // namespace QueryPool
1646 } // namespace vkt
1647