/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2023 The Khronos Group Inc.
 * Copyright (c) 2023 Valve Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan Fragment Shader Invocation and Sample Count Tests
 *//*--------------------------------------------------------------------*/
#include "vktQueryPoolFragInvocationTests.hpp"

#include "vktTestCaseUtil.hpp"

#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkTypeUtil.hpp"

#include "tcuImageCompare.hpp"

#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
35
36 namespace vkt
37 {
38 namespace QueryPool
39 {
40
41 namespace
42 {
43
44 using namespace vk;
45
// Which Vulkan query the test exercises.
enum class QueryType
{
    INVOCATIONS = 0, // Pipeline statistics query counting fragment shader invocations.
    OCCLUSION        // Occlusion query, begun with the PRECISE control flag (exact counts).
};
51
getQueryTypeName(const QueryType qType)52 std::string getQueryTypeName(const QueryType qType)
53 {
54 switch (qType)
55 {
56 case QueryType::INVOCATIONS:
57 return "frag_invs";
58 case QueryType::OCCLUSION:
59 return "occlusion";
60 default:
61 break;
62 }
63
64 DE_ASSERT(false);
65 return "";
66 }
67
// Variations of the fragment shader used by the tests.
enum class FragShaderVariant
{
    FLAT = 0,           // Output a color that is a compile-time constant in the shader.
    VERTEX_COLOR = 1,   // Output a color taken from a vertex input attribute.
    ATOMIC_COUNTER = 2, // Constant color, but also increment a storage buffer atomic counter.
};
74
// Parameters identifying a single test case.
struct TestParams
{
    const QueryType queryType;                 // Query type to verify.
    const bool secondary;                      // Record the draw in a secondary command buffer.
    const FragShaderVariant fragShaderVariant; // Fragment shader variation to use.
};
81
getGeometryColor(void)82 tcu::Vec4 getGeometryColor(void)
83 {
84 return tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f);
85 }
86
getClearColor(void)87 tcu::Vec4 getClearColor(void)
88 {
89 return tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f);
90 }
91
checkSupport(Context & context,TestParams params)92 void checkSupport(Context &context, TestParams params)
93 {
94 if (params.secondary)
95 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_INHERITED_QUERIES);
96
97 if (params.queryType == QueryType::OCCLUSION)
98 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_OCCLUSION_QUERY_PRECISE);
99 else if (params.queryType == QueryType::INVOCATIONS)
100 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_PIPELINE_STATISTICS_QUERY);
101
102 if (params.fragShaderVariant == FragShaderVariant::ATOMIC_COUNTER)
103 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_FRAGMENT_STORES_AND_ATOMICS);
104 }
105
initPrograms(vk::SourceCollections & programCollection,TestParams params)106 void initPrograms(vk::SourceCollections &programCollection, TestParams params)
107 {
108 const bool isAtomic = (params.fragShaderVariant == FragShaderVariant::ATOMIC_COUNTER);
109 const bool isVertexColor = (params.fragShaderVariant == FragShaderVariant::VERTEX_COLOR);
110
111 std::ostringstream vert;
112 vert << "#version 460\n"
113 << "layout (location=0) in vec4 inPos;\n"
114 << (isVertexColor ? "layout (location=1) in vec4 inColor;\n" : "")
115 << (isVertexColor ? "layout (location=0) out vec4 outColor;\n" : "") << "void main() {\n"
116 << " gl_Position = inPos;\n"
117 << (isVertexColor ? " outColor = inColor;\n" : "") << "}";
118 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
119
120 std::ostringstream frag;
121 frag << "#version 460\n"
122 << "layout (location=0) out vec4 outColor;\n"
123 << (isVertexColor ? "layout (location=0) in vec4 vtxColor;\n" : "")
124 << (isAtomic ? "layout (set=0, binding=0) buffer CounterBlock { uint counter; } cb;\n" : "")
125 << "void main() {\n"
126 << (isAtomic ? " atomicAdd(cb.counter, 1u);\n" : "")
127 << " outColor = " << (isVertexColor ? "vtxColor" : ("vec4" + de::toString(getGeometryColor()))) << ";\n"
128 << "}";
129 ;
130 programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
131 }
132
133 // Records the render pass commands (bind pipeline and draw) and optionally binds descriptor sets.
recordRenderPassCommands(const DeviceInterface & vkd,const VkCommandBuffer cmdBuffer,const VkPipelineBindPoint bindPoint,const VkPipeline pipeline,const VkPipelineLayout pipelineLayout,const VkDescriptorSet descriptorSet,const VkBuffer vertexBuffer,const VkDeviceSize vertexBufferOffset)134 void recordRenderPassCommands(const DeviceInterface &vkd, const VkCommandBuffer cmdBuffer,
135 const VkPipelineBindPoint bindPoint, const VkPipeline pipeline,
136 const VkPipelineLayout pipelineLayout,
137 const VkDescriptorSet descriptorSet, // Can be made VK_NULL_HANDLE to avoid binding.
138 const VkBuffer vertexBuffer, const VkDeviceSize vertexBufferOffset)
139 {
140 vkd.cmdBindPipeline(cmdBuffer, bindPoint, pipeline);
141
142 if (descriptorSet != VK_NULL_HANDLE)
143 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, pipelineLayout, 0u, 1u, &descriptorSet, 0u, nullptr);
144
145 vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
146 vkd.cmdDraw(cmdBuffer, 3u, 1u, 0u, 0u);
147 }
148
testInvocations(Context & context,const TestParams params)149 tcu::TestStatus testInvocations(Context &context, const TestParams params)
150 {
151 const auto ctx = context.getContextCommonData();
152 const tcu::IVec3 fbExtent(64, 64, 1);
153 const auto vkExtent = makeExtent3D(fbExtent);
154 const auto colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
155 const auto colorSRR = makeDefaultImageSubresourceRange();
156 const auto colorSRL = makeDefaultImageSubresourceLayers();
157 const auto colorUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
158 const auto imageType = VK_IMAGE_TYPE_2D;
159 const auto bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
160 const auto isAtomic = (params.fragShaderVariant == FragShaderVariant::ATOMIC_COUNTER);
161 const auto resShaderStages = VK_SHADER_STAGE_FRAGMENT_BIT; // Shader stages for the resources.
162 const auto geometryColor = getGeometryColor();
163
164 ImageWithBuffer colorBuffer(ctx.vkd, ctx.device, ctx.allocator, vkExtent, colorFormat, colorUsage, imageType,
165 colorSRR);
166
167 const auto &binaries = context.getBinaryCollection();
168 const auto vertModule = createShaderModule(ctx.vkd, ctx.device, binaries.get("vert"));
169 const auto fragModule = createShaderModule(ctx.vkd, ctx.device, binaries.get("frag"));
170
171 // Vertex buffer, which is going to contain interleaved pairs of positions and colors.
172 const std::vector<tcu::Vec4> vertices = {
173 // position // color
174 tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f), geometryColor, tcu::Vec4(3.0f, -1.0f, 0.0f, 1.0f), geometryColor,
175 tcu::Vec4(-1.0f, 3.0f, 0.0f, 1.0f), geometryColor,
176 };
177
178 const uint32_t kVec4Sz = static_cast<uint32_t>(sizeof(tcu::Vec4));
179 const uint32_t vertexStride = (kVec4Sz * 2u);
180 const uint32_t positionOffset = 0u;
181 const uint32_t colorOffset = kVec4Sz;
182
183 // Vertex buffer
184 const auto vbSize = static_cast<VkDeviceSize>(de::dataSize(vertices));
185 const auto vbInfo = makeBufferCreateInfo(vbSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
186 BufferWithMemory vertexBuffer(ctx.vkd, ctx.device, ctx.allocator, vbInfo, MemoryRequirement::HostVisible);
187 const auto vbAlloc = vertexBuffer.getAllocation();
188 void *vbData = vbAlloc.getHostPtr();
189 const auto vbOffset = static_cast<VkDeviceSize>(0);
190
191 deMemcpy(vbData, de::dataOrNull(vertices), de::dataSize(vertices));
192 flushAlloc(ctx.vkd, ctx.device, vbAlloc); // strictly speaking, not needed.
193
194 using BufferWithMemoryPtr = std::unique_ptr<BufferWithMemory>;
195
196 // Used in the atomic buffer case.
197 Move<VkDescriptorSetLayout> setLayout;
198 Move<VkDescriptorPool> descriptorPool;
199 Move<VkDescriptorSet> descriptorSet;
200 BufferWithMemoryPtr atomicBuffer;
201
202 const auto atomicBufferType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
203 const auto atomicBufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t));
204
205 if (isAtomic)
206 {
207 // Zero-out atomic counter.
208 const auto atomicBufferInfo = makeBufferCreateInfo(atomicBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
209 atomicBuffer.reset(
210 new BufferWithMemory(ctx.vkd, ctx.device, ctx.allocator, atomicBufferInfo, MemoryRequirement::HostVisible));
211 deMemset(atomicBuffer->getAllocation().getHostPtr(), 0, static_cast<size_t>(atomicBufferSize));
212
213 DescriptorSetLayoutBuilder layoutBuilder;
214 layoutBuilder.addSingleBinding(atomicBufferType, resShaderStages);
215 setLayout = layoutBuilder.build(ctx.vkd, ctx.device);
216
217 DescriptorPoolBuilder descPoolBuilder;
218 descPoolBuilder.addType(atomicBufferType);
219 descriptorPool =
220 descPoolBuilder.build(ctx.vkd, ctx.device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
221
222 descriptorSet = makeDescriptorSet(ctx.vkd, ctx.device, *descriptorPool, *setLayout);
223
224 DescriptorSetUpdateBuilder updateBuilder;
225 const auto atomicBufferDescriptorInfo = makeDescriptorBufferInfo(atomicBuffer->get(), 0ull, atomicBufferSize);
226 updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), atomicBufferType,
227 &atomicBufferDescriptorInfo);
228 updateBuilder.update(ctx.vkd, ctx.device);
229 }
230
231 // Pipeline layout, render pass and framebuffer.
232 const auto pipelineLayout = makePipelineLayout(ctx.vkd, ctx.device, *setLayout);
233 const auto renderPass = makeRenderPass(ctx.vkd, ctx.device, colorFormat);
234 const auto framebuffer = makeFramebuffer(ctx.vkd, ctx.device, renderPass.get(), colorBuffer.getImageView(),
235 vkExtent.width, vkExtent.height);
236
237 const bool isInvQuery = (params.queryType == QueryType::INVOCATIONS);
238 const auto queryType = (isInvQuery ? VK_QUERY_TYPE_PIPELINE_STATISTICS : VK_QUERY_TYPE_OCCLUSION);
239 const auto statFlags =
240 (isInvQuery ?
241 static_cast<VkQueryPipelineStatisticFlags>(VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) :
242 0u);
243 const auto controlFlags = (isInvQuery ? 0u : static_cast<VkQueryControlFlags>(VK_QUERY_CONTROL_PRECISE_BIT));
244
245 const VkQueryPoolCreateInfo queryPoolCreateInfo = {
246 VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, // VkStructureType sType;
247 nullptr, // const void* pNext;
248 0u, // VkQueryPoolCreateFlags flags;
249 queryType, // VkQueryType queryType;
250 1u, // uint32_t queryCount;
251 statFlags, // VkQueryPipelineStatisticFlags pipelineStatistics;
252 };
253 const auto queryPool = createQueryPool(ctx.vkd, ctx.device, &queryPoolCreateInfo);
254
255 const std::vector<VkViewport> viewports(1u, makeViewport(vkExtent));
256 const std::vector<VkRect2D> scissors(1u, makeRect2D(fbExtent));
257
258 // Vertex buffer description.
259 const auto vtxBinding = makeVertexInputBindingDescription(0u, vertexStride, VK_VERTEX_INPUT_RATE_VERTEX);
260 const std::vector<VkVertexInputAttributeDescription> vtxAttribs{
261 makeVertexInputAttributeDescription(0u, 0u, VK_FORMAT_R32G32B32A32_SFLOAT, positionOffset),
262 makeVertexInputAttributeDescription(1u, 0u, VK_FORMAT_R32G32B32A32_SFLOAT, colorOffset),
263 };
264 const VkPipelineVertexInputStateCreateInfo inputStateCreateInfo = {
265 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
266 nullptr, // const void* pNext;
267 0u, // VkPipelineVertexInputStateCreateFlags flags;
268 1u, // uint32_t vertexBindingDescriptionCount;
269 &vtxBinding, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
270 de::sizeU32(vtxAttribs), // uint32_t vertexAttributeDescriptionCount;
271 de::dataOrNull(vtxAttribs), // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
272 };
273
274 const auto pipeline =
275 makeGraphicsPipeline(ctx.vkd, ctx.device, pipelineLayout.get(), vertModule.get(), VK_NULL_HANDLE,
276 VK_NULL_HANDLE, VK_NULL_HANDLE, fragModule.get(), renderPass.get(), viewports, scissors,
277 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, 0u, &inputStateCreateInfo);
278
279 CommandPoolWithBuffer cmd(ctx.vkd, ctx.device, ctx.qfIndex);
280 VkCommandBuffer primaryCmdBuffer = cmd.cmdBuffer.get();
281 Move<VkCommandBuffer> secCmdBufferPtr;
282
283 if (params.secondary)
284 {
285 secCmdBufferPtr =
286 allocateCommandBuffer(ctx.vkd, ctx.device, cmd.cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_SECONDARY);
287 const auto secCmdBuffer = secCmdBufferPtr.get();
288
289 const VkCommandBufferInheritanceInfo inheritanceInfo = {
290 VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, // VkStructureType sType;
291 nullptr, // const void* pNext;
292 renderPass.get(), // VkRenderPass renderPass;
293 0u, // uint32_t subpass;
294 framebuffer.get(), // VkFramebuffer framebuffer;
295 ((queryType == VK_QUERY_TYPE_OCCLUSION) ? VK_TRUE : VK_FALSE), // VkBool32 occlusionQueryEnable;
296 controlFlags, // VkQueryControlFlags queryFlags;
297 statFlags, // VkQueryPipelineStatisticFlags pipelineStatistics;
298 };
299
300 const auto usageFlags =
301 (VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
302 const VkCommandBufferBeginInfo beginInfo = {
303 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
304 nullptr, // const void* pNext;
305 usageFlags, // VkCommandBufferUsageFlags flags;
306 &inheritanceInfo, // const VkCommandBufferInheritanceInfo* pInheritanceInfo;
307 };
308
309 VK_CHECK(ctx.vkd.beginCommandBuffer(secCmdBuffer, &beginInfo));
310 recordRenderPassCommands(ctx.vkd, secCmdBuffer, bindPoint, pipeline.get(), *pipelineLayout, *descriptorSet,
311 *vertexBuffer, vbOffset);
312 endCommandBuffer(ctx.vkd, secCmdBuffer);
313 }
314
315 const auto subpassContents =
316 (params.secondary ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS : VK_SUBPASS_CONTENTS_INLINE);
317 const auto clearColor = makeClearValueColor(getClearColor());
318
319 beginCommandBuffer(ctx.vkd, primaryCmdBuffer);
320 ctx.vkd.cmdResetQueryPool(primaryCmdBuffer, queryPool.get(), 0u, 1u);
321 ctx.vkd.cmdBeginQuery(primaryCmdBuffer, queryPool.get(), 0u, controlFlags);
322 beginRenderPass(ctx.vkd, primaryCmdBuffer, renderPass.get(), framebuffer.get(), scissors.at(0), clearColor,
323 subpassContents);
324 if (!params.secondary)
325 recordRenderPassCommands(ctx.vkd, primaryCmdBuffer, bindPoint, pipeline.get(), *pipelineLayout, *descriptorSet,
326 *vertexBuffer, vbOffset);
327 else
328 ctx.vkd.cmdExecuteCommands(primaryCmdBuffer, 1u, &secCmdBufferPtr.get());
329 endRenderPass(ctx.vkd, primaryCmdBuffer);
330 ctx.vkd.cmdEndQuery(primaryCmdBuffer, queryPool.get(), 0u);
331 {
332 const auto preTransferBarrier = makeImageMemoryBarrier(
333 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
334 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorBuffer.getImage(), colorSRR);
335 cmdPipelineImageMemoryBarrier(ctx.vkd, primaryCmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
336 VK_PIPELINE_STAGE_TRANSFER_BIT, &preTransferBarrier);
337
338 const auto copyRegion = makeBufferImageCopy(vkExtent, colorSRL);
339 ctx.vkd.cmdCopyImageToBuffer(primaryCmdBuffer, colorBuffer.getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
340 colorBuffer.getBuffer(), 1u, ©Region);
341
342 // Synchronize color buffer copy, and possible atomic writes from the frag shader, to the host.
343 {
344 VkAccessFlags srcAccess = VK_ACCESS_TRANSFER_WRITE_BIT;
345 VkPipelineStageFlags srcStages = VK_PIPELINE_STAGE_TRANSFER_BIT;
346
347 if (isAtomic)
348 {
349 srcAccess |= VK_ACCESS_SHADER_WRITE_BIT;
350 srcStages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
351 }
352
353 const auto preHostBarrier = makeMemoryBarrier(srcAccess, VK_ACCESS_HOST_READ_BIT);
354 cmdPipelineMemoryBarrier(ctx.vkd, primaryCmdBuffer, srcStages, VK_PIPELINE_STAGE_HOST_BIT, &preHostBarrier);
355 }
356 }
357 endCommandBuffer(ctx.vkd, primaryCmdBuffer);
358 submitCommandsAndWait(ctx.vkd, ctx.device, ctx.queue, primaryCmdBuffer);
359
360 const auto resultAllocation = colorBuffer.getBufferAllocation();
361 invalidateAlloc(ctx.vkd, ctx.device, resultAllocation);
362
363 uint32_t queryResult = 0u;
364 VK_CHECK(ctx.vkd.getQueryPoolResults(ctx.device, queryPool.get(), 0u, 1u, sizeof(queryResult), &queryResult,
365 static_cast<VkDeviceSize>(sizeof(queryResult)), VK_QUERY_RESULT_WAIT_BIT));
366
367 const auto pixelCount = vkExtent.width * vkExtent.height * vkExtent.depth;
368
369 // Check query results.
370 if (isInvQuery)
371 {
372 uint32_t minCount = std::numeric_limits<uint32_t>::max();
373
374 if (params.fragShaderVariant == FragShaderVariant::FLAT)
375 {
376 // Implementations are allowed to reuse fragment shader invocations to shade different fragments under some circumstances:
377 // - The frag shader statically computes the same value for different framebuffer locations, and
378 // - It does not write to any storage resources.
379 // The spec does not mention a minimum number of invocations, but in practice we're tying this to enabling fragment shading
380 // rate support automatically. We'll suppose implementations not supporting fragment shading rate will not do this, and
381 // those supporting it will not run less invocations than the whole framebuffer divided into areas of maxFramentSize pixels.
382 // If this proves problematic, we can relax the check later.
383 const auto maxFragmentSize = (context.isDeviceFunctionalitySupported("VK_KHR_fragment_shading_rate") ?
384 context.getFragmentShadingRateProperties().maxFragmentSize :
385 makeExtent2D(1u, 1u));
386 const auto maxFragmentWidth =
387 std::max(maxFragmentSize.width, 1u); // In case an implementation reports zero.
388 const auto maxFragmentHeight = std::max(maxFragmentSize.height, 1u); // Ditto.
389 const auto minCols = vkExtent.width / maxFragmentWidth;
390 const auto minRows = vkExtent.height / maxFragmentHeight;
391
392 minCount = minCols * minRows * vkExtent.depth;
393 }
394 else
395 minCount = pixelCount;
396
397 if (queryResult < minCount)
398 {
399 std::ostringstream msg;
400 msg << "Framebuffer size: " << vkExtent.width << "x" << vkExtent.height
401 << "; expected query result to be at least " << minCount << " but found " << queryResult;
402 return tcu::TestStatus::fail(msg.str());
403 }
404 }
405 else
406 {
407 if (pixelCount != queryResult)
408 {
409 std::ostringstream msg;
410 msg << "Framebuffer size: " << vkExtent.width << "x" << vkExtent.height << "; expected query result to be "
411 << pixelCount << " but found " << queryResult;
412 return tcu::TestStatus::fail(msg.str());
413 }
414 }
415
416 if (isAtomic)
417 {
418 // Verify atomic counter.
419 auto &atomicBufferAlloc = atomicBuffer->getAllocation();
420 invalidateAlloc(ctx.vkd, ctx.device, atomicBufferAlloc);
421
422 uint32_t atomicCounter = 0u;
423 deMemcpy(&atomicCounter, atomicBufferAlloc.getHostPtr(), sizeof(atomicCounter));
424
425 if (atomicCounter != pixelCount)
426 {
427 std::ostringstream msg;
428 msg << "Framebuffer size: " << vkExtent.width << "x" << vkExtent.height
429 << "; expected atomic counter to be " << pixelCount << " but found " << atomicCounter;
430 return tcu::TestStatus::fail(msg.str());
431 }
432 }
433
434 // Check color buffer.
435 const auto tcuFormat = mapVkFormat(colorFormat);
436 auto &log = context.getTestContext().getLog();
437 const tcu::Vec4 colorThreshold(0.0f, 0.0f, 0.0f, 0.0f); // Expect exact color result.
438 tcu::ConstPixelBufferAccess resultAccess(tcuFormat, fbExtent, resultAllocation.getHostPtr());
439
440 if (!tcu::floatThresholdCompare(log, "Result", "", geometryColor, resultAccess, colorThreshold,
441 tcu::COMPARE_LOG_ON_ERROR))
442 return tcu::TestStatus::fail("Unexpected results in color buffer -- check log for details");
443
444 return tcu::TestStatus::pass("Pass");
445 }
446
447 } // anonymous namespace
448
createFragInvocationTests(tcu::TestContext & testContext)449 tcu::TestCaseGroup *createFragInvocationTests(tcu::TestContext &testContext)
450 {
451 using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
452
453 // Test implementations do not optimize out fragment shader invocations
454 GroupPtr mainGroup(new tcu::TestCaseGroup(testContext, "frag_invocations"));
455
456 const struct
457 {
458 FragShaderVariant fragShaderVariant;
459 const std::string suffix;
460 } fragShaderVariantCases[] = {
461 {FragShaderVariant::FLAT, ""},
462 {FragShaderVariant::VERTEX_COLOR, "_with_vertex_color"},
463 {FragShaderVariant::ATOMIC_COUNTER, "_with_atomic_counter"},
464 };
465
466 for (const auto queryType : {QueryType::OCCLUSION, QueryType::INVOCATIONS})
467 {
468 const auto groupName = getQueryTypeName(queryType);
469 GroupPtr queryTypeGroup(new tcu::TestCaseGroup(testContext, groupName.c_str()));
470
471 for (const auto secondaryCase : {false, true})
472 {
473 for (const auto &fragShaderVariantCase : fragShaderVariantCases)
474 {
475 const auto testName = (secondaryCase ? "secondary" : "primary") + fragShaderVariantCase.suffix;
476 const TestParams params{queryType, secondaryCase, fragShaderVariantCase.fragShaderVariant};
477 addFunctionCaseWithPrograms(queryTypeGroup.get(), testName, checkSupport, initPrograms, testInvocations,
478 params);
479 }
480 }
481
482 mainGroup->addChild(queryTypeGroup.release());
483 }
484
485 return mainGroup.release();
486 }
487
488 } // namespace QueryPool
489 } // namespace vkt
490