1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 ARM Ltd.
7 * Copyright (c) 2023 LunarG, Inc.
8 * Copyright (c) 2023 Nintendo
9 *
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
13 *
14 * http://www.apache.org/licenses/LICENSE-2.0
15 *
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
21 *
22 *//*!
23 * \file
24 * \brief Pipeline Cache Tests
25 *//*--------------------------------------------------------------------*/
26
27 #include "vktPipelineCacheTests.hpp"
28 #include "vktPipelineClearUtil.hpp"
29 #include "vktPipelineImageUtil.hpp"
30 #include "vktPipelineVertexUtil.hpp"
31 #include "vktTestCase.hpp"
32 #include "vktTestCaseUtil.hpp"
33 #include "vkImageUtil.hpp"
34 #include "vkMemUtil.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkBuilderUtil.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkRef.hpp"
39 #include "vkRefUtil.hpp"
40 #include "vkTypeUtil.hpp"
41 #include "vkCmdUtil.hpp"
42 #include "vkObjUtil.hpp"
43 #include "tcuImageCompare.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deMemory.h"
46 #include "tcuTestLog.hpp"
47
48 #include <sstream>
49 #include <vector>
50
51 namespace vkt
52 {
53 namespace pipeline
54 {
55
56 using namespace vk;
57
58 namespace
59 {
60
61 // helper functions
62
63 std::string getShaderFlagStr(const VkShaderStageFlags shader, bool isDescription)
64 {
65 std::ostringstream desc;
66 if (shader & VK_SHADER_STAGE_COMPUTE_BIT)
67 {
68 desc << ((isDescription) ? "compute stage" : "compute_stage");
69 }
70 else
71 {
72 desc << ((isDescription) ? "vertex stage" : "vertex_stage");
73 if (shader & VK_SHADER_STAGE_GEOMETRY_BIT)
74 desc << ((isDescription) ? " geometry stage" : "_geometry_stage");
75 if (shader & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
76 desc << ((isDescription) ? " tessellation control stage" : "_tessellation_control_stage");
77 if (shader & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
78 desc << ((isDescription) ? " tessellation evaluation stage" : "_tessellation_evaluation_stage");
79 desc << ((isDescription) ? " fragment stage" : "_fragment_stage");
80 }
81
82 return desc.str();
83 }
84
85 // helper classes
86 class CacheTestParam
87 {
88 public:
89 CacheTestParam(PipelineConstructionType pipelineConstructionType, const VkShaderStageFlags shaders,
90 bool compileCacheMissShaders, VkPipelineCacheCreateFlags pipelineCacheCreateFlags = 0u);
91 virtual ~CacheTestParam(void) = default;
92 virtual const std::string generateTestName(void) const;
93 PipelineConstructionType getPipelineConstructionType(void) const
94 {
95 return m_pipelineConstructionType;
96 }
97 VkShaderStageFlags getShaderFlags(void) const
98 {
99 return m_shaders;
100 }
101 VkPipelineCacheCreateFlags getPipelineCacheCreateFlags(void) const
102 {
103 return m_pipelineCacheCreateFlags;
104 }
105 bool getCompileMissShaders(void) const
106 {
107 return m_compileCacheMissShaders;
108 }
109
110 protected:
111 PipelineConstructionType m_pipelineConstructionType;
112 VkShaderStageFlags m_shaders;
113 VkPipelineCacheCreateFlags m_pipelineCacheCreateFlags;
114 bool m_compileCacheMissShaders;
115 };
116
117 CacheTestParam::CacheTestParam(PipelineConstructionType pipelineConstructionType, const VkShaderStageFlags shaders,
118 bool compileCacheMissShaders, VkPipelineCacheCreateFlags pipelineCacheCreateFlags)
119 : m_pipelineConstructionType(pipelineConstructionType)
120 , m_shaders(shaders)
121 , m_pipelineCacheCreateFlags(pipelineCacheCreateFlags)
122 , m_compileCacheMissShaders(compileCacheMissShaders)
123 {
124 }
125
126 const std::string CacheTestParam::generateTestName(void) const
127 {
128 std::string name = getShaderFlagStr(m_shaders, false);
129 if (m_pipelineCacheCreateFlags == VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
130 {
131 name += "_externally_synchronized";
132 }
133 return name;
134 }
135
136 template <class Test>
137 vkt::TestCase *newTestCase(tcu::TestContext &testContext, const CacheTestParam *testParam)
138 {
139 return new Test(testContext, testParam->generateTestName().c_str(), testParam);
140 }
141
142 Move<VkBuffer> createBufferAndBindMemory(Context &context, VkDeviceSize size, VkBufferUsageFlags usage,
143 de::MovePtr<Allocation> *pAlloc)
144 {
145 const DeviceInterface &vk = context.getDeviceInterface();
146 const VkDevice vkDevice = context.getDevice();
147 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
148
149 const VkBufferCreateInfo vertexBufferParams = {
150 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
151 DE_NULL, // const void* pNext;
152 0u, // VkBufferCreateFlags flags;
153 size, // VkDeviceSize size;
154 usage, // VkBufferUsageFlags usage;
155 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
156 1u, // uint32_t queueFamilyCount;
157 &queueFamilyIndex // const uint32_t* pQueueFamilyIndices;
158 };
159
160 Move<VkBuffer> vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams);
161
162 *pAlloc = context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vk, vkDevice, *vertexBuffer),
163 MemoryRequirement::HostVisible);
164 VK_CHECK(vk.bindBufferMemory(vkDevice, *vertexBuffer, (*pAlloc)->getMemory(), (*pAlloc)->getOffset()));
165
166 return vertexBuffer;
167 }
168
169 Move<VkImage> createImage2DAndBindMemory(Context &context, VkFormat format, uint32_t width, uint32_t height,
170 VkImageUsageFlags usage, VkSampleCountFlagBits sampleCount,
171 de::details::MovePtr<Allocation> *pAlloc)
172 {
173 const DeviceInterface &vk = context.getDeviceInterface();
174 const VkDevice vkDevice = context.getDevice();
175 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
176
177 const VkImageCreateInfo colorImageParams = {
178 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
179 DE_NULL, // const void* pNext;
180 0u, // VkImageCreateFlags flags;
181 VK_IMAGE_TYPE_2D, // VkImageType imageType;
182 format, // VkFormat format;
183 {width, height, 1u}, // VkExtent3D extent;
184 1u, // uint32_t mipLevels;
185 1u, // uint32_t arraySize;
186 sampleCount, // uint32_t samples;
187 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
188 usage, // VkImageUsageFlags usage;
189 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
190 1u, // uint32_t queueFamilyCount;
191 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
192 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
193 };
194
195 Move<VkImage> image = createImage(vk, vkDevice, &colorImageParams);
196
197 *pAlloc = context.getDefaultAllocator().allocate(getImageMemoryRequirements(vk, vkDevice, *image),
198 MemoryRequirement::Any);
199 VK_CHECK(vk.bindImageMemory(vkDevice, *image, (*pAlloc)->getMemory(), (*pAlloc)->getOffset()));
200
201 return image;
202 }
203
204 // Test Classes
205 class CacheTest : public vkt::TestCase
206 {
207 public:
208 CacheTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
209 : vkt::TestCase(testContext, name)
210 , m_param(*param)
211 {
212 }
213 virtual ~CacheTest(void)
214 {
215 }
216
217 protected:
218 const CacheTestParam m_param;
219 };
220
221 class CacheTestInstance : public vkt::TestInstance
222 {
223 public:
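// Note: each test builds two equivalent pipelines. The NO_CACHE pipeline is created
// first and warms m_cache; the CACHED pipeline is created afterwards, either from the
// same cache or from one derived from it (see preparePipelines() in the concrete
// instances), and its output must match the output of the first pipeline.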
224 enum
225 {
226 PIPELINE_CACHE_NDX_NO_CACHE,
227 PIPELINE_CACHE_NDX_CACHED,
228 PIPELINE_CACHE_NDX_COUNT,
229 };
230 CacheTestInstance(Context &context, const CacheTestParam *param);
231 virtual ~CacheTestInstance(void);
232 virtual tcu::TestStatus iterate(void);
233
234 protected:
235 virtual tcu::TestStatus verifyTestResult(void) = 0;
236 virtual void prepareCommandBuffer(void) = 0;
237
238 protected:
239 const CacheTestParam *m_param;
240 Move<VkCommandPool> m_cmdPool;
241 Move<VkCommandBuffer> m_cmdBuffer;
242 Move<VkPipelineCache> m_cache;
243 };
244
245 CacheTestInstance::CacheTestInstance(Context &context, const CacheTestParam *param)
246 : TestInstance(context)
247 , m_param(param)
248 {
249 const DeviceInterface &vk = m_context.getDeviceInterface();
250 const VkDevice vkDevice = m_context.getDevice();
251 const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
252
253 // Create command pool
254 m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
255
256 // Create command buffer
257 m_cmdBuffer = allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
258
259 // Create the Pipeline Cache
260 {
261 const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
262 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
263 DE_NULL, // const void* pNext;
264 m_param->getPipelineCacheCreateFlags(), // VkPipelineCacheCreateFlags flags;
265 0u, // uintptr_t initialDataSize;
266 DE_NULL, // const void* pInitialData;
267 };
268
269 m_cache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
270 }
271 }
272
273 CacheTestInstance::~CacheTestInstance(void)
274 {
275 }
276
277 tcu::TestStatus CacheTestInstance::iterate(void)
278 {
279 const DeviceInterface &vk = m_context.getDeviceInterface();
280 const VkDevice vkDevice = m_context.getDevice();
281 const VkQueue queue = m_context.getUniversalQueue();
282
283 prepareCommandBuffer();
284
285 submitCommandsAndWait(vk, vkDevice, queue, m_cmdBuffer.get());
286
287 return verifyTestResult();
288 }
289
290 class GraphicsCacheTest : public CacheTest
291 {
292 public:
293 GraphicsCacheTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
294 : CacheTest(testContext, name, param)
295 {
296 }
297 virtual ~GraphicsCacheTest(void)
298 {
299 }
300 virtual void initPrograms(SourceCollections &programCollection) const;
301 virtual void checkSupport(Context &context) const;
302 virtual TestInstance *createInstance(Context &context) const;
303 };
304
305 class GraphicsCacheTestInstance : public CacheTestInstance
306 {
307 public:
308 GraphicsCacheTestInstance(Context &context, const CacheTestParam *param);
309 virtual ~GraphicsCacheTestInstance(void);
310
311 protected:
312 void preparePipelineWrapper(GraphicsPipelineWrapper &gpw, VkPipelineCache cache, bool useMissShaders);
313 virtual void preparePipelines(void);
314 void prepareRenderPass(const RenderPassWrapper &renderPassFramebuffer, GraphicsPipelineWrapper &pipeline);
315 virtual void prepareCommandBuffer(void);
316 virtual tcu::TestStatus verifyTestResult(void);
317
318 protected:
319 const tcu::UVec2 m_renderSize;
320 const VkFormat m_colorFormat;
321 const VkFormat m_depthFormat;
322 PipelineLayoutWrapper m_pipelineLayout;
323
324 Move<VkImage> m_depthImage;
325 de::MovePtr<Allocation> m_depthImageAlloc;
326 de::MovePtr<Allocation> m_colorImageAlloc[PIPELINE_CACHE_NDX_COUNT];
327 Move<VkImageView> m_depthAttachmentView;
328 VkImageMemoryBarrier m_imageLayoutBarriers[3];
329
330 Move<VkBuffer> m_vertexBuffer;
331 de::MovePtr<Allocation> m_vertexBufferMemory;
332 std::vector<Vertex4RGBA> m_vertices;
333
334 GraphicsPipelineWrapper m_pipeline[PIPELINE_CACHE_NDX_COUNT];
335
336 Move<VkImage> m_colorImage[PIPELINE_CACHE_NDX_COUNT];
337 Move<VkImageView> m_colorAttachmentView[PIPELINE_CACHE_NDX_COUNT];
338 RenderPassWrapper m_renderPassFramebuffer[PIPELINE_CACHE_NDX_COUNT];
339 };
340
341 void GraphicsCacheTest::initPrograms(SourceCollections &programCollection) const
342 {
343 enum ShaderCacheOpType
344 {
345 SHADERS_CACHE_OP_HIT = 0,
346 SHADERS_CACHE_OP_MISS,
347
348 SHADERS_CACHE_OP_LAST
349 };
350
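// The "_miss" shader variants below differ from the regular shaders only by a small
// constant offset, so they compile to different SPIR-V; pipelines built from them are
// intended to produce cache misses rather than reuse entries created for the regular shaders.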
351 for (uint32_t shaderOpNdx = 0u; shaderOpNdx < SHADERS_CACHE_OP_LAST; shaderOpNdx++)
352 {
353 const ShaderCacheOpType shaderOp = (ShaderCacheOpType)shaderOpNdx;
354
355 if (shaderOp == SHADERS_CACHE_OP_MISS && !m_param.getCompileMissShaders())
356 continue;
357
358 const std::string missHitDiff = (shaderOp == SHADERS_CACHE_OP_HIT ? "" : " + 0.1");
359 const std::string missSuffix = (shaderOp == SHADERS_CACHE_OP_HIT ? "" : "_miss");
360
361 programCollection.glslSources.add("color_vert" + missSuffix)
362 << glu::VertexSource("#version 450\n"
363 "layout(location = 0) in vec4 position;\n"
364 "layout(location = 1) in vec4 color;\n"
365 "layout(location = 0) out highp vec4 vtxColor;\n"
366 "out gl_PerVertex { vec4 gl_Position; };\n"
367 "void main (void)\n"
368 "{\n"
369 " gl_Position = position;\n"
370 " vtxColor = color" +
371 missHitDiff +
372 ";\n"
373 "}\n");
374
375 programCollection.glslSources.add("color_frag" + missSuffix)
376 << glu::FragmentSource("#version 310 es\n"
377 "layout(location = 0) in highp vec4 vtxColor;\n"
378 "layout(location = 0) out highp vec4 fragColor;\n"
379 "void main (void)\n"
380 "{\n"
381 " fragColor = vtxColor" +
382 missHitDiff +
383 ";\n"
384 "}\n");
385
386 VkShaderStageFlags shaderFlag = m_param.getShaderFlags();
387 if (shaderFlag & VK_SHADER_STAGE_GEOMETRY_BIT)
388 {
389 programCollection.glslSources.add("unused_geo" + missSuffix)
390 << glu::GeometrySource("#version 450 \n"
391 "layout(triangles) in;\n"
392 "layout(triangle_strip, max_vertices = 3) out;\n"
393 "layout(location = 0) in highp vec4 in_vtxColor[];\n"
394 "layout(location = 0) out highp vec4 vtxColor;\n"
395 "out gl_PerVertex { vec4 gl_Position; };\n"
396 "in gl_PerVertex { vec4 gl_Position; } gl_in[];\n"
397 "void main (void)\n"
398 "{\n"
399 " for(int ndx=0; ndx<3; ndx++)\n"
400 " {\n"
401 " gl_Position = gl_in[ndx].gl_Position;\n"
402 " vtxColor = in_vtxColor[ndx]" +
403 missHitDiff +
404 ";\n"
405 " EmitVertex();\n"
406 " }\n"
407 " EndPrimitive();\n"
408 "}\n");
409 }
410 if (shaderFlag & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
411 {
412 programCollection.glslSources.add("basic_tcs" + missSuffix) << glu::TessellationControlSource(
413 "#version 450 \n"
414 "layout(vertices = 3) out;\n"
415 "layout(location = 0) in highp vec4 color[];\n"
416 "layout(location = 0) out highp vec4 vtxColor[];\n"
417 "out gl_PerVertex { vec4 gl_Position; } gl_out[3];\n"
418 "in gl_PerVertex { vec4 gl_Position; } gl_in[gl_MaxPatchVertices];\n"
419 "void main()\n"
420 "{\n"
421 " gl_TessLevelOuter[0] = 4.0;\n"
422 " gl_TessLevelOuter[1] = 4.0;\n"
423 " gl_TessLevelOuter[2] = 4.0;\n"
424 " gl_TessLevelInner[0] = 4.0;\n"
425 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
426 " vtxColor[gl_InvocationID] = color[gl_InvocationID]" +
427 missHitDiff +
428 ";\n"
429 "}\n");
430 }
431 if (shaderFlag & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
432 {
433 programCollection.glslSources.add("basic_tes" + missSuffix) << glu::TessellationEvaluationSource(
434 "#version 450 \n"
435 "layout(triangles, fractional_even_spacing, ccw) in;\n"
436 "layout(location = 0) in highp vec4 colors[];\n"
437 "layout(location = 0) out highp vec4 vtxColor;\n"
438 "out gl_PerVertex { vec4 gl_Position; };\n"
439 "in gl_PerVertex { vec4 gl_Position; } gl_in[gl_MaxPatchVertices];\n"
440 "void main() \n"
441 "{\n"
442 " float u = gl_TessCoord.x;\n"
443 " float v = gl_TessCoord.y;\n"
444 " float w = gl_TessCoord.z;\n"
445 " vec4 pos = vec4(0);\n"
446 " vec4 color = vec4(0)" +
447 missHitDiff +
448 ";\n"
449 " pos.xyz += u * gl_in[0].gl_Position.xyz;\n"
450 " color.xyz += u * colors[0].xyz;\n"
451 " pos.xyz += v * gl_in[1].gl_Position.xyz;\n"
452 " color.xyz += v * colors[1].xyz;\n"
453 " pos.xyz += w * gl_in[2].gl_Position.xyz;\n"
454 " color.xyz += w * colors[2].xyz;\n"
455 " pos.w = 1.0;\n"
456 " color.w = 1.0;\n"
457 " gl_Position = pos;\n"
458 " vtxColor = color;\n"
459 "}\n");
460 }
461 }
462 }
463
464 void GraphicsCacheTest::checkSupport(Context &context) const
465 {
466 if (m_param.getShaderFlags() & VK_SHADER_STAGE_GEOMETRY_BIT)
467 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_GEOMETRY_SHADER);
468 if ((m_param.getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ||
469 (m_param.getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
470 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_TESSELLATION_SHADER);
471
472 checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
473 m_param.getPipelineConstructionType());
474 }
475
476 TestInstance *GraphicsCacheTest::createInstance(Context &context) const
477 {
478 return new GraphicsCacheTestInstance(context, &m_param);
479 }
480
481 GraphicsCacheTestInstance::GraphicsCacheTestInstance(Context &context, const CacheTestParam *param)
482 : CacheTestInstance(context, param)
483 , m_renderSize(32u, 32u)
484 , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
485 , m_depthFormat(VK_FORMAT_D16_UNORM)
486 , m_pipeline{
487 {context.getInstanceInterface(), context.getDeviceInterface(), context.getPhysicalDevice(),
488 context.getDevice(), context.getDeviceExtensions(), param->getPipelineConstructionType()},
489 {context.getInstanceInterface(), context.getDeviceInterface(), context.getPhysicalDevice(),
490 context.getDevice(), context.getDeviceExtensions(), param->getPipelineConstructionType()},
491 }
492 {
493 const DeviceInterface &vk = m_context.getDeviceInterface();
494 const VkDevice vkDevice = m_context.getDevice();
495
496 // Create vertex buffer
497 {
498 m_vertexBuffer =
499 createBufferAndBindMemory(m_context, 1024u, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_vertexBufferMemory);
500
501 m_vertices = createOverlappingQuads();
502 // Load vertices into vertex buffer
503 deMemcpy(m_vertexBufferMemory->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
504 flushAlloc(vk, vkDevice, *m_vertexBufferMemory);
505 }
506
507 // Create render pass
508 m_renderPassFramebuffer[PIPELINE_CACHE_NDX_NO_CACHE] =
509 RenderPassWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, m_colorFormat, m_depthFormat);
510 m_renderPassFramebuffer[PIPELINE_CACHE_NDX_CACHED] =
511 RenderPassWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, m_colorFormat, m_depthFormat);
512
513 const VkComponentMapping ComponentMappingRGBA = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
514 VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
515 // Create color image
516 {
517 m_colorImage[PIPELINE_CACHE_NDX_NO_CACHE] =
518 createImage2DAndBindMemory(m_context, m_colorFormat, m_renderSize.x(), m_renderSize.y(),
519 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
520 VK_SAMPLE_COUNT_1_BIT, &m_colorImageAlloc[PIPELINE_CACHE_NDX_NO_CACHE]);
521 m_colorImage[PIPELINE_CACHE_NDX_CACHED] =
522 createImage2DAndBindMemory(m_context, m_colorFormat, m_renderSize.x(), m_renderSize.y(),
523 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
524 VK_SAMPLE_COUNT_1_BIT, &m_colorImageAlloc[PIPELINE_CACHE_NDX_CACHED]);
525 }
526
527 // Create depth image
528 {
529 m_depthImage = createImage2DAndBindMemory(m_context, m_depthFormat, m_renderSize.x(), m_renderSize.y(),
530 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_SAMPLE_COUNT_1_BIT,
531 &m_depthImageAlloc);
532 }
533
534 // Set up image layout transition barriers
535 {
536 VkImageMemoryBarrier colorImageBarrier = {
537 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
538 DE_NULL, // const void* pNext;
539 0u, // VkAccessFlags srcAccessMask;
540 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
541 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
542 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
543 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
544 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
545 *m_colorImage[PIPELINE_CACHE_NDX_NO_CACHE], // VkImage image;
546 {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
547 };
548
549 m_imageLayoutBarriers[0] = colorImageBarrier;
550
551 colorImageBarrier.image = *m_colorImage[PIPELINE_CACHE_NDX_CACHED];
552 m_imageLayoutBarriers[1] = colorImageBarrier;
553
554 const VkImageMemoryBarrier depthImageBarrier = {
555 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
556 DE_NULL, // const void* pNext;
557 0u, // VkAccessFlags srcAccessMask;
558 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
559 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
560 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
561 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
562 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
563 *m_depthImage, // VkImage image;
564 {VK_IMAGE_ASPECT_DEPTH_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
565 };
566
567 m_imageLayoutBarriers[2] = depthImageBarrier;
568 }
569 // Create color attachment view
570 {
571 VkImageViewCreateInfo colorAttachmentViewParams = {
572 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
573 DE_NULL, // const void* pNext;
574 0u, // VkImageViewCreateFlags flags;
575 *m_colorImage[PIPELINE_CACHE_NDX_NO_CACHE], // VkImage image;
576 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
577 m_colorFormat, // VkFormat format;
578 ComponentMappingRGBA, // VkComponentMapping components;
579 {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
580 };
581
582 m_colorAttachmentView[PIPELINE_CACHE_NDX_NO_CACHE] = createImageView(vk, vkDevice, &colorAttachmentViewParams);
583
584 colorAttachmentViewParams.image = *m_colorImage[PIPELINE_CACHE_NDX_CACHED];
585 m_colorAttachmentView[PIPELINE_CACHE_NDX_CACHED] = createImageView(vk, vkDevice, &colorAttachmentViewParams);
586 }
587
588 // Create depth attachment view
589 {
590 const VkImageViewCreateInfo depthAttachmentViewParams = {
591 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
592 DE_NULL, // const void* pNext;
593 0u, // VkImageViewCreateFlags flags;
594 *m_depthImage, // VkImage image;
595 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
596 m_depthFormat, // VkFormat format;
597 ComponentMappingRGBA, // VkComponentMapping components;
598 {VK_IMAGE_ASPECT_DEPTH_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
599 };
600
601 m_depthAttachmentView = createImageView(vk, vkDevice, &depthAttachmentViewParams);
602 }
603
604 // Create framebuffer
605 {
606 std::vector<VkImage> images = {
607 *m_colorImage[PIPELINE_CACHE_NDX_NO_CACHE],
608 *m_depthImage,
609 };
610 VkImageView attachmentBindInfos[2] = {
611 *m_colorAttachmentView[PIPELINE_CACHE_NDX_NO_CACHE],
612 *m_depthAttachmentView,
613 };
614
615 VkFramebufferCreateInfo framebufferParams = {
616 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
617 DE_NULL, // const void* pNext;
618 0u, // VkFramebufferCreateFlags flags;
619 *m_renderPassFramebuffer[PIPELINE_CACHE_NDX_CACHED], // VkRenderPass renderPass;
620 2u, // uint32_t attachmentCount;
621 attachmentBindInfos, // const VkImageView* pAttachments;
622 (uint32_t)m_renderSize.x(), // uint32_t width;
623 (uint32_t)m_renderSize.y(), // uint32_t height;
624 1u, // uint32_t layers;
625 };
626
627 m_renderPassFramebuffer[PIPELINE_CACHE_NDX_NO_CACHE].createFramebuffer(vk, vkDevice, &framebufferParams,
628 images);
629
630 framebufferParams.renderPass = *m_renderPassFramebuffer[PIPELINE_CACHE_NDX_CACHED];
631 images[0] = *m_colorImage[PIPELINE_CACHE_NDX_CACHED];
632 attachmentBindInfos[0] = *m_colorAttachmentView[PIPELINE_CACHE_NDX_CACHED];
633 m_renderPassFramebuffer[PIPELINE_CACHE_NDX_CACHED].createFramebuffer(vk, vkDevice, &framebufferParams, images);
634 }
635
636 // Create pipeline layout
637 {
638 const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
639 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
640 DE_NULL, // const void* pNext;
641 0u, // VkPipelineLayoutCreateFlags flags;
642 0u, // uint32_t setLayoutCount;
643 DE_NULL, // const VkDescriptorSetLayout* pSetLayouts;
644 0u, // uint32_t pushConstantRangeCount;
645 DE_NULL // const VkPushConstantRange* pPushConstantRanges;
646 };
647
648 m_pipelineLayout =
649 PipelineLayoutWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, &pipelineLayoutParams);
650 }
651 }
652
653 GraphicsCacheTestInstance::~GraphicsCacheTestInstance(void)
654 {
655 }
656
657 void GraphicsCacheTestInstance::preparePipelineWrapper(GraphicsPipelineWrapper &gpw, VkPipelineCache cache,
658 bool useMissShaders = false)
659 {
660 static const VkPipelineDepthStencilStateCreateInfo defaultDepthStencilState{
661 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
662 DE_NULL, // const void* pNext;
663 0u, // VkPipelineDepthStencilStateCreateFlags flags;
664 VK_TRUE, // VkBool32 depthTestEnable;
665 VK_TRUE, // VkBool32 depthWriteEnable;
666 VK_COMPARE_OP_LESS_OR_EQUAL, // VkCompareOp depthCompareOp;
667 VK_FALSE, // VkBool32 depthBoundsTestEnable;
668 VK_FALSE, // VkBool32 stencilTestEnable;
669 {
670 // VkStencilOpState front;
671 VK_STENCIL_OP_KEEP, // VkStencilOp failOp;
672 VK_STENCIL_OP_KEEP, // VkStencilOp passOp;
673 VK_STENCIL_OP_KEEP, // VkStencilOp depthFailOp;
674 VK_COMPARE_OP_NEVER, // VkCompareOp compareOp;
675 0u, // uint32_t compareMask;
676 0u, // uint32_t writeMask;
677 0u, // uint32_t reference;
678 },
679 {
680 // VkStencilOpState back;
681 VK_STENCIL_OP_KEEP, // VkStencilOp failOp;
682 VK_STENCIL_OP_KEEP, // VkStencilOp passOp;
683 VK_STENCIL_OP_KEEP, // VkStencilOp depthFailOp;
684 VK_COMPARE_OP_NEVER, // VkCompareOp compareOp;
685 0u, // uint32_t compareMask;
686 0u, // uint32_t writeMask;
687 0u, // uint32_t reference;
688 },
689 0.0f, // float minDepthBounds;
690 1.0f, // float maxDepthBounds;
691 };
692
693 static const VkVertexInputBindingDescription defaultVertexInputBindingDescription{
694 0u, // uint32_t binding;
695 sizeof(Vertex4RGBA), // uint32_t strideInBytes;
696 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
697 };
698
699 static const VkVertexInputAttributeDescription defaultVertexInputAttributeDescriptions[]{
700 {
701 0u, // uint32_t location;
702 0u, // uint32_t binding;
703 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
704 0u // uint32_t offsetInBytes;
705 },
706 {
707 1u, // uint32_t location;
708 0u, // uint32_t binding;
709 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
710 offsetof(Vertex4RGBA, color), // uint32_t offsetInBytes;
711 }};
712
713 static const VkPipelineVertexInputStateCreateInfo defaultVertexInputStateParams{
714 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
715 DE_NULL, // const void* pNext;
716 0u, // VkPipelineVertexInputStateCreateFlags flags;
717 1u, // uint32_t vertexBindingDescriptionCount;
718 &defaultVertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
719 2u, // uint32_t vertexAttributeDescriptionCount;
720 defaultVertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
721 };
722
723 const DeviceInterface &vk = m_context.getDeviceInterface();
724 const VkDevice vkDevice = m_context.getDevice();
725 const std::string postfix = useMissShaders ? "_miss" : "";
726
727 auto createModule = [&vk, vkDevice, &postfix](Context &context, std::string shaderName)
728 { return ShaderWrapper(vk, vkDevice, context.getBinaryCollection().get(shaderName + postfix), 0); };
729
730 // Bind shader stages
731 ShaderWrapper vertShaderModule = createModule(m_context, "color_vert");
732 ShaderWrapper fragShaderModule = createModule(m_context, "color_frag");
733 ShaderWrapper tescShaderModule;
734 ShaderWrapper teseShaderModule;
735 ShaderWrapper geomShaderModule;
736
737 if (m_param->getShaderFlags() & VK_SHADER_STAGE_GEOMETRY_BIT)
738 geomShaderModule = createModule(m_context, "unused_geo");
739 if (m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
740 tescShaderModule = createModule(m_context, "basic_tcs");
741 if (m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
742 teseShaderModule = createModule(m_context, "basic_tes");
743
744 const std::vector<VkViewport> viewport{makeViewport(m_renderSize)};
745 const std::vector<VkRect2D> scissor{makeRect2D(m_renderSize)};
746
747 gpw.setDefaultTopology((m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ?
748 VK_PRIMITIVE_TOPOLOGY_PATCH_LIST :
749 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
750 .setDefaultRasterizationState()
751 .setDefaultColorBlendState()
752 .setDefaultMultisampleState()
753 .setupVertexInputState(&defaultVertexInputStateParams)
754 .setupPreRasterizationShaderState(viewport, scissor, m_pipelineLayout, *m_renderPassFramebuffer[0], 0u,
755 vertShaderModule, DE_NULL, tescShaderModule, teseShaderModule,
756 geomShaderModule)
757 .setupFragmentShaderState(m_pipelineLayout, *m_renderPassFramebuffer[0], 0u, fragShaderModule,
758 &defaultDepthStencilState)
759 .setupFragmentOutputState(*m_renderPassFramebuffer[0])
760 .setMonolithicPipelineLayout(m_pipelineLayout)
761 .buildPipeline(cache);
762 }
763
764 void GraphicsCacheTestInstance::preparePipelines(void)
765 {
766 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_NO_CACHE], *m_cache);
767 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_CACHED], *m_cache);
768 }
769
770 void GraphicsCacheTestInstance::prepareRenderPass(const RenderPassWrapper &renderPassFramebuffer,
771 GraphicsPipelineWrapper &pipeline)
772 {
773 const DeviceInterface &vk = m_context.getDeviceInterface();
774
775 const VkClearValue attachmentClearValues[2] = {
776 defaultClearValue(m_colorFormat),
777 defaultClearValue(m_depthFormat),
778 };
779
780 renderPassFramebuffer.begin(vk, *m_cmdBuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), 2u,
781 attachmentClearValues);
782
783 pipeline.bind(*m_cmdBuffer);
784 VkDeviceSize offsets = 0u;
785 vk.cmdBindVertexBuffers(*m_cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &offsets);
786 vk.cmdDraw(*m_cmdBuffer, (uint32_t)m_vertices.size(), 1u, 0u, 0u);
787
788 renderPassFramebuffer.end(vk, *m_cmdBuffer);
789 }
790
791 void GraphicsCacheTestInstance::prepareCommandBuffer(void)
792 {
793 const DeviceInterface &vk = m_context.getDeviceInterface();
794
795 preparePipelines();
796
797 beginCommandBuffer(vk, *m_cmdBuffer, 0u);
798
799 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
800 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
801 (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(m_imageLayoutBarriers),
802 m_imageLayoutBarriers);
803
804 prepareRenderPass(m_renderPassFramebuffer[PIPELINE_CACHE_NDX_NO_CACHE], m_pipeline[PIPELINE_CACHE_NDX_NO_CACHE]);
805
806 // After the first render pass, the images are in correct layouts
807
808 prepareRenderPass(m_renderPassFramebuffer[PIPELINE_CACHE_NDX_CACHED], m_pipeline[PIPELINE_CACHE_NDX_CACHED]);
809
810 endCommandBuffer(vk, *m_cmdBuffer);
811 }
812
813 tcu::TestStatus GraphicsCacheTestInstance::verifyTestResult(void)
814 {
815 const DeviceInterface &vk = m_context.getDeviceInterface();
816 const VkDevice vkDevice = m_context.getDevice();
817 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
818
819 const VkQueue queue = m_context.getUniversalQueue();
820 de::MovePtr<tcu::TextureLevel> resultNoCache =
821 readColorAttachment(vk, vkDevice, queue, queueFamilyIndex, m_context.getDefaultAllocator(),
822 *m_colorImage[PIPELINE_CACHE_NDX_NO_CACHE], m_colorFormat, m_renderSize);
823 de::MovePtr<tcu::TextureLevel> resultCache =
824 readColorAttachment(vk, vkDevice, queue, queueFamilyIndex, m_context.getDefaultAllocator(),
825 *m_colorImage[PIPELINE_CACHE_NDX_CACHED], m_colorFormat, m_renderSize);
826
827 bool compareOk = tcu::intThresholdCompare(m_context.getTestContext().getLog(), "IntImageCompare",
828 "Image comparison", resultNoCache->getAccess(), resultCache->getAccess(),
829 tcu::UVec4(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT);
830
831 if (compareOk)
832 return tcu::TestStatus::pass("Render images w/o cached pipeline match.");
833 else
834 return tcu::TestStatus::fail("Render Images mismatch.");
835 }
836
837 class ComputeCacheTest : public CacheTest
838 {
839 public:
840 ComputeCacheTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
841 : CacheTest(testContext, name, param)
842 {
843 }
844 virtual ~ComputeCacheTest(void)
845 {
846 }
847 virtual void initPrograms(SourceCollections &programCollection) const;
848 virtual TestInstance *createInstance(Context &context) const;
849 };
850
851 class ComputeCacheTestInstance : public CacheTestInstance
852 {
853 public:
854 ComputeCacheTestInstance(Context &context, const CacheTestParam *param);
855 virtual ~ComputeCacheTestInstance(void);
856 virtual void prepareCommandBuffer(void);
857
858 protected:
859 virtual tcu::TestStatus verifyTestResult(void);
860 void buildBuffers(void);
861 void buildDescriptorSets(uint32_t ndx);
862 void buildShader(void);
863 void buildPipeline(uint32_t ndx);
864
865 protected:
866 Move<VkBuffer> m_inputBuf;
867 de::MovePtr<Allocation> m_inputBufferAlloc;
868 Move<VkShaderModule> m_computeShaderModule;
869
870 Move<VkBuffer> m_outputBuf[PIPELINE_CACHE_NDX_COUNT];
871 de::MovePtr<Allocation> m_outputBufferAlloc[PIPELINE_CACHE_NDX_COUNT];
872
873 Move<VkDescriptorPool> m_descriptorPool[PIPELINE_CACHE_NDX_COUNT];
874 Move<VkDescriptorSetLayout> m_descriptorSetLayout[PIPELINE_CACHE_NDX_COUNT];
875 Move<VkDescriptorSet> m_descriptorSet[PIPELINE_CACHE_NDX_COUNT];
876
877 Move<VkPipelineLayout> m_pipelineLayout[PIPELINE_CACHE_NDX_COUNT];
878 Move<VkPipeline> m_pipeline[PIPELINE_CACHE_NDX_COUNT];
879 };
880
881 void ComputeCacheTest::initPrograms(SourceCollections &programCollection) const
882 {
883 programCollection.glslSources.add("basic_compute") << glu::ComputeSource(
884 "#version 310 es\n"
885 "layout(local_size_x = 1) in;\n"
886 "layout(std430) buffer;\n"
887 "layout(binding = 0) readonly buffer Input0\n"
888 "{\n"
889 " vec4 elements[];\n"
890 "} input_data0;\n"
891 "layout(binding = 1) writeonly buffer Output\n"
892 "{\n"
893 " vec4 elements[];\n"
894 "} output_data;\n"
895 "void main()\n"
896 "{\n"
897 " uint ident = gl_GlobalInvocationID.x;\n"
898 " output_data.elements[ident] = input_data0.elements[ident] * input_data0.elements[ident];\n"
899 "}");
900 }
901
902 TestInstance *ComputeCacheTest::createInstance(Context &context) const
903 {
904 return new ComputeCacheTestInstance(context, &m_param);
905 }
906
907 void ComputeCacheTestInstance::buildBuffers(void)
908 {
909 const DeviceInterface &vk = m_context.getDeviceInterface();
910 const VkDevice vkDevice = m_context.getDevice();
911
912 // Create buffer object, allocate storage, and generate input data
913 const VkDeviceSize size = sizeof(tcu::Vec4) * 128u;
914 m_inputBuf = createBufferAndBindMemory(m_context, size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, &m_inputBufferAlloc);
915
916 // Initialize input buffer
917 tcu::Vec4 *pVec = reinterpret_cast<tcu::Vec4 *>(m_inputBufferAlloc->getHostPtr());
918 for (uint32_t ndx = 0u; ndx < 128u; ndx++)
919 {
920 for (uint32_t component = 0u; component < 4u; component++)
921 pVec[ndx][component] = (float)(ndx * (component + 1u));
922 }
923 flushAlloc(vk, vkDevice, *m_inputBufferAlloc);
924
925 // Clear the output buffer
926 for (uint32_t ndx = 0; ndx < PIPELINE_CACHE_NDX_COUNT; ndx++)
927 {
928 m_outputBuf[ndx] =
929 createBufferAndBindMemory(m_context, size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, &m_outputBufferAlloc[ndx]);
930
931 pVec = reinterpret_cast<tcu::Vec4 *>(m_outputBufferAlloc[ndx]->getHostPtr());
932
933 for (uint32_t i = 0; i < (size / sizeof(tcu::Vec4)); i++)
934 pVec[i] = tcu::Vec4(0.0f);
935
936 flushAlloc(vk, vkDevice, *m_outputBufferAlloc[ndx]);
937 }
938 }
939
940 void ComputeCacheTestInstance::buildDescriptorSets(uint32_t ndx)
941 {
942 const DeviceInterface &vk = m_context.getDeviceInterface();
943 const VkDevice vkDevice = m_context.getDevice();
944
945 // Create descriptor set layout
946 DescriptorSetLayoutBuilder descLayoutBuilder;
947
948 for (uint32_t bindingNdx = 0u; bindingNdx < 2u; bindingNdx++)
949 descLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
950
951 m_descriptorSetLayout[ndx] = descLayoutBuilder.build(vk, vkDevice);
952
953 std::vector<VkDescriptorBufferInfo> descriptorInfos;
954 descriptorInfos.push_back(makeDescriptorBufferInfo(*m_inputBuf, 0u, sizeof(tcu::Vec4) * 128u));
955 descriptorInfos.push_back(makeDescriptorBufferInfo(*m_outputBuf[ndx], 0u, sizeof(tcu::Vec4) * 128u));
956
957 // Create descriptor pool
958 m_descriptorPool[ndx] = DescriptorPoolBuilder()
959 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
960 .build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
961
962 // Create descriptor set
963 const VkDescriptorSetAllocateInfo descriptorSetAllocInfo = {
964 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
965 DE_NULL, // const void* pNext;
966 *m_descriptorPool[ndx], // VkDescriptorPool descriptorPool;
967 1u, // uint32_t setLayoutCount;
968 &m_descriptorSetLayout[ndx].get(), // const VkDescriptorSetLayout* pSetLayouts;
969 };
970 m_descriptorSet[ndx] = allocateDescriptorSet(vk, vkDevice, &descriptorSetAllocInfo);
971
972 DescriptorSetUpdateBuilder builder;
973 for (uint32_t descriptorNdx = 0u; descriptorNdx < 2u; descriptorNdx++)
974 {
975 builder.writeSingle(*m_descriptorSet[ndx], DescriptorSetUpdateBuilder::Location::binding(descriptorNdx),
976 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfos[descriptorNdx]);
977 }
978 builder.update(vk, vkDevice);
979 }
980
981 void ComputeCacheTestInstance::buildShader(void)
982 {
983 const DeviceInterface &vk = m_context.getDeviceInterface();
984 const VkDevice vkDevice = m_context.getDevice();
985
986 // Create compute shader
987 VkShaderModuleCreateInfo shaderModuleCreateInfo = {
988 VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, // VkStructureType sType;
989 DE_NULL, // const void* pNext;
990 0u, // VkShaderModuleCreateFlags flags;
991 m_context.getBinaryCollection().get("basic_compute").getSize(), // uintptr_t codeSize;
992 (uint32_t *)m_context.getBinaryCollection()
993 .get("basic_compute")
994 .getBinary(), // const uint32_t* pCode;
995 };
996 m_computeShaderModule = createShaderModule(vk, vkDevice, &shaderModuleCreateInfo);
997 }
998
999 void ComputeCacheTestInstance::buildPipeline(uint32_t ndx)
1000 {
1001 const DeviceInterface &vk = m_context.getDeviceInterface();
1002 const VkDevice vkDevice = m_context.getDevice();
1003
1004 // Create compute pipeline layout
1005 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
1006 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
1007 DE_NULL, // const void* pNext;
1008 0u, // VkPipelineLayoutCreateFlags flags;
1009 1u, // uint32_t setLayoutCount;
1010 &m_descriptorSetLayout[ndx].get(), // const VkDescriptorSetLayout* pSetLayouts;
1011 0u, // uint32_t pushConstantRangeCount;
1012 DE_NULL, // const VkPushConstantRange* pPushConstantRanges;
1013 };
1014
1015 m_pipelineLayout[ndx] = createPipelineLayout(vk, vkDevice, &pipelineLayoutCreateInfo);
1016
1017 const VkPipelineShaderStageCreateInfo stageCreateInfo = {
1018 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1019 DE_NULL, // const void* pNext;
1020 0u, // VkPipelineShaderStageCreateFlags flags;
1021 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
1022 *m_computeShaderModule, // VkShaderModule module;
1023 "main", // const char* pName;
1024 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
1025 };
1026
1027 const VkComputePipelineCreateInfo pipelineCreateInfo = {
1028 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
1029 DE_NULL, // const void* pNext;
1030 0u, // VkPipelineCreateFlags flags;
1031 stageCreateInfo, // VkPipelineShaderStageCreateInfo stage;
1032 *m_pipelineLayout[ndx], // VkPipelineLayout layout;
1033 (VkPipeline)0, // VkPipeline basePipelineHandle;
1034 0u, // int32_t basePipelineIndex;
1035 };
1036
1037 m_pipeline[ndx] = createComputePipeline(vk, vkDevice, *m_cache, &pipelineCreateInfo);
1038 }
1039
1040 ComputeCacheTestInstance::ComputeCacheTestInstance(Context &context, const CacheTestParam *param)
1041 : CacheTestInstance(context, param)
1042 {
1043 buildBuffers();
1044
1045 buildDescriptorSets(PIPELINE_CACHE_NDX_NO_CACHE);
1046
1047 buildDescriptorSets(PIPELINE_CACHE_NDX_CACHED);
1048
1049 buildShader();
1050
1051 buildPipeline(PIPELINE_CACHE_NDX_NO_CACHE);
1052
1053 buildPipeline(PIPELINE_CACHE_NDX_CACHED);
1054 }
1055
1056 ComputeCacheTestInstance::~ComputeCacheTestInstance(void)
1057 {
1058 }
1059
1060 void ComputeCacheTestInstance::prepareCommandBuffer(void)
1061 {
1062 const DeviceInterface &vk = m_context.getDeviceInterface();
1063
1064 beginCommandBuffer(vk, *m_cmdBuffer, 0u);
1065
1066 for (uint32_t ndx = 0; ndx < PIPELINE_CACHE_NDX_COUNT; ndx++)
1067 {
1068 vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline[ndx]);
1069 vk.cmdBindDescriptorSets(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout[ndx], 0u, 1u,
1070 &m_descriptorSet[ndx].get(), 0u, DE_NULL);
1071 vk.cmdDispatch(*m_cmdBuffer, 128u, 1u, 1u);
1072 }
1073
1074 endCommandBuffer(vk, *m_cmdBuffer);
1075 }
1076
1077 tcu::TestStatus ComputeCacheTestInstance::verifyTestResult(void)
1078 {
1079 const DeviceInterface &vk = m_context.getDeviceInterface();
1080 const VkDevice vkDevice = m_context.getDevice();
1081
1082 // Read the content of output buffers
1083 invalidateAlloc(vk, vkDevice, *m_outputBufferAlloc[PIPELINE_CACHE_NDX_NO_CACHE]);
1084
1085 invalidateAlloc(vk, vkDevice, *m_outputBufferAlloc[PIPELINE_CACHE_NDX_CACHED]);
1086 // Compare the content
1087 uint8_t *bufNoCache = reinterpret_cast<uint8_t *>(m_outputBufferAlloc[PIPELINE_CACHE_NDX_NO_CACHE]->getHostPtr());
1088 uint8_t *bufCached = reinterpret_cast<uint8_t *>(m_outputBufferAlloc[PIPELINE_CACHE_NDX_CACHED]->getHostPtr());
1089 for (uint32_t ndx = 0u; ndx < sizeof(tcu::Vec4) * 128u; ndx++)
1090 {
1091 if (bufNoCache[ndx] != bufCached[ndx])
1092 {
1093 return tcu::TestStatus::fail("Output buffers w/o cached pipeline mismatch.");
1094 }
1095 }
1096
1097 return tcu::TestStatus::pass("Output buffers w/o cached pipeline match.");
1098 }
1099
1100 class PipelineFromCacheTest : public GraphicsCacheTest
1101 {
1102 public:
1103 PipelineFromCacheTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param);
1104 virtual ~PipelineFromCacheTest(void)
1105 {
1106 }
1107 virtual TestInstance *createInstance(Context &context) const;
1108 };
1109
1110 PipelineFromCacheTest::PipelineFromCacheTest(tcu::TestContext &testContext, const std::string &name,
1111 const CacheTestParam *param)
1112 : GraphicsCacheTest(testContext, name, param)
1113 {
1114 }
1115
1116 class PipelineFromCacheTestInstance : public GraphicsCacheTestInstance
1117 {
1118 public:
1119 PipelineFromCacheTestInstance(Context &context, const CacheTestParam *param);
1120 virtual ~PipelineFromCacheTestInstance(void);
1121
1122 protected:
1123 void preparePipelines(void);
1124
1125 protected:
1126 Move<VkPipelineCache> m_newCache;
1127 uint8_t *m_data;
1128 };
1129
1130 TestInstance *PipelineFromCacheTest::createInstance(Context &context) const
1131 {
1132 return new PipelineFromCacheTestInstance(context, &m_param);
1133 }
1134
1135 PipelineFromCacheTestInstance::PipelineFromCacheTestInstance(Context &context, const CacheTestParam *param)
1136 : GraphicsCacheTestInstance(context, param)
1137 , m_data(DE_NULL)
1138 {
1139 const DeviceInterface &vk = m_context.getDeviceInterface();
1140 const VkDevice vkDevice = m_context.getDevice();
1141
1142 // Create more pipeline caches
1143 {
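// Standard two-call pattern: first query the size of the cache blob, then retrieve
// the data and feed it unchanged as initial data to a second VkPipelineCache.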
1144 size_t dataSize = 0u;
1145
1146 VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));
1147
1148 m_data = new uint8_t[dataSize];
1149 DE_ASSERT(m_data);
1150 VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));
1151
1152 const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
1153 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
1154 DE_NULL, // const void* pNext;
1155 0u, // VkPipelineCacheCreateFlags flags;
1156 dataSize, // uintptr_t initialDataSize;
1157 m_data, // const void* pInitialData;
1158 };
1159 m_newCache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
1160 }
1161 }
1162
1163 PipelineFromCacheTestInstance::~PipelineFromCacheTestInstance(void)
1164 {
1165 delete[] m_data;
1166 }
1167
1168 void PipelineFromCacheTestInstance::preparePipelines(void)
1169 {
1170 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_NO_CACHE], *m_cache);
1171 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_CACHED], *m_newCache);
1172 }
1173
1174 class PipelineFromIncompleteCacheTest : public GraphicsCacheTest
1175 {
1176 public:
1177 PipelineFromIncompleteCacheTest(tcu::TestContext &testContext, const std::string &name,
1178 const CacheTestParam *param);
1179 virtual ~PipelineFromIncompleteCacheTest(void)
1180 {
1181 }
1182 virtual TestInstance *createInstance(Context &context) const;
1183 };
1184
1185 PipelineFromIncompleteCacheTest::PipelineFromIncompleteCacheTest(tcu::TestContext &testContext, const std::string &name,
1186 const CacheTestParam *param)
1187 : GraphicsCacheTest(testContext, name, param)
1188 {
1189 }
1190
1191 class PipelineFromIncompleteCacheTestInstance : public GraphicsCacheTestInstance
1192 {
1193 public:
1194 PipelineFromIncompleteCacheTestInstance(Context &context, const CacheTestParam *param);
1195 virtual ~PipelineFromIncompleteCacheTestInstance(void);
1196
1197 protected:
1198 void preparePipelines(void);
1199
1200 protected:
1201 Move<VkPipelineCache> m_newCache;
1202 uint8_t *m_data;
1203 };
1204
1205 TestInstance *PipelineFromIncompleteCacheTest::createInstance(Context &context) const
1206 {
1207 return new PipelineFromIncompleteCacheTestInstance(context, &m_param);
1208 }
1209
1210 PipelineFromIncompleteCacheTestInstance::PipelineFromIncompleteCacheTestInstance(Context &context,
1211 const CacheTestParam *param)
1212 : GraphicsCacheTestInstance(context, param)
1213 , m_data(DE_NULL)
1214 {
1215 const DeviceInterface &vk = m_context.getDeviceInterface();
1216 const VkDevice vkDevice = m_context.getDevice();
1217
1218 // Create more pipeline caches
1219 {
1220 size_t dataSize = 0u;
1221 VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));
1222
1223 if (dataSize == 0)
1224 TCU_THROW(NotSupportedError, "Empty pipeline cache - unable to test");
1225
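// Drop one byte from the reported size so the next query cannot return the whole
// blob and must report VK_INCOMPLETE; the truncated data is then used as initial
// data for a new cache.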
1226 dataSize--;
1227
1228 m_data = new uint8_t[dataSize];
1229 DE_ASSERT(m_data);
1230 if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
1231 TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");
1232
1233 const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
1234 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
1235 DE_NULL, // const void* pNext;
1236 0u, // VkPipelineCacheCreateFlags flags;
1237 dataSize, // uintptr_t initialDataSize;
1238 m_data, // const void* pInitialData;
1239 };
1240 m_newCache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
1241 }
1242 }
1243
1244 PipelineFromIncompleteCacheTestInstance::~PipelineFromIncompleteCacheTestInstance(void)
1245 {
1246 delete[] m_data;
1247 }
1248
1249 void PipelineFromIncompleteCacheTestInstance::preparePipelines(void)
1250 {
1251 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_NO_CACHE], *m_cache);
1252 preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_CACHED], *m_newCache);
1253 }
1254
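// Flavours of source/destination caches used by the merge tests (see
// MergeCacheTestInstance::createPipelineCache below):
//   EMPTY        - freshly created cache with no initial data
//   FROM_DATA    - cache seeded with the blob retrieved from m_cache
//   HIT          - cache that already contains the regular test pipeline
//   MISS         - cache that contains only the "_miss" pipeline variant
//   MISS_AND_HIT - cache containing both variants
//   MERGED       - cache produced by merging FROM_DATA, HIT and MISS caches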
1255 enum MergeCacheType
1256 {
1257 MERGE_CACHE_EMPTY = 0,
1258 MERGE_CACHE_FROM_DATA,
1259 MERGE_CACHE_HIT,
1260 MERGE_CACHE_MISS,
1261 MERGE_CACHE_MISS_AND_HIT,
1262 MERGE_CACHE_MERGED,
1263
1264 MERGE_CACHE_TYPE_LAST = MERGE_CACHE_MERGED
1265 };
1266
1267 std::string getMergeCacheTypeStr(MergeCacheType type)
1268 {
1269 switch (type)
1270 {
1271 case MERGE_CACHE_EMPTY:
1272 return "empty";
1273 case MERGE_CACHE_FROM_DATA:
1274 return "from_data";
1275 case MERGE_CACHE_HIT:
1276 return "hit";
1277 case MERGE_CACHE_MISS_AND_HIT:
1278 return "misshit";
1279 case MERGE_CACHE_MISS:
1280 return "miss";
1281 case MERGE_CACHE_MERGED:
1282 return "merged";
1283 }
1284 TCU_FAIL("unhandled merge cache type");
1285 }
1286
1287 std::string getMergeCacheTypesStr(const std::vector<MergeCacheType> &types)
1288 {
1289 std::string ret;
1290 for (size_t idx = 0; idx < types.size(); ++idx)
1291 {
1292 if (ret.size())
1293 ret += '_';
1294 ret += getMergeCacheTypeStr(types[idx]);
1295 }
1296 return ret;
1297 }
1298
1299 class MergeCacheTestParam
1300 {
1301 public:
1302 MergeCacheType destCacheType;
1303 std::vector<MergeCacheType> srcCacheTypes;
1304 };
1305
1306 class MergeCacheTest : public GraphicsCacheTest
1307 {
1308 public:
1309 MergeCacheTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param,
1310 const MergeCacheTestParam *mergeCacheParam)
1311 : GraphicsCacheTest(testContext, name, param)
1312 , m_mergeCacheParam(*mergeCacheParam)
1313 {
1314 }
1315 virtual ~MergeCacheTest(void)
1316 {
1317 }
1318 virtual TestInstance *createInstance(Context &context) const;
1319
1320 private:
1321 const MergeCacheTestParam m_mergeCacheParam;
1322 };
1323
1324 class MergeCacheTestInstance : public GraphicsCacheTestInstance
1325 {
1326 public:
1327 MergeCacheTestInstance(Context &context, const CacheTestParam *param, const MergeCacheTestParam *mergeCacheParam);
1328
1329 private:
1330 Move<VkPipelineCache> createPipelineCache(const InstanceInterface &vki, const DeviceInterface &vk,
1331 VkPhysicalDevice physicalDevice, VkDevice device, MergeCacheType type);
1332
1333 protected:
1334 void preparePipelines(void);
1335
1336 protected:
1337 Move<VkPipelineCache> m_cacheMerged;
1338 };
1339
1340 TestInstance *MergeCacheTest::createInstance(Context &context) const
1341 {
1342 return new MergeCacheTestInstance(context, &m_param, &m_mergeCacheParam);
1343 }
1344
1345 MergeCacheTestInstance::MergeCacheTestInstance(Context &context, const CacheTestParam *param,
1346 const MergeCacheTestParam *mergeCacheParam)
1347 : GraphicsCacheTestInstance(context, param)
1348 {
1349 const InstanceInterface &vki = context.getInstanceInterface();
1350 const DeviceInterface &vk = m_context.getDeviceInterface();
1351 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
1352 const VkDevice vkDevice = m_context.getDevice();
1353
1354 // Create a merge destination cache
1355 m_cacheMerged = createPipelineCache(vki, vk, physicalDevice, vkDevice, mergeCacheParam->destCacheType);
1356
1357 // Create more pipeline caches
1358 std::vector<VkPipelineCache> sourceCaches(mergeCacheParam->srcCacheTypes.size());
1359 typedef de::SharedPtr<Move<VkPipelineCache>> PipelineCachePtr;
1360 std::vector<PipelineCachePtr> sourceCachePtrs(sourceCaches.size());
1361 {
1362 for (size_t sourceIdx = 0; sourceIdx < mergeCacheParam->srcCacheTypes.size(); sourceIdx++)
1363 {
1364 // vk::Move is not copyable, so create it on heap and wrap into de::SharedPtr
1365 PipelineCachePtr pipelineCachePtr(new Move<VkPipelineCache>());
1366 *pipelineCachePtr =
1367 createPipelineCache(vki, vk, physicalDevice, vkDevice, mergeCacheParam->srcCacheTypes[sourceIdx]);
1368
1369 sourceCachePtrs[sourceIdx] = pipelineCachePtr;
1370 sourceCaches[sourceIdx] = **pipelineCachePtr;
1371 }
1372 }
1373
1374 // Merge the caches
1375 VK_CHECK(
1376 vk.mergePipelineCaches(vkDevice, *m_cacheMerged, static_cast<uint32_t>(sourceCaches.size()), &sourceCaches[0]));
1377 }
1378
1379 Move<VkPipelineCache> MergeCacheTestInstance::createPipelineCache(const InstanceInterface &vki,
1380 const DeviceInterface &vk,
1381 VkPhysicalDevice physicalDevice, VkDevice device,
1382 MergeCacheType type)
1383 {
1384 VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
1385 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
1386 DE_NULL, // const void* pNext;
1387 0u, // VkPipelineCacheCreateFlags flags;
1388 0u, // uintptr_t initialDataSize;
1389 DE_NULL, // const void* pInitialData;
1390 };
1391
1392 GraphicsPipelineWrapper localPipeline(vki, vk, physicalDevice, device, m_context.getDeviceExtensions(),
1393 m_param->getPipelineConstructionType());
1394 GraphicsPipelineWrapper localMissPipeline(vki, vk, physicalDevice, device, m_context.getDeviceExtensions(),
1395 m_param->getPipelineConstructionType());
1396
1397 switch (type)
1398 {
1399 case MERGE_CACHE_EMPTY:
1400 {
1401 return vk::createPipelineCache(vk, device, &pipelineCacheCreateInfo);
1402 }
1403 case MERGE_CACHE_FROM_DATA:
1404 {
1405 // Create a cache with init data from m_cache
        size_t dataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(device, *m_cache, (uintptr_t *)&dataSize, DE_NULL));

        std::vector<uint8_t> data(dataSize);
        VK_CHECK(vk.getPipelineCacheData(device, *m_cache, (uintptr_t *)&dataSize, &data[0]));

        pipelineCacheCreateInfo.initialDataSize = data.size();
        pipelineCacheCreateInfo.pInitialData    = &data[0];
        return vk::createPipelineCache(vk, device, &pipelineCacheCreateInfo);
    }
    case MERGE_CACHE_HIT:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_EMPTY);

        preparePipelineWrapper(localPipeline, *ret);

        return ret;
    }
    case MERGE_CACHE_MISS:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_EMPTY);

        preparePipelineWrapper(localMissPipeline, *ret, true);

        return ret;
    }
    case MERGE_CACHE_MISS_AND_HIT:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_EMPTY);

        preparePipelineWrapper(localPipeline, *ret);
        preparePipelineWrapper(localMissPipeline, *ret, true);

        return ret;
    }
    case MERGE_CACHE_MERGED:
    {
        Move<VkPipelineCache> cache1 = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_FROM_DATA);
        Move<VkPipelineCache> cache2 = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_HIT);
        Move<VkPipelineCache> cache3 = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_MISS);

        const VkPipelineCache sourceCaches[] = {*cache1, *cache2, *cache3};

        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MERGE_CACHE_EMPTY);

        // Merge the caches
        VK_CHECK(vk.mergePipelineCaches(device, *ret, DE_LENGTH_OF_ARRAY(sourceCaches), sourceCaches));

        return ret;
    }
    }
    TCU_FAIL("unhandled merge cache type");
}

void MergeCacheTestInstance::preparePipelines(void)
{
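    // Build the reference pipeline with the original cache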
    preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_NO_CACHE], *m_cache);

    // Create pipeline from merged cache
    preparePipelineWrapper(m_pipeline[PIPELINE_CACHE_NDX_CACHED], *m_cacheMerged);
}

class CacheHeaderTest : public GraphicsCacheTest
{
public:
    CacheHeaderTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
        : GraphicsCacheTest(testContext, name, param)
    {
    }
    virtual ~CacheHeaderTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

class CacheHeaderTestInstance : public GraphicsCacheTestInstance
{
public:
    CacheHeaderTestInstance(Context &context, const CacheTestParam *param);
    virtual ~CacheHeaderTestInstance(void);

protected:
    uint8_t *m_data;

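    // Mirrors the layout of the version-one pipeline cache header defined by the Vulkan spec: four 32-bit fields
    // (total header length, header version, vendor ID, device ID) followed by VK_UUID_SIZE bytes of cache UUID.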
    struct CacheHeader
    {
        uint32_t HeaderLength;
        uint32_t HeaderVersion;
        uint32_t VendorID;
        uint32_t DeviceID;
        uint8_t PipelineCacheUUID[VK_UUID_SIZE];
    } m_header;
};

TestInstance *CacheHeaderTest::createInstance(Context &context) const
{
    return new CacheHeaderTestInstance(context, &m_param);
}

CacheHeaderTestInstance::CacheHeaderTestInstance(Context &context, const CacheTestParam *param)
    : GraphicsCacheTestInstance(context, param)
    , m_data(DE_NULL)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the pipeline cache data and validate its header
    {
        // Read back the full cache blob from m_cache
        size_t dataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));

        if (dataSize < sizeof(m_header))
            TCU_THROW(TestError, "Pipeline cache size is smaller than header size");

        m_data = new uint8_t[dataSize];
        DE_ASSERT(m_data);
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        deMemcpy(&m_header, m_data, sizeof(m_header));

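        // The version-one header is 16 bytes of fixed fields plus VK_UUID_SIZE bytes of UUID; the vendor ID,
        // device ID and cache UUID are expected to match the properties of the device that produced the data.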
        if (m_header.HeaderLength - VK_UUID_SIZE != 16)
            TCU_THROW(TestError, "Invalid header size!");

        if (m_header.HeaderVersion != 1)
            TCU_THROW(TestError, "Invalid header version!");

        if (m_header.VendorID != m_context.getDeviceProperties().vendorID)
            TCU_THROW(TestError, "Invalid header vendor ID!");

        if (m_header.DeviceID != m_context.getDeviceProperties().deviceID)
            TCU_THROW(TestError, "Invalid header device ID!");

        if (deMemCmp(&m_header.PipelineCacheUUID, &m_context.getDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) !=
            0)
            TCU_THROW(TestError, "Invalid header pipeline cache UUID!");
    }
}

CacheHeaderTestInstance::~CacheHeaderTestInstance(void)
{
    delete[] m_data;
}

class InvalidSizeTest : public GraphicsCacheTest
{
public:
    InvalidSizeTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param);
    virtual ~InvalidSizeTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

InvalidSizeTest::InvalidSizeTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
    : GraphicsCacheTest(testContext, name, param)
{
}

class InvalidSizeTestInstance : public GraphicsCacheTestInstance
{
public:
    InvalidSizeTestInstance(Context &context, const CacheTestParam *param);
    virtual ~InvalidSizeTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *InvalidSizeTest::createInstance(Context &context) const
{
    return new InvalidSizeTestInstance(context, &m_param);
}

InvalidSizeTestInstance::InvalidSizeTestInstance(Context &context, const CacheTestParam *param)
    : GraphicsCacheTestInstance(context, param)
    , m_data(DE_NULL)
    , m_zeroBlock(DE_NULL)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Query the cache data with deliberately too-small sizes
    try
    {
        // Query the full size of the cache data available from m_cache
        size_t dataSize      = 0u;
        size_t savedDataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));
        savedDataSize = dataSize;

        // If the requested size is less than the maximum size that can be retrieved by the pipeline cache,
        // at most that many bytes will be written to pData and vkGetPipelineCacheData returns VK_INCOMPLETE.
        dataSize--;

        m_data = new uint8_t[savedDataSize];
        deMemset(m_data, 0, savedDataSize);
        DE_ASSERT(m_data);
        if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
            TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");

        delete[] m_data;
        m_data = DE_NULL;

        // If the value of dataSize is less than what is necessary to store the header,
        // nothing will be written to pData and zero will be written to dataSize.
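        // (16 fixed header bytes plus VK_UUID_SIZE is the size of the version-one header; one byte less is
        // guaranteed to be too small to hold it.)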
        dataSize = 16 + VK_UUID_SIZE - 1;

        m_data = new uint8_t[savedDataSize];
        deMemset(m_data, 0, savedDataSize);
        DE_ASSERT(m_data);
        if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
            TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");

        m_zeroBlock = new uint8_t[savedDataSize];
        deMemset(m_zeroBlock, 0, savedDataSize);
        if (deMemCmp(m_data, m_zeroBlock, savedDataSize) != 0 || dataSize != 0)
            TCU_THROW(TestError, "Data needs to be empty and data size should be 0 when invalid size is passed to "
                                 "GetPipelineCacheData!");
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

InvalidSizeTestInstance::~InvalidSizeTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}

class ZeroSizeTest : public GraphicsCacheTest
{
public:
    ZeroSizeTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param);
    virtual ~ZeroSizeTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

ZeroSizeTest::ZeroSizeTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
    : GraphicsCacheTest(testContext, name, param)
{
}

class ZeroSizeTestInstance : public GraphicsCacheTestInstance
{
public:
    ZeroSizeTestInstance(Context &context, const CacheTestParam *param);
    virtual ~ZeroSizeTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *ZeroSizeTest::createInstance(Context &context) const
{
    return new ZeroSizeTestInstance(context, &m_param);
}

ZeroSizeTestInstance::ZeroSizeTestInstance(Context &context, const CacheTestParam *param)
    : GraphicsCacheTestInstance(context, param)
    , m_data(DE_NULL)
    , m_zeroBlock(DE_NULL)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the cache data, then create a cache that gets a zero initialDataSize together with a non-null pointer
    try
    {
        // Read back the cache data from m_cache
        size_t dataSize = 0u;

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));

        m_data = new uint8_t[dataSize];
        deMemset(m_data, 0, dataSize);
        DE_ASSERT(m_data);

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        {
            // Create a cache with initialDataSize = 0 & pInitialData != NULL
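            // An initialDataSize of zero means pInitialData is ignored, so creation is expected to succeed and
            // produce an initially empty cache.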
            const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                      // const void* pNext;
                0u,                                           // VkPipelineCacheCreateFlags flags;
                0u,                                           // uintptr_t initialDataSize;
                m_data,                                       // const void* pInitialData;
            };

            const Unique<VkPipelineCache> pipelineCache(createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo));
        }
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

ZeroSizeTestInstance::~ZeroSizeTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}

class InvalidBlobTest : public GraphicsCacheTest
{
public:
    InvalidBlobTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param);
    virtual ~InvalidBlobTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

InvalidBlobTest::InvalidBlobTest(tcu::TestContext &testContext, const std::string &name, const CacheTestParam *param)
    : GraphicsCacheTest(testContext, name, param)
{
}

class InvalidBlobTestInstance : public GraphicsCacheTestInstance
{
public:
    InvalidBlobTestInstance(Context &context, const CacheTestParam *param);
    virtual ~InvalidBlobTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *InvalidBlobTest::createInstance(Context &context) const
{
    return new InvalidBlobTestInstance(context, &m_param);
}

InvalidBlobTestInstance::InvalidBlobTestInstance(Context &context, const CacheTestParam *param)
    : GraphicsCacheTestInstance(context, param)
    , m_data(DE_NULL)
    , m_zeroBlock(DE_NULL)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the cache data, then corrupt individual header fields and feed the blob back as initial data
    try
    {
        // Read back the cache data from m_cache
        size_t dataSize = 0u;

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, DE_NULL));

        m_data = new uint8_t[dataSize];
        deMemset(m_data, 0, dataSize);
        DE_ASSERT(m_data);

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        const struct
        {
            uint32_t offset;
            std::string name;
        } headerLayout[] = {
            {4u, "pipeline cache header version"}, {8u, "vendor ID"}, {12u, "device ID"}, {16u, "pipeline cache ID"}};
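        // The offsets above are the byte positions of the corresponding header fields in the blob. Corrupting any
        // of them makes the initial data unusable; the test still expects vkCreatePipelineCache to succeed, with
        // the implementation ignoring the incompatible data rather than failing.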

        for (uint32_t i = 0u; i < DE_LENGTH_OF_ARRAY(headerLayout); i++)
        {
            m_context.getTestContext().getLog()
                << tcu::TestLog::Message << "Creating pipeline cache using previously retrieved data with invalid "
                << headerLayout[i].name << tcu::TestLog::EndMessage;

            m_data[headerLayout[i].offset] =
                (uint8_t)(m_data[headerLayout[i].offset] + 13u); // Add arbitrary number to create an invalid value

            const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                DE_NULL,                                      // const void* pNext;
                0u,                                           // VkPipelineCacheCreateFlags flags;
                dataSize,                                     // uintptr_t initialDataSize;
                m_data,                                       // const void* pInitialData;
            };

            const Unique<VkPipelineCache> pipelineCache(createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo));

            m_data[headerLayout[i].offset] =
                (uint8_t)(m_data[headerLayout[i].offset] - 13u); // Return to original value
        }
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

InvalidBlobTestInstance::~InvalidBlobTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}
} // namespace

tcu::TestCaseGroup *createCacheTests(tcu::TestContext &testCtx, PipelineConstructionType pipelineConstructionType)
{
    de::MovePtr<tcu::TestCaseGroup> cacheTests(new tcu::TestCaseGroup(testCtx, "cache"));

    const VkShaderStageFlags vertFragStages     = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    const VkShaderStageFlags vertGeomFragStages = vertFragStages | VK_SHADER_STAGE_GEOMETRY_BIT;
    const VkShaderStageFlags vertTesFragStages =
        vertFragStages | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;

    // Graphics Pipeline Tests
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(new tcu::TestCaseGroup(testCtx, "graphics_tests"));

        const CacheTestParam testParams[] = {
            CacheTestParam(pipelineConstructionType, vertFragStages, false),
            CacheTestParam(pipelineConstructionType, vertGeomFragStages, false),
            CacheTestParam(pipelineConstructionType, vertTesFragStages, false),
            CacheTestParam(pipelineConstructionType, vertFragStages, false,
                           VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT),
            CacheTestParam(pipelineConstructionType, vertGeomFragStages, false,
                           VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT),
            CacheTestParam(pipelineConstructionType, vertTesFragStages, false,
                           VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT),
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            graphicsTests->addChild(newTestCase<GraphicsCacheTest>(testCtx, &testParams[i]));

        cacheTests->addChild(graphicsTests.release());
    }

    // Graphics pipeline tests that recreate the pipeline from data returned by vkGetPipelineCacheData
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(new tcu::TestCaseGroup(testCtx, "pipeline_from_get_data"));

        const CacheTestParam testParams[] = {
            CacheTestParam(pipelineConstructionType, vertFragStages, false),
            CacheTestParam(pipelineConstructionType, vertGeomFragStages, false),
            CacheTestParam(pipelineConstructionType, vertTesFragStages, false),
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            graphicsTests->addChild(newTestCase<PipelineFromCacheTest>(testCtx, &testParams[i]));

        cacheTests->addChild(graphicsTests.release());
    }

    // Graphics pipeline tests that recreate the pipeline from deliberately incomplete cache data
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(
            new tcu::TestCaseGroup(testCtx, "pipeline_from_incomplete_get_data"));

        const CacheTestParam testParams[] = {
            CacheTestParam(pipelineConstructionType, vertFragStages, false),
            CacheTestParam(pipelineConstructionType, vertGeomFragStages, false),
            CacheTestParam(pipelineConstructionType, vertTesFragStages, false),
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            graphicsTests->addChild(newTestCase<PipelineFromIncompleteCacheTest>(testCtx, &testParams[i]));

        cacheTests->addChild(graphicsTests.release());
    }

    // Compute Pipeline Tests - don't repeat those tests for graphics pipeline library
    if (pipelineConstructionType == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC)
    {
        de::MovePtr<tcu::TestCaseGroup> computeTests(new tcu::TestCaseGroup(testCtx, "compute_tests"));

        const CacheTestParam testParams[] = {
            CacheTestParam(pipelineConstructionType, VK_SHADER_STAGE_COMPUTE_BIT, false),
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            computeTests->addChild(newTestCase<ComputeCacheTest>(testCtx, &testParams[i]));

        cacheTests->addChild(computeTests.release());
    }

    // Merge cache Tests
    {
        de::MovePtr<tcu::TestCaseGroup> mergeTests(new tcu::TestCaseGroup(testCtx, "merge"));

        const CacheTestParam testParams[] = {
            CacheTestParam(pipelineConstructionType, vertFragStages, true),
            CacheTestParam(pipelineConstructionType, vertGeomFragStages, true),
            CacheTestParam(pipelineConstructionType, vertTesFragStages, true),
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
        {

            de::MovePtr<tcu::TestCaseGroup> mergeStagesTests(
                new tcu::TestCaseGroup(testCtx, testParams[i].generateTestName().c_str()));

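            // Exercise every destination cache type against every combination of one or two source cache types.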
            for (uint32_t destTypeIdx = 0u; destTypeIdx <= MERGE_CACHE_TYPE_LAST; destTypeIdx++)
                for (uint32_t srcType1Idx = 0u; srcType1Idx <= MERGE_CACHE_TYPE_LAST; srcType1Idx++)
                {

                    MergeCacheTestParam cacheTestParam;
                    cacheTestParam.destCacheType = MergeCacheType(destTypeIdx);
                    cacheTestParam.srcCacheTypes.push_back(MergeCacheType(srcType1Idx));

                    // merge with one cache
                    {
                        std::string testName = "src_" + getMergeCacheTypesStr(cacheTestParam.srcCacheTypes) + "_dst_" +
                                               getMergeCacheTypeStr(cacheTestParam.destCacheType);
                        mergeStagesTests->addChild(
                            new MergeCacheTest(testCtx, testName.c_str(), &testParams[i], &cacheTestParam));
                    }

                    // merge with two caches
                    for (uint32_t srcType2Idx = 0u; srcType2Idx <= MERGE_CACHE_TYPE_LAST; srcType2Idx++)
                    {
                        MergeCacheTestParam cacheTestParamTwoCaches = cacheTestParam;

                        cacheTestParamTwoCaches.srcCacheTypes.push_back(MergeCacheType(srcType2Idx));

                        std::string testName = "src_" + getMergeCacheTypesStr(cacheTestParamTwoCaches.srcCacheTypes) +
                                               "_dst_" + getMergeCacheTypeStr(cacheTestParamTwoCaches.destCacheType);
                        mergeStagesTests->addChild(
                            new MergeCacheTest(testCtx, testName.c_str(), &testParams[i], &cacheTestParamTwoCaches));
                    }
                }
            mergeTests->addChild(mergeStagesTests.release());
        }
        cacheTests->addChild(mergeTests.release());
    }

    // Misc Tests
    {
        de::MovePtr<tcu::TestCaseGroup> miscTests(new tcu::TestCaseGroup(testCtx, "misc_tests"));

        const CacheTestParam testParam(pipelineConstructionType, vertFragStages, false);

        miscTests->addChild(new CacheHeaderTest(testCtx, "cache_header_test", &testParam));

        miscTests->addChild(new InvalidSizeTest(testCtx, "invalid_size_test", &testParam));

        miscTests->addChild(new ZeroSizeTest(testCtx, "zero_size_test", &testParam));

        miscTests->addChild(new InvalidBlobTest(testCtx, "invalid_blob_test", &testParam));

        cacheTests->addChild(miscTests.release());
    }

    return cacheTests.release();
}

} // namespace pipeline

} // namespace vkt
