1 /*-------------------------------------------------------------------------
2 * Vulkan CTS Framework
3 * --------------------
4 *
5 * Copyright (c) 2021 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Defines a class for handling resources (programs, pipelines, files, etc.)
22 *//*--------------------------------------------------------------------*/
23
24 #include "vkResourceInterface.hpp"
25 #include "vkQueryUtil.hpp"
26
27 #ifdef CTS_USES_VULKANSC
28 #include <functional>
29 #include <fstream>
30 #include "vkSafetyCriticalUtil.hpp"
31 #include "vkRefUtil.hpp"
32 #include "tcuCommandLine.hpp"
33 #include "vksCacheBuilder.hpp"
34 #include "vksSerializer.hpp"
35 #include "vkApiVersion.hpp"
36 using namespace vksc_server::json;
37 #endif // CTS_USES_VULKANSC
38
39 namespace vk
40 {
41
42 ResourceInterface::ResourceInterface(tcu::TestContext &testCtx)
43 : m_testCtx(testCtx)
44 #ifdef CTS_USES_VULKANSC
45 , m_commandPoolIndex(0u)
46 , m_resourceCounter(0u)
47 , m_statCurrent(resetDeviceObjectReservationCreateInfo())
48 , m_statMax(resetDeviceObjectReservationCreateInfo())
49 , m_enabledHandleDestroy(true)
50 #endif // CTS_USES_VULKANSC
51 {
52 #ifdef CTS_USES_VULKANSC
53 // pipelineCacheRequestCount does not account for the single createPipelineCache call that happens only in the subprocess
54 m_statCurrent.pipelineCacheRequestCount = 1u;
55 m_statMax.pipelineCacheRequestCount = 1u;
56 #endif // CTS_USES_VULKANSC
57 }
58
59 ResourceInterface::~ResourceInterface()
60 {
61 }
62
63 void ResourceInterface::initTestCase(const std::string &casePath)
64 {
65 m_currentTestPath = casePath;
66 }
67
68 const std::string &ResourceInterface::getCasePath() const
69 {
70 return m_currentTestPath;
71 }
72
73 #ifdef CTS_USES_VULKANSC
74 void ResourceInterface::initApiVersion(const uint32_t version)
75 {
76 const ApiVersion apiVersion = unpackVersion(version);
77 const bool vulkanSC = (apiVersion.variantNum == 1);
78
79 m_version = tcu::Maybe<uint32_t>(version);
80 m_vulkanSC = vulkanSC;
81 }
82
83 bool ResourceInterface::isVulkanSC(void) const
84 {
85 return m_vulkanSC.get();
86 }
87
88 uint64_t ResourceInterface::incResourceCounter()
89 {
90 return ++m_resourceCounter;
91 }
92
93 std::mutex &ResourceInterface::getStatMutex()
94 {
95 return m_mutex;
96 }
97
98 VkDeviceObjectReservationCreateInfo &ResourceInterface::getStatCurrent()
99 {
100 return m_statCurrent;
101 }
102
103 VkDeviceObjectReservationCreateInfo &ResourceInterface::getStatMax()
104 {
105 return m_statMax;
106 }
107
108 const VkDeviceObjectReservationCreateInfo &ResourceInterface::getStatMax() const
109 {
110 return m_statMax;
111 }
112
113 void ResourceInterface::setHandleDestroy(bool value)
114 {
115 m_enabledHandleDestroy = value;
116 }
117
118 bool ResourceInterface::isEnabledHandleDestroy() const
119 {
120 return m_enabledHandleDestroy;
121 }
122
123 void ResourceInterface::removeRedundantObjects()
124 {
125 // At the end of the day we only need to export objects used in pipelines.
126 // The remaining objects may be removed from the m_pipelineInput collections as redundant
127 std::set<VkSamplerYcbcrConversion> samplerYcbcrConversionsInPipeline;
128 std::set<VkSampler> samplersInPipeline;
129 std::set<VkShaderModule> shadersInPipeline;
130 std::set<VkRenderPass> renderPassesInPipeline;
131 std::set<VkPipelineLayout> pipelineLayoutsInPipeline;
132 std::set<VkDescriptorSetLayout> descriptorSetLayoutsInPipeline;
133
134 Context jsonReader;
135
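// First pass: parse every stored pipeline JSON and collect the handles of the shader modules,
// render passes and pipeline layouts it references.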
136 for (auto it = begin(m_pipelineInput.pipelines); it != end(m_pipelineInput.pipelines); ++it)
137 {
138 if (it->pipelineContents.find("VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO") != std::string::npos)
139 {
140 VkGraphicsPipelineCreateInfo gpCI;
141 deMemset(&gpCI, 0, sizeof(gpCI));
142 readJSON_VkGraphicsPipelineCreateInfo(jsonReader, it->pipelineContents, gpCI);
143
144 for (uint32_t i = 0; i < gpCI.stageCount; ++i)
145 shadersInPipeline.insert(gpCI.pStages[i].module);
146 renderPassesInPipeline.insert(gpCI.renderPass);
147 pipelineLayoutsInPipeline.insert(gpCI.layout);
148 }
149 else if (it->pipelineContents.find("VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO") != std::string::npos)
150 {
151 VkComputePipelineCreateInfo cpCI;
152 deMemset(&cpCI, 0, sizeof(cpCI));
153 readJSON_VkComputePipelineCreateInfo(jsonReader, it->pipelineContents, cpCI);
154
155 shadersInPipeline.insert(cpCI.stage.module);
156 pipelineLayoutsInPipeline.insert(cpCI.layout);
157 }
158 else
159 TCU_THROW(InternalError, "Could not recognize pipeline type");
160 }
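// Second pass: erase objects that no pipeline references. While keeping pipeline layouts, descriptor set
// layouts and samplers, also collect the descriptor set layouts, immutable samplers and Ycbcr conversions
// they reference so that those dependencies are kept as well.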
161 for (auto it = begin(m_pipelineInput.shaderModules); it != end(m_pipelineInput.shaderModules);)
162 {
163 if (shadersInPipeline.find(it->first) == end(shadersInPipeline))
164 it = m_pipelineInput.shaderModules.erase(it);
165 else
166 ++it;
167 }
168 for (auto it = begin(m_pipelineInput.renderPasses); it != end(m_pipelineInput.renderPasses);)
169 {
170 if (renderPassesInPipeline.find(it->first) == end(renderPassesInPipeline))
171 it = m_pipelineInput.renderPasses.erase(it);
172 else
173 ++it;
174 }
175 for (auto it = begin(m_pipelineInput.pipelineLayouts); it != end(m_pipelineInput.pipelineLayouts);)
176 {
177 if (pipelineLayoutsInPipeline.find(it->first) == end(pipelineLayoutsInPipeline))
178 {
179 it = m_pipelineInput.pipelineLayouts.erase(it);
180 }
181 else
182 {
183 VkPipelineLayoutCreateInfo plCI;
184 deMemset(&plCI, 0, sizeof(plCI));
185 readJSON_VkPipelineLayoutCreateInfo(jsonReader, it->second, plCI);
186 for (uint32_t i = 0; i < plCI.setLayoutCount; ++i)
187 descriptorSetLayoutsInPipeline.insert(plCI.pSetLayouts[i]);
188 ++it;
189 }
190 }
191 for (auto it = begin(m_pipelineInput.descriptorSetLayouts); it != end(m_pipelineInput.descriptorSetLayouts);)
192 {
193 if (descriptorSetLayoutsInPipeline.find(it->first) == end(descriptorSetLayoutsInPipeline))
194 it = m_pipelineInput.descriptorSetLayouts.erase(it);
195 else
196 {
197 VkDescriptorSetLayoutCreateInfo dsCI;
198 deMemset(&dsCI, 0, sizeof(dsCI));
199 readJSON_VkDescriptorSetLayoutCreateInfo(jsonReader, it->second, dsCI);
200
201 for (uint32_t i = 0; i < dsCI.bindingCount; ++i)
202 {
203 if (dsCI.pBindings[i].pImmutableSamplers == NULL)
204 continue;
205 for (uint32_t j = 0; j < dsCI.pBindings[i].descriptorCount; ++j)
206 {
207 if (dsCI.pBindings[i].pImmutableSamplers[j] == DE_NULL)
208 continue;
209 samplersInPipeline.insert(dsCI.pBindings[i].pImmutableSamplers[j]);
210 }
211 }
212 ++it;
213 }
214 }
215
216 for (auto it = begin(m_pipelineInput.samplers); it != end(m_pipelineInput.samplers);)
217 {
218 if (samplersInPipeline.find(it->first) == end(samplersInPipeline))
219 it = m_pipelineInput.samplers.erase(it);
220 else
221 {
222 VkSamplerCreateInfo sCI;
223 deMemset(&sCI, 0, sizeof(sCI));
224 readJSON_VkSamplerCreateInfo(jsonReader, it->second, sCI);
225
226 if (sCI.pNext != DE_NULL)
227 {
228 VkSamplerYcbcrConversionInfo *info = (VkSamplerYcbcrConversionInfo *)(sCI.pNext);
229 if (info->sType == VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO)
230 samplerYcbcrConversionsInPipeline.insert(info->conversion);
231 }
232 ++it;
233 }
234 }
235 for (auto it = begin(m_pipelineInput.samplerYcbcrConversions); it != end(m_pipelineInput.samplerYcbcrConversions);)
236 {
237 if (samplerYcbcrConversionsInPipeline.find(it->first) == end(samplerYcbcrConversionsInPipeline))
238 it = m_pipelineInput.samplerYcbcrConversions.erase(it);
239 else
240 ++it;
241 }
242 }
243
244 void ResourceInterface::finalizeCommandBuffers()
245 {
246 // We have information about command buffer sizes
247 // Now we have to convert it into command pool sizes
248 std::map<uint64_t, std::size_t> cpToIndex;
249 for (std::size_t i = 0; i < m_commandPoolMemoryConsumption.size(); ++i)
250 cpToIndex.insert({m_commandPoolMemoryConsumption[i].commandPool, i});
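// Fold each command buffer's memory consumption into its owning command pool and count the command buffers per pool.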
251 for (const auto &memC : m_commandBufferMemoryConsumption)
252 {
253 std::size_t j = cpToIndex[memC.second.commandPool];
254 m_commandPoolMemoryConsumption[j].updateValues(memC.second.maxCommandPoolAllocated,
255 memC.second.maxCommandPoolReservedSize,
256 memC.second.maxCommandBufferAllocated);
257 m_commandPoolMemoryConsumption[j].commandBufferCount++;
258 }
259 // Each m_commandPoolMemoryConsumption element must have at least one command buffer ( see DeviceDriverSC::createCommandPoolHandlerNorm() )
260 // As a result we have to ensure that commandBufferRequestCount is not less than the number of command pools
261 m_statMax.commandBufferRequestCount =
262 de::max(uint32_t(m_commandPoolMemoryConsumption.size()), m_statMax.commandBufferRequestCount);
263 }
264
265 std::vector<uint8_t> ResourceInterface::exportData() const
266 {
267 vksc_server::VulkanDataTransmittedFromMainToSubprocess vdtfmtsp(m_pipelineInput, m_statMax,
268 m_commandPoolMemoryConsumption, m_pipelineSizes);
269
270 return vksc_server::Serialize(vdtfmtsp);
271 }
272
273 void ResourceInterface::importData(std::vector<uint8_t> &importText) const
274 {
275 vksc_server::VulkanDataTransmittedFromMainToSubprocess vdtfmtsp =
276 vksc_server::Deserialize<vksc_server::VulkanDataTransmittedFromMainToSubprocess>(importText);
277
278 m_pipelineInput = vdtfmtsp.pipelineCacheInput;
279 m_statMax = vdtfmtsp.memoryReservation;
280 m_commandPoolMemoryConsumption = vdtfmtsp.commandPoolMemoryConsumption;
281 m_pipelineSizes = vdtfmtsp.pipelineSizes;
282 }
283
284 void ResourceInterface::registerObjectHash(uint64_t handle, std::size_t hashValue) const
285 {
286 m_objectHashes[handle] = hashValue;
287 }
288
289 const std::map<uint64_t, std::size_t> &ResourceInterface::getObjectHashes() const
290 {
291 return m_objectHashes;
292 }
293
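// Helper describing, for a single pool entry size, how many pipelines of that size may be needed
// (counts are summed within a test, and the maximum is taken across tests).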
294 struct PipelinePoolSizeInfo
295 {
296 uint32_t maxTestCount;
297 uint32_t size;
298 };
299
300 void ResourceInterface::preparePipelinePoolSizes()
301 {
302 std::map<std::string, std::vector<PipelinePoolSizeInfo>> pipelineInfoPerTest;
303
304 // Step 1: collect information about all pipelines in each test, group by size
305 for (const auto &pipeline : m_pipelineInput.pipelines)
306 {
307 auto it = std::find_if(begin(m_pipelineSizes), end(m_pipelineSizes),
308 vksc_server::PipelineIdentifierEqual(pipeline.id));
309 if (it == end(m_pipelineSizes))
310 TCU_THROW(InternalError, "VkPipelinePoolEntrySizeCreateInfo not created for pipelineIdentifier");
311
312 PipelinePoolSizeInfo ppsi{it->count, it->size};
313
314 for (const auto &test : pipeline.tests)
315 {
316 auto pit = pipelineInfoPerTest.find(test);
317 if (pit == end(pipelineInfoPerTest))
318 pit = pipelineInfoPerTest.insert({test, std::vector<PipelinePoolSizeInfo>()}).first;
319 // group by the same sizes in a test
320 bool found = false;
321 for (size_t i = 0; i < pit->second.size(); ++i)
322 {
323 if (pit->second[i].size == ppsi.size)
324 {
325 pit->second[i].maxTestCount += ppsi.maxTestCount;
326 found = true;
327 break;
328 }
329 }
330 if (!found)
331 pit->second.push_back(ppsi);
332 }
333 }
334
335 // Step 2: choose pipeline pool sizes
336 std::vector<PipelinePoolSizeInfo> finalPoolSizes;
337 for (const auto &pInfo : pipelineInfoPerTest)
338 {
339 for (const auto &ppsi1 : pInfo.second)
340 {
341 auto it = std::find_if(begin(finalPoolSizes), end(finalPoolSizes),
342 [&ppsi1](const PipelinePoolSizeInfo &x) { return (x.size == ppsi1.size); });
343 if (it != end(finalPoolSizes))
344 it->maxTestCount = de::max(it->maxTestCount, ppsi1.maxTestCount);
345 else
346 finalPoolSizes.push_back(ppsi1);
347 }
348 }
349
350 // Step 3: convert results to VkPipelinePoolSize
351 m_pipelinePoolSizes.clear();
352 for (const auto &ppsi : finalPoolSizes)
353 {
354 VkPipelinePoolSize poolSize = {
355 VK_STRUCTURE_TYPE_PIPELINE_POOL_SIZE, // VkStructureType sType;
356 DE_NULL, // const void* pNext;
357 ppsi.size, // VkDeviceSize poolEntrySize;
358 ppsi.maxTestCount // uint32_t poolEntryCount;
359 };
360 m_pipelinePoolSizes.emplace_back(poolSize);
361 }
362 }
363
364 std::vector<VkPipelinePoolSize> ResourceInterface::getPipelinePoolSizes() const
365 {
366 return m_pipelinePoolSizes;
367 }
368
369 void ResourceInterface::fillPoolEntrySize(vk::VkPipelineOfflineCreateInfo &pipelineIdentifier) const
370 {
371 auto it = std::find_if(begin(m_pipelineSizes), end(m_pipelineSizes),
372 vksc_server::PipelineIdentifierEqual(pipelineIdentifier));
373 if (it == end(m_pipelineSizes))
374 TCU_THROW(InternalError, "VkPipelinePoolEntrySizeCreateInfo not created for pipelineIdentifier");
375 pipelineIdentifier.poolEntrySize = it->size;
376 }
377
378 vksc_server::VulkanCommandMemoryConsumption ResourceInterface::getNextCommandPoolSize()
379 {
380 if (m_commandPoolMemoryConsumption.empty())
381 return vksc_server::VulkanCommandMemoryConsumption();
382
383 vksc_server::VulkanCommandMemoryConsumption result = m_commandPoolMemoryConsumption[m_commandPoolIndex];
384 // the modulo operation is just a safeguard against an excessive number of requests
385 m_commandPoolIndex = (m_commandPoolIndex + 1) % uint32_t(m_commandPoolMemoryConsumption.size());
386 return result;
387 }
388
389 std::size_t ResourceInterface::getCacheDataSize() const
390 {
391 return m_cacheData.size();
392 }
393
394 const uint8_t *ResourceInterface::getCacheData() const
395 {
396 return m_cacheData.data();
397 }
398
399 VkPipelineCache ResourceInterface::getPipelineCache(VkDevice device) const
400 {
401 auto pit = m_pipelineCache.find(device);
402 if (pit == end(m_pipelineCache))
403 TCU_THROW(InternalError, "m_pipelineCache not found for this device");
404 return pit->second.get()->get();
405 }
406
407 #endif // CTS_USES_VULKANSC
408
409 ResourceInterfaceStandard::ResourceInterfaceStandard(tcu::TestContext &testCtx) : ResourceInterface(testCtx)
410 {
411 }
412
413 void ResourceInterfaceStandard::initDevice(DeviceInterface &deviceInterface, VkDevice device)
414 {
415 // ResourceInterfaceStandard is a class for running Vulkan SC tests on a normal Vulkan driver.
416 // CTS does not have a vkCreateShaderModule function defined for the Vulkan SC driver, but we need this function,
417 // so the ResourceInterfaceStandard class must keep its own vkCreateShaderModule function pointer.
418 // Moreover, we create additional function pointers for vkCreateGraphicsPipelines, vkCreateComputePipelines, etc.
419 // Note: although ResourceInterfaceStandard also exists in normal Vulkan tests, those tests only use the initDevice and buildProgram functions.
420 // The other functions are called from within DeviceDriverSC, which does not exist in those tests (the DeviceDriver class is used instead).
421 m_createShaderModuleFunc[device] =
422 (CreateShaderModuleFunc)deviceInterface.getDeviceProcAddr(device, "vkCreateShaderModule");
423 m_createGraphicsPipelinesFunc[device] =
424 (CreateGraphicsPipelinesFunc)deviceInterface.getDeviceProcAddr(device, "vkCreateGraphicsPipelines");
425 m_createComputePipelinesFunc[device] =
426 (CreateComputePipelinesFunc)deviceInterface.getDeviceProcAddr(device, "vkCreateComputePipelines");
427 #ifdef CTS_USES_VULKANSC
428 if (m_testCtx.getCommandLine().isSubProcess())
429 {
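// Subprocess only: wrap the pipeline cache data imported from the main process in a read-only,
// application-managed VkPipelineCache for this device.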
430 if (m_cacheData.size() > 0)
431 {
432 VkPipelineCacheCreateInfo pCreateInfo = {
433 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
434 DE_NULL, // const void* pNext;
435 VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
436 VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
437 m_cacheData.size(), // uintptr_t initialDataSize;
438 m_cacheData.data() // const void* pInitialData;
439 };
440 m_pipelineCache[device] = de::SharedPtr<Move<VkPipelineCache>>(
441 new Move<VkPipelineCache>(createPipelineCache(deviceInterface, device, &pCreateInfo, DE_NULL)));
442 }
443 }
444 #endif // CTS_USES_VULKANSC
445 }
446
447 void ResourceInterfaceStandard::deinitDevice(VkDevice device)
448 {
449 #ifdef CTS_USES_VULKANSC
450 if (m_testCtx.getCommandLine().isSubProcess())
451 {
452 m_pipelineCache.erase(device);
453 }
454 #else
455 DE_UNREF(device);
456 #endif // CTS_USES_VULKANSC
457 }
458
459 #ifdef CTS_USES_VULKANSC
460
461 void ResourceInterfaceStandard::registerDeviceFeatures(VkDevice device, const VkDeviceCreateInfo *pCreateInfo) const
462 {
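// Remember the device's enabled features and extensions as JSON, so they can later be attached to
// every pipeline recorded for this device.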
463 VkPhysicalDeviceFeatures2 *chainedFeatures = (VkPhysicalDeviceFeatures2 *)findStructureInChain(
464 pCreateInfo->pNext, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2);
465 if (chainedFeatures != NULL)
466 {
467 m_deviceFeatures[device] = writeJSON_pNextChain(pCreateInfo->pNext);
468 }
469 else
470 {
471 VkPhysicalDeviceFeatures2 deviceFeatures2 = initVulkanStructure();
472 if (pCreateInfo->pEnabledFeatures != NULL)
473 deviceFeatures2.features = *(pCreateInfo->pEnabledFeatures);
474
475 deviceFeatures2.pNext = (void *)pCreateInfo->pNext;
476 m_deviceFeatures[device] = writeJSON_VkPhysicalDeviceFeatures2(deviceFeatures2);
477 }
478
479 std::vector<std::string> extensions;
480 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i)
481 extensions.push_back(pCreateInfo->ppEnabledExtensionNames[i]);
482 m_deviceExtensions[device] = extensions;
483 }
484
485 void ResourceInterfaceStandard::unregisterDeviceFeatures(VkDevice device) const
486 {
487 m_deviceFeatures.erase(device);
488 m_deviceExtensions.erase(device);
489 }
490
491 VkResult ResourceInterfaceStandard::createShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
492 const VkAllocationCallbacks *pAllocator,
493 VkShaderModule *pShaderModule, bool normalMode) const
494 {
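// normalMode: on Vulkan SC just reserve a handle and register its hash (pipelines are taken from the
// pipeline cache instead), otherwise create the module through the real driver's entry point.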
495 if (normalMode)
496 {
497 if (isVulkanSC())
498 {
499 *pShaderModule = VkShaderModule(++m_resourceCounter);
500 registerObjectHash(pShaderModule->getInternal(),
501 calculateShaderModuleHash(*pCreateInfo, getObjectHashes()));
502 return VK_SUCCESS;
503 }
504 else
505 {
506 const auto it = m_createShaderModuleFunc.find(device);
507 if (it != end(m_createShaderModuleFunc))
508 {
509 VkResult result = it->second(device, pCreateInfo, pAllocator, pShaderModule);
510 registerObjectHash(pShaderModule->getInternal(),
511 calculateShaderModuleHash(*pCreateInfo, getObjectHashes()));
512 return result;
513 }
514 TCU_THROW(InternalError, "vkCreateShaderModule not defined");
515 }
516 }
517
518 // main process: store the VkShaderModuleCreateInfo in JSON format. Shaders will be sent later for m_pipelineCache creation (and passed through a file to another process)
519 *pShaderModule = VkShaderModule(++m_resourceCounter);
520 registerObjectHash(pShaderModule->getInternal(), calculateShaderModuleHash(*pCreateInfo, getObjectHashes()));
521 m_pipelineInput.shaderModules.insert({*pShaderModule, writeJSON_VkShaderModuleCreateInfo(*pCreateInfo)});
522 return VK_SUCCESS;
523 }
524
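// Build a VkPipelineOfflineCreateInfo whose pipelineIdentifier is derived from a hash of the pipeline
// create info and the hashes of the objects it references.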
525 VkPipelineOfflineCreateInfo makeGraphicsPipelineIdentifier(const std::string &testPath,
526 const VkGraphicsPipelineCreateInfo &gpCI,
527 const std::map<uint64_t, std::size_t> &objectHashes)
528 {
529 DE_UNREF(testPath);
530 VkPipelineOfflineCreateInfo pipelineID = resetPipelineOfflineCreateInfo();
531 std::size_t hashValue = calculateGraphicsPipelineHash(gpCI, objectHashes);
532 memcpy(pipelineID.pipelineIdentifier, &hashValue, sizeof(std::size_t));
533 return pipelineID;
534 }
535
536 VkPipelineOfflineCreateInfo makeComputePipelineIdentifier(const std::string &testPath,
537 const VkComputePipelineCreateInfo &cpCI,
538 const std::map<uint64_t, std::size_t> &objectHashes)
539 {
540 DE_UNREF(testPath);
541 VkPipelineOfflineCreateInfo pipelineID = resetPipelineOfflineCreateInfo();
542 std::size_t hashValue = calculateComputePipelineHash(cpCI, objectHashes);
543 memcpy(pipelineID.pipelineIdentifier, &hashValue, sizeof(std::size_t));
544 return pipelineID;
545 }
546
547 VkResult ResourceInterfaceStandard::createGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache,
548 uint32_t createInfoCount,
549 const VkGraphicsPipelineCreateInfo *pCreateInfos,
550 const VkAllocationCallbacks *pAllocator,
551 VkPipeline *pPipelines, bool normalMode) const
552 {
553 DE_UNREF(pipelineCache);
554
555 // build pipeline identifiers (if required), make a copy of pCreateInfos
556 std::vector<VkPipelineOfflineCreateInfo> pipelineIDs;
557 std::vector<uint8_t> idInPNextChain;
558 std::vector<VkGraphicsPipelineCreateInfo> pCreateInfoCopies;
559
560 for (uint32_t i = 0; i < createInfoCount; ++i)
561 {
562 pCreateInfoCopies.push_back(pCreateInfos[i]);
563
564 // Check if test added pipeline identifier on its own
565 VkPipelineOfflineCreateInfo *idInfo = (VkPipelineOfflineCreateInfo *)findStructureInChain(
566 pCreateInfos[i].pNext, VK_STRUCTURE_TYPE_PIPELINE_OFFLINE_CREATE_INFO);
567 if (idInfo == DE_NULL)
568 {
569 pipelineIDs.push_back(
570 makeGraphicsPipelineIdentifier(m_currentTestPath, pCreateInfos[i], getObjectHashes()));
571 idInPNextChain.push_back(0);
572 }
573 else
574 {
575 pipelineIDs.push_back(*idInfo);
576 idInPNextChain.push_back(1);
577 }
578
579 if (normalMode)
580 fillPoolEntrySize(pipelineIDs.back());
581 }
582
583 // reset unused pointers so that JSON generation does not crash
584 std::vector<VkPipelineViewportStateCreateInfo> viewportStateCopies(createInfoCount);
585 if (!normalMode)
586 {
587 for (uint32_t i = 0; i < createInfoCount; ++i)
588 {
589 bool vertexInputStateRequired = false;
590 bool inputAssemblyStateRequired = false;
591 bool tessellationStateRequired = false;
592 bool viewportStateRequired = false;
593 bool viewportStateViewportsRequired = false;
594 bool viewportStateScissorsRequired = false;
595 bool multiSampleStateRequired = false;
596 bool depthStencilStateRequired = false;
597 bool colorBlendStateRequired = false;
598
599 if (pCreateInfoCopies[i].pStages != DE_NULL)
600 {
601 for (uint32_t j = 0; j < pCreateInfoCopies[i].stageCount; ++j)
602 {
603 if (pCreateInfoCopies[i].pStages[j].stage == VK_SHADER_STAGE_VERTEX_BIT)
604 {
605 vertexInputStateRequired = true;
606 inputAssemblyStateRequired = true;
607 }
608 if (pCreateInfoCopies[i].pStages[j].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
609 {
610 tessellationStateRequired = true;
611 }
612 }
613 }
614 if (pCreateInfoCopies[i].pDynamicState != DE_NULL)
615 {
616 if (pCreateInfoCopies[i].pDynamicState->pDynamicStates != DE_NULL)
617 for (uint32_t j = 0; j < pCreateInfoCopies[i].pDynamicState->dynamicStateCount; ++j)
618 {
619 if (pCreateInfoCopies[i].pDynamicState->pDynamicStates[j] == VK_DYNAMIC_STATE_VIEWPORT ||
620 pCreateInfoCopies[i].pDynamicState->pDynamicStates[j] ==
621 VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)
622 {
623 viewportStateRequired = true;
624 viewportStateViewportsRequired = true;
625 }
626 if (pCreateInfoCopies[i].pDynamicState->pDynamicStates[j] == VK_DYNAMIC_STATE_SCISSOR ||
627 pCreateInfoCopies[i].pDynamicState->pDynamicStates[j] ==
628 VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)
629 {
630 viewportStateRequired = true;
631 viewportStateScissorsRequired = true;
632 }
633 if (pCreateInfoCopies[i].pDynamicState->pDynamicStates[j] ==
634 VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT)
635 viewportStateRequired = true;
636 }
637 }
638 if (pCreateInfoCopies[i].pRasterizationState != DE_NULL)
639 {
640 if (pCreateInfoCopies[i].pRasterizationState->rasterizerDiscardEnable == VK_FALSE)
641 {
642 viewportStateRequired = true;
643 viewportStateViewportsRequired = true;
644 viewportStateScissorsRequired = true;
645 multiSampleStateRequired = true;
646 depthStencilStateRequired = true;
647 colorBlendStateRequired = true;
648 }
649 }
650 if (pCreateInfoCopies[i].pVertexInputState != DE_NULL && !vertexInputStateRequired)
651 pCreateInfoCopies[i].pVertexInputState = DE_NULL;
652 if (pCreateInfoCopies[i].pInputAssemblyState != DE_NULL && !inputAssemblyStateRequired)
653 pCreateInfoCopies[i].pInputAssemblyState = DE_NULL;
654 if (pCreateInfoCopies[i].pTessellationState != DE_NULL && !tessellationStateRequired)
655 pCreateInfoCopies[i].pTessellationState = DE_NULL;
656 if (pCreateInfoCopies[i].pViewportState != DE_NULL)
657 {
658 if (viewportStateRequired)
659 {
660 viewportStateCopies[i] = *(pCreateInfoCopies[i].pViewportState);
661 bool exchangeVP = false;
662 if (pCreateInfoCopies[i].pViewportState->pViewports != DE_NULL && !viewportStateViewportsRequired)
663 {
664 viewportStateCopies[i].pViewports = DE_NULL;
665 viewportStateCopies[i].viewportCount = 0u;
666 exchangeVP = true;
667 }
668 if (pCreateInfoCopies[i].pViewportState->pScissors != DE_NULL && !viewportStateScissorsRequired)
669 {
670 viewportStateCopies[i].pScissors = DE_NULL;
671 viewportStateCopies[i].scissorCount = 0u;
672 exchangeVP = true;
673 }
674 if (exchangeVP)
675 pCreateInfoCopies[i].pViewportState = &(viewportStateCopies[i]);
676 }
677 else
678 pCreateInfoCopies[i].pViewportState = DE_NULL;
679 }
680 if (pCreateInfoCopies[i].pMultisampleState != DE_NULL && !multiSampleStateRequired)
681 pCreateInfoCopies[i].pMultisampleState = DE_NULL;
682 if (pCreateInfoCopies[i].pDepthStencilState != DE_NULL && !depthStencilStateRequired)
683 pCreateInfoCopies[i].pDepthStencilState = DE_NULL;
684 if (pCreateInfoCopies[i].pColorBlendState != DE_NULL && !colorBlendStateRequired)
685 pCreateInfoCopies[i].pColorBlendState = DE_NULL;
686 }
687 }
688
689 // Include pipeline identifiers in the pNext chains of pCreateInfoCopies - skip this when the pipeline identifier was created inside the test
690 for (uint32_t i = 0; i < createInfoCount; ++i)
691 {
692 if (idInPNextChain[i] == 0)
693 {
694 pipelineIDs[i].pNext = pCreateInfoCopies[i].pNext;
695 pCreateInfoCopies[i].pNext = &pipelineIDs[i];
696 }
697 }
698
699 // subprocess: load graphics pipelines from our own m_pipelineCache
700 if (normalMode)
701 {
702 const auto it = m_createGraphicsPipelinesFunc.find(device);
703 if (it != end(m_createGraphicsPipelinesFunc))
704 {
705 auto pit = m_pipelineCache.find(device);
706 if (pit != end(m_pipelineCache))
707 {
708 VkPipelineCache pCache = pit->second->get();
709 return it->second(device, pCache, createInfoCount, pCreateInfoCopies.data(), pAllocator, pPipelines);
710 }
711 TCU_THROW(InternalError, "m_pipelineCache not initialized for this device");
712 }
713 TCU_THROW(InternalError, "vkCreateGraphicsPipelines not defined");
714 }
715
716 // main process: store pipelines in JSON format. Pipelines will be sent later for m_pipelineCache creation (and passed through a file to another process)
717 for (uint32_t i = 0; i < createInfoCount; ++i)
718 {
719 m_pipelineIdentifiers.insert({pPipelines[i], pipelineIDs[i]});
720
721 auto it = std::find_if(begin(m_pipelineInput.pipelines), end(m_pipelineInput.pipelines),
722 vksc_server::PipelineIdentifierEqual(pipelineIDs[i]));
723 pipelineIDs[i].pNext = DE_NULL;
724 if (it == end(m_pipelineInput.pipelines))
725 {
726 const auto &featIt = m_deviceFeatures.find(device);
727 if (featIt == end(m_deviceFeatures))
728 TCU_THROW(InternalError, "Can't find device features for this pipeline");
729 const auto &extIt = m_deviceExtensions.find(device);
730 if (extIt == end(m_deviceExtensions))
731 TCU_THROW(InternalError, "Can't find device extensions for this pipeline");
732
733 m_pipelineInput.pipelines.push_back(vksc_server::VulkanJsonPipelineDescription(
734 pipelineIDs[i], writeJSON_VkGraphicsPipelineCreateInfo(pCreateInfoCopies[i]), featIt->second,
735 extIt->second, m_currentTestPath));
736 }
737 else
738 it->add(m_currentTestPath);
739 }
740 return VK_SUCCESS;
741 }
742
743 VkResult ResourceInterfaceStandard::createComputePipelines(VkDevice device, VkPipelineCache pipelineCache,
744 uint32_t createInfoCount,
745 const VkComputePipelineCreateInfo *pCreateInfos,
746 const VkAllocationCallbacks *pAllocator,
747 VkPipeline *pPipelines, bool normalMode) const
748 {
749 DE_UNREF(pipelineCache);
750
751 // build pipeline identifiers (if required), make a copy of pCreateInfos
752 std::vector<VkPipelineOfflineCreateInfo> pipelineIDs;
753 std::vector<uint8_t> idInPNextChain;
754 std::vector<VkComputePipelineCreateInfo> pCreateInfoCopies;
755
756 for (uint32_t i = 0; i < createInfoCount; ++i)
757 {
758 pCreateInfoCopies.push_back(pCreateInfos[i]);
759
760 // Check if test added pipeline identifier on its own
761 VkPipelineOfflineCreateInfo *idInfo = (VkPipelineOfflineCreateInfo *)findStructureInChain(
762 pCreateInfos[i].pNext, VK_STRUCTURE_TYPE_PIPELINE_OFFLINE_CREATE_INFO);
763 if (idInfo == DE_NULL)
764 {
765 pipelineIDs.push_back(makeComputePipelineIdentifier(m_currentTestPath, pCreateInfos[i], getObjectHashes()));
766 idInPNextChain.push_back(0);
767 }
768 else
769 {
770 pipelineIDs.push_back(*idInfo);
771 idInPNextChain.push_back(1);
772 }
773
774 if (normalMode)
775 fillPoolEntrySize(pipelineIDs.back());
776 }
777
778 // Include pipeline identifiers in the pNext chains of pCreateInfoCopies - skip this when the pipeline identifier was created inside the test
779 for (uint32_t i = 0; i < createInfoCount; ++i)
780 {
781 if (idInPNextChain[i] == 0)
782 {
783 pipelineIDs[i].pNext = pCreateInfoCopies[i].pNext;
784 pCreateInfoCopies[i].pNext = &pipelineIDs[i];
785 }
786 }
787
788 // subprocess: load compute pipelines from our own pipeline cache
789 if (normalMode)
790 {
791 const auto it = m_createComputePipelinesFunc.find(device);
792 if (it != end(m_createComputePipelinesFunc))
793 {
794 auto pit = m_pipelineCache.find(device);
795 if (pit != end(m_pipelineCache))
796 {
797 VkPipelineCache pCache = pit->second->get();
798 return it->second(device, pCache, createInfoCount, pCreateInfoCopies.data(), pAllocator, pPipelines);
799 }
800 TCU_THROW(InternalError, "m_pipelineCache not initialized for this device");
801 }
802 TCU_THROW(InternalError, "vkCreateComputePipelines not defined");
803 }
804
805 // main process: store pipelines in JSON format. Pipelines will be sent later for m_pipelineCache creation (and passed through a file to another process)
806 for (uint32_t i = 0; i < createInfoCount; ++i)
807 {
808 m_pipelineIdentifiers.insert({pPipelines[i], pipelineIDs[i]});
809
810 auto it = std::find_if(begin(m_pipelineInput.pipelines), end(m_pipelineInput.pipelines),
811 vksc_server::PipelineIdentifierEqual(pipelineIDs[i]));
812 pipelineIDs[i].pNext = DE_NULL;
813 if (it == end(m_pipelineInput.pipelines))
814 {
815 const auto &featIt = m_deviceFeatures.find(device);
816 if (featIt == end(m_deviceFeatures))
817 TCU_THROW(InternalError, "Can't find device features for this pipeline");
818 const auto &extIt = m_deviceExtensions.find(device);
819 if (extIt == end(m_deviceExtensions))
820 TCU_THROW(InternalError, "Can't find device extensions for this pipeline");
821
822 m_pipelineInput.pipelines.push_back(vksc_server::VulkanJsonPipelineDescription(
823 pipelineIDs[i], writeJSON_VkComputePipelineCreateInfo(pCreateInfoCopies[i]), featIt->second,
824 extIt->second, m_currentTestPath));
825 }
826 else
827 it->add(m_currentTestPath);
828 }
829 return VK_SUCCESS;
830 }
831
832 void ResourceInterfaceStandard::destroyPipeline(VkDevice device, VkPipeline pipeline,
833 const VkAllocationCallbacks *pAllocator) const
834 {
835 DE_UNREF(device);
836 DE_UNREF(pAllocator);
837
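// Look up the identifier recorded at creation time and remove this pipeline's use from the recorded pipeline list.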
838 auto it = m_pipelineIdentifiers.find(pipeline);
839 if (it == end(m_pipelineIdentifiers))
840 TCU_THROW(InternalError, "Can't find pipeline");
841
842 auto pit = std::find_if(begin(m_pipelineInput.pipelines), end(m_pipelineInput.pipelines),
843 vksc_server::PipelineIdentifierEqual(it->second));
844 if (pit == end(m_pipelineInput.pipelines))
845 TCU_THROW(InternalError, "Can't find pipeline identifier");
846 pit->remove();
847 }
848
849 void ResourceInterfaceStandard::createRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
850 const VkAllocationCallbacks *pAllocator,
851 VkRenderPass *pRenderPass) const
852 {
853 DE_UNREF(device);
854 DE_UNREF(pAllocator);
855 m_pipelineInput.renderPasses.insert({*pRenderPass, writeJSON_VkRenderPassCreateInfo(*pCreateInfo)});
856 }
857
858 void ResourceInterfaceStandard::createRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
859 const VkAllocationCallbacks *pAllocator,
860 VkRenderPass *pRenderPass) const
861 {
862 DE_UNREF(device);
863 DE_UNREF(pAllocator);
864 m_pipelineInput.renderPasses.insert({*pRenderPass, writeJSON_VkRenderPassCreateInfo2(*pCreateInfo)});
865 }
866
867 void ResourceInterfaceStandard::createPipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
868 const VkAllocationCallbacks *pAllocator,
869 VkPipelineLayout *pPipelineLayout) const
870 {
871 DE_UNREF(device);
872 DE_UNREF(pAllocator);
873 m_pipelineInput.pipelineLayouts.insert({*pPipelineLayout, writeJSON_VkPipelineLayoutCreateInfo(*pCreateInfo)});
874 }
875
876 void ResourceInterfaceStandard::createDescriptorSetLayout(VkDevice device,
877 const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
878 const VkAllocationCallbacks *pAllocator,
879 VkDescriptorSetLayout *pSetLayout) const
880 {
881 DE_UNREF(device);
882 DE_UNREF(pAllocator);
883 m_pipelineInput.descriptorSetLayouts.insert({*pSetLayout, writeJSON_VkDescriptorSetLayoutCreateInfo(*pCreateInfo)});
884 }
885
886 void ResourceInterfaceStandard::createSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
887 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const
888 {
889 DE_UNREF(device);
890 DE_UNREF(pAllocator);
891 m_pipelineInput.samplers.insert({*pSampler, writeJSON_VkSamplerCreateInfo(*pCreateInfo)});
892 }
893
894 void ResourceInterfaceStandard::createSamplerYcbcrConversion(VkDevice device,
895 const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
896 const VkAllocationCallbacks *pAllocator,
897 VkSamplerYcbcrConversion *pYcbcrConversion) const
898 {
899 DE_UNREF(device);
900 DE_UNREF(pAllocator);
901 m_pipelineInput.samplerYcbcrConversions.insert(
902 {*pYcbcrConversion, writeJSON_VkSamplerYcbcrConversionCreateInfo(*pCreateInfo)});
903 }
904
905 void ResourceInterfaceStandard::createCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
906 const VkAllocationCallbacks *pAllocator,
907 VkCommandPool *pCommandPool) const
908 {
909 DE_UNREF(device);
910 DE_UNREF(pCreateInfo);
911 DE_UNREF(pAllocator);
912 m_commandPoolMemoryConsumption.push_back(vksc_server::VulkanCommandMemoryConsumption(pCommandPool->getInternal()));
913 }
914
915 void ResourceInterfaceStandard::allocateCommandBuffers(VkDevice device,
916 const VkCommandBufferAllocateInfo *pAllocateInfo,
917 VkCommandBuffer *pCommandBuffers) const
918 {
919 DE_UNREF(device);
920 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i)
921 {
922 m_commandBufferMemoryConsumption.insert({pCommandBuffers[i], vksc_server::VulkanCommandMemoryConsumption(
923 pAllocateInfo->commandPool.getInternal())});
924 }
925 }
926
927 void ResourceInterfaceStandard::increaseCommandBufferSize(VkCommandBuffer commandBuffer, VkDeviceSize commandSize) const
928 {
929 auto it = m_commandBufferMemoryConsumption.find(commandBuffer);
930 if (it == end(m_commandBufferMemoryConsumption))
931 TCU_THROW(InternalError, "Unregistered command buffer");
932
933 it->second.updateValues(commandSize, commandSize, commandSize);
934 }
935
936 void ResourceInterfaceStandard::resetCommandPool(VkDevice device, VkCommandPool commandPool,
937 VkCommandPoolResetFlags flags) const
938 {
939 DE_UNREF(device);
940 DE_UNREF(flags);
941
942 for (auto &memC : m_commandBufferMemoryConsumption)
943 {
944 if (memC.second.commandPool == commandPool.getInternal())
945 memC.second.resetValues();
946 }
947 }
948
949 void ResourceInterfaceStandard::importPipelineCacheData(const PlatformInterface &vkp, VkInstance instance,
950 const InstanceInterface &vki, VkPhysicalDevice physicalDevice,
951 uint32_t queueIndex)
952 {
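// If an offline pipeline compiler was given on the command line, build the cache with it;
// otherwise build the pipeline cache using the normal Vulkan driver.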
953 if (!std::string(m_testCtx.getCommandLine().getPipelineCompilerPath()).empty())
954 {
955 m_cacheData = vksc_server::buildOfflinePipelineCache(
956 m_pipelineInput, std::string(m_testCtx.getCommandLine().getPipelineCompilerPath()),
957 std::string(m_testCtx.getCommandLine().getPipelineCompilerDataDir()),
958 std::string(m_testCtx.getCommandLine().getPipelineCompilerArgs()),
959 std::string(m_testCtx.getCommandLine().getPipelineCompilerOutputFile()),
960 std::string(m_testCtx.getCommandLine().getPipelineCompilerLogFile()),
961 std::string(m_testCtx.getCommandLine().getPipelineCompilerFilePrefix()));
962 }
963 else
964 {
965 m_cacheData = vksc_server::buildPipelineCache(m_pipelineInput, vkp, instance, vki, physicalDevice, queueIndex);
966 }
967
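// Query Vulkan SC 1.0 properties to learn whether pipeline memory can be recycled, then derive
// per-pipeline pool entry sizes from the cache data and compute the pipeline pool sizes.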
968 VkPhysicalDeviceVulkanSC10Properties vulkanSC10Properties = initVulkanStructure();
969 VkPhysicalDeviceProperties2 deviceProperties2 = initVulkanStructure(&vulkanSC10Properties);
970 vki.getPhysicalDeviceProperties2(physicalDevice, &deviceProperties2);
971
972 m_pipelineSizes = vksc_server::extractSizesFromPipelineCache(
973 m_pipelineInput, m_cacheData, uint32_t(m_testCtx.getCommandLine().getPipelineDefaultSize()),
974 vulkanSC10Properties.recyclePipelineMemory == VK_TRUE);
975 preparePipelinePoolSizes();
976 }
977
978 void ResourceInterfaceStandard::resetObjects()
979 {
980 m_pipelineInput = {};
981 m_objectHashes.clear();
982 m_commandPoolMemoryConsumption.clear();
983 m_commandPoolIndex = 0u;
984 m_commandBufferMemoryConsumption.clear();
985 m_resourceCounter = 0u;
986 m_statCurrent = resetDeviceObjectReservationCreateInfo();
987 m_statMax = resetDeviceObjectReservationCreateInfo();
988 // pipelineCacheRequestCount does not account for the single createPipelineCache call that happens only in the subprocess
989 m_statCurrent.pipelineCacheRequestCount = 1u;
990 m_statMax.pipelineCacheRequestCount = 1u;
991 m_cacheData.clear();
992 m_pipelineIdentifiers.clear();
993 m_pipelineSizes.clear();
994 m_pipelinePoolSizes.clear();
995 runGarbageCollection();
996 }
997
998 void ResourceInterfaceStandard::resetPipelineCaches()
999 {
1000 if (m_testCtx.getCommandLine().isSubProcess())
1001 {
1002 m_pipelineCache.clear();
1003 }
1004 }
1005
1006 #endif // CTS_USES_VULKANSC
1007
1008 vk::ProgramBinary *ResourceInterfaceStandard::compileProgram(const vk::ProgramIdentifier &progId,
1009 const vk::GlslSource &source,
1010 glu::ShaderProgramInfo *buildInfo,
1011 const tcu::CommandLine &commandLine)
1012 {
1013 DE_UNREF(progId);
1014 return vk::buildProgram(source, buildInfo, commandLine);
1015 }
1016
1017 vk::ProgramBinary *ResourceInterfaceStandard::compileProgram(const vk::ProgramIdentifier &progId,
1018 const vk::HlslSource &source,
1019 glu::ShaderProgramInfo *buildInfo,
1020 const tcu::CommandLine &commandLine)
1021 {
1022 DE_UNREF(progId);
1023 return vk::buildProgram(source, buildInfo, commandLine);
1024 }
1025
1026 vk::ProgramBinary *ResourceInterfaceStandard::compileProgram(const vk::ProgramIdentifier &progId,
1027 const vk::SpirVAsmSource &source,
1028 vk::SpirVProgramInfo *buildInfo,
1029 const tcu::CommandLine &commandLine)
1030 {
1031 DE_UNREF(progId);
1032 return vk::assembleProgram(source, buildInfo, commandLine);
1033 }
1034
1035 #ifdef CTS_USES_VULKANSC
1036
1037 ResourceInterfaceVKSC::ResourceInterfaceVKSC(tcu::TestContext &testCtx) : ResourceInterfaceStandard(testCtx)
1038 {
1039 m_address = std::string(testCtx.getCommandLine().getServerAddress());
1040 }
1041
1042 vksc_server::Server *ResourceInterfaceVKSC::getServer()
1043 {
1044 if (!m_server)
1045 {
1046 m_server = std::make_shared<vksc_server::Server>(m_address);
1047 }
1048 return m_server.get();
1049 }
1050
1051 bool ResourceInterfaceVKSC::noServer() const
1052 {
1053 return m_address.empty();
1054 }
1055
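// With a server address configured, shader compilation is forwarded to the vksc-server;
// without one we fall back to local compilation in ResourceInterfaceStandard.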
1056 vk::ProgramBinary *ResourceInterfaceVKSC::compileProgram(const vk::ProgramIdentifier &progId,
1057 const vk::GlslSource &source,
1058 glu::ShaderProgramInfo *buildInfo,
1059 const tcu::CommandLine &commandLine)
1060 {
1061 if (noServer())
1062 return ResourceInterfaceStandard::compileProgram(progId, source, buildInfo, commandLine);
1063
1064 DE_UNREF(progId);
1065 DE_UNREF(buildInfo);
1066
1067 vksc_server::CompileShaderRequest request;
1068 request.source.active = "glsl";
1069 request.source.glsl = source;
1070 request.commandLine = commandLine.getInitialCmdLine();
1071 vksc_server::CompileShaderResponse response;
1072 getServer()->SendRequest(request, response);
1073
1074 return new ProgramBinary(PROGRAM_FORMAT_SPIRV, response.binary.size(), response.binary.data());
1075 }
1076
1077 vk::ProgramBinary *ResourceInterfaceVKSC::compileProgram(const vk::ProgramIdentifier &progId,
1078 const vk::HlslSource &source,
1079 glu::ShaderProgramInfo *buildInfo,
1080 const tcu::CommandLine &commandLine)
1081 {
1082 if (noServer())
1083 return ResourceInterfaceStandard::compileProgram(progId, source, buildInfo, commandLine);
1084
1085 DE_UNREF(progId);
1086 DE_UNREF(buildInfo);
1087
1088 vksc_server::CompileShaderRequest request;
1089 request.source.active = "hlsl";
1090 request.source.hlsl = source;
1091 request.commandLine = commandLine.getInitialCmdLine();
1092 vksc_server::CompileShaderResponse response;
1093 getServer()->SendRequest(request, response);
1094
1095 return new ProgramBinary(PROGRAM_FORMAT_SPIRV, response.binary.size(), response.binary.data());
1096 }
1097
1098 vk::ProgramBinary *ResourceInterfaceVKSC::compileProgram(const vk::ProgramIdentifier &progId,
1099 const vk::SpirVAsmSource &source,
1100 vk::SpirVProgramInfo *buildInfo,
1101 const tcu::CommandLine &commandLine)
1102 {
1103 if (noServer())
1104 return ResourceInterfaceStandard::compileProgram(progId, source, buildInfo, commandLine);
1105
1106 DE_UNREF(progId);
1107 DE_UNREF(buildInfo);
1108
1109 vksc_server::CompileShaderRequest request;
1110 request.source.active = "spirv";
1111 request.source.spirv = source;
1112 request.commandLine = commandLine.getInitialCmdLine();
1113 vksc_server::CompileShaderResponse response;
1114 getServer()->SendRequest(request, response);
1115
1116 return new ProgramBinary(PROGRAM_FORMAT_SPIRV, response.binary.size(), response.binary.data());
1117 }
1118
1119 VkResult ResourceInterfaceVKSC::createShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
1120 const VkAllocationCallbacks *pAllocator,
1121 VkShaderModule *pShaderModule, bool normalMode) const
1122 {
1123 if (noServer() || !normalMode || !isVulkanSC())
1124 return ResourceInterfaceStandard::createShaderModule(device, pCreateInfo, pAllocator, pShaderModule,
1125 normalMode);
1126
1127 // We will reach this place only in one case:
1128 // - the server exists
1129 // - the subprocess asks to create a VkShaderModule that will later be ignored, because it will receive the whole pipeline from the server
1130 // (are there any tests which receive a VkShaderModule and do not use it in any pipeline?)
1131 *pShaderModule = VkShaderModule(++m_resourceCounter);
1132 registerObjectHash(pShaderModule->getInternal(), calculateShaderModuleHash(*pCreateInfo, getObjectHashes()));
1133 return VK_SUCCESS;
1134 }
1135
1136 void ResourceInterfaceVKSC::importPipelineCacheData(const PlatformInterface &vkp, VkInstance instance,
1137 const InstanceInterface &vki, VkPhysicalDevice physicalDevice,
1138 uint32_t queueIndex)
1139 {
1140 if (noServer())
1141 {
1142 ResourceInterfaceStandard::importPipelineCacheData(vkp, instance, vki, physicalDevice, queueIndex);
1143 return;
1144 }
1145
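// Ask the server to build the pipeline cache from the recorded pipeline input, passing the case
// fraction index (or -1 when case fractions are not used).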
1146 vksc_server::CreateCacheRequest request;
1147 request.input = m_pipelineInput;
1148 std::vector<int> caseFraction = m_testCtx.getCommandLine().getCaseFraction();
1149 request.caseFraction = caseFraction.empty() ? -1 : caseFraction[0];
1150
1151 vksc_server::CreateCacheResponse response;
1152 getServer()->SendRequest(request, response);
1153
1154 if (response.status)
1155 {
1156 m_cacheData = std::move(response.binary);
1157
1158 VkPhysicalDeviceVulkanSC10Properties vulkanSC10Properties = initVulkanStructure();
1159 VkPhysicalDeviceProperties2 deviceProperties2 = initVulkanStructure(&vulkanSC10Properties);
1160 vki.getPhysicalDeviceProperties2(physicalDevice, &deviceProperties2);
1161
1162 m_pipelineSizes = vksc_server::extractSizesFromPipelineCache(
1163 m_pipelineInput, m_cacheData, uint32_t(m_testCtx.getCommandLine().getPipelineDefaultSize()),
1164 vulkanSC10Properties.recyclePipelineMemory == VK_TRUE);
1165 preparePipelinePoolSizes();
1166 }
1167 else
1168 {
1169 TCU_THROW(InternalError,
1170 "Server did not return pipeline cache data when requested (check server log for details)");
1171 }
1172 }
1173
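// RAII helper: disables destruction of handles for its lifetime (via setHandleDestroy) and re-enables it on destruction.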
1174 MultithreadedDestroyGuard::MultithreadedDestroyGuard(de::SharedPtr<vk::ResourceInterface> resourceInterface)
1175 : m_resourceInterface{resourceInterface}
1176 {
1177 if (m_resourceInterface.get() != DE_NULL)
1178 m_resourceInterface->setHandleDestroy(false);
1179 }
1180
1181 MultithreadedDestroyGuard::~MultithreadedDestroyGuard()
1182 {
1183 if (m_resourceInterface.get() != DE_NULL)
1184 m_resourceInterface->setHandleDestroy(true);
1185 }
1186
1187 #endif // CTS_USES_VULKANSC
1188
1189 } // namespace vk
1190