// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"

#include <cstring>
#include <tuple>

namespace vk {

PipelineCache::SpirvBinaryKey::SpirvBinaryKey(const sw::SpirvBinary &spirv,
                                              const VkSpecializationInfo *specializationInfo,
                                              bool robustBufferAccess,
                                              bool optimize)
    : spirv(spirv)
    , specializationInfo(specializationInfo)
    , robustBufferAccess(robustBufferAccess)
    , optimize(optimize)
{
}

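// Strict weak ordering over SPIR-V binary keys, suitable for an ordered-map
// key: compare by code size first (cheapest), then by code contents, then by
// the flags that affect code generation, and finally by the specialization
// constants.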
operator <(const SpirvBinaryKey & other) const32 bool PipelineCache::SpirvBinaryKey::operator<(const SpirvBinaryKey &other) const
33 {
34 if(spirv.size() != other.spirv.size())
35 {
36 return spirv.size() < other.spirv.size();
37 }
38
39 int cmp = memcmp(spirv.data(), other.spirv.data(), spirv.size() * sizeof(uint32_t));
40 if(cmp != 0)
41 {
42 return cmp < 0;
43 }
44
45 if(robustBufferAccess != other.robustBufferAccess)
46 {
47 return !robustBufferAccess && other.robustBufferAccess;
48 }
49
50 if(optimize != other.optimize)
51 {
52 return !optimize && other.optimize;
53 }
54
55 return (specializationInfo < other.specializationInfo);
56 }
57
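// A compute program is identified by the shader it was built from and the
// pipeline layout it was built against.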
PipelineCache::ComputeProgramKey::ComputeProgramKey(uint64_t shaderIdentifier, uint32_t pipelineLayoutIdentifier)
    : shaderIdentifier(shaderIdentifier)
    , pipelineLayoutIdentifier(pipelineLayoutIdentifier)
{}

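// std::tie gives a lexicographic comparison over (shaderIdentifier,
// pipelineLayoutIdentifier) without spelling out the tie-breaking by hand.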
operator <(const ComputeProgramKey & other) const63 bool PipelineCache::ComputeProgramKey::operator<(const ComputeProgramKey &other) const
64 {
65 return std::tie(shaderIdentifier, pipelineLayoutIdentifier) < std::tie(other.shaderIdentifier, other.pipelineLayoutIdentifier);
66 }
67
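// The serialized cache data must begin with the header mandated by the
// Vulkan specification (VkPipelineCacheHeaderVersionOne): header length,
// header version, vendor ID, device ID, and the pipeline cache UUID.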
PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem)
    : dataSize(ComputeRequiredAllocationSize(pCreateInfo))
    , data(reinterpret_cast<uint8_t *>(mem))
{
	CacheHeader *header = reinterpret_cast<CacheHeader *>(mem);
	header->headerLength = sizeof(CacheHeader);
	header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendorID = VENDOR_ID;
	header->deviceID = DEVICE_ID;
	memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);

	if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
	{
		memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
	}
}

PipelineCache::~PipelineCache()
{
	spirvShaders.clear();
	computePrograms.clear();
}

void PipelineCache::destroy(const VkAllocationCallbacks *pAllocator)
{
	vk::freeHostMemory(data, pAllocator);
}

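// The backing allocation holds the mandatory header followed by any initial
// data supplied at cache creation.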
size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo)
{
	return pCreateInfo->initialDataSize + sizeof(CacheHeader);
}

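// Implements the usual Vulkan two-call idiom: when pData is null, report the
// required size; otherwise copy the serialized cache out. As a
// simplification, anything other than an exact size match is reported as
// VK_INCOMPLETE instead of performing a partial copy.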
VkResult PipelineCache::getData(size_t *pDataSize, void *pData)
{
	if(!pData)
	{
		*pDataSize = dataSize;
		return VK_SUCCESS;
	}

	if(*pDataSize != dataSize)
	{
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}

	if(*pDataSize > 0)
	{
		memcpy(pData, data, *pDataSize);
	}

	return VK_SUCCESS;
}

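// vkMergePipelineCaches: fold the contents of each source cache into this
// one. Valid usage guarantees that dstCache does not appear in pSrcCaches,
// so locking both this cache and the source cache cannot self-deadlock.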
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
{
	for(uint32_t i = 0; i < srcCacheCount; i++)
	{
		PipelineCache *srcCache = Cast(pSrcCaches[i]);

		{
			marl::lock thisLock(spirvShadersMutex);
			marl::lock srcLock(srcCache->spirvShadersMutex);
			spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
		}

		{
			marl::lock thisLock(computeProgramsMutex);
			marl::lock srcLock(srcCache->computeProgramsMutex);
			computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
		}
	}

	return VK_SUCCESS;
}

}  // namespace vk