// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipeline.hpp"

#include "VkDestroy.hpp"
#include "VkDevice.hpp"
#include "VkPipelineCache.hpp"
#include "VkPipelineLayout.hpp"
#include "VkRenderPass.hpp"
#include "VkShaderModule.hpp"
#include "VkStringify.hpp"
#include "Pipeline/ComputeProgram.hpp"
#include "Pipeline/SpirvShader.hpp"

#include "marl/trace.h"

#include "spirv-tools/optimizer.hpp"

#include <chrono>
#include <cstring>
#include <iostream>
#include <memory>
#include <unordered_map>
#include <vector>

namespace {

// optimizeSpirv() applies and freezes specializations into constants, and runs spirv-opt.
sw::SpirvBinary optimizeSpirv(const vk::PipelineCache::SpirvBinaryKey &key)
{
	const sw::SpirvBinary &code = key.getBinary();
	const VkSpecializationInfo *specializationInfo = key.getSpecializationInfo();
	bool optimize = key.getOptimization();

	spvtools::Optimizer opt{ vk::SPIRV_VERSION };

	opt.SetMessageConsumer([](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) {
		switch(level)
		{
		case SPV_MSG_FATAL: sw::warn("SPIR-V FATAL: %d:%d %s\n", int(position.line), int(position.column), message); break;
		case SPV_MSG_INTERNAL_ERROR: sw::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(position.line), int(position.column), message); break;
		case SPV_MSG_ERROR: sw::warn("SPIR-V ERROR: %d:%d %s\n", int(position.line), int(position.column), message); break;
		case SPV_MSG_WARNING: sw::warn("SPIR-V WARNING: %d:%d %s\n", int(position.line), int(position.column), message); break;
		case SPV_MSG_INFO: sw::trace("SPIR-V INFO: %d:%d %s\n", int(position.line), int(position.column), message); break;
		case SPV_MSG_DEBUG: sw::trace("SPIR-V DEBUG: %d:%d %s\n", int(position.line), int(position.column), message); break;
		default: sw::trace("SPIR-V MESSAGE: %d:%d %s\n", int(position.line), int(position.column), message); break;
		}
	});

	// If the pipeline uses specialization, apply the specializations before freezing
	if(specializationInfo)
	{
		std::unordered_map<uint32_t, std::vector<uint32_t>> specializations;
		const uint8_t *specializationData = static_cast<const uint8_t *>(specializationInfo->pData);

		for(uint32_t i = 0; i < specializationInfo->mapEntryCount; i++)
		{
			const VkSpecializationMapEntry &entry = specializationInfo->pMapEntries[i];
			const uint8_t *value_ptr = specializationData + entry.offset;
			std::vector<uint32_t> value(reinterpret_cast<const uint32_t *>(value_ptr),
			                            reinterpret_cast<const uint32_t *>(value_ptr + entry.size));
			specializations.emplace(entry.constantID, std::move(value));
		}

		opt.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(specializations));
	}
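
	// For reference, a minimal sketch of the application-side structures that feed this
	// path. Names and values here are hypothetical, not part of SwiftShader:
	//
	//   const uint32_t useFastPath = 1;  // value for constant_id = 7 in the shader
	//   VkSpecializationMapEntry entry = {};
	//   entry.constantID = 7;
	//   entry.offset = 0;
	//   entry.size = sizeof(uint32_t);
	//   VkSpecializationInfo specInfo = {};
	//   specInfo.mapEntryCount = 1;
	//   specInfo.pMapEntries = &entry;
	//   specInfo.dataSize = sizeof(useFastPath);
	//   specInfo.pData = &useFastPath;
	//   // ...assigned to VkPipelineShaderStageCreateInfo::pSpecializationInfo.
	//
	// Each map entry above becomes one constantID -> words mapping in `specializations`.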

	if(optimize)
	{
		// Remove DontInline flags so the optimizer force-inlines all functions,
		// as we currently don't support OpFunctionCall (b/141246700).
		opt.RegisterPass(spvtools::CreateRemoveDontInlinePass());

		// Full optimization list taken from spirv-opt.
		opt.RegisterPerformancePasses();
	}

	spvtools::OptimizerOptions optimizerOptions = {};
#if defined(NDEBUG)
	optimizerOptions.set_run_validator(false);
#else
	optimizerOptions.set_run_validator(true);
	spvtools::ValidatorOptions validatorOptions = {};
	validatorOptions.SetScalarBlockLayout(true);            // VK_EXT_scalar_block_layout
	validatorOptions.SetUniformBufferStandardLayout(true);  // VK_KHR_uniform_buffer_standard_layout
	validatorOptions.SetAllowLocalSizeId(true);             // VK_KHR_maintenance4
	optimizerOptions.set_validator_options(validatorOptions);
#endif

	sw::SpirvBinary optimized;
	opt.Run(code.data(), code.size(), &optimized, optimizerOptions);
	ASSERT(optimized.size() > 0);

	// Debugging aid: flip this to true to dump the SPIR-V disassembly before and after
	// optimization.
	if(false)
	{
		spvtools::SpirvTools core(vk::SPIRV_VERSION);
		std::string preOpt;
		core.Disassemble(code, &preOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
		std::string postOpt;
		core.Disassemble(optimized, &postOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
		std::cout << "PRE-OPT: " << preOpt << std::endl
		          << "POST-OPT: " << postOpt << std::endl;
	}

	return optimized;
}

std::shared_ptr<sw::ComputeProgram> createProgram(vk::Device *device, std::shared_ptr<sw::SpirvShader> shader, const vk::PipelineLayout *layout)
{
	MARL_SCOPED_EVENT("createProgram");

	vk::DescriptorSet::Bindings descriptorSets;  // TODO(b/129523279): Delay code generation until dispatch time.
	// TODO(b/119409619): use allocator.
	auto program = std::make_shared<sw::ComputeProgram>(device, shader, layout, descriptorSets);
	program->generate();
	program->finalize("ComputeProgram");

	return program;
}

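// PipelineCreationFeedback implements VK_EXT_pipeline_creation_feedback (promoted to core
// in Vulkan 1.3). A minimal sketch of how an application opts in, assuming a single shader
// stage; the variable names are illustrative, and the feedback structs must remain valid
// for the duration of pipeline creation:
//
//   VkPipelineCreationFeedback pipelineFeedback = {};
//   VkPipelineCreationFeedback stageFeedback = {};
//   VkPipelineCreationFeedbackCreateInfo feedbackInfo = {};
//   feedbackInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO;
//   feedbackInfo.pPipelineCreationFeedback = &pipelineFeedback;
//   feedbackInfo.pipelineStageCreationFeedbackCount = 1;
//   feedbackInfo.pPipelineStageCreationFeedbacks = &stageFeedback;
//   // ...chained into VkGraphicsPipelineCreateInfo::pNext or
//   // VkComputePipelineCreateInfo::pNext.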
class PipelineCreationFeedback
{
public:
	PipelineCreationFeedback(const VkGraphicsPipelineCreateInfo *pCreateInfo)
	    : pipelineCreationFeedback(GetPipelineCreationFeedback(pCreateInfo->pNext))
	{
		pipelineCreationBegins();
	}

	PipelineCreationFeedback(const VkComputePipelineCreateInfo *pCreateInfo)
	    : pipelineCreationFeedback(GetPipelineCreationFeedback(pCreateInfo->pNext))
	{
		pipelineCreationBegins();
	}

	~PipelineCreationFeedback()
	{
		pipelineCreationEnds();
	}

	void stageCreationBegins(uint32_t stage)
	{
		if(pipelineCreationFeedback && (stage < pipelineCreationFeedback->pipelineStageCreationFeedbackCount))
		{
			// Record stage creation begin time
			pipelineCreationFeedback->pPipelineStageCreationFeedbacks[stage].duration = now();
		}
	}

	void cacheHit(uint32_t stage)
	{
		if(pipelineCreationFeedback)
		{
			pipelineCreationFeedback->pPipelineCreationFeedback->flags |=
			    VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
			if(stage < pipelineCreationFeedback->pipelineStageCreationFeedbackCount)
			{
				pipelineCreationFeedback->pPipelineStageCreationFeedbacks[stage].flags |=
				    VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT;
			}
		}
	}

	void stageCreationEnds(uint32_t stage)
	{
		if(pipelineCreationFeedback && (stage < pipelineCreationFeedback->pipelineStageCreationFeedbackCount))
		{
			pipelineCreationFeedback->pPipelineStageCreationFeedbacks[stage].flags |=
			    VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
			pipelineCreationFeedback->pPipelineStageCreationFeedbacks[stage].duration =
			    now() - pipelineCreationFeedback->pPipelineStageCreationFeedbacks[stage].duration;
		}
	}

	void pipelineCreationError()
	{
		clear();
		pipelineCreationFeedback = nullptr;
	}

private:
	static const VkPipelineCreationFeedbackCreateInfo *GetPipelineCreationFeedback(const void *pNext)
	{
		return vk::GetExtendedStruct<VkPipelineCreationFeedbackCreateInfo>(pNext, VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
	}

	void pipelineCreationBegins()
	{
		if(pipelineCreationFeedback)
		{
			clear();

			// Record pipeline creation begin time
			pipelineCreationFeedback->pPipelineCreationFeedback->duration = now();
		}
	}

	void pipelineCreationEnds()
	{
		if(pipelineCreationFeedback)
		{
			pipelineCreationFeedback->pPipelineCreationFeedback->flags |=
			    VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT;
			pipelineCreationFeedback->pPipelineCreationFeedback->duration =
			    now() - pipelineCreationFeedback->pPipelineCreationFeedback->duration;
		}
	}

	void clear()
	{
		if(pipelineCreationFeedback)
		{
			// Clear all flags and durations
			pipelineCreationFeedback->pPipelineCreationFeedback->flags = 0;
			pipelineCreationFeedback->pPipelineCreationFeedback->duration = 0;
			for(uint32_t i = 0; i < pipelineCreationFeedback->pipelineStageCreationFeedbackCount; i++)
			{
				pipelineCreationFeedback->pPipelineStageCreationFeedbacks[i].flags = 0;
				pipelineCreationFeedback->pPipelineStageCreationFeedbacks[i].duration = 0;
			}
		}
	}

	uint64_t now()
	{
		return std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now()).time_since_epoch().count();
	}

	const VkPipelineCreationFeedbackCreateInfo *pipelineCreationFeedback = nullptr;
};

bool getRobustBufferAccess(VkPipelineRobustnessBufferBehaviorEXT behavior, bool inheritRobustBufferAccess)
{
	// Based on behavior:
	// - <not provided>:
	//   * For pipelines, use the device's robustBufferAccess
	//   * For shaders, use the pipeline's robustBufferAccess
	//     Note that the pipeline's robustBufferAccess is already set to the device's if not overridden.
	// - Default: Use the device's robustBufferAccess
	// - Disabled / Enabled: Override to disabled or enabled
	//
	// This function is passed "DEFAULT" when an override is not provided, and
	// inheritRobustBufferAccess is appropriately set to the device's or pipeline's
	// robustBufferAccess.
	switch(behavior)
	{
	case VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT:
		return inheritRobustBufferAccess;
	case VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT:
		return false;
	case VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT:
		return true;
	default:
		UNSUPPORTED("Unsupported robustness behavior");
		return true;
	}
}

bool getRobustBufferAccess(const VkPipelineRobustnessCreateInfoEXT *overrideRobustness, bool deviceRobustBufferAccess, bool inheritRobustBufferAccess)
{
	VkPipelineRobustnessBufferBehaviorEXT storageBehavior = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
	VkPipelineRobustnessBufferBehaviorEXT uniformBehavior = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
	VkPipelineRobustnessBufferBehaviorEXT vertexBehavior = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;

	if(overrideRobustness)
	{
		storageBehavior = overrideRobustness->storageBuffers;
		uniformBehavior = overrideRobustness->uniformBuffers;
		vertexBehavior = overrideRobustness->vertexInputs;
		inheritRobustBufferAccess = deviceRobustBufferAccess;
	}

	bool storageRobustBufferAccess = getRobustBufferAccess(storageBehavior, inheritRobustBufferAccess);
	bool uniformRobustBufferAccess = getRobustBufferAccess(uniformBehavior, inheritRobustBufferAccess);
	bool vertexRobustBufferAccess = getRobustBufferAccess(vertexBehavior, inheritRobustBufferAccess);

	// Note: in the initial implementation, enabling robust access for any buffer enables it for
	// all.  TODO(b/185122256) split robustBufferAccess in the pipeline and shaders into three
	// categories and provide robustness for storage, uniform and vertex buffers accordingly.
	return storageRobustBufferAccess || uniformRobustBufferAccess || vertexRobustBufferAccess;
}

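// A sketch of the application-side override consumed below (VK_EXT_pipeline_robustness);
// the field values are illustrative:
//
//   VkPipelineRobustnessCreateInfoEXT robustness = {};
//   robustness.sType = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT;
//   robustness.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
//   robustness.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
//   robustness.vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
//   robustness.images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT;
//   // ...chained into the pipeline create info's pNext (pipeline-wide) or into a
//   // VkPipelineShaderStageCreateInfo::pNext (per stage).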
bool getPipelineRobustBufferAccess(const void *pNext, vk::Device *device)
{
	const VkPipelineRobustnessCreateInfoEXT *overrideRobustness = vk::GetExtendedStruct<VkPipelineRobustnessCreateInfoEXT>(pNext, VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
	const bool deviceRobustBufferAccess = device->getEnabledFeatures().robustBufferAccess;

	// For pipelines, there's no robustBufferAccess to inherit from.  Default and no-override
	// both lead to using the device's robustBufferAccess.
	return getRobustBufferAccess(overrideRobustness, deviceRobustBufferAccess, deviceRobustBufferAccess);
}

bool getPipelineStageRobustBufferAccess(const void *pNext, vk::Device *device, bool pipelineRobustBufferAccess)
{
	const VkPipelineRobustnessCreateInfoEXT *overrideRobustness = vk::GetExtendedStruct<VkPipelineRobustnessCreateInfoEXT>(pNext, VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
	const bool deviceRobustBufferAccess = device->getEnabledFeatures().robustBufferAccess;

	return getRobustBufferAccess(overrideRobustness, deviceRobustBufferAccess, pipelineRobustBufferAccess);
}

}  // anonymous namespace

namespace vk {

Pipeline::Pipeline(PipelineLayout *layout, Device *device, bool robustBufferAccess)
    : layout(layout)
    , device(device)
    , robustBufferAccess(robustBufferAccess)
{
	if(layout)
	{
		layout->incRefCount();
	}
}

void Pipeline::destroy(const VkAllocationCallbacks *pAllocator)
{
	destroyPipeline(pAllocator);

	if(layout)
	{
		vk::release(static_cast<VkPipelineLayout>(*layout), pAllocator);
	}
}

GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, void *mem, Device *device)
    : Pipeline(vk::Cast(pCreateInfo->layout), device, getPipelineRobustBufferAccess(pCreateInfo->pNext, device))
    , state(device, pCreateInfo, layout)
{
	// Either the vertex input interface comes from a pipeline library, or the
	// VkGraphicsPipelineCreateInfo itself.  Same with shaders.
	const auto *libraryCreateInfo = GetExtendedStruct<VkPipelineLibraryCreateInfoKHR>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR);
	bool vertexInputInterfaceInLibraries = false;
	bool fragmentOutputInterfaceInLibraries = false;
	if(libraryCreateInfo)
	{
		for(uint32_t i = 0; i < libraryCreateInfo->libraryCount; ++i)
		{
			const auto *library = static_cast<const vk::GraphicsPipeline *>(vk::Cast(libraryCreateInfo->pLibraries[i]));
			if(library->state.hasVertexInputInterfaceState())
			{
				inputs = library->inputs;
				vertexInputInterfaceInLibraries = true;
			}
			if(library->state.hasPreRasterizationState())
			{
				vertexShader = library->vertexShader;
			}
			if(library->state.hasFragmentState())
			{
				fragmentShader = library->fragmentShader;
			}
			if(library->state.hasFragmentOutputInterfaceState())
			{
				memcpy(attachments.indexToLocation, library->attachments.indexToLocation, sizeof(attachments.indexToLocation));
				memcpy(attachments.locationToIndex, library->attachments.locationToIndex, sizeof(attachments.locationToIndex));
				fragmentOutputInterfaceInLibraries = true;
			}
		}
	}
	if(state.hasVertexInputInterfaceState() && !vertexInputInterfaceInLibraries)
	{
		inputs.initialize(pCreateInfo->pVertexInputState, pCreateInfo->pDynamicState);
	}
	if(state.hasFragmentOutputInterfaceState() && !fragmentOutputInterfaceInLibraries)
	{
		const auto *colorMapping = GetExtendedStruct<VkRenderingAttachmentLocationInfoKHR>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR);
		if(colorMapping)
		{
			// Note that with VK_KHR_dynamic_rendering_local_read, if
			// VkRenderingAttachmentLocationInfoKHR is provided, setting an index to
			// VK_ATTACHMENT_UNUSED disables output for that attachment, even if the write
			// mask is not explicitly disabled.
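			//
			// Worked example (values illustrative): with colorAttachmentCount = 3 and
			// pColorAttachmentLocations = { 2, VK_ATTACHMENT_UNUSED, 0 }, the loops below
			// leave attachment 1 disabled and produce
			//   indexToLocation = { 2, UNUSED, 0, ... }
			//   locationToIndex = { 2, UNUSED, 0, ... }
			// (location 0 is written by attachment index 2, and location 2 by index 0).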
			for(uint32_t i = 0; i < sw::MAX_COLOR_BUFFERS; ++i)
			{
				attachments.indexToLocation[i] = VK_ATTACHMENT_UNUSED;
				attachments.locationToIndex[i] = VK_ATTACHMENT_UNUSED;
			}

			for(uint32_t i = 0; i < colorMapping->colorAttachmentCount; ++i)
			{
				const uint32_t location = colorMapping->pColorAttachmentLocations[i];
				if(location != VK_ATTACHMENT_UNUSED)
				{
					attachments.indexToLocation[i] = location;
					attachments.locationToIndex[location] = i;
				}
			}
		}
		else
		{
			for(uint32_t i = 0; i < sw::MAX_COLOR_BUFFERS; ++i)
			{
				attachments.indexToLocation[i] = i;
				attachments.locationToIndex[i] = i;
			}
		}
	}
}

void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
{
	vertexShader.reset();
	fragmentShader.reset();
}

size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	return 0;
}

VkGraphicsPipelineLibraryFlagsEXT GraphicsPipeline::GetGraphicsPipelineSubset(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const auto *libraryCreateInfo = vk::GetExtendedStruct<VkPipelineLibraryCreateInfoKHR>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR);
	const auto *graphicsLibraryCreateInfo = vk::GetExtendedStruct<VkGraphicsPipelineLibraryCreateInfoEXT>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT);

	if(graphicsLibraryCreateInfo)
	{
		return graphicsLibraryCreateInfo->flags;
	}

	// > If this structure is omitted, and either VkGraphicsPipelineCreateInfo::flags
	// > includes VK_PIPELINE_CREATE_LIBRARY_BIT_KHR or the
	// > VkGraphicsPipelineCreateInfo::pNext chain includes a VkPipelineLibraryCreateInfoKHR
	// > structure with a libraryCount greater than 0, it is as if flags is 0. Otherwise if
	// > this structure is omitted, it is as if flags includes all possible subsets of the
	// > graphics pipeline (i.e. a complete graphics pipeline).
	//
	// The above basically says that when a pipeline is created:
	// - If it is not a library and not created from libraries, it's a complete pipeline
	//   (i.e. a Vulkan 1.0 pipeline)
	// - If it is only created from other libraries, no state is taken from
	//   VkGraphicsPipelineCreateInfo.
	//
	// Otherwise the behavior when creating a library from other libraries is that some
	// state is taken from VkGraphicsPipelineCreateInfo and some from the libraries.
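	//
	// For instance, a hypothetical application building only the vertex-input library
	// would chain (sketch):
	//
	//   VkGraphicsPipelineLibraryCreateInfoEXT libraryInfo = {};
	//   libraryInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT;
	//   libraryInfo.flags = VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
	//
	// into a VkGraphicsPipelineCreateInfo whose flags include
	// VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; those flags would then be returned directly
	// through the early return above.
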
	const bool isLibrary = (pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) != 0;
	if(isLibrary || (libraryCreateInfo && libraryCreateInfo->libraryCount > 0))
	{
		return 0;
	}

	return VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT |
	       VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT |
	       VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT |
	       VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
}

void GraphicsPipeline::getIndexBuffers(const vk::DynamicState &dynamicState, uint32_t count, uint32_t first, bool indexed, std::vector<std::pair<uint32_t, void *>> *indexBuffers) const
{
	const vk::VertexInputInterfaceState &vertexInputInterfaceState = state.getVertexInputInterfaceState();

	const VkPrimitiveTopology topology = vertexInputInterfaceState.hasDynamicTopology() ? dynamicState.primitiveTopology : vertexInputInterfaceState.getTopology();
	const bool hasPrimitiveRestartEnable = vertexInputInterfaceState.hasDynamicPrimitiveRestartEnable() ? dynamicState.primitiveRestartEnable : vertexInputInterfaceState.hasPrimitiveRestartEnable();
	indexBuffer.getIndexBuffers(topology, count, first, indexed, hasPrimitiveRestartEnable, indexBuffers);
}

bool GraphicsPipeline::preRasterizationContainsImageWrite() const
{
	return vertexShader.get() && vertexShader->containsImageWrite();
}

bool GraphicsPipeline::fragmentContainsImageWrite() const
{
	return fragmentShader.get() && fragmentShader->containsImageWrite();
}

void GraphicsPipeline::setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader)
{
	switch(stage)
	{
	case VK_SHADER_STAGE_VERTEX_BIT:
		ASSERT(vertexShader.get() == nullptr);
		vertexShader = spirvShader;
		break;

	case VK_SHADER_STAGE_FRAGMENT_BIT:
		ASSERT(fragmentShader.get() == nullptr);
		fragmentShader = spirvShader;
		break;

	default:
		UNSUPPORTED("Unsupported stage");
		break;
	}
}

const std::shared_ptr<sw::SpirvShader> GraphicsPipeline::getShader(const VkShaderStageFlagBits &stage) const
{
	switch(stage)
	{
	case VK_SHADER_STAGE_VERTEX_BIT:
		return vertexShader;
	case VK_SHADER_STAGE_FRAGMENT_BIT:
		return fragmentShader;
	default:
		UNSUPPORTED("Unsupported stage");
		return fragmentShader;
	}
}

VkResult GraphicsPipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
{
	PipelineCreationFeedback pipelineCreationFeedback(pCreateInfo);
	VkGraphicsPipelineLibraryFlagsEXT pipelineSubset = GetGraphicsPipelineSubset(pCreateInfo);
	const bool expectVertexShader = (pipelineSubset & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) != 0;
	const bool expectFragmentShader = (pipelineSubset & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) != 0;

	const auto *inputAttachmentMapping = GetExtendedStruct<VkRenderingInputAttachmentIndexInfoKHR>(pCreateInfo->pNext, VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR);

	for(uint32_t stageIndex = 0; stageIndex < pCreateInfo->stageCount; stageIndex++)
	{
		const VkPipelineShaderStageCreateInfo &stageInfo = pCreateInfo->pStages[stageIndex];

		// Ignore stages that don't exist in the pipeline library.
		if((stageInfo.stage == VK_SHADER_STAGE_VERTEX_BIT && !expectVertexShader) ||
		   (stageInfo.stage == VK_SHADER_STAGE_FRAGMENT_BIT && !expectFragmentShader))
		{
			continue;
		}

		pipelineCreationFeedback.stageCreationBegins(stageIndex);

		if((stageInfo.flags &
		    ~(VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT |
		      VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT)) != 0)
		{
			UNSUPPORTED("pStage->flags 0x%08X", int(stageInfo.flags));
		}

		const bool optimize = true;  // TODO(b/251802301): Don't optimize when debugging shaders.

		const ShaderModule *module = vk::Cast(stageInfo.module);

		// VK_EXT_graphics_pipeline_library allows VkShaderModuleCreateInfo to be chained to
		// VkPipelineShaderStageCreateInfo, which is used if stageInfo.module is
		// VK_NULL_HANDLE.
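		//
		// Application-side sketch of that chaining (identifiers illustrative):
		//
		//   VkShaderModuleCreateInfo moduleInfo = {};
		//   moduleInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
		//   moduleInfo.codeSize = spirvSizeInBytes;  // size in bytes
		//   moduleInfo.pCode = spirvWords;
		//   VkPipelineShaderStageCreateInfo stageCreateInfo = {};
		//   stageCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		//   stageCreateInfo.pNext = &moduleInfo;  // no separate VkShaderModule object
		//   stageCreateInfo.module = VK_NULL_HANDLE;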
		VkShaderModule tempModule = {};
		if(stageInfo.module == VK_NULL_HANDLE)
		{
			const auto *moduleCreateInfo = vk::GetExtendedStruct<VkShaderModuleCreateInfo>(stageInfo.pNext,
			                                                                               VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
			ASSERT(moduleCreateInfo);
			VkResult createResult = vk::ShaderModule::Create(nullptr, moduleCreateInfo, &tempModule);
			if(createResult != VK_SUCCESS)
			{
				return createResult;
			}

			module = vk::Cast(tempModule);
		}

		const PipelineCache::SpirvBinaryKey key(module->getBinary(), stageInfo.pSpecializationInfo, robustBufferAccess, optimize);

		if((pCreateInfo->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) &&
		   (!pPipelineCache || !pPipelineCache->contains(key)))
		{
			pipelineCreationFeedback.pipelineCreationError();
			return VK_PIPELINE_COMPILE_REQUIRED_EXT;
		}

		sw::SpirvBinary spirv;

		if(pPipelineCache)
		{
			auto onCacheMiss = [&] { return optimizeSpirv(key); };
			auto onCacheHit = [&] { pipelineCreationFeedback.cacheHit(stageIndex); };
			spirv = pPipelineCache->getOrOptimizeSpirv(key, onCacheMiss, onCacheHit);
		}
		else
		{
			spirv = optimizeSpirv(key);

			// If the pipeline does not have specialization constants, there's a 1-to-1
			// mapping between the unoptimized and optimized SPIR-V, so we should use a
			// 1-to-1 mapping of the identifiers to avoid JIT routine recompiles.
			if(!key.getSpecializationInfo())
			{
				spirv.mapOptimizedIdentifier(key.getBinary());
			}
		}

		const bool stageRobustBufferAccess = getPipelineStageRobustBufferAccess(stageInfo.pNext, device, robustBufferAccess);

		// TODO(b/201798871): use allocator.
		auto shader = std::make_shared<sw::SpirvShader>(stageInfo.stage, stageInfo.pName, spirv,
		                                                vk::Cast(pCreateInfo->renderPass), pCreateInfo->subpass, inputAttachmentMapping, stageRobustBufferAccess);

		setShader(stageInfo.stage, shader);

		pipelineCreationFeedback.stageCreationEnds(stageIndex);

		if(tempModule != VK_NULL_HANDLE)
		{
			vk::destroy(tempModule, nullptr);
		}
	}

	return VK_SUCCESS;
}

ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, Device *device)
    : Pipeline(vk::Cast(pCreateInfo->layout), device, getPipelineRobustBufferAccess(pCreateInfo->pNext, device))
{
}

void ComputePipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
{
	shader.reset();
	program.reset();
}

size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo)
{
	return 0;
}

VkResult ComputePipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
{
	PipelineCreationFeedback pipelineCreationFeedback(pCreateInfo);
	pipelineCreationFeedback.stageCreationBegins(0);

	auto &stage = pCreateInfo->stage;
	const ShaderModule *module = vk::Cast(stage.module);

	// VK_EXT_graphics_pipeline_library allows VkShaderModuleCreateInfo to be chained to
	// VkPipelineShaderStageCreateInfo, which is used if stage.module is
	// VK_NULL_HANDLE.
	VkShaderModule tempModule = {};
	if(stage.module == VK_NULL_HANDLE)
	{
		const auto *moduleCreateInfo = vk::GetExtendedStruct<VkShaderModuleCreateInfo>(stage.pNext,
		                                                                               VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
		ASSERT(moduleCreateInfo);
		VkResult createResult = vk::ShaderModule::Create(nullptr, moduleCreateInfo, &tempModule);
		if(createResult != VK_SUCCESS)
		{
			return createResult;
		}

		module = vk::Cast(tempModule);
	}

	ASSERT(shader.get() == nullptr);
	ASSERT(program.get() == nullptr);

	const bool optimize = true;  // TODO(b/251802301): Don't optimize when debugging shaders.

	const PipelineCache::SpirvBinaryKey shaderKey(module->getBinary(), stage.pSpecializationInfo, robustBufferAccess, optimize);

	if((pCreateInfo->flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) &&
	   (!pPipelineCache || !pPipelineCache->contains(shaderKey)))
	{
		pipelineCreationFeedback.pipelineCreationError();
		return VK_PIPELINE_COMPILE_REQUIRED_EXT;
	}

	sw::SpirvBinary spirv;

	if(pPipelineCache)
	{
		auto onCacheMiss = [&] { return optimizeSpirv(shaderKey); };
		auto onCacheHit = [&] { pipelineCreationFeedback.cacheHit(0); };
		spirv = pPipelineCache->getOrOptimizeSpirv(shaderKey, onCacheMiss, onCacheHit);
	}
	else
	{
		spirv = optimizeSpirv(shaderKey);

		// If the pipeline does not have specialization constants, there's a 1-to-1
		// mapping between the unoptimized and optimized SPIR-V, so we should use a
		// 1-to-1 mapping of the identifiers to avoid JIT routine recompiles.
		if(!shaderKey.getSpecializationInfo())
		{
			spirv.mapOptimizedIdentifier(shaderKey.getBinary());
		}
	}

	const bool stageRobustBufferAccess = getPipelineStageRobustBufferAccess(stage.pNext, device, robustBufferAccess);

	// TODO(b/201798871): use allocator.
	shader = std::make_shared<sw::SpirvShader>(stage.stage, stage.pName, spirv,
	                                           nullptr, 0, nullptr, stageRobustBufferAccess);

	const PipelineCache::ComputeProgramKey programKey(shader->getIdentifier(), layout->identifier);

	if(pPipelineCache)
	{
		program = pPipelineCache->getOrCreateComputeProgram(programKey, [&] {
			return createProgram(device, shader, layout);
		});
	}
	else
	{
		program = createProgram(device, shader, layout);
	}

	pipelineCreationFeedback.stageCreationEnds(0);

	return VK_SUCCESS;
}

void ComputePipeline::run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
                          uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
                          const vk::DescriptorSet::Array &descriptorSetObjects,
                          const vk::DescriptorSet::Bindings &descriptorSets,
                          const vk::DescriptorSet::DynamicOffsets &descriptorDynamicOffsets,
                          const vk::Pipeline::PushConstantStorage &pushConstants)
{
	ASSERT_OR_RETURN(program != nullptr);
	program->run(
	    descriptorSetObjects, descriptorSets, descriptorDynamicOffsets, pushConstants,
	    baseGroupX, baseGroupY, baseGroupZ,
	    groupCountX, groupCountY, groupCountZ);
}

}  // namespace vk