// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
14
15 #ifndef VK_PIPELINE_HPP_
16 #define VK_PIPELINE_HPP_
17
18 #include "Device/Context.hpp"
19 #include "Vulkan/VkPipelineCache.hpp"
20 #include <memory>
21
22 namespace sw {
23
24 class ComputeProgram;
25 class SpirvShader;
26
27 } // namespace sw
28
29 namespace vk {
30
31 class ShaderModule;
32
33 class Pipeline
34 {
35 public:
36 Pipeline(PipelineLayout *layout, Device *device, bool robustBufferAccess);
37 virtual ~Pipeline() = default;
38
operator VkPipeline()39 operator VkPipeline()
40 {
41 return vk::TtoVkT<Pipeline, VkPipeline>(this);
42 }
43
Cast(VkPipeline object)44 static inline Pipeline *Cast(VkPipeline object)
45 {
46 return vk::VkTtoT<Pipeline, VkPipeline>(object);
47 }
48
49 void destroy(const VkAllocationCallbacks *pAllocator);
50
51 virtual void destroyPipeline(const VkAllocationCallbacks *pAllocator) = 0;
52 #ifndef NDEBUG
53 virtual VkPipelineBindPoint bindPoint() const = 0;
54 #endif
55
getLayout() const56 PipelineLayout *getLayout() const
57 {
58 return layout;
59 }
60
61 struct PushConstantStorage
62 {
63 unsigned char data[vk::MAX_PUSH_CONSTANT_SIZE];
64 };
65
66 protected:
67 PipelineLayout *layout = nullptr;
68 Device *const device;
69
70 const bool robustBufferAccess = true;
71 };
72
73 class GraphicsPipeline : public Pipeline, public ObjectBase<GraphicsPipeline, VkPipeline>
74 {
75 public:
76 GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo,
77 void *mem,
78 Device *device);
79 virtual ~GraphicsPipeline() = default;
80
81 void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;
82
83 #ifndef NDEBUG
bindPoint() const84 VkPipelineBindPoint bindPoint() const override
85 {
86 return VK_PIPELINE_BIND_POINT_GRAPHICS;
87 }
88 #endif
89
90 static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo);
91 static VkGraphicsPipelineLibraryFlagsEXT GetGraphicsPipelineSubset(const VkGraphicsPipelineCreateInfo *pCreateInfo);
92
93 VkResult compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);
94
getCombinedState(const DynamicState & ds) const95 GraphicsState getCombinedState(const DynamicState &ds) const { return state.combineStates(ds); }
getState() const96 const GraphicsState &getState() const { return state; }
97
98 void getIndexBuffers(const vk::DynamicState &dynamicState, uint32_t count, uint32_t first, bool indexed, std::vector<std::pair<uint32_t, void *>> *indexBuffers) const;
99
getIndexBuffer()100 IndexBuffer &getIndexBuffer() { return indexBuffer; }
getIndexBuffer() const101 const IndexBuffer &getIndexBuffer() const { return indexBuffer; }
getAttachments()102 Attachments &getAttachments() { return attachments; }
getAttachments() const103 const Attachments &getAttachments() const { return attachments; }
getInputs()104 Inputs &getInputs() { return inputs; }
getInputs() const105 const Inputs &getInputs() const { return inputs; }
106
107 bool preRasterizationContainsImageWrite() const;
108 bool fragmentContainsImageWrite() const;
109
110 const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits &stage) const;
111
112 private:
113 void setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
114 std::shared_ptr<sw::SpirvShader> vertexShader;
115 std::shared_ptr<sw::SpirvShader> fragmentShader;
116
117 const GraphicsState state;
118
119 IndexBuffer indexBuffer;
120 Attachments attachments;
121 Inputs inputs;
122 };
123
124 class ComputePipeline : public Pipeline, public ObjectBase<ComputePipeline, VkPipeline>
125 {
126 public:
127 ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, Device *device);
128 virtual ~ComputePipeline() = default;
129
130 void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;
131
132 #ifndef NDEBUG
bindPoint() const133 VkPipelineBindPoint bindPoint() const override
134 {
135 return VK_PIPELINE_BIND_POINT_COMPUTE;
136 }
137 #endif
138
139 static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo);
140
141 VkResult compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);
142
143 void run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
144 uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
145 const vk::DescriptorSet::Array &descriptorSetObjects,
146 const vk::DescriptorSet::Bindings &descriptorSets,
147 const vk::DescriptorSet::DynamicOffsets &descriptorDynamicOffsets,
148 const vk::Pipeline::PushConstantStorage &pushConstants);
149
150 protected:
151 std::shared_ptr<sw::SpirvShader> shader;
152 std::shared_ptr<sw::ComputeProgram> program;
153 };
154
Cast(VkPipeline object)155 static inline Pipeline *Cast(VkPipeline object)
156 {
157 return Pipeline::Cast(object);
158 }
159
160 } // namespace vk
161
162 #endif // VK_PIPELINE_HPP_
163