xref: /aosp_15_r20/external/swiftshader/src/Vulkan/VkQueue.cpp (revision 03ce13f70fcc45d86ee91b7ee4cab1936a95046e)
// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkQueue.hpp"

#include "VkCommandBuffer.hpp"
#include "VkFence.hpp"
#include "VkSemaphore.hpp"
#include "VkStringify.hpp"
#include "VkStructConversion.hpp"
#include "VkTimelineSemaphore.hpp"
#include "Device/Renderer.hpp"
#include "WSI/VkSwapchainKHR.hpp"

#include "marl/defer.h"
#include "marl/scheduler.h"
#include "marl/thread.h"
#include "marl/trace.h"

#include <cstring>

namespace vk {

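// Each queue owns a dedicated worker thread which runs taskLoop() and
// executes submitted work asynchronously. Work is handed to that thread
// through the 'pending' task channel.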
Queue::Queue(Device *device, marl::Scheduler *scheduler)
    : device(device)
{
	queueThread = std::thread(&Queue::taskLoop, this, scheduler);
}

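// Asks the worker thread to exit by enqueueing a KILL_THREAD task, waits for
// it to finish, then releases any submit data still pending deletion.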
Queue::~Queue()
{
	Task task;
	task.type = Task::KILL_THREAD;
	pending.put(task);

	queueThread.join();
	ASSERT_MSG(pending.count() == 0, "queue has work after worker thread shutdown");

	garbageCollect();
}

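// Queues the submits for asynchronous execution on the worker thread. The
// queue takes ownership of pSubmits, which is queued for deletion after
// execution and released in garbageCollect(). When a fence is provided, its
// counted event is attached to the task so the fence gets signaled once the
// work completes in submitQueue().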
VkResult Queue::submit(uint32_t submitCount, SubmitInfo *pSubmits, Fence *fence)
{
	garbageCollect();

	Task task;
	task.submitCount = submitCount;
	task.pSubmits = pSubmits;
	if(fence)
	{
		task.events = fence->getCountedEvent();
		task.events->add();
	}

	pending.put(task);

	return VK_SUCCESS;
}

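// Executes a SUBMIT_QUEUE task on the worker thread: for each submit, wait on
// the wait semaphores, execute the command buffers through the renderer, then
// signal the signal semaphores. Afterwards the submit data is queued for
// deletion, and any attached event (fence or waitIdle) is signaled once the
// renderer has been synchronized.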
void Queue::submitQueue(const Task &task)
{
	if(renderer == nullptr)
	{
		renderer.reset(new sw::Renderer(device));
	}

	for(uint32_t i = 0; i < task.submitCount; i++)
	{
		SubmitInfo &submitInfo = task.pSubmits[i];
		for(uint32_t j = 0; j < submitInfo.waitSemaphoreCount; j++)
		{
			if(auto *sem = DynamicCast<TimelineSemaphore>(submitInfo.pWaitSemaphores[j]))
			{
				ASSERT(j < submitInfo.waitSemaphoreValueCount);
				sem->wait(submitInfo.pWaitSemaphoreValues[j]);
			}
			else if(auto *sem = DynamicCast<BinarySemaphore>(submitInfo.pWaitSemaphores[j]))
			{
				sem->wait(submitInfo.pWaitDstStageMask[j]);
			}
			else
			{
				UNSUPPORTED("Unknown semaphore type");
			}
		}

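		// Execute the recorded command buffers against this queue's renderer.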
		{
			CommandBuffer::ExecutionState executionState;
			executionState.renderer = renderer.get();
			executionState.events = task.events.get();
			for(uint32_t j = 0; j < submitInfo.commandBufferCount; j++)
			{
				Cast(submitInfo.pCommandBuffers[j])->submit(executionState);
			}
		}

		for(uint32_t j = 0; j < submitInfo.signalSemaphoreCount; j++)
		{
			if(auto *sem = DynamicCast<TimelineSemaphore>(submitInfo.pSignalSemaphores[j]))
			{
				ASSERT(j < submitInfo.signalSemaphoreValueCount);
				sem->signal(submitInfo.pSignalSemaphoreValues[j]);
			}
			else if(auto *sem = DynamicCast<BinarySemaphore>(submitInfo.pSignalSemaphores[j]))
			{
				sem->signal();
			}
			else
			{
				UNSUPPORTED("Unknown semaphore type");
			}
		}
	}

	if(task.pSubmits)
	{
		toDelete.put(task.pSubmits);
	}

	if(task.events)
	{
		// TODO: fix renderer signaling so that work submitted separately from (but before) a fence
		// is guaranteed complete by the time the fence signals.
		renderer->synchronize();
		task.events->done();
	}
}

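// Worker thread entry point. Binds the marl scheduler to this thread and
// processes tasks from 'pending' until a KILL_THREAD task is received.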
void Queue::taskLoop(marl::Scheduler *scheduler)
{
	marl::Thread::setName("Queue<%p>", this);
	scheduler->bind();
	defer(scheduler->unbind());

	while(true)
	{
		Task task = pending.take();

		switch(task.type)
		{
		case Task::KILL_THREAD:
			ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
			return;
		case Task::SUBMIT_QUEUE:
			submitQueue(task);
			break;
		default:
			UNREACHABLE("task.type %d", static_cast<int>(task.type));
			break;
		}
	}
}

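// Blocks until all previously submitted work has been executed, by queueing an
// empty task carrying a CountedEvent and waiting for the worker thread to
// signal it at the end of submitQueue().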
VkResult Queue::waitIdle()
{
	// Wait for task queue to flush.
	auto event = std::make_shared<sw::CountedEvent>();
	event->add();  // done() is called at the end of submitQueue()

	Task task;
	task.events = event;
	pending.put(task);

	event->wait();

	garbageCollect();

	return VK_SUCCESS;
}

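// Releases submit data that the worker thread has finished executing and
// queued for deletion.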
void Queue::garbageCollect()
{
	while(true)
	{
		auto v = toDelete.tryTake();
		if(!v.second) { break; }
		SubmitInfo::Release(v.first);
	}
}

#ifndef __ANDROID__
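// Handles vkQueuePresentKHR: waits on the wait semaphores, presents each
// swapchain image, reports per-swapchain results through pResults, and returns
// a single aggregated result code in which failures take precedence over
// success codes. Any VkSwapchainPresentFenceInfoEXT fences are signaled once
// the corresponding swapchain present has completed.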
VkResult Queue::present(const VkPresentInfoKHR *presentInfo)
{
	// This is a hack to deal with screen tearing for now.
	// Need to correctly implement threading using VkSemaphore
	// to get rid of it. b/132458423
	waitIdle();

	// Note: VkSwapchainPresentModeInfoEXT can be used to override the present mode, but present
	// mode is currently ignored by SwiftShader.

	for(uint32_t i = 0; i < presentInfo->waitSemaphoreCount; i++)
	{
		auto *semaphore = vk::DynamicCast<BinarySemaphore>(presentInfo->pWaitSemaphores[i]);
		semaphore->wait();
	}

	const auto *presentFences = vk::GetExtendedStruct<VkSwapchainPresentFenceInfoEXT>(presentInfo->pNext, VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_EXT);

	VkResult commandResult = VK_SUCCESS;

	for(uint32_t i = 0; i < presentInfo->swapchainCount; i++)
	{
		auto *swapchain = vk::Cast(presentInfo->pSwapchains[i]);
		VkResult perSwapchainResult = swapchain->present(presentInfo->pImageIndices[i]);

		if(presentInfo->pResults)
		{
			presentInfo->pResults[i] = perSwapchainResult;
		}

		// Keep track of the worst result code. VK_SUBOPTIMAL_KHR is a success code so it should
		// not override failure codes, but should not get replaced by a VK_SUCCESS result itself.
		if(perSwapchainResult != VK_SUCCESS)
		{
			if(commandResult == VK_SUCCESS || commandResult == VK_SUBOPTIMAL_KHR)
			{
				commandResult = perSwapchainResult;
			}
		}

		// The wait semaphores and the swapchain are no longer accessed
		if(presentFences)
		{
			vk::Cast(presentFences->pFences[i])->complete();
		}
	}

	return commandResult;
}
#endif

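// The VK_EXT_debug_utils queue label entry points below are no-ops: labels are
// accepted but not recorded.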
void Queue::beginDebugUtilsLabel(const VkDebugUtilsLabelEXT *pLabelInfo)
{
	// Optional debug label region
}

void Queue::endDebugUtilsLabel()
{
	// Close debug label region opened with beginDebugUtilsLabel()
}

void Queue::insertDebugUtilsLabel(const VkDebugUtilsLabelEXT *pLabelInfo)
{
	// Optional single debug label
}

}  // namespace vk