/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_batch.h"
#include "zink_context.h"
#include "zink_fence.h"

#include "zink_resource.h"
#include "zink_screen.h"

#include "util/os_file.h"
#include "util/set.h"
#include "util/u_memory.h"

#ifdef _WIN32
#include <windows.h>
#include <vulkan/vulkan_win32.h>
#endif

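/* Tear down a tc fence: unlink it from its backing zink_fence's mfence list,
 * drop any threaded-context token, destroy the exportable semaphore (if one
 * was created), and free the wrapper.
 */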
static void
destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
{
   if (mfence->fence)
      util_dynarray_delete_unordered(&mfence->fence->mfences, struct zink_tc_fence *, mfence);
   mfence->fence = NULL;
   tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
   if (mfence->sem)
      VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
   FREE(mfence);
}

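/* Allocate a new zink_tc_fence wrapper with a single reference and its
 * util_queue_fence initialized (signaled).
 */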
struct zink_tc_fence *
zink_create_tc_fence(void)
{
   struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
   if (!mfence)
      return NULL;
   pipe_reference_init(&mfence->reference, 1);
   util_queue_fence_init(&mfence->ready);
   return mfence;
}

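/* Create a tc fence for the threaded context: the 'ready' fence is reset so
 * waiters block until the driver thread flushes, and the unflushed-batch
 * token is referenced so the flush can be forced later if needed.
 */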
struct pipe_fence_handle *
zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
{
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      return NULL;
   util_queue_fence_reset(&mfence->ready);
   tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
   return (struct pipe_fence_handle*)mfence;
}

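/* Standard reference-counted assignment: drop the old fence (destroying it
 * when the last reference goes away) and point *ptr at the new one.
 */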
void
zink_fence_reference(struct zink_screen *screen,
                     struct zink_tc_fence **ptr,
                     struct zink_tc_fence *mfence)
{
   if (pipe_reference(&(*ptr)->reference, &mfence->reference))
      destroy_fence(screen, *ptr);

   *ptr = mfence;
}

static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   zink_fence_reference(zink_screen(pscreen), (struct zink_tc_fence **)pptr,
                        zink_tc_fence(pfence));
}

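/* Wait for the threaded-context side of a fence: force the deferred flush if
 * the token is still pending, then wait (with or without a timeout) for the
 * driver thread to signal 'ready'. On a timed wait, *timeout_ns is updated
 * with the time remaining so the caller can keep waiting on the GPU fence.
 */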
static bool
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
{
   if (!util_queue_fence_is_signalled(&mfence->ready)) {
      int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
      if (mfence->tc_token) {
         /* Ensure that zink_flush will be called for
          * this mfence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the mfence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
      }

      /* this is a tc mfence, so we're just waiting on the queue fence to complete
       * after it has been signaled by the real fence
       */
      if (*timeout_ns == OS_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&mfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
            return false;
      }
      if (*timeout_ns && *timeout_ns != OS_TIMEOUT_INFINITE) {
         int64_t time_ns = os_time_get_nano();
         *timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
      }
   }

   return true;
}

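/* Wait on the GPU-side fence via the screen's timeline wait. Returns
 * immediately if the device is lost or the fence has already been observed
 * to complete; on success the batch state's usage is cleared and the
 * screen's last-finished batch id is updated.
 */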
static bool
fence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
{
   if (screen->device_lost)
      return true;
   if (p_atomic_read(&fence->completed))
      return true;

   assert(fence->batch_id);
   assert(fence->submitted);

   bool success = zink_screen_timeline_wait(screen, fence->batch_id, timeout_ns);

   if (success) {
      p_atomic_set(&fence->completed, true);
      zink_batch_state(fence)->usage.usage = 0;
      zink_screen_update_last_finished(screen, fence->batch_id);
   }
   return success;
}

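/* Full fence wait: flush a deferred context if this fence belongs to the
 * batch still being recorded, wait for the threaded-context flush to land,
 * then check whether the batch is already known to have finished before
 * falling back to an actual GPU wait.
 */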
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   if (screen->device_lost)
      return true;

   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->bs->has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc mfence has been flushed before we wait */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   unsigned submit_diff = zink_batch_state(mfence->fence)->usage.submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more than 1 time
    * since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   /* - if fence is submitted, batch_id is nonzero and can be checked
    * - if fence is not submitted here, it must be reset; batch_id will be 0 and submitted is false
    * in either case, the fence has finished
    */
   if ((fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id)) ||
       (!fence->submitted && submit_diff))
      return true;

   return fence_wait(screen, fence, timeout_ns);
}

static bool
fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
             struct pipe_fence_handle *pfence, uint64_t timeout_ns)
{
   return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence),
                            timeout_ns);
}

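/* Export the fence's semaphore as a sync_fd so it can be shared with other
 * processes or APIs. Returns -1 if the device is lost, the fence has no
 * exportable semaphore, or the export fails.
 */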
static int
fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *pfence)
{
   struct zink_screen *screen = zink_screen(pscreen);
   if (screen->device_lost)
      return -1;

   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;
   if (!mfence->sem)
      return -1;

   const VkSemaphoreGetFdInfoKHR sgfi = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
      .semaphore = mfence->sem,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   int fd = -1;
   VkResult result = VKSCR(GetSemaphoreFdKHR)(screen->dev, &sgfi, &fd);
   if (!zink_screen_handle_vkresult(screen, result)) {
      mesa_loge("ZINK: vkGetSemaphoreFdKHR failed (%s)", vk_Result_to_str(result));
      return -1;
   }

   return fd;
}

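/* Queue the fence's semaphore to be signaled by the next submit, then flush
 * synchronously so the signal operation has been handed off before this
 * returns (waiting for the flush thread when threaded submit is enabled).
 */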
void
zink_fence_server_signal(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;

   assert(!ctx->bs->signal_semaphore);
   ctx->bs->signal_semaphore = mfence->sem;
   ctx->bs->has_work = true;
   struct zink_batch_state *bs = ctx->bs;
   /* this must produce a synchronous flush that completes before the function returns */
   pctx->flush(pctx, NULL, 0);
   if (zink_screen(ctx->base.screen)->threaded_submit)
      util_queue_fence_wait(&bs->flush_completed);
}

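/* Make the context's next submit wait on the fence's imported semaphore.
 * The semaphore (and a reference on the fence) is handed off to the batch
 * state, which owns it from this point on.
 */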
void
zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;

   if (mfence->deferred_ctx == pctx || !mfence->sem)
      return;

   mfence->deferred_ctx = pctx;
   /* this will be applied on the next submit */
   VkPipelineStageFlags flag = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
   util_dynarray_append(&ctx->bs->wait_semaphores, VkSemaphore, mfence->sem);
   util_dynarray_append(&ctx->bs->wait_semaphore_stages, VkPipelineStageFlags, flag);
   pipe_reference(NULL, &mfence->reference);
   util_dynarray_append(&ctx->bs->fences, struct zink_tc_fence*, mfence);

   /* transfer ownership of the external wait semaphore to the next submit */
   mfence->sem = VK_NULL_HANDLE;
}

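/* Import an external fd (sync_fd or drm syncobj) into a semaphore wrapped in
 * a new fence. The fd is duplicated before import so the caller's fd is left
 * untouched; on any failure *pfence is set to NULL.
 */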
void
zink_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence, int fd, enum pipe_fd_type type)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   VkResult result;

   assert(fd >= 0);

   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      goto fail_tc_fence_create;

   const VkSemaphoreCreateInfo sci = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
   };
   result = VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkCreateSemaphore failed (%s)", vk_Result_to_str(result));
      goto fail_sem_create;
   }

   int dup_fd = os_dupfd_cloexec(fd);
   if (dup_fd < 0)
      goto fail_fd_dup;

   static const VkExternalSemaphoreHandleTypeFlagBits flags[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
   };
   assert(type < ARRAY_SIZE(flags));

   const VkImportSemaphoreFdInfoKHR sdi = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = mfence->sem,
      .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      .handleType = flags[type],
      .fd = dup_fd,
   };
   result = VKSCR(ImportSemaphoreFdKHR)(screen->dev, &sdi);
   if (!zink_screen_handle_vkresult(screen, result)) {
      mesa_loge("ZINK: vkImportSemaphoreFdKHR failed (%s)", vk_Result_to_str(result));
      goto fail_sem_import;
   }

   *pfence = (struct pipe_fence_handle *)mfence;
   return;

fail_sem_import:
   close(dup_fd);
fail_fd_dup:
   VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
fail_sem_create:
   FREE(mfence);
fail_tc_fence_create:
   *pfence = NULL;
}

#ifdef _WIN32
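/* Win32 variant of fence import: wrap an opaque Win32 handle (or named
 * handle) in a semaphore and return it as a new fence. *pfence is left NULL
 * on failure.
 */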
void
zink_create_fence_win32(struct pipe_screen *pscreen, struct pipe_fence_handle **pfence, void *handle, const void *name, enum pipe_fd_type type)
{
   struct zink_screen *screen = zink_screen(pscreen);
   VkResult ret = VK_ERROR_UNKNOWN;
   VkSemaphoreCreateInfo sci = {
      VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
      NULL,
      0
   };
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   VkExternalSemaphoreHandleTypeFlagBits flags[] = {
      [PIPE_FD_TYPE_NATIVE_SYNC] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
      [PIPE_FD_TYPE_SYNCOBJ] = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
   };
   VkImportSemaphoreWin32HandleInfoKHR sdi = {0};
   assert(type < ARRAY_SIZE(flags));

   *pfence = NULL;

   /* bail if the tc fence allocation failed */
   if (!mfence)
      return;

   if (VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &mfence->sem) != VK_SUCCESS) {
      FREE(mfence);
      return;
   }

   sdi.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
   sdi.semaphore = mfence->sem;
   sdi.handleType = flags[type];
   sdi.handle = handle;
   sdi.name = (LPCWSTR)name;
   ret = VKSCR(ImportSemaphoreWin32HandleKHR)(screen->dev, &sdi);

   if (!zink_screen_handle_vkresult(screen, ret))
      goto fail;
   *pfence = (struct pipe_fence_handle *)mfence;
   return;

fail:
   VKSCR(DestroySemaphore)(screen->dev, mfence->sem, NULL);
   FREE(mfence);
}
#endif

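/* Hook the fence entrypoints into the pipe_screen vtable. */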
void
zink_screen_fence_init(struct pipe_screen *pscreen)
{
   pscreen->fence_reference = fence_reference;
   pscreen->fence_finish = fence_finish;
   pscreen->fence_get_fd = fence_get_fd;
}