/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Xu, Zhengguo <[email protected]>
 */

#include <fcntl.h>
#include <algorithm>
#include <unistd.h>
#include "dma-buf.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "mos_bufmgr_xe.h"
#include "mos_synchronization_xe.h"
#include "mos_utilities.h"
#include "mos_bufmgr_util_debug.h"

/**
 * @flags indicates the create operation:
 * If flags = 0 (recommended), the syncobj is created in the not-signaled state.
 * If flags = DRM_SYNCOBJ_CREATE_SIGNALED, the syncobj is created in the signaled state,
 * so the first exec using this syncobj will not block waiting on it. To wait on such a
 * syncobj, DRM_IOCTL_SYNCOBJ_RESET must be used first to reset it to the not-signaled state.
 * After the syncobj is signaled by any process, DRM_IOCTL_SYNCOBJ_RESET must be used to reset
 * it to the not-signaled state, otherwise the next exec will ignore this syncobj in its sync array.
 */
int mos_sync_syncobj_create(int fd, uint32_t flags)
{
    struct drm_syncobj_create create = { 0 };
    int ret = 0;

    create.flags = flags;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "ioctl failed in DRM_IOCTL_SYNCOBJ_CREATE, return error(%d)", ret)
    return create.handle;
}
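
/**
 * Illustrative sketch only (not part of the driver): one possible lifecycle of a
 * binary syncobj, tying together the create/reset/destroy helpers in this file.
 * "fd" stands for an already opened Xe device fd (hypothetical caller context).
 */
#if 0
static void example_binary_syncobj_lifecycle(int fd)
{
    int handle = mos_sync_syncobj_create(fd, 0);    // created in the not-signaled state
    if (handle <= 0)
        return;

    // ... submit work that signals "handle", then reset it before reusing it,
    // as the note above requires once the syncobj has been signaled.
    uint32_t handles[] = { (uint32_t)handle };
    mos_sync_syncobj_reset(fd, handles, 1);         // back to not-signaled
    mos_sync_syncobj_destroy(fd, handle);           // release when no longer needed
}
#endif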

/**
 * Destroy the given syncobj handle.
 *
 * Note: KMD waits for the syncobj to be signaled when UMD destroys it,
 * so there is normally no need to wait on it in UMD first.
 */
int mos_sync_syncobj_destroy(int fd, uint32_t handle)
{
    struct drm_syncobj_destroy destroy;
    memclear(destroy);
    int ret = 0;

    destroy.handle = handle;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "ioctl failed in DRM_IOCTL_SYNCOBJ_DESTROY, return error(%d)", ret)
    return ret;
}

/**
 * @handles indicates the syncobj array to wait on.
 * @abs_timeout_nsec indicates the timeout:
 *     if abs_timeout_nsec = 0, the call returns immediately when any syncobj in the array is still busy;
 *     if abs_timeout_nsec > 0, the call waits for the syncobjs in the array to be signaled.
 * @flags indicates the wait operation for the handles:
 *     if flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, wait for all syncobjs to be signaled.
 * @first_signaled records the first syncobj handle in the handles array that is signaled.
 * Note: a return value of -ETIME means the wait timed out, i.e. the syncobjs are still busy.
 */
int mos_sync_syncobj_wait_err(int fd, uint32_t *handles, uint32_t count,
         int64_t abs_timeout_nsec, uint32_t flags, uint32_t *first_signaled)
{
    if(handles == nullptr || count == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }
    struct drm_syncobj_wait wait;
    int ret = 0;

    wait.handles = (uintptr_t)(handles);
    wait.timeout_nsec = abs_timeout_nsec;
    wait.count_handles = count;
    wait.flags = flags;
    wait.first_signaled = 0;
    wait.pad = 0;

    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    if (first_signaled)
        *first_signaled = wait.first_signaled;
    return ret;
}
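
/**
 * Illustrative sketch only (not part of the driver): waiting on a set of syncobjs
 * with mos_sync_syncobj_wait_err. All names here are hypothetical caller context.
 */
#if 0
static bool example_wait_all_signaled(int fd, uint32_t *handles, uint32_t count,
            int64_t abs_timeout_nsec)
{
    uint32_t first_signaled = 0;
    int ret = mos_sync_syncobj_wait_err(fd, handles, count, abs_timeout_nsec,
                DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, &first_signaled);
    // ret == 0: all syncobjs signaled within the timeout;
    // otherwise they are still busy (timeout, see the -ETIME note above) or an error occurred.
    return (ret == 0);
}
#endif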

/**
 * Reset the syncobjs to the not-signaled state.
 *
 * Note: KMD waits for all syncobjs in the handles array to be signaled before resetting them,
 * so there is normally no need to wait in UMD first.
 */
int mos_sync_syncobj_reset(int fd, uint32_t *handles, uint32_t count)
{
    if(handles == nullptr || count == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }

    struct drm_syncobj_array array;
    memclear(array);
    int ret = 0;

    array.handles = (uintptr_t)(handles);
    array.count_handles = count;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
    return ret;
}

/**
 * Signal the syncobjs in the handles array.
 */
int mos_sync_syncobj_signal(int fd, uint32_t *handles, uint32_t count)
{
    if(handles == nullptr || count == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }

    struct drm_syncobj_array array;
    memclear(array);
    int ret = 0;

    array.handles = (uintptr_t)(handles);
    array.count_handles = count;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
    return ret;
}

/**
 * Signal the given timeline points on the syncobjs.
 */
int mos_sync_syncobj_timeline_signal(int fd, uint32_t *handles, uint64_t *points, uint32_t count)
{
    if(handles == nullptr || points == nullptr || count == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }

    struct drm_syncobj_timeline_array array;
    memclear(array);
    int ret = 0;

    array.handles = (uintptr_t)(handles);
    array.points = (uintptr_t)(points);
    array.count_handles = count;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &array);
    return ret;
}

/**
 * Same as @mos_sync_syncobj_wait_err, except that each syncobj waits for the
 * timeline value given in the points array.
 */
int mos_sync_syncobj_timeline_wait(int fd, uint32_t *handles, uint64_t *points,
            unsigned num_handles,
            int64_t timeout_nsec, unsigned flags,
            uint32_t *first_signaled)
{
    if(handles == nullptr || points == nullptr || num_handles == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }

    struct drm_syncobj_timeline_wait args;
    int ret;

    args.handles = (uintptr_t)(handles);
    args.points = (uintptr_t)(points);
    args.timeout_nsec = timeout_nsec;
    args.count_handles = num_handles;
    args.flags = flags;
    args.first_signaled = 0;
    args.pad = 0;

    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);

    if (first_signaled)
        *first_signaled = args.first_signaled;

    return ret;
}

/**
 * @handles: a set of syncobj handles
 * @points: output, the set of sync points queried
 */
int mos_sync_syncobj_timeline_query(int fd, uint32_t *handles, uint64_t *points,
             uint32_t handle_count)
{
    if(handles == nullptr || points == nullptr || handle_count == 0)
    {
        MOS_DRM_ASSERTMESSAGE("Invalid inputs");
        return -EINVAL;
    }

    struct drm_syncobj_timeline_array args;
    int ret;

    args.handles = (uintptr_t)(handles);
    args.points = (uintptr_t)(points);
    args.count_handles = handle_count;
    args.flags = 0;

    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);

    return ret;
}
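
/**
 * Illustrative sketch only (not part of the driver): signaling, querying and waiting
 * on a timeline syncobj point with the helpers above. "fd" and "handle" are
 * hypothetical caller context; the point value is an arbitrary example.
 */
#if 0
static void example_timeline_point(int fd, uint32_t handle)
{
    uint64_t point = 5;

    // Signal point 5 on the timeline (CPU-side signal).
    mos_sync_syncobj_timeline_signal(fd, &handle, &point, 1);

    // Query the last signaled point; "signaled" should now be at least 5.
    uint64_t signaled = 0;
    mos_sync_syncobj_timeline_query(fd, &handle, &signaled, 1);

    // Wait until point 5 is signaled (returns immediately in this sketch).
    mos_sync_syncobj_timeline_wait(fd, &handle, &point, 1,
                INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, nullptr);
}
#endif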

/**
 * Export a sync file fd from the given prime fd.
 *
 * @prime_fd indicates the prime fd of the bo handle;
 * @flags indicates the operation flags (read or write);
 *
 * @return the sync file fd exported from the dma buffer;
 *
 * Note: Caller must close the sync file fd after use to avoid a leak.
 */
int mos_sync_dmabuf_export_syncfile(int prime_fd, uint32_t flags)
{
    int ret = 0;
    struct dma_buf_export_sync_file export_sync_file;
    memclear(export_sync_file);
    if(flags & EXEC_OBJECT_READ_XE)
    {
        export_sync_file.flags |= DMA_BUF_SYNC_READ;
    }
    if(flags & EXEC_OBJECT_WRITE_XE)
    {
        export_sync_file.flags |= DMA_BUF_SYNC_WRITE;
    }

    export_sync_file.fd = -1;
    ret = drmIoctl(prime_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE,
                   &export_sync_file);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "ioctl failed in DMA_BUF_IOCTL_EXPORT_SYNC_FILE, return error(%d)", ret);
    return export_sync_file.fd;
}

/**
 * Convert a sync file fd into a syncobj handle.
 *
 * @fd indicates the opened device;
 * @syncfile_fd indicates the sync file fd exported from the prime fd;
 *
 * @return the syncobj handle that the sync file fd was imported into;
 *
 * Note: Caller must destroy the syncobj handle after use to avoid a leak.
 */
int mos_sync_syncfile_fd_to_syncobj_handle(int fd, int syncfile_fd)
{
    int ret = 0;
    struct drm_syncobj_handle syncobj_import;
    memclear(syncobj_import);
    syncobj_import.handle = mos_sync_syncobj_create(fd, 0);
    syncobj_import.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
    syncobj_import.fd = syncfile_fd;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE,
                   &syncobj_import);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "ioctl failed in DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, return error(%d)", ret);
    return syncobj_import.handle;
}

/**
 * Convert a syncobj handle into a sync file fd.
 *
 * @fd indicates the opened device;
 * @syncobj_handle indicates the syncobj handle;
 *
 * @return the sync file fd exported from the syncobj handle;
 *
 * Note: Caller must close the sync file fd after use to avoid a leak.
 */
int mos_sync_syncobj_handle_to_syncfile_fd(int fd, int syncobj_handle)
{
    int ret = 0;
    struct drm_syncobj_handle syncobj_import;
    memclear(syncobj_import);
    syncobj_import.handle = syncobj_handle;
    syncobj_import.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
    syncobj_import.fd = -1;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD,
                   &syncobj_import);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "ioctl failed in DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, return error(%d)", ret);
    return syncobj_import.fd;
}

/**
 * Convert an external bo handle to a syncobj handle to be used as a fence-in syncobj in UMD.
 *
 * @fd indicates the opened device;
 * @bo_handle indicates the external bo handle;
 * @flags indicates the operation flags (read and write);
 * @out_prime_fd returns the prime fd exported from the handle;
 *     UMD must close the prime fd immediately after use to avoid a leak.
 *
 * @return the exported syncobj handle.
 *
 * Note: Caller must destroy the syncobj handle immediately after use to avoid a leak.
 *     If UMD wants to sync with an external process, UMD should always export every external
 *     bo's syncobj from its dma sync buffer and add them into the exec syncs array.
 */
int mos_sync_export_external_bo_sync(int fd, int bo_handle, uint32_t flags, int &out_prime_fd)
{
    int prime_fd = -1;
    int syncfile_fd = -1;
    int syncobj_handle = -1;
    int ret = 0;
    ret = drmPrimeHandleToFD(fd, bo_handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd);
    MOS_DRM_CHK_STATUS_MESSAGE_RETURN(ret,
                "drmPrimeHandleToFD failed, return error(%d)", ret);
    syncfile_fd = mos_sync_dmabuf_export_syncfile(prime_fd, flags);
    if(syncfile_fd < 0)
    {
        MOS_DRM_ASSERTMESSAGE("Failed to get external bo syncobj");
        close(prime_fd);
        return INVALID_HANDLE;
    }
    syncobj_handle = mos_sync_syncfile_fd_to_syncobj_handle(fd, syncfile_fd);
    if(syncobj_handle < 0)
    {
        MOS_DRM_ASSERTMESSAGE("Failed to get external bo syncobj");
        close(prime_fd);
        close(syncfile_fd);
        return INVALID_HANDLE;
    }
    out_prime_fd = prime_fd;
    close(syncfile_fd);
    return syncobj_handle;
}
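
/**
 * Illustrative sketch only (not part of the driver): combining the export helpers
 * above for an external bo. The bo handle and flags are hypothetical caller context.
 */
#if 0
static void example_external_bo_fence_in(int fd, int bo_handle)
{
    int prime_fd = -1;
    int syncobj_handle = mos_sync_export_external_bo_sync(fd, bo_handle,
                EXEC_OBJECT_READ_XE | EXEC_OBJECT_WRITE_XE, prime_fd);
    if (syncobj_handle < 0)
        return;

    // Use "syncobj_handle" as a fence-in sync for the next exec, then clean up
    // both the handle and the prime fd, as the notes above require, to avoid leaks.
    mos_sync_syncobj_destroy(fd, syncobj_handle);
    close(prime_fd);
}
#endif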

/**
 * Import the sync file fd into the DMA buffer of an external bo.
 *
 * @fd indicates the opened device;
 * @prime_fd indicates the prime fd of the external bo;
 * @syncfile_fd indicates the sync file fd obtained from UMD's fence-out syncobj handle.
 *
 * Note: if UMD wants to export its fence-out for an external process to sync with, UMD
 *     should always import its batch syncobj into every external bo's dma sync buffer.
 */
int mos_sync_import_syncfile_to_external_bo(int fd, int prime_fd, int syncfile_fd)
{
    int ret = 0;

    struct dma_buf_import_sync_file import_sync_file;
    memclear(import_sync_file);
    import_sync_file.flags = DMA_BUF_SYNC_WRITE;
    import_sync_file.fd = syncfile_fd;
    ret = drmIoctl(prime_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &import_sync_file);
    return ret;
}

/**
 * Transfer the fence of a given syncobj (or timeline point) to a destination syncobj (or point).
 *
 * @fd indicates the opened device
 * @handle_dst indicates the destination syncobj handle
 * @point_dst indicates the destination timeline point
 * @handle_src indicates the source syncobj handle
 * @point_src indicates the source timeline point
 * @flags indicates the transfer flags
 */
int __mos_sync_syncobj_transfer(int fd,
            uint32_t handle_dst, uint64_t point_dst,
            uint32_t handle_src, uint64_t point_src,
            uint32_t flags)
{
    struct drm_syncobj_transfer args;
    memclear(args);
    int ret;

    args.src_handle = handle_src;
    args.dst_handle = handle_dst;
    args.src_point = point_src;
    args.dst_point = point_dst;
    args.flags = flags;
    args.pad = 0;
    ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
    return ret;
}

int mos_sync_syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
            uint32_t timeline_handle,
            uint64_t point,
            uint32_t flags)
{
    return __mos_sync_syncobj_transfer(fd,
                binary_handle, 0,
                timeline_handle, point,
                flags);
}
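
/**
 * Illustrative sketch only (not part of the driver): materializing one timeline point
 * as a binary syncobj and exporting it as a sync file fd, using the transfer helper
 * above together with mos_sync_syncobj_handle_to_syncfile_fd. Inputs are hypothetical.
 */
#if 0
static int example_export_timeline_point_as_syncfile(int fd,
            uint32_t timeline_handle, uint64_t point)
{
    int binary_handle = mos_sync_syncobj_create(fd, 0);
    if (binary_handle <= 0)
        return -1;

    // Copy the fence at "point" on the timeline into the binary syncobj.
    mos_sync_syncobj_timeline_to_binary(fd, binary_handle, timeline_handle, point, 0);

    int syncfile_fd = mos_sync_syncobj_handle_to_syncfile_fd(fd, binary_handle);
    mos_sync_syncobj_destroy(fd, binary_handle);
    return syncfile_fd;    // caller closes this fd after use
}
#endif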

/**
 * Initialize a new timeline dep object.
 *
 * @fd indicates the opened device;
 *
 * @return a new timeline dep object
 */
struct mos_xe_dep *mos_sync_create_timeline_dep(int fd)
{
    struct mos_xe_dep *dep = nullptr;
    //create new one
    dep = (struct mos_xe_dep *)calloc(1, sizeof(struct mos_xe_dep));
    MOS_DRM_CHK_NULL_RETURN_VALUE(dep, nullptr);
    int handle = mos_sync_syncobj_create(fd, 0);

    if (handle > 0)
    {
        dep->syncobj_handle = handle;
        dep->timeline_index = 1;
    }
    else
    {
        MOS_XE_SAFE_FREE(dep)
        return nullptr;
    }

    return dep;
}

/**
 * Update the timeline dep by incrementing its timeline index.
 *
 * @dep indicates the timeline dep from the ctx busy queue.
 */
void mos_sync_update_timeline_dep(struct mos_xe_dep *dep)
{
    if(dep)
    {
        dep->timeline_index++;
    }
}

/**
 * Destroy the syncobj and the timeline dep object.
 *
 * @fd indicates the opened device
 * @dep indicates the timeline dep object in its context
 */
void mos_sync_destroy_timeline_dep(int fd, struct mos_xe_dep* dep)
{
    if (dep)
    {
        mos_sync_syncobj_destroy(fd, dep->syncobj_handle);
        MOS_XE_SAFE_FREE(dep)
    }
}
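
/**
 * Illustrative sketch only (not part of the driver): lifecycle of a per-context
 * timeline dep object using the helpers above. The context and exec step are
 * hypothetical; the real driver drives this from its submission path.
 */
#if 0
static void example_timeline_dep_lifecycle(int fd)
{
    struct mos_xe_dep *dep = mos_sync_create_timeline_dep(fd);
    if (dep == nullptr)
        return;

    // Each submission signals dep->syncobj_handle at dep->timeline_index as its
    // fence-out, then bumps the index so the next submission signals a new point.
    mos_sync_update_timeline_dep(dep);

    mos_sync_destroy_timeline_dep(fd, dep);
}
#endif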

/**
 * Add the timeline deps from the read and write dep maps into the exec syncs array.
 *
 * @curr_engine indicates the current exec engine id;
 * @lst_write_engine indicates the last exec engine id used for writing;
 * @flags indicates the operation flags (read or write) for the current exec;
 * @engine_ids indicates the valid engine IDs;
 * @read_deps indicates the read deps from previous execs;
 * @write_deps indicates the write deps from previous execs;
 * @syncs indicates the exec syncs array for the current exec.
 *
 * Note: all deps from the bo dep maps are used as fence-in; in this case,
 *     DRM_XE_SYNC_FLAG_SIGNAL must never be set on the sync, otherwise KMD will
 *     not wait on it.
 *
 * @return the update status.
 */
int mos_sync_update_exec_syncs_from_timeline_deps(uint32_t curr_engine,
            uint32_t lst_write_engine, uint32_t flags,
            std::set<uint32_t> &engine_ids,
            std::map<uint32_t, struct mos_xe_bo_dep> &read_deps,
            std::map<uint32_t, struct mos_xe_bo_dep> &write_deps,
            std::vector<drm_xe_sync> &syncs)
{
    if (lst_write_engine != curr_engine)
    {
        if (write_deps.count(lst_write_engine) > 0
                && engine_ids.count(lst_write_engine) > 0)
        {
            if (write_deps[lst_write_engine].dep)
            {
                drm_xe_sync sync;
                memclear(sync);
                sync.handle = write_deps[lst_write_engine].dep->syncobj_handle;
                sync.type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ;
                sync.timeline_value = write_deps[lst_write_engine].exec_timeline_index;
                syncs.push_back(sync);
            }
        }
    }

    //For a write operation, all syncs in read_deps need to be added into syncs as well.
    if (flags & EXEC_OBJECT_WRITE_XE)
    {
        auto it = read_deps.begin();
        while (it != read_deps.end())
        {
            uint32_t engine_id = it->first;
            if (engine_id != curr_engine
                    && engine_ids.count(engine_id) > 0)
            {
                if (it->second.dep)
                {
                    drm_xe_sync sync;
                    memclear(sync);
                    sync.handle = it->second.dep->syncobj_handle;
                    sync.type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ;
                    sync.timeline_value = it->second.exec_timeline_index;
                    syncs.push_back(sync);
                }
            }
            it++;
        }
    }

    return MOS_XE_SUCCESS;
}

/**
 * Add the dep from the dma buffer of an external bo into the exec syncs array.
 *
 * @fd indicates the opened device;
 * @bo_handle indicates the external bo handle;
 * @flags indicates the operation flags (read or write) for the current exec;
 * @syncs indicates the exec syncs array for the current exec; the syncobj handle exported from the external bo is used as fence-in.
 * @out_prime_fd returns the prime fd exported from the external bo handle; UMD must close the prime fd after use to avoid a leak.
 *
 * Note: Caller must destroy this exported syncobj handle after use to avoid a leak.
 */
int mos_sync_update_exec_syncs_from_handle(int fd,
            uint32_t bo_handle, uint32_t flags,
            std::vector<struct drm_xe_sync> &syncs,
            int &out_prime_fd)
{
    int syncobj_handle = mos_sync_export_external_bo_sync(fd, bo_handle, flags, out_prime_fd);
    if(syncobj_handle < 0)
    {
        MOS_DRM_ASSERTMESSAGE("failed to add syncobj for external bo with invalid syncobj handle: %d",
                    syncobj_handle);
        return -1;
    }

    struct drm_xe_sync sync;
    memclear(sync);
    sync.type = DRM_XE_SYNC_TYPE_SYNCOBJ;
    sync.handle = syncobj_handle;
    syncs.push_back(sync);

    return MOS_XE_SUCCESS;
}

/**
 * Add the timeline dep from its context into the exec syncs array as fence-out.
 *
 * @fd indicates the opened device;
 * @dep indicates the timeline dep in its context used as fence-out for the current submission;
 * @syncs indicates the exec syncs array for the current exec; the timeline dep from the queue is used as fence-out.
 */
void mos_sync_update_exec_syncs_from_timeline_dep(int fd,
            struct mos_xe_dep *dep,
            std::vector<struct drm_xe_sync> &syncs)
{
    if (dep)
    {
        drm_xe_sync sync;
        memclear(sync);
        //must set DRM_XE_SYNC_FLAG_SIGNAL for timeline fence out syncobj.
        sync.handle = dep->syncobj_handle;
        sync.timeline_value = dep->timeline_index;
        sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
        sync.type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ;
        syncs.push_back(sync);
    }
}
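
/**
 * Illustrative sketch only (not part of the driver): how the three "update_exec_syncs"
 * helpers above can be combined to build the syncs array for one exec. The engine ids,
 * dep maps, bo handle and ctx dep are hypothetical caller context.
 */
#if 0
static void example_build_exec_syncs(int fd, uint32_t curr_engine,
            uint32_t lst_write_engine, uint32_t rw_flags,
            std::set<uint32_t> &engine_ids,
            std::map<uint32_t, struct mos_xe_bo_dep> &read_deps,
            std::map<uint32_t, struct mos_xe_bo_dep> &write_deps,
            uint32_t external_bo_handle,
            struct mos_xe_dep *ctx_dep)
{
    std::vector<struct drm_xe_sync> syncs;
    int prime_fd = -1;

    // Fence-in: timeline deps recorded by previous execs of this bo.
    mos_sync_update_exec_syncs_from_timeline_deps(curr_engine, lst_write_engine,
                rw_flags, engine_ids, read_deps, write_deps, syncs);

    // Fence-in: the current fence of an external bo, exported from its dma-buf.
    mos_sync_update_exec_syncs_from_handle(fd, external_bo_handle, rw_flags,
                syncs, prime_fd);

    // Fence-out: the context's timeline dep, signaled by this exec.
    mos_sync_update_exec_syncs_from_timeline_dep(fd, ctx_dep, syncs);

    // "syncs" is now ready for the exec; the caller still closes prime_fd and
    // destroys the exported syncobj handle afterwards, as the notes above require.
}
#endif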

/**
 * Update the read and write deps of a bo with the given dep.
 *
 * @curr_engine indicates the current exec dummy engine id;
 * @flags indicates the operation flags (read or write) for the current exec;
 * @dep indicates the fence-out dep to record into the dep maps;
 * @read_deps indicates the read deps from previous execs;
 * @write_deps indicates the write deps from previous execs.
 */
int mos_sync_update_bo_deps(uint32_t curr_engine,
            uint32_t flags, mos_xe_dep *dep,
            std::map<uint32_t, struct mos_xe_bo_dep> &read_deps,
            std::map<uint32_t, struct mos_xe_bo_dep> &write_deps)
{
    MOS_DRM_CHK_NULL_RETURN_VALUE(dep, -EINVAL)
    mos_xe_bo_dep bo_dep;
    bo_dep.dep = dep;
    bo_dep.exec_timeline_index = dep->timeline_index;
    if(flags & EXEC_OBJECT_READ_XE)
    {
        read_deps[curr_engine] = bo_dep;
    }

    if(flags & EXEC_OBJECT_WRITE_XE)
    {
        write_deps[curr_engine] = bo_dep;
    }

    return MOS_XE_SUCCESS;
}

/**
 * Get the busy timeline deps from the read and write dep maps for a bo wait.
 *
 * @engine_ids indicates the valid engine IDs;
 * @read_deps indicates the read deps from previous execs;
 * @write_deps indicates the write deps from previous execs;
 * @max_timeline_data returns the max exec timeline value on each context for this bo resource;
 * @lst_write_engine indicates the last exec engine id used for writing;
 * @rw_flags indicates the read/write operation:
 *     if rw_flags & EXEC_OBJECT_WRITE_XE, it is a bo write; otherwise it is a bo read.
 */
void mos_sync_get_bo_wait_timeline_deps(std::set<uint32_t> &engine_ids,
            std::map<uint32_t, struct mos_xe_bo_dep> &read_deps,
            std::map<uint32_t, struct mos_xe_bo_dep> &write_deps,
            std::map<uint32_t, uint64_t> &max_timeline_data,
            uint32_t lst_write_engine,
            uint32_t rw_flags)
{
    max_timeline_data.clear();

    //case1: get all timeline dep from read dep on all engines
    if(rw_flags & EXEC_OBJECT_WRITE_XE)
    {
        for(auto it = read_deps.begin(); it != read_deps.end(); it++)
        {
            uint32_t engine_id = it->first;
            uint64_t bo_exec_timeline = it->second.exec_timeline_index;
            // Get the valid busy dep in this read dep map for this bo.
            if (it->second.dep && engine_ids.count(engine_id) > 0)
            {
                // Save the max timeline data
                max_timeline_data[it->second.dep->syncobj_handle] = bo_exec_timeline;
            }
        }
    }

    //case2: get timeline dep from write dep on last write engine.
    if (engine_ids.count(lst_write_engine) > 0
            && write_deps.count(lst_write_engine) > 0
            && write_deps[lst_write_engine].dep)
    {
        uint32_t syncobj_handle = write_deps[lst_write_engine].dep->syncobj_handle;
        uint64_t bo_exec_timeline = write_deps[lst_write_engine].exec_timeline_index;
        if (max_timeline_data.count(syncobj_handle) == 0
                || max_timeline_data[syncobj_handle] < bo_exec_timeline)
        {
            // Save the max timeline data
            max_timeline_data[syncobj_handle] = bo_exec_timeline;
        }
    }
}
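
/**
 * Illustrative sketch only (not part of the driver): waiting for a bo to become idle
 * by combining mos_sync_get_bo_wait_timeline_deps with the timeline wait helper above.
 * The dep maps, engine ids and timeout are hypothetical caller context.
 */
#if 0
static int example_bo_wait(int fd, std::set<uint32_t> &engine_ids,
            std::map<uint32_t, struct mos_xe_bo_dep> &read_deps,
            std::map<uint32_t, struct mos_xe_bo_dep> &write_deps,
            uint32_t lst_write_engine, uint32_t rw_flags,
            int64_t timeout_nsec)
{
    std::map<uint32_t, uint64_t> max_timeline_data;
    mos_sync_get_bo_wait_timeline_deps(engine_ids, read_deps, write_deps,
                max_timeline_data, lst_write_engine, rw_flags);

    // Split the map into parallel handle/point arrays for the timeline wait.
    std::vector<uint32_t> handles;
    std::vector<uint64_t> points;
    for (auto &it : max_timeline_data)
    {
        handles.push_back(it.first);
        points.push_back(it.second);
    }
    if (handles.empty())
        return 0;    // nothing to wait for

    return mos_sync_syncobj_timeline_wait(fd, handles.data(), points.data(),
                (unsigned)handles.size(), timeout_nsec,
                DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, nullptr);
}
#endif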
694