/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <vulkan/vulkan.h>

#include "pvr_private.h"
#include "pvr_srv.h"
#include "pvr_srv_sync.h"
#include "util/libsync.h"
#include "util/macros.h"
#include "util/os_time.h"
#include "util/timespec.h"
#include "vk_alloc.h"
#include "vk_log.h"
#include "vk_sync.h"
#include "vk_util.h"

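/* Initializes the CPU-side state of the sync object: a non-zero initial
 * value marks this binary sync as already signaled, and no sync file
 * payload is attached yet.
 */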
static VkResult pvr_srv_sync_init(struct vk_device *device,
                                  struct vk_sync *sync,
                                  uint64_t initial_value)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   srv_sync->signaled = initial_value ? true : false;
   srv_sync->fd = -1;

   return VK_SUCCESS;
}

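/* Releases the sync file payload, if any, when the sync object is
 * destroyed.
 */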
void pvr_srv_sync_finish(struct vk_device *device, struct vk_sync *sync)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   if (srv_sync->fd != -1)
      close(srv_sync->fd);
}

/* Note: this function closes and clears the fd before updating the signaled
 * state.
 */
static void pvr_set_sync_state(struct pvr_srv_sync *srv_sync, bool signaled)
{
   if (srv_sync->fd != -1) {
      close(srv_sync->fd);
      srv_sync->fd = -1;
   }

   srv_sync->signaled = signaled;
}

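/* Replaces the sync object's payload with the given sync file fd, closing
 * any previous payload. A payload of -1 means there is nothing left to wait
 * on, so the sync is treated as signaled.
 */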
void pvr_srv_set_sync_payload(struct pvr_srv_sync *srv_sync, int payload)
{
   if (srv_sync->fd != -1)
      close(srv_sync->fd);

   srv_sync->fd = payload;
   srv_sync->signaled = (payload == -1);
}

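/* CPU signal: drops any pending sync file and marks the sync as signaled. */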
static VkResult pvr_srv_sync_signal(struct vk_device *device,
                                    struct vk_sync *sync,
                                    UNUSED uint64_t value)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   pvr_set_sync_state(srv_sync, true);

   return VK_SUCCESS;
}

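/* CPU reset: drops any pending sync file and marks the sync as unsignaled. */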
static VkResult pvr_srv_sync_reset(struct vk_device *device,
                                   struct vk_sync *sync)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);

   pvr_set_sync_state(srv_sync, false);

   return VK_SUCCESS;
}

/* Careful, the timeout might overflow. */
static inline void pvr_start_timeout(struct timespec *timeout,
                                     uint64_t timeout_ns)
{
   clock_gettime(CLOCK_MONOTONIC, timeout);
   timespec_add_nsec(timeout, timeout, timeout_ns);
}

/* Careful, a negative value might be returned. */
static inline struct timespec
pvr_get_remaining_time(const struct timespec *timeout)
{
   struct timespec time;

   clock_gettime(CLOCK_MONOTONIC, &time);
   timespec_sub(&time, timeout, &time);

   return time;
}

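/* Converts an absolute timeout in nanoseconds into the relative millisecond
 * timeout expected by sync_wait(): -1 for an effectively infinite wait, 0
 * when the timeout has already expired, and a clamped positive value
 * otherwise.
 */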
static inline int pvr_get_relative_time_ms(uint64_t abs_timeout_ns)
{
   uint64_t cur_time_ms;
   uint64_t abs_timeout_ms;

   if (abs_timeout_ns >= INT64_MAX) {
      /* This is treated as an infinite wait. */
      return -1;
   }

   cur_time_ms = os_time_get_nano() / 1000000;
   abs_timeout_ms = abs_timeout_ns / 1000000;

   if (abs_timeout_ms <= cur_time_ms)
      return 0;

   return MIN2(abs_timeout_ms - cur_time_ms, INT_MAX);
}

/* A pvr_srv_sync may be in a pending state: unsignaled, but with no sync
 * file payload to wait on yet. Such syncs can only be resolved by polling,
 * hence the spin wait in pvr_srv_sync_wait_many().
 */
static VkResult pvr_srv_sync_get_status(struct vk_sync *wait,
                                        uint64_t abs_timeout_ns)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(wait);

   if (srv_sync->signaled) {
      assert(srv_sync->fd == -1);
      return VK_SUCCESS;
   }

   /* If the fd is -1 and the sync is not signaled, the fence is still
    * pending.
    */
   if (srv_sync->fd == -1)
      return VK_TIMEOUT;

   if (sync_wait(srv_sync->fd, pvr_get_relative_time_ms(abs_timeout_ns))) {
      if (errno == ETIME)
         return VK_TIMEOUT;
      else if (errno == ENOMEM)
         return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      else
         return vk_error(NULL, VK_ERROR_DEVICE_LOST);
   }

   pvr_set_sync_state(srv_sync, true);

   return VK_SUCCESS;
}

/* abs_timeout_ns == 0 -> Get the status without waiting.
 * abs_timeout_ns == ~0 -> Wait indefinitely.
 * else -> Wait until the given absolute timeout, in nanoseconds, expires.
 */
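/* Pending (fd == -1, unsignaled) syncs have nothing to block on, so this is
 * implemented as a polling loop: recheck every sync and yield the CPU
 * between iterations until everything required has signaled or the absolute
 * timeout expires.
 */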
static VkResult pvr_srv_sync_wait_many(struct vk_device *device,
                                       uint32_t wait_count,
                                       const struct vk_sync_wait *waits,
                                       enum vk_sync_wait_flags wait_flags,
                                       uint64_t abs_timeout_ns)
{
   bool wait_any = !!(wait_flags & VK_SYNC_WAIT_ANY);

   while (true) {
      bool have_unsignaled = false;

      for (uint32_t i = 0; i < wait_count; i++) {
         /* For a "wait any" each sync is only polled (timeout of 0) so that
          * a single signaled sync can satisfy the wait; otherwise each sync
          * may block up to the shared absolute timeout.
          */
         VkResult result =
            pvr_srv_sync_get_status(waits[i].sync,
                                    wait_any ? 0 : abs_timeout_ns);

         if (result != VK_TIMEOUT && (wait_any || result != VK_SUCCESS))
            return result;
         else if (result == VK_TIMEOUT)
            have_unsignaled = true;
      }

      if (!have_unsignaled)
         break;

      if (os_time_get_nano() >= abs_timeout_ns)
         return VK_TIMEOUT;

      /* TODO: Use pvrsrvkm global events to stop busy waiting and, as a
       * bonus, catch device loss.
       */
      sched_yield();
   }

   return VK_SUCCESS;
}

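/* Moves the payload of src into dst and resets src. Only non-shared sync
 * objects can be moved; shared ones would need support that srv_sync does
 * not provide.
 */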
static VkResult pvr_srv_sync_move(struct vk_device *device,
                                  struct vk_sync *dst,
                                  struct vk_sync *src)
{
   struct pvr_srv_sync *srv_dst_sync = to_srv_sync(dst);
   struct pvr_srv_sync *srv_src_sync = to_srv_sync(src);

   if (!(dst->flags & VK_SYNC_IS_SHARED) && !(src->flags & VK_SYNC_IS_SHARED)) {
      pvr_srv_set_sync_payload(srv_dst_sync, srv_src_sync->fd);
      srv_src_sync->fd = -1;
      pvr_srv_sync_reset(device, src);
      return VK_SUCCESS;
   }

   unreachable("srv_sync doesn't support move for shared sync objects.");
   return VK_ERROR_UNKNOWN;
}

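/* Imports a sync file by dup()ing it, so ownership of the caller's fd is
 * not taken. Passing -1 imports an already-signaled payload.
 */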
static VkResult pvr_srv_sync_import_sync_file(struct vk_device *device,
                                              struct vk_sync *sync,
                                              int sync_file)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);
   int fd = -1;

   if (sync_file >= 0) {
      fd = dup(sync_file);
      if (fd < 0)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   pvr_srv_set_sync_payload(srv_sync, fd);

   return VK_SUCCESS;
}

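/* Exports the payload as a sync file fd. A sync without a payload (e.g. one
 * that has already signaled) has no fd to hand out, so a pre-signaled sync
 * is fetched from the device first to provide a valid fd to dup().
 */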
static VkResult pvr_srv_sync_export_sync_file(struct vk_device *device,
                                              struct vk_sync *sync,
                                              int *sync_file)
{
   struct pvr_srv_sync *srv_sync = to_srv_sync(sync);
   VkResult result;
   int fd;

   if (srv_sync->fd < 0) {
      struct pvr_device *driver_device =
         container_of(device, struct pvr_device, vk);

      result = pvr_srv_sync_get_presignaled_sync(driver_device, &srv_sync);
      if (result != VK_SUCCESS)
         return result;
   }

   assert(srv_sync->fd >= 0);

   fd = dup(srv_sync->fd);
   if (fd < 0)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   *sync_file = fd;

   return VK_SUCCESS;
}

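/* vk_sync implementation for the Services (pvrsrvkm) winsys, backed by sync
 * files.
 */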
const struct vk_sync_type pvr_srv_sync_type = {
   .size = sizeof(struct pvr_srv_sync),
   /* clang-format off */
   .features = VK_SYNC_FEATURE_BINARY |
               VK_SYNC_FEATURE_GPU_WAIT |
               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
               VK_SYNC_FEATURE_CPU_WAIT |
               VK_SYNC_FEATURE_CPU_RESET |
               VK_SYNC_FEATURE_CPU_SIGNAL |
               VK_SYNC_FEATURE_WAIT_ANY,
   /* clang-format on */
   .init = pvr_srv_sync_init,
   .finish = pvr_srv_sync_finish,
   .signal = pvr_srv_sync_signal,
   .reset = pvr_srv_sync_reset,
   .wait_many = pvr_srv_sync_wait_many,
   .move = pvr_srv_sync_move,
   .import_sync_file = pvr_srv_sync_import_sync_file,
   .export_sync_file = pvr_srv_sync_export_sync_file,
};