Lines matching +full:wait +full:-retry +full:-us (matched lines from io_uring/waitid.c)

1 // SPDX-License-Identifier: GPL-2.0
37 struct io_waitid_async *iwa = req->async_data; in io_waitid_free()
39 put_pid(iwa->wo.wo_pid); in io_waitid_free()
40 kfree(req->async_data); in io_waitid_free()
41 req->async_data = NULL; in io_waitid_free()
42 req->flags &= ~REQ_F_ASYNC_DATA; in io_waitid_free()
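The io_waitid_free() matches above show the teardown order: release the pid reference before freeing the async data, then clear REQ_F_ASYNC_DATA so the request no longer claims to own it. A minimal sketch of that shape, reconstructed from the matched lines (the signature and in-tree types are assumed; this only compiles inside the kernel tree):

	static void io_waitid_free(struct io_kiocb *req)
	{
		struct io_waitid_async *iwa = req->async_data;

		/* Drop the pid reference taken when the wait was prepared. */
		put_pid(iwa->wo.wo_pid);
		kfree(req->async_data);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}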
51 infop = (struct compat_siginfo __user *) iw->infop; in io_waitid_compat_copy_si()
56 unsafe_put_user(signo, &infop->si_signo, Efault); in io_waitid_compat_copy_si()
57 unsafe_put_user(0, &infop->si_errno, Efault); in io_waitid_compat_copy_si()
58 unsafe_put_user(iw->info.cause, &infop->si_code, Efault); in io_waitid_compat_copy_si()
59 unsafe_put_user(iw->info.pid, &infop->si_pid, Efault); in io_waitid_compat_copy_si()
60 unsafe_put_user(iw->info.uid, &infop->si_uid, Efault); in io_waitid_compat_copy_si()
61 unsafe_put_user(iw->info.status, &infop->si_status, Efault); in io_waitid_compat_copy_si()
77 if (!iw->infop) in io_waitid_copy_si()
81 if (req->ctx->compat) in io_waitid_copy_si()
85 if (!user_write_access_begin(iw->infop, sizeof(*iw->infop))) in io_waitid_copy_si()
88 unsafe_put_user(signo, &iw->infop->si_signo, Efault); in io_waitid_copy_si()
89 unsafe_put_user(0, &iw->infop->si_errno, Efault); in io_waitid_copy_si()
90 unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault); in io_waitid_copy_si()
91 unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault); in io_waitid_copy_si()
92 unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault); in io_waitid_copy_si()
93 unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault); in io_waitid_copy_si()
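Both copy helpers follow the same fault-handling shape: open a user write window, perform unchecked stores that jump to a label on any fault, and close the window on both exits. A sketch of the native-format path, reconstructed from the matched lines (the compat dispatch through io_waitid_compat_copy_si() is omitted, and the io_kiocb_to_cmd() use is assumed):

	static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
	{
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		if (!iw->infop)
			return true;	/* userspace passed no siginfo pointer */
		if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
			return false;

		unsafe_put_user(signo, &iw->infop->si_signo, Efault);
		unsafe_put_user(0, &iw->infop->si_errno, Efault);
		unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
		unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
		unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
		unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
		user_write_access_end();
		return true;
	Efault:
		user_write_access_end();	/* must also close on the fault path */
		return false;
	}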
113 ret = -EFAULT; in io_waitid_finish()
123 WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK)); in io_waitid_complete()
125 lockdep_assert_held(&req->ctx->uring_lock); in io_waitid_complete()
127 hlist_del_init(&req->hash_node); in io_waitid_complete()
138 struct io_waitid_async *iwa = req->async_data; in __io_waitid_cancel()
141 * Mark us canceled regardless of ownership. This will prevent a in __io_waitid_cancel()
142 * potential retry from a spurious wakeup. in __io_waitid_cancel()
144 atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs); in __io_waitid_cancel()
147 if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) in __io_waitid_cancel()
150 spin_lock_irq(&iw->head->lock); in __io_waitid_cancel()
151 list_del_init(&iwa->wo.child_wait.entry); in __io_waitid_cancel()
152 spin_unlock_irq(&iw->head->lock); in __io_waitid_cancel()
153 io_waitid_complete(req, -ECANCELED); in __io_waitid_cancel()
154 io_req_queue_tw_complete(req, -ECANCELED); in __io_waitid_cancel()
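The cancel path sets the cancel flag first, so a spurious wakeup cannot re-arm, then claims ownership with a fetch-and-increment: only a caller that sees a zero reference mask may detach the wait entry and complete the request. A self-contained userspace model of that claim, assuming IO_WAITID_CANCEL_FLAG is the top bit and IO_WAITID_REF_MASK the remaining bits, and modeling the state after the issue-side reference has been dropped:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define CANCEL_FLAG	(1u << 31)		/* models IO_WAITID_CANCEL_FLAG */
	#define REF_MASK	(CANCEL_FLAG - 1)	/* models IO_WAITID_REF_MASK */

	/* First caller to increment from a zero mask becomes the owner. */
	static bool claim(atomic_uint *refs)
	{
		return !(atomic_fetch_add(refs, 1) & REF_MASK);
	}

	int main(void)
	{
		atomic_uint refs = 0;	/* issue reference already dropped */

		atomic_fetch_or(&refs, CANCEL_FLAG);	/* mark canceled first */
		printf("cancel claims ownership: %d\n", claim(&refs));	/* 1 */
		printf("racing wakeup claims:    %d\n", claim(&refs));	/* 0: backs off */
		return 0;
	}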
165 if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED)) in io_waitid_cancel()
166 return -ENOENT; in io_waitid_cancel()
169 hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) { in io_waitid_cancel()
170 if (req->cqe.user_data != cd->data && in io_waitid_cancel()
171 !(cd->flags & IORING_ASYNC_CANCEL_ANY)) in io_waitid_cancel()
175 if (!(cd->flags & IORING_ASYNC_CANCEL_ALL)) in io_waitid_cancel()
183 return -ENOENT; in io_waitid_cancel()
193 lockdep_assert_held(&ctx->uring_lock); in io_waitid_remove_all()
195 hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) { in io_waitid_remove_all()
198 hlist_del_init(&req->hash_node); in io_waitid_remove_all()
209 struct io_waitid_async *iwa = req->async_data; in io_waitid_drop_issue_ref()
211 if (!atomic_sub_return(1, &iw->refs)) in io_waitid_drop_issue_ref()
215 * Wakeup triggered, racing with us. It was prevented from in io_waitid_drop_issue_ref()
218 req->io_task_work.func = io_waitid_cb; in io_waitid_drop_issue_ref()
220 remove_wait_queue(iw->head, &iwa->wo.child_wait); in io_waitid_drop_issue_ref()
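Dropping the issue reference doubles as a race check: if the subtraction reaches zero, no wakeup fired while the request was being armed; a nonzero result means a wakeup raced, was prevented from completing by our reference, and the completion must now be queued as task work. Reconstructed shape (the bool return convention is assumed):

	static bool io_waitid_drop_issue_ref(struct io_kiocb *req)
	{
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
		struct io_waitid_async *iwa = req->async_data;

		if (!atomic_sub_return(1, &iw->refs))
			return false;	/* no wakeup raced with arming */

		/* A wakeup fired while we held the ref; finish via task work. */
		req->io_task_work.func = io_waitid_cb;
		io_req_task_work_add(req);
		remove_wait_queue(iw->head, &iwa->wo.child_wait);
		return true;
	}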
226 struct io_waitid_async *iwa = req->async_data; in io_waitid_cb()
227 struct io_ring_ctx *ctx = req->ctx; in io_waitid_cb()
232 ret = __do_wait(&iwa->wo); in io_waitid_cb()
235 * If we get -ERESTARTSYS here, we need to re-arm and check again in io_waitid_cb()
236 * to ensure we get another callback. If the retry works, then we can in io_waitid_cb()
240 if (unlikely(ret == -ERESTARTSYS)) { in io_waitid_cb()
243 /* Don't retry if cancel found it meanwhile */ in io_waitid_cb()
244 ret = -ECANCELED; in io_waitid_cb()
245 if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) { in io_waitid_cb()
246 iw->head = &current->signal->wait_chldexit; in io_waitid_cb()
247 add_wait_queue(iw->head, &iwa->wo.child_wait); in io_waitid_cb()
248 ret = __do_wait(&iwa->wo); in io_waitid_cb()
249 if (ret == -ERESTARTSYS) { in io_waitid_cb()
250 /* retry armed, drop our ref */ in io_waitid_cb()
255 remove_wait_queue(iw->head, &iwa->wo.child_wait); in io_waitid_cb()
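The task-work callback treats -ERESTARTSYS from __do_wait() as "no child ready yet". Unless cancel has already flagged the request, it re-arms on wait_chldexit and probes once more; a second -ERESTARTSYS means the callback is armed again, so it only drops its reference and returns. Reconstructed control flow around the matched lines (a fragment, not a standalone function):

	ret = __do_wait(&iwa->wo);
	if (unlikely(ret == -ERESTARTSYS)) {
		ret = -ECANCELED;	/* default if cancel won the race */
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed; drop our ref and wait again */
				io_waitid_drop_issue_ref(req);
				return;
			}
			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}
	io_waitid_complete(req, ret);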
263 static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode, in io_waitid_wait() argument
266 struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait); in io_waitid_wait()
268 struct io_kiocb *req = iwa->req; in io_waitid_wait()
276 if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) in io_waitid_wait()
279 req->io_task_work.func = io_waitid_cb; in io_waitid_wait()
281 list_del_init(&wait->entry); in io_waitid_wait()
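The waitqueue callback recovers the request from the embedded wait_opts, then uses the same fetch-and-increment claim as cancel: if another path already holds a reference, it leaves completion to that path; otherwise it queues the task-work callback and unhooks itself. A sketch, with the pid_child_should_wake() filter and the container_of() chain assumed from the surrounding wait machinery:

	static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
				  int sync, void *key)
	{
		struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
		struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
		struct io_kiocb *req = iwa->req;
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
		struct task_struct *p = key;

		if (!pid_child_should_wake(wo, p))
			return 0;	/* not a child this wait cares about */

		/* Claim ownership; back off if issue/cancel already holds a ref. */
		if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
			return 1;

		req->io_task_work.func = io_waitid_cb;
		io_req_task_work_add(req);
		list_del_init(&wait->entry);	/* one-shot: unhook ourselves */
		return 1;
	}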
290 if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags) in io_waitid_prep()
291 return -EINVAL; in io_waitid_prep()
295 return -ENOMEM; in io_waitid_prep()
296 iwa->req = req; in io_waitid_prep()
298 iw->which = READ_ONCE(sqe->len); in io_waitid_prep()
299 iw->upid = READ_ONCE(sqe->fd); in io_waitid_prep()
300 iw->options = READ_ONCE(sqe->file_index); in io_waitid_prep()
301 iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_waitid_prep()
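The prep matches show how IORING_OP_WAITID overloads the SQE: len carries the idtype ("which"), fd the id ("upid"), file_index the wait options, and addr2 the user siginfo pointer. From userspace that mapping is hidden behind liburing's helper; a minimal usage sketch, assuming liburing 2.5+ and a 6.5+ kernel, with error handling trimmed:

	#include <liburing.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_cqe *cqe;
		static siginfo_t si;	/* filled by the kernel on completion */
		pid_t pid = fork();

		if (pid == 0)
			_exit(42);	/* child: exit immediately */

		io_uring_queue_init(4, &ring, 0);
		io_uring_prep_waitid(io_uring_get_sqe(&ring), P_PID, pid,
				     &si, WEXITED, 0);
		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		printf("res=%d exit status=%d\n", cqe->res, si.si_status);
		io_uring_queue_exit(&ring);
		return 0;
	}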
308 struct io_waitid_async *iwa = req->async_data; in io_waitid()
309 struct io_ring_ctx *ctx = req->ctx; in io_waitid()
312 ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info, in io_waitid()
313 iw->options, NULL); in io_waitid()
322 atomic_set(&iw->refs, 1); in io_waitid()
326 * finding us until a) we remain on the list, and b) the lock is in io_waitid()
331 hlist_add_head(&req->hash_node, &ctx->waitid_list); in io_waitid()
333 init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait); in io_waitid()
334 iwa->wo.child_wait.private = req->tctx->task; in io_waitid()
335 iw->head = &current->signal->wait_chldexit; in io_waitid()
336 add_wait_queue(iw->head, &iwa->wo.child_wait); in io_waitid()
338 ret = __do_wait(&iwa->wo); in io_waitid()
339 if (ret == -ERESTARTSYS) { in io_waitid()
350 * Wakeup triggered, racing with us. It was prevented from in io_waitid()
357 hlist_del_init(&req->hash_node); in io_waitid()
358 remove_wait_queue(iw->head, &iwa->wo.child_wait); in io_waitid()
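Putting the io_waitid() matches together, the issue path arms before it probes: take the initial reference, publish the request on the cancel list under the ring lock, hook onto wait_chldexit, then call __do_wait() once. -ERESTARTSYS means no child is waitable yet, so the request stays armed and completes later from the wakeup callback; any other result disarms and completes inline. Reconstructed skeleton (the locking helpers and the skip-complete return code are assumed from io_uring conventions):

	atomic_set(&iw->refs, 1);	/* issue path holds the first ref */

	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/* No child yet: stay armed, drop our ref, complete later. */
		io_waitid_drop_issue_ref(req);
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	/* Child already waitable: disarm and finish inline. */
	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);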