/* SPDX-License-Identifier: MIT */
#define _POSIX_C_SOURCE 200112L

#include "lib.h"
#include "syscall.h"
#include "liburing.h"
#include "int_flags.h"
#include "liburing/compat.h"
#include "liburing/io_uring.h"

/*
 * Returns true if we're not using an SQ thread (thus nobody submits but us),
 * or if IORING_SQ_NEED_WAKEUP is set and the submit thread must be
 * explicitly awakened. For the latter case, we set the thread wakeup flag.
 */
static inline bool sq_ring_needs_enter(struct io_uring *ring, unsigned *flags)
{
	if (!(ring->flags & IORING_SETUP_SQPOLL))
		return true;

	/*
	 * Ensure the kernel can see the store to the SQ tail before we read
	 * the flags.
	 */
	io_uring_smp_mb();

	if (uring_unlikely(IO_URING_READ_ONCE(*ring->sq.kflags) &
			   IORING_SQ_NEED_WAKEUP)) {
		*flags |= IORING_ENTER_SQ_WAKEUP;
		return true;
	}

	return false;
}
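
/*
 * Illustrative sketch, not part of the original file: under
 * IORING_SETUP_SQPOLL the application still calls io_uring_submit() after
 * queueing SQEs. sq_ring_needs_enter() above then only asks for a syscall
 * when the kernel SQ thread has gone idle and set IORING_SQ_NEED_WAKEUP.
 * The helper name below is hypothetical; it uses only public liburing API.
 */
#if 0
static int example_sqpoll_submit(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;
	io_uring_prep_nop(sqe);

	/*
	 * With an awake SQ thread this returns without entering the kernel;
	 * otherwise io_uring_enter() is issued with IORING_ENTER_SQ_WAKEUP.
	 */
	return io_uring_submit(ring);
}
#endif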

static inline bool cq_ring_needs_flush(struct io_uring *ring)
{
	return IO_URING_READ_ONCE(*ring->sq.kflags) &
				 (IORING_SQ_CQ_OVERFLOW | IORING_SQ_TASKRUN);
}

static inline bool cq_ring_needs_enter(struct io_uring *ring)
{
	return (ring->flags & IORING_SETUP_IOPOLL) || cq_ring_needs_flush(ring);
}
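
/*
 * Illustrative sketch, not part of the original file: IORING_SETUP_IOPOLL
 * rings find completions only by actively polling in the kernel, which is
 * why cq_ring_needs_enter() unconditionally enters for them. Hypothetical
 * setup helper; IOPOLL additionally requires O_DIRECT file I/O.
 */
#if 0
static int example_iopoll_ring(struct io_uring *ring)
{
	/* 64 entries, completions reaped by polling rather than interrupts */
	return io_uring_queue_init(64, ring, IORING_SETUP_IOPOLL);
}
#endif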

struct get_data {
	unsigned submit;
	unsigned wait_nr;
	unsigned get_flags;
	int sz;
	void *arg;
};

static int _io_uring_get_cqe(struct io_uring *ring,
			     struct io_uring_cqe **cqe_ptr,
			     struct get_data *data)
{
	struct io_uring_cqe *cqe = NULL;
	bool looped = false;
	int err;

	do {
		bool need_enter = false;
		unsigned flags = 0;
		unsigned nr_available;
		int ret;

		err = __io_uring_peek_cqe(ring, &cqe, &nr_available);
		if (err)
			break;
		if (!cqe && !data->wait_nr && !data->submit) {
			/*
			 * If we already looped once, we already entered
			 * the kernel. Since there's nothing to submit or
			 * wait for, don't keep retrying.
			 */
			if (looped || !cq_ring_needs_enter(ring)) {
				err = -EAGAIN;
				break;
			}
			need_enter = true;
		}
		if (data->wait_nr > nr_available || need_enter) {
			flags = IORING_ENTER_GETEVENTS | data->get_flags;
			need_enter = true;
		}
		if (data->submit && sq_ring_needs_enter(ring, &flags))
			need_enter = true;
		if (!need_enter)
			break;

		if (ring->int_flags & INT_FLAG_REG_RING)
			flags |= IORING_ENTER_REGISTERED_RING;
		ret = ____sys_io_uring_enter2(ring->enter_ring_fd, data->submit,
					      data->wait_nr, flags, data->arg,
					      data->sz);
		if (ret < 0) {
			err = ret;
			break;
		}

		data->submit -= ret;
		if (cqe)
			break;
		looped = true;
	} while (1);

	*cqe_ptr = cqe;
	return err;
}
112*25da2beaSAndroid Build Coastguard Worker 
__io_uring_get_cqe(struct io_uring * ring,struct io_uring_cqe ** cqe_ptr,unsigned submit,unsigned wait_nr,sigset_t * sigmask)113*25da2beaSAndroid Build Coastguard Worker int __io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
114*25da2beaSAndroid Build Coastguard Worker 		       unsigned submit, unsigned wait_nr, sigset_t *sigmask)
115*25da2beaSAndroid Build Coastguard Worker {
116*25da2beaSAndroid Build Coastguard Worker 	struct get_data data = {
117*25da2beaSAndroid Build Coastguard Worker 		.submit		= submit,
118*25da2beaSAndroid Build Coastguard Worker 		.wait_nr 	= wait_nr,
119*25da2beaSAndroid Build Coastguard Worker 		.get_flags	= 0,
120*25da2beaSAndroid Build Coastguard Worker 		.sz		= _NSIG / 8,
121*25da2beaSAndroid Build Coastguard Worker 		.arg		= sigmask,
122*25da2beaSAndroid Build Coastguard Worker 	};
123*25da2beaSAndroid Build Coastguard Worker 
124*25da2beaSAndroid Build Coastguard Worker 	return _io_uring_get_cqe(ring, cqe_ptr, &data);
125*25da2beaSAndroid Build Coastguard Worker }
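
/*
 * Illustrative sketch, not part of the original file: the liburing.h inline
 * helpers io_uring_wait_cqe() and io_uring_peek_cqe() funnel into
 * __io_uring_get_cqe() above. A typical single-completion reap, with a
 * hypothetical helper name:
 */
#if 0
static int example_reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_wait_cqe(ring, &cqe);	/* submit = 0, wait_nr = 1 */
	if (ret < 0)
		return ret;
	ret = cqe->res;				/* per-request result */
	io_uring_cqe_seen(ring, cqe);		/* advance the CQ head */
	return ret;
}
#endif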

/*
 * Fill in an array of IO completions up to count, if any are available.
 * Returns the number of IO completions filled.
 */
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
				 struct io_uring_cqe **cqes, unsigned count)
{
	unsigned ready;
	bool overflow_checked = false;
	int shift = 0;

	if (ring->flags & IORING_SETUP_CQE32)
		shift = 1;

again:
	ready = io_uring_cq_ready(ring);
	if (ready) {
		unsigned head = *ring->cq.khead;
		unsigned mask = *ring->cq.kring_mask;
		unsigned last;
		int i = 0;

		count = count > ready ? ready : count;
		last = head + count;
		for (; head != last; head++, i++)
			cqes[i] = &ring->cq.cqes[(head & mask) << shift];

		return count;
	}

	if (overflow_checked)
		goto done;

	if (cq_ring_needs_flush(ring)) {
		int flags = IORING_ENTER_GETEVENTS;

		if (ring->int_flags & INT_FLAG_REG_RING)
			flags |= IORING_ENTER_REGISTERED_RING;
		____sys_io_uring_enter(ring->enter_ring_fd, 0, 0, flags, NULL);
		overflow_checked = true;
		goto again;
	}

done:
	return 0;
}
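
/*
 * Illustrative sketch, not part of the original file: a hypothetical batch
 * reap built on io_uring_peek_batch_cqe(), marking the whole batch seen with
 * a single CQ head update via io_uring_cq_advance().
 */
#if 0
static unsigned example_reap_batch(struct io_uring *ring)
{
	struct io_uring_cqe *cqes[8];
	unsigned i, nr;

	nr = io_uring_peek_batch_cqe(ring, cqes, 8);
	for (i = 0; i < nr; i++) {
		/* consume cqes[i]->res and cqes[i]->user_data here */
	}
	io_uring_cq_advance(ring, nr);		/* one head store for all */
	return nr;
}
#endif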

/*
 * Sync internal state with kernel ring state on the SQ side. Returns the
 * number of pending items in the SQ ring, for the shared ring.
 */
int __io_uring_flush_sq(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	const unsigned mask = *sq->kring_mask;
	unsigned ktail = *sq->ktail;
	unsigned to_submit = sq->sqe_tail - sq->sqe_head;

	if (!to_submit)
		goto out;

	/*
	 * Fill in sqes that we have queued up, adding them to the kernel ring
	 */
	do {
		sq->array[ktail & mask] = sq->sqe_head & mask;
		ktail++;
		sq->sqe_head++;
	} while (--to_submit);

	/*
	 * Ensure that the kernel sees the SQE updates before it sees the tail
	 * update.
	 */
	io_uring_smp_store_release(sq->ktail, ktail);
out:
	/*
	 * This _may_ look problematic, as we're not supposed to be reading
	 * SQ->head without acquire semantics. When we're in SQPOLL mode, the
	 * kernel submitter could be updating this right now. For non-SQPOLL,
	 * the task itself does it, and there's no potential race. But even
	 * for SQPOLL, the load is going to be potentially out-of-date the
	 * very instant it's done, regardless of whether or not it's done
	 * atomically. Worst case, we're going to be over-estimating what
	 * we can submit. The point is, we need to be able to deal with this
	 * situation regardless of any perceived atomicity.
	 */
	return ktail - *sq->khead;
}
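
/*
 * Illustrative sketch, not part of the original file: __io_uring_flush_sq()
 * runs inside io_uring_submit(), so the usual way its pending count matters
 * to an application is via the public space/readiness helpers. Hypothetical
 * helper name below.
 */
#if 0
static void example_backpressure(struct io_uring *ring)
{
	/*
	 * io_uring_sq_space_left() counts free SQE slots; per the caveat
	 * above, treat the pending count as an estimate under SQPOLL.
	 */
	if (!io_uring_sq_space_left(ring))
		(void) io_uring_submit(ring);
}
#endif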

/*
 * If we have kernel support for IORING_ENTER_EXT_ARG, then we can use that
 * more efficiently than queueing an internal timeout command.
 */
static int io_uring_wait_cqes_new(struct io_uring *ring,
				  struct io_uring_cqe **cqe_ptr,
				  unsigned wait_nr,
				  struct __kernel_timespec *ts,
				  sigset_t *sigmask)
{
	struct io_uring_getevents_arg arg = {
		.sigmask	= (unsigned long) sigmask,
		.sigmask_sz	= _NSIG / 8,
		.ts		= (unsigned long) ts
	};
	struct get_data data = {
		.wait_nr	= wait_nr,
		.get_flags	= IORING_ENTER_EXT_ARG,
		.sz		= sizeof(arg),
		.arg		= &arg
	};

	return _io_uring_get_cqe(ring, cqe_ptr, &data);
}

/*
 * Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
 * that an sqe is used internally to handle the timeout. If the kernel
 * doesn't support IORING_FEAT_EXT_ARG, applications using this function must
 * never set sqe->user_data to LIBURING_UDATA_TIMEOUT!
 *
 * For kernels without IORING_FEAT_EXT_ARG (5.10 and older), if 'ts' is
 * specified, the application need not call io_uring_submit() before
 * calling this function, as we will do that on its behalf. From this it also
 * follows that this function isn't safe to use for applications that split SQ
 * and CQ handling between two threads and expect that to work without
 * synchronization, as this function manipulates both the SQ and CQ side.
 *
 * For kernels with IORING_FEAT_EXT_ARG, no implicit submission is done and
 * hence this function is safe to use for applications that split SQ and CQ
 * handling between two threads.
 */
static int __io_uring_submit_timeout(struct io_uring *ring, unsigned wait_nr,
				     struct __kernel_timespec *ts)
{
	struct io_uring_sqe *sqe;
	int ret;

	/*
	 * If the SQ ring is full, we may need to submit IO first
	 */
	sqe = io_uring_get_sqe(ring);
	if (!sqe) {
		ret = io_uring_submit(ring);
		if (ret < 0)
			return ret;
		sqe = io_uring_get_sqe(ring);
		if (!sqe)
			return -EAGAIN;
	}
	io_uring_prep_timeout(sqe, ts, wait_nr, 0);
	sqe->user_data = LIBURING_UDATA_TIMEOUT;
	return __io_uring_flush_sq(ring);
}
281*25da2beaSAndroid Build Coastguard Worker 
io_uring_wait_cqes(struct io_uring * ring,struct io_uring_cqe ** cqe_ptr,unsigned wait_nr,struct __kernel_timespec * ts,sigset_t * sigmask)282*25da2beaSAndroid Build Coastguard Worker int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
283*25da2beaSAndroid Build Coastguard Worker 		       unsigned wait_nr, struct __kernel_timespec *ts,
284*25da2beaSAndroid Build Coastguard Worker 		       sigset_t *sigmask)
285*25da2beaSAndroid Build Coastguard Worker {
286*25da2beaSAndroid Build Coastguard Worker 	int to_submit = 0;
287*25da2beaSAndroid Build Coastguard Worker 
288*25da2beaSAndroid Build Coastguard Worker 	if (ts) {
289*25da2beaSAndroid Build Coastguard Worker 		if (ring->features & IORING_FEAT_EXT_ARG)
290*25da2beaSAndroid Build Coastguard Worker 			return io_uring_wait_cqes_new(ring, cqe_ptr, wait_nr,
291*25da2beaSAndroid Build Coastguard Worker 							ts, sigmask);
292*25da2beaSAndroid Build Coastguard Worker 		to_submit = __io_uring_submit_timeout(ring, wait_nr, ts);
293*25da2beaSAndroid Build Coastguard Worker 		if (to_submit < 0)
294*25da2beaSAndroid Build Coastguard Worker 			return to_submit;
295*25da2beaSAndroid Build Coastguard Worker 	}
296*25da2beaSAndroid Build Coastguard Worker 
297*25da2beaSAndroid Build Coastguard Worker 	return __io_uring_get_cqe(ring, cqe_ptr, to_submit, wait_nr, sigmask);
298*25da2beaSAndroid Build Coastguard Worker }
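
/*
 * Illustrative sketch, not part of the original file: a bounded wait using
 * io_uring_wait_cqes(). -ETIME indicates the timeout fired before wait_nr
 * completions arrived. Hypothetical helper name.
 */
#if 0
static int example_wait_one_second(struct io_uring *ring)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_wait_cqes(ring, &cqe, 1, &ts, NULL);
	if (ret == -ETIME)
		return 0;			/* timed out, nothing done */
	if (ret < 0)
		return ret;
	io_uring_cqe_seen(ring, cqe);
	return 1;
}
#endif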

int io_uring_submit_and_wait_timeout(struct io_uring *ring,
				     struct io_uring_cqe **cqe_ptr,
				     unsigned wait_nr,
				     struct __kernel_timespec *ts,
				     sigset_t *sigmask)
{
	int to_submit;

	if (ts) {
		if (ring->features & IORING_FEAT_EXT_ARG) {
			struct io_uring_getevents_arg arg = {
				.sigmask	= (unsigned long) sigmask,
				.sigmask_sz	= _NSIG / 8,
				.ts		= (unsigned long) ts
			};
			struct get_data data = {
				.submit		= __io_uring_flush_sq(ring),
				.wait_nr	= wait_nr,
				.get_flags	= IORING_ENTER_EXT_ARG,
				.sz		= sizeof(arg),
				.arg		= &arg
			};

			return _io_uring_get_cqe(ring, cqe_ptr, &data);
		}
		to_submit = __io_uring_submit_timeout(ring, wait_nr, ts);
		if (to_submit < 0)
			return to_submit;
	} else
		to_submit = __io_uring_flush_sq(ring);

	return __io_uring_get_cqe(ring, cqe_ptr, to_submit, wait_nr, sigmask);
}
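
/*
 * Illustrative sketch, not part of the original file: a single call that
 * flushes the SQ ring and waits for completions with a deadline, combining
 * submit and wait into one io_uring_enter() on IORING_FEAT_EXT_ARG kernels.
 * Hypothetical helper name.
 */
#if 0
static int example_submit_and_wait(struct io_uring *ring, unsigned need)
{
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 500000000 };
	struct io_uring_cqe *cqe;

	/* submits whatever is queued, then waits for 'need' CQEs or 500ms */
	return io_uring_submit_and_wait_timeout(ring, &cqe, need, &ts, NULL);
}
#endif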

/*
 * See io_uring_wait_cqes() - this function is the same, it just always uses
 * '1' as the wait_nr.
 */
int io_uring_wait_cqe_timeout(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr,
			      struct __kernel_timespec *ts)
{
	return io_uring_wait_cqes(ring, cqe_ptr, 1, ts, NULL);
}

/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
static int __io_uring_submit(struct io_uring *ring, unsigned submitted,
			     unsigned wait_nr)
{
	unsigned flags;
	int ret;

	flags = 0;
	if (sq_ring_needs_enter(ring, &flags) || wait_nr) {
		if (wait_nr || (ring->flags & IORING_SETUP_IOPOLL))
			flags |= IORING_ENTER_GETEVENTS;
		if (ring->int_flags & INT_FLAG_REG_RING)
			flags |= IORING_ENTER_REGISTERED_RING;

		ret = ____sys_io_uring_enter(ring->enter_ring_fd, submitted,
						wait_nr, flags, NULL);
	} else
		ret = submitted;

	return ret;
}

static int __io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
{
	return __io_uring_submit(ring, __io_uring_flush_sq(ring), wait_nr);
}

/*
 * Submit sqes acquired from io_uring_get_sqe() to the kernel.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit(struct io_uring *ring)
{
	return __io_uring_submit_and_wait(ring, 0);
}

/*
 * Like io_uring_submit(), but allows waiting for events as well.
 *
 * Returns number of sqes submitted
 */
int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
{
	return __io_uring_submit_and_wait(ring, wait_nr);
}

#ifdef LIBURING_INTERNAL
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
	return _io_uring_get_sqe(ring);
}
#endif

int __io_uring_sqring_wait(struct io_uring *ring)
{
	int flags = IORING_ENTER_SQ_WAIT;

	if (ring->int_flags & INT_FLAG_REG_RING)
		flags |= IORING_ENTER_REGISTERED_RING;

	return ____sys_io_uring_enter(ring->enter_ring_fd, 0, 0, flags, NULL);
}
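
/*
 * Illustrative sketch, not part of the original file: the public
 * io_uring_sqring_wait() wrapper reaches the function above only for SQPOLL
 * rings whose SQ ring is full. A hypothetical blocking SQE getter:
 */
#if 0
static struct io_uring_sqe *example_get_sqe_blocking(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	while (!(sqe = io_uring_get_sqe(ring))) {
		/* SQPOLL only: block until the kernel frees SQ ring space */
		if (io_uring_sqring_wait(ring) < 0)
			return NULL;
	}
	return sqe;
}
#endif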