Lines matching "enum-as-flags" (include/uapi/linux/io_uring.h)
1 /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
32 __u8 flags; /* IOSQE_ flags */
56 __u32 poll32_events; /* word-reversed for BE */
114 /* sqe->attr_type_mask flags */
118 __u16 flags;
127 * If sqe->file_index is set to this for opcodes that instantiate a new
130 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
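A minimal sketch of the allocation flow described above, using liburing (ring and listen_fd are placeholders; the ring is assumed to already have a direct-descriptor table, e.g. from io_uring_register_files_sparse()):

#include <liburing.h>   /* all sketches below assume liburing */

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
struct io_uring_cqe *cqe;

/* file_index == IORING_FILE_INDEX_ALLOC: the kernel picks a free slot */
io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0,
                            IORING_FILE_INDEX_ALLOC);
io_uring_submit(&ring);
io_uring_wait_cqe(&ring, &cqe);

int slot = cqe->res;   /* the picked direct descriptor, or -ENFILE if full */
io_uring_cqe_seen(&ring, cqe);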
135 enum io_uring_sqe_flags_bit {
146 * sqe->flags
158 /* select buffer from sqe->buf_group */
164 * io_uring_setup() flags
178 * than force an inter-processor interrupt reschedule. This avoids interrupting
185 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with IORING_SETUP_SQPOLL.
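A sketch of requesting this at setup time: COOP_TASKRUN suppresses the IPI, and TASKRUN_FLAG makes the kernel advertise pending task work via IORING_SQ_TASKRUN (the queue depth is arbitrary):

struct io_uring ring;
struct io_uring_params p = { };

p.flags = IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG;
if (io_uring_queue_init_params(256, &ring, &p) < 0) {
    /* older kernels reject setup flags they don't know */
}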
222 enum io_uring_op {
287 * sqe->uring_cmd_flags top 8 bits aren't available for userspace
289 * along with setting sqe->buf_index.
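A hand-rolled sketch of a passthrough command using a registered buffer; dev_fd, MY_CMD_OP, and the buffer slot are all hypothetical, since command opcodes are driver-specific:

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

memset(sqe, 0, sizeof(*sqe));        /* passthrough fields are set by hand */
sqe->opcode = IORING_OP_URING_CMD;
sqe->fd = dev_fd;                    /* device implementing ->uring_cmd() */
sqe->cmd_op = MY_CMD_OP;             /* driver-defined command */
sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;  /* low 24 bits only */
sqe->buf_index = 0;                  /* slot in the registered buffer table */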
296 * sqe->fsync_flags
301 * sqe->timeout_flags
313 * sqe->splice_flags
314 * extends splice(2) flags
319 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
320 * command flags for POLL_ADD are stored in sqe->len.
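liburing's multishot poll helper illustrates that split: the poll mask goes into sqe->poll_events while IORING_POLL_ADD_MULTI lands in sqe->len (ring and fd are assumed):

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

/* mask -> sqe->poll_events, IORING_POLL_ADD_MULTI -> sqe->len */
io_uring_prep_poll_multishot(sqe, fd, POLLIN);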
327 * sqe->addr as the old user_data field.
337 * ASYNC_CANCEL flags.
355 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
359 * -EAGAIN result, arm poll upfront and skip
378 * recv will grab as many buffers from the buffer
381 * the starting buffer ID in cqe->flags as per
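A sketch of a buffer-selecting recv that carries its flags in sqe->ioprio, assuming a provided-buffer group BGID has already been registered:

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_recv(sqe, sock_fd, NULL, 0, 0);
sqe->flags |= IOSQE_BUFFER_SELECT;          /* pick from sqe->buf_group */
sqe->buf_group = BGID;
sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;  /* arm poll before first attempt */
/* on completion, the (starting) buffer ID is reported in cqe->flags */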
395 * It should be treated as a flag; all other
396 * bits of cqe.res should be treated as reserved!
401 * accept flags stored in sqe->ioprio
408 * IORING_OP_MSG_RING command types, stored in sqe->addr
410 enum io_uring_msg_ring_flags {
411 IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */
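A sketch of IORING_MSG_DATA with liburing, posting a CQE into another ring (target_fd is the other ring's file descriptor; the payload values are arbitrary):

struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);

/* target ring gets a CQE with cqe->res == 42, cqe->user_data == 0x1234 */
io_uring_prep_msg_ring(sqe, target_fd, 42, 0x1234, 0);
io_uring_submit(&src_ring);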
416 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
422 /* Pass through the flags from sqe->file_index to cqe->flags */
426 * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
428 * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
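liburing wraps this op; a sketch that turns direct descriptor 'slot' back into a regular fd (pass IORING_FIXED_FD_NO_CLOEXEC as the flags argument to skip O_CLOEXEC):

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

/* on completion, cqe->res holds the newly installed regular fd */
io_uring_prep_fixed_fd_install(sqe, slot, 0);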
433 * IORING_OP_NOP flags (sqe->nop_flags)
435 * IORING_NOP_INJECT_RESULT Inject result from sqe->result
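A sketch, assuming a kernel and liburing recent enough to expose nop_flags; current kernels read the injected value out of the sqe's len field:

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_nop(sqe);
sqe->nop_flags = IORING_NOP_INJECT_RESULT;
sqe->len = (__u32)-EINVAL;   /* completes with cqe->res == -EINVAL */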
446 __u64 user_data; /* sqe->user_data value passed back */
448 __u32 flags;
452 * contains 16 bytes of padding, doubling the size of the CQE.
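The wider format is opted into at setup; a sketch (the extra 16 bytes then appear as cqe->big_cqe[]):

struct io_uring_params p = { };

p.flags = IORING_SETUP_CQE32;
io_uring_queue_init_params(64, &ring, &p);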
458 * cqe->flags
469 * the incremental buffer consumption, as provided by
501 __u32 flags;
509 * sq_ring->flags
522 __u32 flags;
528 * cq_ring->flags
535 * io_uring_enter(2) flags
551 __u32 flags;
562 * io_uring_params->features flags
585 enum io_uring_register_op {
606 /* set/clear io-wq thread affinities */
610 /* set/get max number of io-wq workers */
656 /* io-wq worker categories */
657 enum io_wq_type {
669 enum {
677 __u32 flags;
683 enum {
684 /* expose the region as registered wait arguments */
690 __u64 flags;
696 * -1 file descriptors.
702 __u32 flags;
724 #define IORING_REGISTER_FILES_SKIP (-2)
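A sketch combining the two conventions with liburing: a sparse table starts out full of -1 slots, and IORING_REGISTER_FILES_SKIP leaves a slot untouched during an update (slot count and new_fd are illustrative):

int fds[2];

io_uring_register_files_sparse(&ring, 8);   /* eight empty (-1) slots */

fds[0] = IORING_REGISTER_FILES_SKIP;        /* leave slot 0 alone */
fds[1] = new_fd;                            /* install into slot 1 */
io_uring_register_files_update(&ring, 0, fds, 2);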
731 __u16 flags; /* IO_URING_OP_* flags */
759 enum {
766 __u32 flags;
784 * ring tail is overlaid with the io_uring_buf->resv field.
797 * Flags for IORING_REGISTER_PBUF_RING.
802 * mmap(2) with the offset set as:
809 * use of it will consume only as much as it needs. This
813 enum io_uring_register_pbuf_ring_flags {
823 __u16 flags;
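A sketch with liburing's combined helper, which allocates, registers, and maps the buffer ring in one call (entry count and group ID BGID are arbitrary):

int ret;
struct io_uring_buf_ring *br;

br = io_uring_setup_buf_ring(&ring, 256, BGID, 0, &ret);

/* hand one buffer to the kernel, then publish the new tail */
io_uring_buf_ring_add(br, buf, buf_len, /*bid=*/0,
                      io_uring_buf_ring_mask(256), /*buf_offset=*/0);
io_uring_buf_ring_advance(br, 1);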
834 enum io_uring_napi_op {
843 enum io_uring_napi_tracking_strategy {
871 * io_uring_restriction->opcode values
873 enum io_uring_register_restriction_op {
880 /* Allow sqe flags */
883 /* Require sqe flags (these flags must be set on each submission) */
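Restrictions are registered while the ring is still disabled; a sketch assuming the ring was created with IORING_SETUP_R_DISABLED:

struct io_uring_restriction res[2] = { };

res[0].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
res[0].sqe_flags = IOSQE_FIXED_FILE | IOSQE_IO_LINK;   /* whitelist */
res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_REQUIRED;
res[1].sqe_flags = IOSQE_FIXED_FILE;                   /* must be set */

io_uring_register_restrictions(&ring, res, 2);
io_uring_enable_rings(&ring);   /* lift R_DISABLED and start submitting */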
889 enum {
902 __u32 flags;
925 __u32 flags;
934 * The range is specified as [off, off + len)
946 __u32 flags;
952 enum io_uring_socket_op {