1 /* SPDX-License-Identifier: GPL-2.0
2 *
3 * FUSE: Filesystem in Userspace
4 * Copyright (c) 2023-2024 DataDirect Networks.
5 */
6
7 #ifndef _FS_FUSE_DEV_URING_I_H
8 #define _FS_FUSE_DEV_URING_I_H
9
10 #include "fuse_i.h"
11
12 #ifdef CONFIG_FUSE_IO_URING
13
14 #define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ)
15 #define FUSE_URING_TEARDOWN_INTERVAL (HZ/20)
16
/*
 * Life-cycle states of a struct fuse_ring_ent.  State transitions are
 * protected by the owning queue's lock (see fuse_ring_queue->lock).
 */
enum fuse_ring_req_state {
	FRRS_INVALID = 0,

	/* The ring entry received from userspace and it is being processed */
	FRRS_COMMIT,

	/* The ring entry is waiting for new fuse requests */
	FRRS_AVAILABLE,

	/* The ring entry got assigned a fuse req */
	FRRS_FUSE_REQ,

	/* The ring entry is in or on the way to user space */
	FRRS_USERSPACE,

	/* The ring entry is in teardown */
	FRRS_TEARDOWN,

	/* The ring entry is released, but not freed yet */
	FRRS_RELEASED,
};
38
39 /** A fuse ring entry, part of the ring queue */
/** A fuse ring entry, part of the ring queue */
struct fuse_ring_ent {
	/* userspace buffers (__user pointers; only valid for copy_to/from_user) */
	struct fuse_uring_req_header __user *headers;
	void __user *payload;

	/* the ring queue that owns the request */
	struct fuse_ring_queue *queue;

	/* fields below are protected by queue->lock */

	/* io_uring command currently associated with this entry */
	struct io_uring_cmd *cmd;

	/* links the entry into one of the queue's lists (avail/commit/...) */
	struct list_head list;

	/* current life-cycle state, see enum fuse_ring_req_state */
	enum fuse_ring_req_state state;

	/* fuse request currently assigned to this entry, if any */
	struct fuse_req *fuse_req;
};
58
/* Per-queue state; there is one queue per CPU core (see qid). */
struct fuse_ring_queue {
	/*
	 * back pointer to the main fuse uring structure that holds this
	 * queue
	 */
	struct fuse_ring *ring;

	/* queue id, corresponds to the cpu core */
	unsigned int qid;

	/*
	 * queue lock, taken when any value in the queue changes _and_ also
	 * a ring entry state changes.
	 */
	spinlock_t lock;

	/* available ring entries (struct fuse_ring_ent) */
	struct list_head ent_avail_queue;

	/*
	 * entries in the process of being committed or in the process
	 * to be sent to userspace
	 */
	struct list_head ent_w_req_queue;
	struct list_head ent_commit_queue;

	/* entries in userspace */
	struct list_head ent_in_userspace;

	/* entries that are released */
	struct list_head ent_released;

	/* fuse requests waiting for an entry slot */
	struct list_head fuse_req_queue;

	/* background fuse requests */
	struct list_head fuse_req_bg_queue;

	/* processing queue for requests handed to this ring queue
	 * (struct fuse_pqueue is declared in fuse_i.h) */
	struct fuse_pqueue fpq;

	/* number of background requests currently in flight on this queue
	 * (NOTE(review): presumed counterpart of fuse_req_bg_queue — confirm
	 * against dev_uring.c) */
	unsigned int active_background;

	/* set once the queue is being torn down; no new work is accepted */
	bool stopped;
};
103
/**
 * Holds all the data needed for fuse-over-io-uring communication and
 * indicates whether io-uring is used for communication at all (see ready).
 */
struct fuse_ring {
	/* back pointer */
	struct fuse_conn *fc;

	/* number of ring queues */
	size_t nr_queues;

	/* maximum payload/arg size */
	size_t max_payload_sz;

	/* array of nr_queues per-core queues, indexed by qid */
	struct fuse_ring_queue **queues;

	/*
	 * Log ring entry states on stop when entries cannot be released
	 */
	unsigned int stop_debug_log : 1;

	/* woken when queue_refs drops to zero during teardown
	 * (see fuse_uring_wait_stopped_queues()) */
	wait_queue_head_t stop_waitq;

	/* async tear down */
	struct delayed_work async_teardown_work;

	/* log */
	unsigned long teardown_time;

	/* references held by queues; teardown completes when this reaches 0 */
	atomic_t queue_refs;

	/* true once the ring is usable for communication */
	bool ready;
};
137
/* API of the fuse io-uring transport (only built with CONFIG_FUSE_IO_URING) */
bool fuse_uring_enabled(void);
void fuse_uring_destruct(struct fuse_conn *fc);
void fuse_uring_stop_queues(struct fuse_ring *ring);
void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
bool fuse_uring_queue_bq_req(struct fuse_req *req);
bool fuse_uring_remove_pending_req(struct fuse_req *req);
146
fuse_uring_abort(struct fuse_conn * fc)147 static inline void fuse_uring_abort(struct fuse_conn *fc)
148 {
149 struct fuse_ring *ring = fc->ring;
150
151 if (ring == NULL)
152 return;
153
154 if (atomic_read(&ring->queue_refs) > 0) {
155 fuse_uring_abort_end_requests(ring);
156 fuse_uring_stop_queues(ring);
157 }
158 }
159
fuse_uring_wait_stopped_queues(struct fuse_conn * fc)160 static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
161 {
162 struct fuse_ring *ring = fc->ring;
163
164 if (ring)
165 wait_event(ring->stop_waitq,
166 atomic_read(&ring->queue_refs) == 0);
167 }
168
fuse_uring_ready(struct fuse_conn * fc)169 static inline bool fuse_uring_ready(struct fuse_conn *fc)
170 {
171 return fc->ring && fc->ring->ready;
172 }
173
174 #else /* CONFIG_FUSE_IO_URING */
175
176 struct fuse_ring;
177
/* No-op stub: fuse-over-io-uring is compiled out (CONFIG_FUSE_IO_URING=n). */
static inline void fuse_uring_create(struct fuse_conn *fc)
{
}
181
/* No-op stub: nothing to destruct without CONFIG_FUSE_IO_URING. */
static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}
185
/* Stub: io-uring communication is never available in this configuration. */
static inline bool fuse_uring_enabled(void)
{
	return false;
}
190
/* No-op stub: there is no ring to abort without CONFIG_FUSE_IO_URING. */
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
}
194
/* No-op stub: no queues exist to wait for without CONFIG_FUSE_IO_URING. */
static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
{
}
198
/* Stub: a ring can never be ready without CONFIG_FUSE_IO_URING. */
static inline bool fuse_uring_ready(struct fuse_conn *fc)
{
	return false;
}
203
/* Stub: no ring-queued requests exist, so nothing can be removed. */
static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
{
	return false;
}
208
209 #endif /* CONFIG_FUSE_IO_URING */
210
211 #endif /* _FS_FUSE_DEV_URING_I_H */
212