// SPDX-License-Identifier: MIT OR GPL-2.0-only

#if !defined(UBLKSRV_INTERNAL_H_)
#error "Never include <ublksrv_priv.h> directly; use <ublksrv.h> instead."
#endif

#ifndef UBLKSRV_PRIVATE_INC_H
#define UBLKSRV_PRIVATE_INC_H

#include <unistd.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <pthread.h>
#include <string.h>
#include <sys/types.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include <sys/poll.h>

#include "ublk_cmd.h"
#include "ublksrv_utils.h"
#include "ublksrv.h"
#include "ublksrv_aio.h"

/* todo: replace the hardcoded name with /dev/char/maj:min */
#define UBLKC_DEV "/dev/ublkc"
#define UBLKC_PATH_MAX 32

#ifdef __cplusplus
extern "C" {
#endif

struct ublksrv_ctrl_dev {
	struct io_uring ring;

	int ctrl_fd;
	unsigned bs_shift;
	struct ublksrv_ctrl_dev_info dev_info;

	const char *tgt_type;
	const struct ublksrv_tgt_type *tgt_ops;

	/*
	 * defaults to UBLKSRV_RUN_DIR but can be overridden via the
	 * command line; the pid file is saved there
	 */
	const char *run_dir;

	union {
		/* used by ->init_tgt() */
		struct {
			int tgt_argc;
			char **tgt_argv;
		};
		/* used by ->recovery_tgt(), tgt_argc == -1 */
		struct {
			int padding;
			const char *recovery_jbuf;
		};
	};

	cpu_set_t *queues_cpuset;

	unsigned long reserved[4];
};
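
/*
 * Illustrative sketch (not part of this header): target setup code can
 * discriminate the union above via tgt_argc, which is -1 in the recovery
 * case; parse_args() is a hypothetical helper named only for illustration:
 *
 *	if (cdev->tgt_argc == -1)
 *		jbuf = cdev->recovery_jbuf;
 *	else
 *		parse_args(cdev->tgt_argc, cdev->tgt_argv);
 */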

struct ublk_io {
	char *buf_addr;

#define UBLKSRV_NEED_FETCH_RQ		(1UL << 0)
#define UBLKSRV_NEED_COMMIT_RQ_COMP	(1UL << 1)
#define UBLKSRV_IO_FREE			(1UL << 2)
#define UBLKSRV_NEED_GET_DATA		(1UL << 3)
	unsigned int flags;

	/* result is updated after all target ios are done */
	unsigned int result;

	struct ublk_io_data data;
};
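
/*
 * Typical lifecycle sketch (an assumption from the flag names above, not a
 * definitive state machine): an io slot starts out free and needing a
 * FETCH_RQ command; once the target finishes the io, it becomes
 * UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE until the result is
 * committed and a new request is fetched.
 */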

struct _ublksrv_queue {
	/********** part of API, can't change ************/
	int q_id;
	int q_depth;

	struct io_uring *ring_ptr;
	struct _ublksrv_dev *dev;
	void *private_data;
	/*************************************************/

	/*
	 * Read-only for the ublksrv daemon, set up via mmap on /dev/ublkcN.
	 *
	 * The ublksrv_io_desc (iod) of each request is stored in this
	 * buffer, so an iod can be retrieved directly by the request's tag.
	 *
	 * The ublk driver writes the iod into this array and notifies the
	 * ublksrv daemon via the issued io_uring command beforehand.
	 */
	char *io_cmd_buf;
	char *io_buf;

	unsigned cmd_inflight, tgt_io_inflight;	// obsolete
	unsigned state;

	/* eventfd */
	int efd;

	/* cache tgt ops */
	const struct ublksrv_tgt_type *tgt_ops;

	/*
	 * ring for submitting io commands to the ublk driver; can only be
	 * issued from the ublksrv daemon.
	 *
	 * ring depth == dev_info->queue_depth.
	 */
	struct io_uring ring;

	unsigned tid;

#define UBLKSRV_NR_CTX_BATCH 4
	int nr_ctxs;
	struct ublksrv_aio_ctx *ctxs[UBLKSRV_NR_CTX_BATCH];

	unsigned long reserved[8];

	struct ublk_io ios[0];
};
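
/*
 * Illustrative sketch (not part of this header): the iod of a request
 * lives at a fixed, tag-indexed offset inside the mmap'ed io_cmd_buf;
 * struct ublksrv_io_desc comes from ublk_cmd.h:
 *
 *	const struct ublksrv_io_desc *iod = (struct ublksrv_io_desc *)
 *		&q->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)];
 */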

struct _ublksrv_dev {
	// must stay in sync with struct ublksrv_dev
	/********** part of API, can't change ************/
	struct ublksrv_tgt_info tgt;
	/************************************************/

	struct _ublksrv_queue *__queues[MAX_NR_HW_QUEUES];
	char *io_buf_start;
	pthread_t *thread;
	int cdev_fd;
	int pid_file_fd;

	const struct ublksrv_ctrl_dev *ctrl_dev;
	void *target_data;
	int cq_depth;
	int pad;

	/* reserved isn't necessary anymore */
	unsigned long reserved[3];
};

#define local_to_tq(q)	((struct ublksrv_queue *)(q))
#define tq_to_local(q)	((struct _ublksrv_queue *)(q))

#define local_to_tdev(d)	((struct ublksrv_dev *)(d))
#define tdev_to_local(d)	((struct _ublksrv_dev *)(d))

static inline bool ublk_is_unprivileged(const struct ublksrv_ctrl_dev *ctrl_dev)
{
	return !!(ctrl_dev->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV);
}

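/*
 * For an unprivileged device, the control-command payload that returns the
 * queue affinity is assumed to carry the ublkc device path in its first
 * UBLKC_PATH_MAX bytes, with the real cpu_set_t following it; that is why
 * the helper below skips UBLKC_PATH_MAX bytes in the unprivileged case.
 */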
static inline cpu_set_t *ublksrv_get_queue_affinity(
		const struct ublksrv_ctrl_dev *dev, int qid)
{
	unsigned char *buf = (unsigned char *)&dev->queues_cpuset[qid];

	if (ublk_is_unprivileged(dev))
		return (cpu_set_t *)&buf[UBLKC_PATH_MAX];

	return &dev->queues_cpuset[qid];
}

static inline void ublksrv_mark_io_done(struct ublk_io *io, int res)
{
	/*
	 * mark this io as done by the target, so that ->ubq_daemon can
	 * commit its result and fetch a new request via io_uring command
	 */
	io->flags |= (UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE);

	io->result = res;
}

static inline bool ublksrv_io_done(struct ublk_io *io)
{
	return io->flags & UBLKSRV_IO_FREE;
}
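
/*
 * Illustrative usage sketch (not part of this header): a target completes
 * an io, and the queue loop later checks whether the slot is free before
 * committing the result; commit_and_fetch() is a hypothetical helper:
 *
 *	ublksrv_mark_io_done(io, res);
 *	...
 *	if (ublksrv_io_done(io))
 *		commit_and_fetch(q, io);
 */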

int create_pid_file(const char *pid_file, int *pid_fd);

extern void ublksrv_build_cpu_str(char *buf, int len, const cpu_set_t *cpuset);

/* bit63: target io, bit62: eventfd data */
static inline __u64 build_eventfd_data(void)
{
	/* set both bit63 (target io) and bit62 (eventfd data) */
	return 0x3ULL << 62;
}

static inline int is_eventfd_io(__u64 user_data)
{
	return (user_data & (1ULL << 62)) != 0;
}

static inline int is_target_io(__u64 user_data)
{
	return (user_data & (1ULL << 63)) != 0;
}
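
/*
 * Illustrative sketch (not part of this header) of how the tag bits are
 * consumed when reaping CQEs; the handle_*() helpers are hypothetical.
 * Since build_eventfd_data() sets both bits, eventfd completions must be
 * tested before plain target io:
 *
 *	__u64 data = cqe->user_data;
 *
 *	if (is_eventfd_io(data))
 *		handle_eventfd(q);
 *	else if (is_target_io(data))
 *		handle_tgt_cqe(q, cqe);
 *	else
 *		handle_io_cmd_cqe(q, cqe);
 */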

/* two helpers for setting up io_uring */
static inline int ublksrv_setup_ring(struct io_uring *r, int depth,
		int cq_depth, unsigned flags)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = flags | IORING_SETUP_CQSIZE;
	p.cq_entries = cq_depth;

	return io_uring_queue_init_params(depth, r, &p);
}
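
/*
 * Illustrative usage sketch (not part of this header): the per-queue ring
 * needs 128-byte SQEs for ublk uring commands, and a CQ deeper than the SQ
 * so io-command and target-io completions both fit; the exact flags and
 * sizing here are assumptions for illustration:
 *
 *	struct io_uring r;
 *	int ret = ublksrv_setup_ring(&r, q_depth, q_depth * 2,
 *			IORING_SETUP_SQE128);
 *	if (ret < 0)
 *		return ret;
 */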

static inline struct io_uring_sqe *ublksrv_uring_get_sqe(struct io_uring *r,
		int idx, bool is_sqe128)
{
	/* with SQE128 every command occupies two 64-byte sqe slots */
	if (is_sqe128)
		return &r->sq.sqes[idx << 1];
	return &r->sq.sqes[idx];
}

static inline void *ublksrv_get_sqe_cmd(struct io_uring_sqe *sqe)
{
	return (void *)&sqe->addr3;
}

static inline void ublksrv_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
{
	/* cmd_op overlays the low 32 bits of sqe->off in uring commands */
	__u32 *addr = (__u32 *)&sqe->off;

	addr[0] = cmd_op;
	addr[1] = 0;
}
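
/*
 * Illustrative sketch (not part of this header): preparing one ublk io
 * command with the helpers above. UBLK_IO_FETCH_REQ and struct
 * ublksrv_io_cmd come from ublk_cmd.h; the user_data encoding is
 * simplified, real code also sets the flag bits described above:
 *
 *	struct io_uring_sqe *sqe = ublksrv_uring_get_sqe(&q->ring, tag, true);
 *	struct ublksrv_io_cmd *cmd = ublksrv_get_sqe_cmd(sqe);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = dev->cdev_fd;
 *	ublksrv_set_sqe_cmd_op(sqe, UBLK_IO_FETCH_REQ);
 *	cmd->q_id = q->q_id;
 *	cmd->tag = tag;
 *	cmd->addr = (__u64)(uintptr_t)io->buf_addr;
 *	sqe->user_data = tag;
 */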

/*
 * ublksrv_aio_ctx is used to offload IO handling from the ublksrv io_uring
 * context.
 *
 * ublksrv_aio_ctx is bound to one single pthread which has to belong to
 * the same process as the io_uring where the IO originates, so IO from
 * multiple queues of the same device can be handled. By default,
 * ublksrv_aio_ctx handles device-wide aio or io offloading, unless
 * UBLKSRV_AIO_QUEUE_WIDE is set.
 *
 * Alternatively, one ublksrv_aio_ctx can be created per queue, handling
 * only IOs from that queue.
 *
 * The final io handling in the aio context depends on the user's
 * implementation; both sync and async IO submission are supported.
 */
struct ublksrv_aio_ctx {
	struct ublksrv_aio_list submit;

	/* per-queue completion list */
	struct ublksrv_aio_list *complete;

	int efd;	// eventfd used for waking us up

#define UBLKSRV_AIO_QUEUE_WIDE	(1U << 0)
	unsigned int flags;
	bool dead;

	const struct ublksrv_dev *dev;

	void *ctx_data;

	unsigned long reserved[8];
};
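
/*
 * Illustrative sketch (not part of this header), assuming the ctx setup
 * helper declared in ublksrv_aio.h and a user-provided worker: create one
 * queue-wide context whose pthread drains ->submit and completes ios into
 * the per-queue ->complete lists:
 *
 *	struct ublksrv_aio_ctx *ctx =
 *		ublksrv_aio_ctx_init(dev, UBLKSRV_AIO_QUEUE_WIDE);
 *
 *	pthread_create(&tid, NULL, aio_worker_fn, ctx);
 */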

#ifdef __cplusplus
}
#endif

#endif /* UBLKSRV_PRIVATE_INC_H */