xref: /aosp_15_r20/external/libfuse/lib/fuse_lowlevel.c (revision 9e5649576b786774a32d7b0252c9cd8c6538fa49)
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2007  Miklos Szeredi <[email protected]>
4 
5   Implementation of (most of) the low-level FUSE API. The session loop
6   functions are implemented in separate files.
7 
8   This program can be distributed under the terms of the GNU LGPLv2.
9   See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "fuse_config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 #include <sys/ioctl.h>
31 
32 #ifndef F_LINUX_SPECIFIC_BASE
33 #define F_LINUX_SPECIFIC_BASE       1024
34 #endif
35 #ifndef F_SETPIPE_SZ
36 #define F_SETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 7)
37 #endif
38 
39 
40 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
41 #define OFFSET_MAX 0x7fffffffffffffffLL
42 
/* Compute the address of the structure of type 'type' that contains
 * 'member' at address 'ptr' (same idiom as the Linux kernel macro).
 * Uses a GCC statement expression and typeof for type safety. */
#define container_of(ptr, type, member) ({				\
			const typeof( ((type *)0)->member ) *__mptr = (ptr); \
			(type *)( (char *)__mptr - offsetof(type,member) );})
46 
/* Identifies an outstanding poll request: 'kh' is the kernel's unique
 * poll handle, 'se' the session it was received on. */
struct fuse_pollhandle {
	uint64_t kh;
	struct fuse_session *se;
};
51 
/* System page size, cached once at load time (see constructor below). */
static size_t pagesize;

/* Runs automatically before main() (GCC constructor attribute). */
static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
	pagesize = getpagesize();
}
58 
convert_stat(const struct stat * stbuf,struct fuse_attr * attr)59 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
60 {
61 	attr->ino	= stbuf->st_ino;
62 	attr->mode	= stbuf->st_mode;
63 	attr->nlink	= stbuf->st_nlink;
64 	attr->uid	= stbuf->st_uid;
65 	attr->gid	= stbuf->st_gid;
66 	attr->rdev	= stbuf->st_rdev;
67 	attr->size	= stbuf->st_size;
68 	attr->blksize	= stbuf->st_blksize;
69 	attr->blocks	= stbuf->st_blocks;
70 	attr->atime	= stbuf->st_atime;
71 	attr->mtime	= stbuf->st_mtime;
72 	attr->ctime	= stbuf->st_ctime;
73 	attr->atimensec = ST_ATIM_NSEC(stbuf);
74 	attr->mtimensec = ST_MTIM_NSEC(stbuf);
75 	attr->ctimensec = ST_CTIM_NSEC(stbuf);
76 }
77 
convert_attr(const struct fuse_setattr_in * attr,struct stat * stbuf)78 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
79 {
80 	stbuf->st_mode	       = attr->mode;
81 	stbuf->st_uid	       = attr->uid;
82 	stbuf->st_gid	       = attr->gid;
83 	stbuf->st_size	       = attr->size;
84 	stbuf->st_atime	       = attr->atime;
85 	stbuf->st_mtime	       = attr->mtime;
86 	stbuf->st_ctime        = attr->ctime;
87 	ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
88 	ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
89 	ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
90 }
91 
/* Return the total number of bytes covered by 'count' iovec segments. */
static size_t iov_length(const struct iovec *iov, size_t count)
{
	size_t total = 0;

	while (count-- > 0)
		total += (iov++)->iov_len;

	return total;
}
101 
list_init_req(struct fuse_req * req)102 static void list_init_req(struct fuse_req *req)
103 {
104 	req->next = req;
105 	req->prev = req;
106 }
107 
list_del_req(struct fuse_req * req)108 static void list_del_req(struct fuse_req *req)
109 {
110 	struct fuse_req *prev = req->prev;
111 	struct fuse_req *next = req->next;
112 	prev->next = next;
113 	next->prev = prev;
114 }
115 
list_add_req(struct fuse_req * req,struct fuse_req * next)116 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
117 {
118 	struct fuse_req *prev = next->prev;
119 	req->next = next;
120 	req->prev = prev;
121 	prev->next = req;
122 	next->prev = req;
123 }
124 
/* Final teardown of a request.  Only legal after the channel reference
 * has been dropped (asserted) and the refcount has reached zero. */
static void destroy_req(fuse_req_t req)
{
	assert(req->ch == NULL);
	pthread_mutex_destroy(&req->lock);
	free(req);
}
131 
/* Drop the session's reference to 'req': clear the per-request callback
 * state (u.ni), unlink it from the request list, release the channel
 * reference, and destroy the request once its refcount reaches zero.
 * All bookkeeping is done under the session lock. */
void fuse_free_req(fuse_req_t req)
{
	int ctr;
	struct fuse_session *se = req->se;

	pthread_mutex_lock(&se->lock);
	/* Clear callback/data so a pending notification cannot fire */
	req->u.ni.func = NULL;
	req->u.ni.data = NULL;
	list_del_req(req);
	/* Snapshot the decremented count while still holding the lock */
	ctr = --req->ctr;
	fuse_chan_put(req->ch);
	req->ch = NULL;
	pthread_mutex_unlock(&se->lock);
	if (!ctr)
		destroy_req(req);
}
148 
fuse_ll_alloc_req(struct fuse_session * se)149 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
150 {
151 	struct fuse_req *req;
152 
153 	req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
154 	if (req == NULL) {
155 		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
156 	} else {
157 		req->se = se;
158 		req->ctr = 1;
159 		list_init_req(req);
160 		pthread_mutex_init(&req->lock, NULL);
161 	}
162 
163 	return req;
164 }
165 
166 /* Send data. If *ch* is NULL, send via session master fd */
fuse_send_msg(struct fuse_session * se,struct fuse_chan * ch,struct iovec * iov,int count)167 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
168 			 struct iovec *iov, int count)
169 {
170 	struct fuse_out_header *out = iov[0].iov_base;
171 
172 	assert(se != NULL);
173 	out->len = iov_length(iov, count);
174 	if (se->debug) {
175 		if (out->unique == 0) {
176 			fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
177 				out->error, out->len);
178 		} else if (out->error) {
179 			fuse_log(FUSE_LOG_DEBUG,
180 				"   unique: %llu, error: %i (%s), outsize: %i\n",
181 				(unsigned long long) out->unique, out->error,
182 				strerror(-out->error), out->len);
183 		} else {
184 			fuse_log(FUSE_LOG_DEBUG,
185 				"   unique: %llu, success, outsize: %i\n",
186 				(unsigned long long) out->unique, out->len);
187 		}
188 	}
189 
190 	ssize_t res;
191 	if (se->io != NULL)
192 		/* se->io->writev is never NULL if se->io is not NULL as
193 		specified by fuse_session_custom_io()*/
194 		res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
195 					   se->userdata);
196 	else
197 		res = writev(ch ? ch->fd : se->fd, iov, count);
198 
199 	int err = errno;
200 
201 	if (res == -1) {
202 		/* ENOENT means the operation was interrupted */
203 		if (!fuse_session_exited(se) && err != ENOENT)
204 			perror("fuse: writing device");
205 		return -err;
206 	}
207 
208 	return 0;
209 }
210 
211 
/* Fill in the fuse_out_header (request id + status) as iov[0] and send
 * the reply.  'error' must be 0 or a negated errno value; out-of-range
 * values are logged and clamped to -ERANGE.  Does NOT free the request;
 * callers that want that use send_reply_iov(). */
int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
			       int count)
{
	struct fuse_out_header out;

#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
	/* glibc >= 2.32: strerrordesc_np() returns NULL for unknown errno
	 * values, giving a precise validity check. */
	const char *str = strerrordesc_np(error * -1);
	if ((str == NULL && error != 0) || error > 0) {
#else
	/* Older glibc: accept only 0 or errno values in (-1000, 0]. */
	if (error <= -1000 || error > 0) {
#endif
		fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n",	error);
		error = -ERANGE;
	}

	out.unique = req->unique;
	out.error = error;

	/* Slot 0 is reserved by all callers for the header */
	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(struct fuse_out_header);

	return fuse_send_msg(req->se, req->ch, iov, count);
}
235 
236 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
237 			  int count)
238 {
239 	int res;
240 
241 	res = fuse_send_reply_iov_nofree(req, error, iov, count);
242 	fuse_free_req(req);
243 	return res;
244 }
245 
246 static int send_reply(fuse_req_t req, int error, const void *arg,
247 		      size_t argsize)
248 {
249 	struct iovec iov[2];
250 	int count = 1;
251 	if (argsize) {
252 		iov[1].iov_base = (void *) arg;
253 		iov[1].iov_len = argsize;
254 		count++;
255 	}
256 	return send_reply_iov(req, error, iov, count);
257 }
258 
259 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
260 {
261 	int res;
262 	struct iovec *padded_iov;
263 
264 	padded_iov = malloc((count + 1) * sizeof(struct iovec));
265 	if (padded_iov == NULL)
266 		return fuse_reply_err(req, ENOMEM);
267 
268 	memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
269 	count++;
270 
271 	res = send_reply_iov(req, 0, padded_iov, count);
272 	free(padded_iov);
273 
274 	return res;
275 }
276 
277 
278 /* `buf` is allowed to be empty so that the proper size may be
279    allocated by the caller */
280 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
281 			 const char *name, const struct stat *stbuf, off_t off)
282 {
283 	(void)req;
284 	size_t namelen;
285 	size_t entlen;
286 	size_t entlen_padded;
287 	struct fuse_dirent *dirent;
288 
289 	namelen = strlen(name);
290 	entlen = FUSE_NAME_OFFSET + namelen;
291 	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
292 
293 	if ((buf == NULL) || (entlen_padded > bufsize))
294 	  return entlen_padded;
295 
296 	dirent = (struct fuse_dirent*) buf;
297 	dirent->ino = stbuf->st_ino;
298 	dirent->off = off;
299 	dirent->namelen = namelen;
300 	dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
301 	memcpy(dirent->name, name, namelen);
302 	memset(dirent->name + namelen, 0, entlen_padded - entlen);
303 
304 	return entlen_padded;
305 }
306 
307 static void convert_statfs(const struct statvfs *stbuf,
308 			   struct fuse_kstatfs *kstatfs)
309 {
310 	kstatfs->bsize	 = stbuf->f_bsize;
311 	kstatfs->frsize	 = stbuf->f_frsize;
312 	kstatfs->blocks	 = stbuf->f_blocks;
313 	kstatfs->bfree	 = stbuf->f_bfree;
314 	kstatfs->bavail	 = stbuf->f_bavail;
315 	kstatfs->files	 = stbuf->f_files;
316 	kstatfs->ffree	 = stbuf->f_ffree;
317 	kstatfs->namelen = stbuf->f_namemax;
318 }
319 
/* Reply with status 0 and an optional payload. */
static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
	return send_reply(req, 0, arg, argsize);
}
324 
/* Reply with an error; the kernel expects negated errno values. */
int fuse_reply_err(fuse_req_t req, int err)
{
	return send_reply(req, -err, NULL, 0);
}
329 
/* Release a request without sending any reply (e.g. for FORGET). */
void fuse_reply_none(fuse_req_t req)
{
	fuse_free_req(req);
}
334 
/* Whole-seconds part of a timeout, clamped to [0, ULONG_MAX]. */
static unsigned long calc_timeout_sec(double t)
{
	if (t < 0.0)
		return 0;
	if (t > (double) ULONG_MAX)
		return ULONG_MAX;
	return (unsigned long) t;
}

/* Fractional part of a timeout in nanoseconds, in [0, 999999999]. */
static unsigned int calc_timeout_nsec(double t)
{
	double frac = t - (double) calc_timeout_sec(t);

	if (frac < 0.0)
		return 0;
	if (frac >= 0.999999999)
		return 999999999;
	return (unsigned int) (frac * 1.0e9);
}
355 
/* Populate a wire-format fuse_entry_out from a fuse_entry_param.
 * Timeouts are split into whole seconds plus nanoseconds. */
static void fill_entry(struct fuse_entry_out *arg,
		       const struct fuse_entry_param *e)
{
	arg->nodeid = e->ino;
	arg->generation = e->generation;
	arg->entry_valid = calc_timeout_sec(e->entry_timeout);
	arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
	arg->attr_valid = calc_timeout_sec(e->attr_timeout);
	arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
	convert_stat(&e->attr, &arg->attr);
}
367 
368 /* `buf` is allowed to be empty so that the proper size may be
369    allocated by the caller */
370 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
371 			      const char *name,
372 			      const struct fuse_entry_param *e, off_t off)
373 {
374 	(void)req;
375 	size_t namelen;
376 	size_t entlen;
377 	size_t entlen_padded;
378 
379 	namelen = strlen(name);
380 	entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
381 	entlen_padded = FUSE_DIRENT_ALIGN(entlen);
382 	if ((buf == NULL) || (entlen_padded > bufsize))
383 	  return entlen_padded;
384 
385 	struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
386 	memset(&dp->entry_out, 0, sizeof(dp->entry_out));
387 	fill_entry(&dp->entry_out, e);
388 
389 	struct fuse_dirent *dirent = &dp->dirent;
390 	dirent->ino = e->attr.st_ino;
391 	dirent->off = off;
392 	dirent->namelen = namelen;
393 	dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
394 	memcpy(dirent->name, name, namelen);
395 	memset(dirent->name + namelen, 0, entlen_padded - entlen);
396 
397 	return entlen_padded;
398 }
399 
400 static void fill_open(struct fuse_open_out *arg,
401 		      const struct fuse_file_info *f,
402 		      int use_upstream_passthrough)
403 {
404 	arg->fh = f->fh;
405 	if (use_upstream_passthrough) {
406 		if (f->backing_id > 0) {
407 			arg->backing_id = f->backing_id;
408 			arg->open_flags |= FOPEN_PASSTHROUGH;
409 		}
410 	} else {
411 		arg->passthrough_fh = f->passthrough_fh;
412 	}
413 
414 	if (f->direct_io)
415 		arg->open_flags |= FOPEN_DIRECT_IO;
416 	if (f->keep_cache)
417 		arg->open_flags |= FOPEN_KEEP_CACHE;
418 	if (f->cache_readdir)
419 		arg->open_flags |= FOPEN_CACHE_DIR;
420 	if (f->nonseekable)
421 		arg->open_flags |= FOPEN_NONSEEKABLE;
422 	if (f->noflush)
423 		arg->open_flags |= FOPEN_NOFLUSH;
424 	if (f->parallel_direct_writes)
425 		arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
426 }
427 
428 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param* e) {
429     struct {
430         struct fuse_entry_out arg;
431         struct fuse_entry_bpf_out bpf_arg;
432     } __attribute__((packed)) arg_ext = {0};
433 
434     struct fuse_entry_out arg;
435     struct fuse_entry_bpf_out bpf_arg;
436     size_t size;
437     int extended_args = e->bpf_action || bpf_arg.bpf_fd || e->backing_action || e->backing_fd;
438 
439     if (extended_args) {
440         size = req->se->conn.proto_minor < 9 ? FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg_ext);
441     } else {
442         size = req->se->conn.proto_minor < 9 ? FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
443     }
444 
445     /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
446        negative entry */
447     if (!e->ino && req->se->conn.proto_minor < 4) return fuse_reply_err(req, ENOENT);
448 
449     memset(&arg, 0, sizeof(arg));
450     fill_entry(&arg, e);
451 
452     if (extended_args) {
453         memset(&bpf_arg, 0, sizeof(bpf_arg));
454 
455         bpf_arg.bpf_action = e->bpf_action;
456         bpf_arg.bpf_fd = e->bpf_fd;
457         bpf_arg.backing_action = e->backing_action;
458         bpf_arg.backing_fd = e->backing_fd;
459 
460         arg_ext.arg = arg;
461         arg_ext.bpf_arg = bpf_arg;
462 
463         return send_reply_ok(req, &arg_ext, size);
464     } else {
465         return send_reply_ok(req, &arg, size);
466     }
467 }
468 
469 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
470 		      const struct fuse_file_info *f)
471 {
472 	char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
473 	size_t entrysize = req->se->conn.proto_minor < 9 ?
474 		FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
475 	struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
476 	struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
477 
478 	memset(buf, 0, sizeof(buf));
479 	fill_entry(earg, e);
480 	fill_open(oarg, f, req->se->conn.capable & FUSE_CAP_PASSTHROUGH_UPSTREAM);
481 	return send_reply_ok(req, buf,
482 			     entrysize + sizeof(struct fuse_open_out));
483 }
484 
485 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
486 		    double attr_timeout)
487 {
488 	struct fuse_attr_out arg;
489 	size_t size = req->se->conn.proto_minor < 9 ?
490 		FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
491 
492 	memset(&arg, 0, sizeof(arg));
493 	arg.attr_valid = calc_timeout_sec(attr_timeout);
494 	arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
495 	convert_stat(attr, &arg.attr);
496 
497 	return send_reply_ok(req, &arg, size);
498 }
499 
500 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
501 {
502 	return send_reply_ok(req, linkname, strlen(linkname));
503 }
504 
505 int fuse_reply_canonical_path(fuse_req_t req, const char *path)
506 {
507 	// The kernel expects a buffer containing the null terminator for this op
508 	// So we add the null terminator size to strlen
509 	return send_reply_ok(req, path, strlen(path) + 1);
510 }
511 
/* Generations of the Android FUSE passthrough ioctl API.
 * fuse_passthrough_enable() probes these from newest to oldest. */
enum {
	FUSE_PASSTHROUGH_API_UNAVAILABLE,
	FUSE_PASSTHROUGH_API_V0,
	FUSE_PASSTHROUGH_API_V1,
	FUSE_PASSTHROUGH_API_V2,
	FUSE_PASSTHROUGH_API_STABLE,
};
519 
/*
 * Requests the FUSE passthrough feature to be enabled on a specific file
 * through the passed fd.
 * This function returns an identifier that must be used as passthrough_fh
 * when the open/create_open request reply is sent back to /dev/fuse.
 * As for the current FUSE passthrough implementation, passthrough_fh values
 * are only valid if > 0, so in case the FUSE passthrough open ioctl returns
 * a value <= 0, this must be considered an error and is returned as-is by
 * this function.
 */
int fuse_passthrough_enable(fuse_req_t req, unsigned int fd) {
	/* Remembered across calls so the probing cost is paid only once.
	 * sig_atomic_t gives tear-free reads/writes, not full thread
	 * synchronization; worst case is a redundant re-probe. */
	static sig_atomic_t passthrough_version = FUSE_PASSTHROUGH_API_STABLE;
	int ret = 0; /* values <= 0 represent errors in FUSE passthrough */

	if (!(req->se->conn.capable & FUSE_CAP_PASSTHROUGH))
		return -ENOTTY;
	/*
	 * The interface of FUSE passthrough is still unstable in the kernel,
	 * so the following solution is to search for the most updated API
	 * version and, if not found, fall back to an older one.
	 * This happens when ioctl() returns -1 and errno is set to ENOTTY,
	 * an error code that corresponds to the lack of a specific ioctl.
	 */
	switch (passthrough_version) {
	case FUSE_PASSTHROUGH_API_STABLE:
		/* There is not a stable API yet */
		passthrough_version = FUSE_PASSTHROUGH_API_V2;
		/* fallthrough: try the newest probed version */
	case FUSE_PASSTHROUGH_API_V2: {
		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V2, &fd);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_V1;
		else
			break;
	}
		/* fallthrough: V2 ioctl missing, try V1 */
	case FUSE_PASSTHROUGH_API_V1: {
		struct fuse_passthrough_out_v0 out = {};
		out.fd = fd;

		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V1, &out);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_V0;
		else
			break;
	}
		/* fallthrough: V1 ioctl missing, try V0 */
	case FUSE_PASSTHROUGH_API_V0: {
		struct fuse_passthrough_out_v0 out = {};
		out.fd = fd;

		ret = ioctl(req->se->fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN_V0, &out);
		if (ret == -1 && errno == ENOTTY)
			passthrough_version = FUSE_PASSTHROUGH_API_UNAVAILABLE;
		else
			break;
	}
		/* fallthrough: no API version available at all */
	default:
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_enable no valid API\n");
		return -ENOTTY;
	}

	if (ret <= 0)
		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_enable: %s\n", strerror(errno));
	return ret;
}
583 
584 int fuse_passthrough_open(fuse_req_t req, int fd)
585 {
586 	struct fuse_backing_map map = { .fd = fd };
587 	int ret;
588 
589 	ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
590 	if (ret <= 0) {
591 		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
592 		return 0;
593 	}
594 
595 	return ret;
596 }
597 
598 int fuse_passthrough_close(fuse_req_t req, int backing_id)
599 {
600 	int ret;
601 
602 	ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
603 	if (ret < 0)
604 		fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));
605 
606 	return ret;
607 }
608 
/* Reply to OPEN/OPENDIR with the file handle and open flags. */
int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
{
	struct fuse_open_out arg;

	memset(&arg, 0, sizeof(arg));
	fill_open(&arg, f, req->se->conn.capable & FUSE_CAP_PASSTHROUGH_UPSTREAM);
	return send_reply_ok(req, &arg, sizeof(arg));
}
617 
/* Reply to WRITE with the number of bytes accepted. */
int fuse_reply_write(fuse_req_t req, size_t count)
{
	struct fuse_write_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = count;

	return send_reply_ok(req, &arg, sizeof(arg));
}
627 
/* Reply with an arbitrary data buffer (e.g. READ results). */
int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
{
	return send_reply_ok(req, buf, size);
}
632 
/* Send header iov(s) plus the data described by 'buf' using plain
 * writev(), copying fd-backed buffers into an aligned bounce buffer
 * first.  Returns 0 on success, a negative error from fuse_send_msg()
 * or fuse_buf_copy(), or a positive errno value from posix_memalign()
 * (callers such as fuse_reply_data() treat any non-zero result as an
 * error). */
static int fuse_send_data_iov_fallback(struct fuse_session *se,
				       struct fuse_chan *ch,
				       struct iovec *iov, int iov_count,
				       struct fuse_bufvec *buf,
				       size_t len)
{
	struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
	void *mbuf;
	int res;

	/* Optimize common case */
	if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
	    !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
		/* FIXME: also avoid memory copy if there are multiple buffers
		   but none of them contain an fd */

		iov[iov_count].iov_base = buf->buf[0].mem;
		iov[iov_count].iov_len = len;
		iov_count++;
		return fuse_send_msg(se, ch, iov, iov_count);
	}

	/* Page-aligned bounce buffer for the copied data */
	res = posix_memalign(&mbuf, pagesize, len);
	if (res != 0)
		return res;

	mem_buf.buf[0].mem = mbuf;
	res = fuse_buf_copy(&mem_buf, buf, 0);
	if (res < 0) {
		free(mbuf);
		return -res;
	}
	/* fuse_buf_copy() may have copied less than requested */
	len = res;

	iov[iov_count].iov_base = mbuf;
	iov[iov_count].iov_len = len;
	iov_count++;
	res = fuse_send_msg(se, ch, iov, iov_count);
	free(mbuf);

	return res;
}
675 
/* Per-thread pipe used for zero-copy (splice) replies.  'size' is the
 * current pipe capacity; 'can_grow' is cleared once F_SETPIPE_SZ has
 * failed so we stop retrying. */
struct fuse_ll_pipe {
	size_t size;
	int can_grow;
	int pipe[2];
};
681 
/* Close both pipe ends and free the bookkeeping struct. */
static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
{
	close(llp->pipe[0]);
	close(llp->pipe[1]);
	free(llp);
}
688 
689 #ifdef HAVE_SPLICE
#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
/* Fallback for systems without pipe2(): create the pipe, then set
 * O_NONBLOCK and FD_CLOEXEC on both ends with fcntl().  Unlike pipe2()
 * this is not atomic with respect to a concurrent fork/exec. */
static int fuse_pipe(int fds[2])
{
	int rv = pipe(fds);

	if (rv == -1)
		return rv;

	if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
	    fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
	    fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
		close(fds[0]);
		close(fds[1]);
		rv = -1;
	}
	return rv;
}
#else
/* Create a non-blocking, close-on-exec pipe in a single atomic call. */
static int fuse_pipe(int fds[2])
{
	return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
}
#endif
714 
715 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
716 {
717 	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
718 	if (llp == NULL) {
719 		int res;
720 
721 		llp = malloc(sizeof(struct fuse_ll_pipe));
722 		if (llp == NULL)
723 			return NULL;
724 
725 		res = fuse_pipe(llp->pipe);
726 		if (res == -1) {
727 			free(llp);
728 			return NULL;
729 		}
730 
731 		/*
732 		 *the default size is 16 pages on linux
733 		 */
734 		llp->size = pagesize * 16;
735 		llp->can_grow = 1;
736 
737 		pthread_setspecific(se->pipe_key, llp);
738 	}
739 
740 	return llp;
741 }
742 #endif
743 
744 static void fuse_ll_clear_pipe(struct fuse_session *se)
745 {
746 	struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
747 	if (llp) {
748 		pthread_setspecific(se->pipe_key, NULL);
749 		fuse_ll_pipe_free(llp);
750 	}
751 }
752 
753 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
754 static int read_back(int fd, char *buf, size_t len)
755 {
756 	int res;
757 
758 	res = read(fd, buf, len);
759 	if (res == -1) {
760 		fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
761 		return -EIO;
762 	}
763 	if (res != len) {
764 		fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
765 		return -EIO;
766 	}
767 	return 0;
768 }
769 
/* Grow 'pipefd' to the system-wide maximum capacity read from
 * /proc/sys/fs/pipe-max-size.  Returns the new size on success, or a
 * negative errno value on failure. */
static int grow_pipe_to_max(int pipefd)
{
	int max;
	int res;
	int maxfd;
	char buf[32];

	maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
	if (maxfd < 0)
		return -errno;

	res = read(maxfd, buf, sizeof(buf) - 1);
	if (res < 0) {
		int saved_errno;

		/* close() may clobber errno, so save it first */
		saved_errno = errno;
		close(maxfd);
		return -saved_errno;
	}
	close(maxfd);
	/* NUL-terminate for atoi(); at most sizeof(buf)-1 bytes were read */
	buf[res] = '\0';

	max = atoi(buf);
	res = fcntl(pipefd, F_SETPIPE_SZ, max);
	if (res < 0)
		return -errno;
	return max;
}
798 
/* Zero-copy reply path: vmsplice the header iovs into a per-thread
 * pipe, splice the data buffers in after them, then splice the whole
 * pipe contents to the device fd.  Falls back to
 * fuse_send_data_iov_fallback() (plain writev) whenever splice is not
 * available, not beneficial, or known-broken on this kernel.
 * Returns 0 on success, negative error codes on failure (positive only
 * via the fallback's posix_memalign path). */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			       struct iovec *iov, int iov_count,
			       struct fuse_bufvec *buf, unsigned int flags)
{
	int res;
	size_t len = fuse_buf_size(buf);
	struct fuse_out_header *out = iov[0].iov_base;
	struct fuse_ll_pipe *llp;
	int splice_flags;
	size_t pipesize;
	size_t total_buf_size;
	size_t idx;
	size_t headerlen;
	struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);

	/* Detected earlier on this session (see EAGAIN handling below) */
	if (se->broken_splice_nonblock)
		goto fallback;

	if (flags & FUSE_BUF_NO_SPLICE)
		goto fallback;

	/* Remaining payload size, accounting for the initial offset */
	total_buf_size = 0;
	for (idx = buf->idx; idx < buf->count; idx++) {
		total_buf_size += buf->buf[idx].size;
		if (idx == buf->idx)
			total_buf_size -= buf->off;
	}
	/* Splice setup cost only pays off for larger transfers */
	if (total_buf_size < 2 * pagesize)
		goto fallback;

	/* SPLICE_WRITE needs protocol >= 7.14 and the negotiated cap */
	if (se->conn.proto_minor < 14 ||
	    !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
		goto fallback;

	llp = fuse_ll_get_pipe(se);
	if (llp == NULL)
		goto fallback;


	headerlen = iov_length(iov, iov_count);

	out->len = headerlen + len;

	/*
	 * Heuristic for the required pipe size, does not work if the
	 * source contains less than page size fragments
	 */
	pipesize = pagesize * (iov_count + buf->count + 1) + out->len;

	if (llp->size < pipesize) {
		if (llp->can_grow) {
			res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
			if (res == -1) {
				/* Try the system maximum once, then stop
				 * attempting to grow this pipe */
				res = grow_pipe_to_max(llp->pipe[0]);
				if (res > 0)
					llp->size = res;
				llp->can_grow = 0;
				goto fallback;
			}
			llp->size = res;
		}
		if (llp->size < pipesize)
			goto fallback;
	}


	/* Stage the reply header(s) in the pipe */
	res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
	if (res == -1)
		goto fallback;

	if (res != headerlen) {
		res = -EIO;
		fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
			headerlen);
		goto clear_pipe;
	}

	/* Now splice/copy the data buffers in after the header */
	pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
	pipe_buf.buf[0].fd = llp->pipe[1];

	res = fuse_buf_copy(&pipe_buf, buf,
			    FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
	if (res < 0) {
		if (res == -EAGAIN || res == -EINVAL) {
			/*
			 * Should only get EAGAIN on kernels with
			 * broken SPLICE_F_NONBLOCK support (<=
			 * 2.6.35) where this error or a short read is
			 * returned even if the pipe itself is not
			 * full
			 *
			 * EINVAL might mean that splice can't handle
			 * this combination of input and output.
			 */
			if (res == -EAGAIN)
				se->broken_splice_nonblock = 1;

			pthread_setspecific(se->pipe_key, NULL);
			fuse_ll_pipe_free(llp);
			goto fallback;
		}
		res = -res;
		goto clear_pipe;
	}

	if (res != 0 && res < len) {
		struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
		void *mbuf;
		size_t now_len = res;
		/*
		 * For regular files a short count is either
		 *  1) due to EOF, or
		 *  2) because of broken SPLICE_F_NONBLOCK (see above)
		 *
		 * For other inputs it's possible that we overflowed
		 * the pipe because of small buffer fragments.
		 */

		res = posix_memalign(&mbuf, pagesize, len);
		if (res != 0)
			goto clear_pipe;

		/* Copy whatever did not make it into the pipe */
		mem_buf.buf[0].mem = mbuf;
		mem_buf.off = now_len;
		res = fuse_buf_copy(&mem_buf, buf, 0);
		if (res > 0) {
			char *tmpbuf;
			size_t extra_len = res;
			/*
			 * Trickiest case: got more data.  Need to get
			 * back the data from the pipe and then fall
			 * back to regular write.
			 */
			tmpbuf = malloc(headerlen);
			if (tmpbuf == NULL) {
				free(mbuf);
				res = ENOMEM;
				goto clear_pipe;
			}
			/* Discard the header staged in the pipe */
			res = read_back(llp->pipe[0], tmpbuf, headerlen);
			free(tmpbuf);
			if (res != 0) {
				free(mbuf);
				goto clear_pipe;
			}
			/* Recover the spliced data in front of the copy */
			res = read_back(llp->pipe[0], mbuf, now_len);
			if (res != 0) {
				free(mbuf);
				goto clear_pipe;
			}
			len = now_len + extra_len;
			iov[iov_count].iov_base = mbuf;
			iov[iov_count].iov_len = len;
			iov_count++;
			res = fuse_send_msg(se, ch, iov, iov_count);
			free(mbuf);
			return res;
		}
		free(mbuf);
		res = now_len;
	}
	/* Shorter-than-expected data (e.g. EOF): shrink the header len */
	len = res;
	out->len = headerlen + len;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			"   unique: %llu, success, outsize: %i (splice)\n",
			(unsigned long long) out->unique, out->len);
	}

	splice_flags = 0;
	if ((flags & FUSE_BUF_SPLICE_MOVE) &&
	    (se->conn.want & FUSE_CAP_SPLICE_MOVE))
		splice_flags |= SPLICE_F_MOVE;

	/* Final step: splice the assembled reply to the device fd */
	if (se->io != NULL && se->io->splice_send != NULL) {
		res = se->io->splice_send(llp->pipe[0], NULL,
						  ch ? ch->fd : se->fd, NULL, out->len,
					  	  splice_flags, se->userdata);
	} else {
		res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
			       out->len, splice_flags);
	}
	if (res == -1) {
		res = -errno;
		perror("fuse: splice from pipe");
		goto clear_pipe;
	}
	if (res != out->len) {
		res = -EIO;
		fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
			res, out->len);
		goto clear_pipe;
	}
	return 0;

clear_pipe:
	/* Leftover pipe contents would corrupt the next reply */
	fuse_ll_clear_pipe(se);
	return res;

fallback:
	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
1002 #else
/* No splice support at build time: always use the writev fallback. */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
			       struct iovec *iov, int iov_count,
			       struct fuse_bufvec *buf, unsigned int flags)
{
	size_t len = fuse_buf_size(buf);
	(void) flags;

	return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
1012 #endif
1013 
1014 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
1015 		    enum fuse_buf_copy_flags flags)
1016 {
1017 	struct iovec iov[2];
1018 	struct fuse_out_header out;
1019 	int res;
1020 
1021 	iov[0].iov_base = &out;
1022 	iov[0].iov_len = sizeof(struct fuse_out_header);
1023 
1024 	out.unique = req->unique;
1025 	out.error = 0;
1026 
1027 	res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
1028 	if (res <= 0) {
1029 		fuse_free_req(req);
1030 		return res;
1031 	} else {
1032 		return fuse_reply_err(req, res);
1033 	}
1034 }
1035 
1036 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
1037 {
1038 	struct fuse_statfs_out arg;
1039 	size_t size = req->se->conn.proto_minor < 4 ?
1040 		FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
1041 
1042 	memset(&arg, 0, sizeof(arg));
1043 	convert_statfs(stbuf, &arg.st);
1044 
1045 	return send_reply_ok(req, &arg, size);
1046 }
1047 
/* Reply to a size-query GETXATTR/LISTXATTR with the attribute size. */
int fuse_reply_xattr(fuse_req_t req, size_t count)
{
	struct fuse_getxattr_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = count;

	return send_reply_ok(req, &arg, sizeof(arg));
}
1057 
1058 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
1059 {
1060 	struct fuse_lk_out arg;
1061 
1062 	memset(&arg, 0, sizeof(arg));
1063 	arg.lk.type = lock->l_type;
1064 	if (lock->l_type != F_UNLCK) {
1065 		arg.lk.start = lock->l_start;
1066 		if (lock->l_len == 0)
1067 			arg.lk.end = OFFSET_MAX;
1068 		else
1069 			arg.lk.end = lock->l_start + lock->l_len - 1;
1070 	}
1071 	arg.lk.pid = lock->l_pid;
1072 	return send_reply_ok(req, &arg, sizeof(arg));
1073 }
1074 
/* Reply to BMAP with the mapped block index. */
int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
{
	struct fuse_bmap_out arg;

	memset(&arg, 0, sizeof(arg));
	arg.block = idx;

	return send_reply_ok(req, &arg, sizeof(arg));
}
1084 
1085 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
1086 						      size_t count)
1087 {
1088 	struct fuse_ioctl_iovec *fiov;
1089 	size_t i;
1090 
1091 	fiov = malloc(sizeof(fiov[0]) * count);
1092 	if (!fiov)
1093 		return NULL;
1094 
1095 	for (i = 0; i < count; i++) {
1096 		fiov[i].base = (uintptr_t) iov[i].iov_base;
1097 		fiov[i].len = iov[i].iov_len;
1098 	}
1099 
1100 	return fiov;
1101 }
1102 
/*
 * Ask the kernel to retry an unrestricted ioctl with the given input and
 * output iovecs fetched/stored on our behalf (FUSE_IOCTL_RETRY).
 * iov[0] is left for the fuse_out_header, which send_reply_iov fills in.
 */
int fuse_reply_ioctl_retry(fuse_req_t req,
			   const struct iovec *in_iov, size_t in_count,
			   const struct iovec *out_iov, size_t out_count)
{
	struct fuse_ioctl_out arg;
	struct fuse_ioctl_iovec *in_fiov = NULL;
	struct fuse_ioctl_iovec *out_fiov = NULL;
	struct iovec iov[4];
	size_t count = 1;	/* slot 0 is reserved for the reply header */
	int res;

	memset(&arg, 0, sizeof(arg));
	arg.flags |= FUSE_IOCTL_RETRY;
	arg.in_iovs = in_count;
	arg.out_iovs = out_count;
	iov[count].iov_base = &arg;
	iov[count].iov_len = sizeof(arg);
	count++;

	if (req->se->conn.proto_minor < 16) {
		/* Old protocol: send raw struct iovec arrays as-is */
		if (in_count) {
			iov[count].iov_base = (void *)in_iov;
			iov[count].iov_len = sizeof(in_iov[0]) * in_count;
			count++;
		}

		if (out_count) {
			iov[count].iov_base = (void *)out_iov;
			iov[count].iov_len = sizeof(out_iov[0]) * out_count;
			count++;
		}
	} else {
		/* Can't handle non-compat 64bit ioctls on 32bit */
		if (sizeof(void *) == 4 && req->ioctl_64bit) {
			res = fuse_reply_err(req, EINVAL);
			goto out;
		}

		/* New protocol: translate to fixed-layout fuse_ioctl_iovec */
		if (in_count) {
			in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
			if (!in_fiov)
				goto enomem;

			iov[count].iov_base = (void *)in_fiov;
			iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
			count++;
		}
		if (out_count) {
			out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
			if (!out_fiov)
				goto enomem;

			iov[count].iov_base = (void *)out_fiov;
			iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
			count++;
		}
	}

	res = send_reply_iov(req, 0, iov, count);
out:
	/* free(NULL) is a no-op, so both paths are safe here */
	free(in_fiov);
	free(out_fiov);

	return res;

enomem:
	res = fuse_reply_err(req, ENOMEM);
	goto out;
}
1172 
1173 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1174 {
1175 	struct fuse_ioctl_out arg;
1176 	struct iovec iov[3];
1177 	size_t count = 1;
1178 
1179 	memset(&arg, 0, sizeof(arg));
1180 	arg.result = result;
1181 	iov[count].iov_base = &arg;
1182 	iov[count].iov_len = sizeof(arg);
1183 	count++;
1184 
1185 	if (size) {
1186 		iov[count].iov_base = (char *) buf;
1187 		iov[count].iov_len = size;
1188 		count++;
1189 	}
1190 
1191 	return send_reply_iov(req, 0, iov, count);
1192 }
1193 
1194 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1195 			 int count)
1196 {
1197 	struct iovec *padded_iov;
1198 	struct fuse_ioctl_out arg;
1199 	int res;
1200 
1201 	padded_iov = malloc((count + 2) * sizeof(struct iovec));
1202 	if (padded_iov == NULL)
1203 		return fuse_reply_err(req, ENOMEM);
1204 
1205 	memset(&arg, 0, sizeof(arg));
1206 	arg.result = result;
1207 	padded_iov[1].iov_base = &arg;
1208 	padded_iov[1].iov_len = sizeof(arg);
1209 
1210 	memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1211 
1212 	res = send_reply_iov(req, 0, padded_iov, count + 2);
1213 	free(padded_iov);
1214 
1215 	return res;
1216 }
1217 
1218 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1219 {
1220 	struct fuse_poll_out arg;
1221 
1222 	memset(&arg, 0, sizeof(arg));
1223 	arg.revents = revents;
1224 
1225 	return send_reply_ok(req, &arg, sizeof(arg));
1226 }
1227 
1228 int fuse_reply_lseek(fuse_req_t req, off_t off)
1229 {
1230 	struct fuse_lseek_out arg;
1231 
1232 	memset(&arg, 0, sizeof(arg));
1233 	arg.offset = off;
1234 
1235 	return send_reply_ok(req, &arg, sizeof(arg));
1236 }
1237 
1238 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1239 {
1240 	char *name = (char *) inarg;
1241 
1242 	if (req->se->op.lookup)
1243 		req->se->op.lookup(req, nodeid, name);
1244 	else
1245 		fuse_reply_err(req, ENOSYS);
1246 }
1247 
1248 static void do_lookup_postfilter(fuse_req_t req, fuse_ino_t nodeid, uint32_t error_in,
1249 								 const void *inarg, size_t size)
1250 {
1251 	if (req->se->op.lookup_postfilter) {
1252 		char *name = (char *) inarg;
1253 		size_t namelen = strlen(name);
1254 
1255 		if (size != namelen + 1 + sizeof(struct fuse_entry_out)
1256 						+ sizeof(struct fuse_entry_bpf_out)) {
1257 			fuse_log(FUSE_LOG_ERR, "%s: Bad size", __func__);
1258 			fuse_reply_err(req, EIO);
1259 		} else {
1260 			struct fuse_entry_out *feo = (void *) (name + namelen + 1);
1261 			struct fuse_entry_bpf_out *febo = (char *) feo + sizeof(*feo);
1262 
1263 			req->se->op.lookup_postfilter(req, nodeid, error_in, name, feo,
1264 											febo);
1265 		}
1266 	} else
1267 		fuse_reply_err(req, ENOSYS);
1268 }
1269 
1270 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1271 {
1272 	struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1273 
1274 	if (req->se->op.forget)
1275 		req->se->op.forget(req, nodeid, arg->nlookup);
1276 	else
1277 		fuse_reply_none(req);
1278 }
1279 
/*
 * FUSE_BATCH_FORGET: forget multiple inodes in one request.  Prefer the
 * filesystem's forget_multi handler; otherwise fan out to op.forget with
 * a dummy request per entry (each forget callback frees its own request
 * via fuse_reply_none).  A batch forget never receives a kernel reply.
 */
static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
			    const void *inarg)
{
	struct fuse_batch_forget_in *arg = (void *) inarg;
	struct fuse_forget_one *param = (void *) PARAM(arg);
	unsigned int i;

	(void) nodeid;

	if (req->se->op.forget_multi) {
		req->se->op.forget_multi(req, arg->count,
				     (struct fuse_forget_data *) param);
	} else if (req->se->op.forget) {
		for (i = 0; i < arg->count; i++) {
			struct fuse_forget_one *forget = &param[i];
			struct fuse_req *dummy_req;

			/* On allocation failure the remaining entries are
			   silently dropped; forgets are best-effort */
			dummy_req = fuse_ll_alloc_req(req->se);
			if (dummy_req == NULL)
				break;

			dummy_req->unique = req->unique;
			dummy_req->ctx = req->ctx;
			dummy_req->ch = NULL;

			req->se->op.forget(dummy_req, forget->nodeid,
					  forget->nlookup);
		}
		/* Free the original request; no reply goes to the kernel */
		fuse_reply_none(req);
	} else {
		fuse_reply_none(req);
	}
}
1313 
1314 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1315 {
1316 	struct fuse_file_info *fip = NULL;
1317 	struct fuse_file_info fi;
1318 
1319 	if (req->se->conn.proto_minor >= 9) {
1320 		struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1321 
1322 		if (arg->getattr_flags & FUSE_GETATTR_FH) {
1323 			memset(&fi, 0, sizeof(fi));
1324 			fi.fh = arg->fh;
1325 			fip = &fi;
1326 		}
1327 	}
1328 
1329 	if (req->se->op.getattr)
1330 		req->se->op.getattr(req, nodeid, fip);
1331 	else
1332 		fuse_reply_err(req, ENOSYS);
1333 }
1334 
1335 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1336 {
1337 	struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1338 
1339 	if (req->se->op.setattr) {
1340 		struct fuse_file_info *fi = NULL;
1341 		struct fuse_file_info fi_store;
1342 		struct stat stbuf;
1343 		memset(&stbuf, 0, sizeof(stbuf));
1344 		convert_attr(arg, &stbuf);
1345 		if (arg->valid & FATTR_FH) {
1346 			arg->valid &= ~FATTR_FH;
1347 			memset(&fi_store, 0, sizeof(fi_store));
1348 			fi = &fi_store;
1349 			fi->fh = arg->fh;
1350 		}
1351 		arg->valid &=
1352 			FUSE_SET_ATTR_MODE	|
1353 			FUSE_SET_ATTR_UID	|
1354 			FUSE_SET_ATTR_GID	|
1355 			FUSE_SET_ATTR_SIZE	|
1356 			FUSE_SET_ATTR_ATIME	|
1357 			FUSE_SET_ATTR_MTIME	|
1358 			FUSE_SET_ATTR_KILL_SUID |
1359 			FUSE_SET_ATTR_KILL_SGID |
1360 			FUSE_SET_ATTR_ATIME_NOW	|
1361 			FUSE_SET_ATTR_MTIME_NOW |
1362 			FUSE_SET_ATTR_CTIME;
1363 
1364 		req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1365 	} else
1366 		fuse_reply_err(req, ENOSYS);
1367 }
1368 
1369 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1370 {
1371 	struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1372 
1373 	if (req->se->op.access)
1374 		req->se->op.access(req, nodeid, arg->mask);
1375 	else
1376 		fuse_reply_err(req, ENOSYS);
1377 }
1378 
1379 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1380 {
1381 	(void) inarg;
1382 
1383 	if (req->se->op.readlink)
1384 		req->se->op.readlink(req, nodeid);
1385 	else
1386 		fuse_reply_err(req, ENOSYS);
1387 }
1388 
1389 static void do_canonical_path(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1390 {
1391 	(void) inarg;
1392 
1393 	if (req->se->op.canonical_path)
1394 		req->se->op.canonical_path(req, nodeid);
1395 	else
1396 		fuse_reply_err(req, ENOSYS);
1397 }
1398 
1399 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1400 {
1401 	struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1402 	char *name = PARAM(arg);
1403 
1404 	if (req->se->conn.proto_minor >= 12)
1405 		req->ctx.umask = arg->umask;
1406 	else
1407 		name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1408 
1409 	if (req->se->op.mknod)
1410 		req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1411 	else
1412 		fuse_reply_err(req, ENOSYS);
1413 }
1414 
1415 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1416 {
1417 	struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1418 
1419 	if (req->se->conn.proto_minor >= 12)
1420 		req->ctx.umask = arg->umask;
1421 
1422 	if (req->se->op.mkdir)
1423 		req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1424 	else
1425 		fuse_reply_err(req, ENOSYS);
1426 }
1427 
1428 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1429 {
1430 	char *name = (char *) inarg;
1431 
1432 	if (req->se->op.unlink)
1433 		req->se->op.unlink(req, nodeid, name);
1434 	else
1435 		fuse_reply_err(req, ENOSYS);
1436 }
1437 
1438 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1439 {
1440 	char *name = (char *) inarg;
1441 
1442 	if (req->se->op.rmdir)
1443 		req->se->op.rmdir(req, nodeid, name);
1444 	else
1445 		fuse_reply_err(req, ENOSYS);
1446 }
1447 
1448 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1449 {
1450 	char *name = (char *) inarg;
1451 	char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1452 
1453 	if (req->se->op.symlink)
1454 		req->se->op.symlink(req, linkname, nodeid, name);
1455 	else
1456 		fuse_reply_err(req, ENOSYS);
1457 }
1458 
1459 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1460 {
1461 	struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1462 	char *oldname = PARAM(arg);
1463 	char *newname = oldname + strlen(oldname) + 1;
1464 
1465 	if (req->se->op.rename)
1466 		req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1467 				  0);
1468 	else
1469 		fuse_reply_err(req, ENOSYS);
1470 }
1471 
1472 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1473 {
1474 	struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1475 	char *oldname = PARAM(arg);
1476 	char *newname = oldname + strlen(oldname) + 1;
1477 
1478 	if (req->se->op.rename)
1479 		req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1480 				  arg->flags);
1481 	else
1482 		fuse_reply_err(req, ENOSYS);
1483 }
1484 
1485 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1486 {
1487 	struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1488 
1489 	if (req->se->op.link)
1490 		req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1491 	else
1492 		fuse_reply_err(req, ENOSYS);
1493 }
1494 
1495 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1496 {
1497 	struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1498 
1499 	if (req->se->op.create) {
1500 		struct fuse_file_info fi;
1501 		char *name = PARAM(arg);
1502 
1503 		memset(&fi, 0, sizeof(fi));
1504 		fi.flags = arg->flags;
1505 
1506 		if (req->se->conn.proto_minor >= 12)
1507 			req->ctx.umask = arg->umask;
1508 		else
1509 			name = (char *) inarg + sizeof(struct fuse_open_in);
1510 
1511 		req->se->op.create(req, nodeid, name, arg->mode, &fi);
1512 	} else
1513 		fuse_reply_err(req, ENOSYS);
1514 }
1515 
1516 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1517 {
1518 	struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1519 	struct fuse_file_info fi;
1520 
1521 	memset(&fi, 0, sizeof(fi));
1522 	fi.flags = arg->flags;
1523 
1524 	if (req->se->op.open)
1525 		req->se->op.open(req, nodeid, &fi);
1526 	else if (req->se->conn.want & FUSE_CAP_NO_OPEN_SUPPORT)
1527 		fuse_reply_err(req, ENOSYS);
1528 	else
1529 		fuse_reply_open(req, &fi);
1530 }
1531 
1532 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1533 {
1534 	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1535 
1536 	if (req->se->op.read) {
1537 		struct fuse_file_info fi;
1538 
1539 		memset(&fi, 0, sizeof(fi));
1540 		fi.fh = arg->fh;
1541 		if (req->se->conn.proto_minor >= 9) {
1542 			fi.lock_owner = arg->lock_owner;
1543 			fi.flags = arg->flags;
1544 		}
1545 		req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1546 	} else
1547 		fuse_reply_err(req, ENOSYS);
1548 }
1549 
1550 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1551 {
1552 	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1553 	struct fuse_file_info fi;
1554 	char *param;
1555 
1556 	memset(&fi, 0, sizeof(fi));
1557 	fi.fh = arg->fh;
1558 	fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1559 
1560 	if (req->se->conn.proto_minor < 9) {
1561 		param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1562 	} else {
1563 		fi.lock_owner = arg->lock_owner;
1564 		fi.flags = arg->flags;
1565 		param = PARAM(arg);
1566 	}
1567 
1568 	if (req->se->op.write)
1569 		req->se->op.write(req, nodeid, param, arg->size,
1570 				 arg->offset, &fi);
1571 	else
1572 		fuse_reply_err(req, ENOSYS);
1573 }
1574 
/*
 * FUSE_WRITE via the write_buf interface: the payload may live in memory
 * or still be sitting in a splice pipe (FUSE_BUF_IS_FD).  The buffer size
 * is trimmed to exclude the request headers before being capped to the
 * kernel-announced write size.
 */
static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
			 const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};
	struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
	struct fuse_file_info fi;

	memset(&fi, 0, sizeof(fi));
	fi.fh = arg->fh;
	fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;

	if (se->conn.proto_minor < 9) {
		/* Compat path: shorter fuse_write_in, data always in memory */
		bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			FUSE_COMPAT_WRITE_IN_SIZE;
		assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
	} else {
		fi.lock_owner = arg->lock_owner;
		fi.flags = arg->flags;
		/* When the data is in a pipe, mem stays unset; only adjust
		   the pointer for in-memory payloads */
		if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
			bufv.buf[0].mem = PARAM(arg);

		bufv.buf[0].size -= sizeof(struct fuse_in_header) +
			sizeof(struct fuse_write_in);
	}
	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
		fuse_reply_err(req, EIO);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);

out:
	/* Need to reset the pipe if ->write_buf() didn't consume all data */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
1618 
1619 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1620 {
1621 	struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1622 	struct fuse_file_info fi;
1623 
1624 	memset(&fi, 0, sizeof(fi));
1625 	fi.fh = arg->fh;
1626 	fi.flush = 1;
1627 	if (req->se->conn.proto_minor >= 7)
1628 		fi.lock_owner = arg->lock_owner;
1629 
1630 	if (req->se->op.flush)
1631 		req->se->op.flush(req, nodeid, &fi);
1632 	else
1633 		fuse_reply_err(req, ENOSYS);
1634 }
1635 
1636 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1637 {
1638 	struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1639 	struct fuse_file_info fi;
1640 
1641 	memset(&fi, 0, sizeof(fi));
1642 	fi.flags = arg->flags;
1643 	fi.fh = arg->fh;
1644 	if (req->se->conn.proto_minor >= 8) {
1645 		fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1646 		fi.lock_owner = arg->lock_owner;
1647 	}
1648 	if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1649 		fi.flock_release = 1;
1650 		fi.lock_owner = arg->lock_owner;
1651 	}
1652 
1653 	if (req->se->op.release)
1654 		req->se->op.release(req, nodeid, &fi);
1655 	else
1656 		fuse_reply_err(req, 0);
1657 }
1658 
1659 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1660 {
1661 	struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1662 	struct fuse_file_info fi;
1663 	int datasync = arg->fsync_flags & 1;
1664 
1665 	memset(&fi, 0, sizeof(fi));
1666 	fi.fh = arg->fh;
1667 
1668 	if (req->se->op.fsync)
1669 		req->se->op.fsync(req, nodeid, datasync, &fi);
1670 	else
1671 		fuse_reply_err(req, ENOSYS);
1672 }
1673 
1674 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1675 {
1676 	struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1677 	struct fuse_file_info fi;
1678 
1679 	memset(&fi, 0, sizeof(fi));
1680 	fi.flags = arg->flags;
1681 
1682 	if (req->se->op.opendir)
1683 		req->se->op.opendir(req, nodeid, &fi);
1684 	else if (req->se->conn.want & FUSE_CAP_NO_OPENDIR_SUPPORT)
1685 		fuse_reply_err(req, ENOSYS);
1686 	else
1687 		fuse_reply_open(req, &fi);
1688 }
1689 
1690 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1691 {
1692 	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1693 	struct fuse_file_info fi;
1694 
1695 	memset(&fi, 0, sizeof(fi));
1696 	fi.fh = arg->fh;
1697 
1698 	if (req->se->op.readdir)
1699 		req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1700 	else
1701 		fuse_reply_err(req, ENOSYS);
1702 }
1703 
1704 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1705 {
1706 	struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1707 	struct fuse_file_info fi;
1708 
1709 	memset(&fi, 0, sizeof(fi));
1710 	fi.fh = arg->fh;
1711 
1712 	if (req->se->op.readdirplus)
1713 		req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1714 	else
1715 		fuse_reply_err(req, ENOSYS);
1716 }
1717 
1718 static void do_readdir_postfilter(fuse_req_t req, fuse_ino_t nodeid,
1719 									uint32_t error_in, const void *inarg,
1720 									size_t size) {
1721 	struct fuse_read_in *fri = (struct fuse_read_in *) inarg;
1722 	struct fuse_read_out *fro = (struct fuse_read_out *) (fri + 1);
1723 	struct fuse_dirent *dirents = (struct fuse_dirent *) (fro + 1);
1724 	struct fuse_file_info fi;
1725 
1726 	memset(&fi, 0, sizeof(fi));
1727 	fi.fh = fri->fh;
1728 
1729 	if (req->se->op.readdirpostfilter)
1730 		req->se->op.readdirpostfilter(req, nodeid, error_in, fri->offset,
1731 										fro->offset,
1732 										size - sizeof(*fri) - sizeof(*fro),
1733 										dirents, &fi);
1734 	else
1735 		fuse_reply_err(req, ENOSYS);
1736 }
1737 
1738 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1739 {
1740 	struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1741 	struct fuse_file_info fi;
1742 
1743 	memset(&fi, 0, sizeof(fi));
1744 	fi.flags = arg->flags;
1745 	fi.fh = arg->fh;
1746 
1747 	if (req->se->op.releasedir)
1748 		req->se->op.releasedir(req, nodeid, &fi);
1749 	else
1750 		fuse_reply_err(req, 0);
1751 }
1752 
1753 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1754 {
1755 	struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1756 	struct fuse_file_info fi;
1757 	int datasync = arg->fsync_flags & 1;
1758 
1759 	memset(&fi, 0, sizeof(fi));
1760 	fi.fh = arg->fh;
1761 
1762 	if (req->se->op.fsyncdir)
1763 		req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1764 	else
1765 		fuse_reply_err(req, ENOSYS);
1766 }
1767 
1768 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1769 {
1770 	(void) nodeid;
1771 	(void) inarg;
1772 
1773 	if (req->se->op.statfs)
1774 		req->se->op.statfs(req, nodeid);
1775 	else {
1776 		struct statvfs buf = {
1777 			.f_namemax = 255,
1778 			.f_bsize = 512,
1779 		};
1780 		fuse_reply_statfs(req, &buf);
1781 	}
1782 }
1783 
1784 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1785 {
1786 	struct fuse_session *se = req->se;
1787 	unsigned int xattr_ext = !!(se->conn.want & FUSE_CAP_SETXATTR_EXT);
1788 	struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1789 	char *name = xattr_ext ? PARAM(arg) :
1790 		     (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1791 	char *value = name + strlen(name) + 1;
1792 
1793 	/* XXX:The API should be extended to support extra_flags/setxattr_flags */
1794 	if (req->se->op.setxattr)
1795 		req->se->op.setxattr(req, nodeid, name, value, arg->size,
1796 				    arg->flags);
1797 	else
1798 		fuse_reply_err(req, ENOSYS);
1799 }
1800 
1801 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1802 {
1803 	struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1804 
1805 	if (req->se->op.getxattr)
1806 		req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1807 	else
1808 		fuse_reply_err(req, ENOSYS);
1809 }
1810 
1811 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1812 {
1813 	struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1814 
1815 	if (req->se->op.listxattr)
1816 		req->se->op.listxattr(req, nodeid, arg->size);
1817 	else
1818 		fuse_reply_err(req, ENOSYS);
1819 }
1820 
1821 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1822 {
1823 	char *name = (char *) inarg;
1824 
1825 	if (req->se->op.removexattr)
1826 		req->se->op.removexattr(req, nodeid, name);
1827 	else
1828 		fuse_reply_err(req, ENOSYS);
1829 }
1830 
1831 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1832 				   struct flock *flock)
1833 {
1834 	memset(flock, 0, sizeof(struct flock));
1835 	flock->l_type = fl->type;
1836 	flock->l_whence = SEEK_SET;
1837 	flock->l_start = fl->start;
1838 	if (fl->end == OFFSET_MAX)
1839 		flock->l_len = 0;
1840 	else
1841 		flock->l_len = fl->end - fl->start + 1;
1842 	flock->l_pid = fl->pid;
1843 }
1844 
1845 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1846 {
1847 	struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1848 	struct fuse_file_info fi;
1849 	struct flock flock;
1850 
1851 	memset(&fi, 0, sizeof(fi));
1852 	fi.fh = arg->fh;
1853 	fi.lock_owner = arg->owner;
1854 
1855 	convert_fuse_file_lock(&arg->lk, &flock);
1856 	if (req->se->op.getlk)
1857 		req->se->op.getlk(req, nodeid, &fi, &flock);
1858 	else
1859 		fuse_reply_err(req, ENOSYS);
1860 }
1861 
1862 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1863 			    const void *inarg, int sleep)
1864 {
1865 	struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1866 	struct fuse_file_info fi;
1867 	struct flock flock;
1868 
1869 	memset(&fi, 0, sizeof(fi));
1870 	fi.fh = arg->fh;
1871 	fi.lock_owner = arg->owner;
1872 
1873 	if (arg->lk_flags & FUSE_LK_FLOCK) {
1874 		int op = 0;
1875 
1876 		switch (arg->lk.type) {
1877 		case F_RDLCK:
1878 			op = LOCK_SH;
1879 			break;
1880 		case F_WRLCK:
1881 			op = LOCK_EX;
1882 			break;
1883 		case F_UNLCK:
1884 			op = LOCK_UN;
1885 			break;
1886 		}
1887 		if (!sleep)
1888 			op |= LOCK_NB;
1889 
1890 		if (req->se->op.flock)
1891 			req->se->op.flock(req, nodeid, &fi, op);
1892 		else
1893 			fuse_reply_err(req, ENOSYS);
1894 	} else {
1895 		convert_fuse_file_lock(&arg->lk, &flock);
1896 		if (req->se->op.setlk)
1897 			req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1898 		else
1899 			fuse_reply_err(req, ENOSYS);
1900 	}
1901 }
1902 
/* FUSE_SETLK: non-blocking lock attempt (sleep == 0) */
static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 0);
}
1907 
/* FUSE_SETLKW: blocking lock attempt (sleep == 1) */
static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	do_setlk_common(req, nodeid, inarg, 1);
}
1912 
/*
 * Look for the in-flight request matching an INTERRUPT's unique id.
 * Called with se->lock held; temporarily drops and re-acquires it to
 * respect the curr->lock before se->lock ordering.  Returns 1 if the
 * target was found (or the interrupt was already queued), 0 otherwise.
 */
static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->list.next; curr != &se->list; curr = curr->next) {
		if (curr->unique == req->u.i.unique) {
			fuse_interrupt_func_t func;
			void *data;

			/* Pin the request so it survives the unlock window */
			curr->ctr++;
			pthread_mutex_unlock(&se->lock);

			/* Ugh, ugly locking */
			pthread_mutex_lock(&curr->lock);
			pthread_mutex_lock(&se->lock);
			curr->interrupted = 1;
			func = curr->u.ni.func;
			data = curr->u.ni.data;
			pthread_mutex_unlock(&se->lock);
			if (func)
				func(curr, data);
			pthread_mutex_unlock(&curr->lock);

			pthread_mutex_lock(&se->lock);
			curr->ctr--;
			/* Last reference gone: the request was replied to
			   while we held it, so free it here */
			if (!curr->ctr) {
				destroy_req(curr);
			}

			return 1;
		}
	}
	/* Duplicate INTERRUPT for one already queued — report found */
	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->u.i.unique)
			return 1;
	}
	return 0;
}
1952 
/*
 * FUSE_INTERRUPT: if the target request is currently being processed,
 * notify it via find_interrupted; otherwise park this interrupt on
 * se->interrupts so check_interrupt can match it when the target
 * arrives.  An INTERRUPT request itself never gets a reply.
 */
static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
	struct fuse_session *se = req->se;

	(void) nodeid;
	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
			(unsigned long long) arg->unique);

	req->u.i.unique = arg->unique;

	pthread_mutex_lock(&se->lock);
	if (find_interrupted(se, req)) {
		/* Target handled (or duplicate): this request is done */
		fuse_chan_put(req->ch);
		req->ch = NULL;
		destroy_req(req);
	} else
		list_add_req(req, &se->interrupts);
	pthread_mutex_unlock(&se->lock);
}
1974 
/*
 * Match a newly arrived request against pending interrupts.  Called with
 * se->lock held.  If an interrupt targets this request, mark it
 * interrupted, drop the stored interrupt, and return NULL.  Otherwise
 * pop one leftover interrupt (if any) for the caller to re-process.
 */
static struct fuse_req *check_interrupt(struct fuse_session *se,
					struct fuse_req *req)
{
	struct fuse_req *curr;

	for (curr = se->interrupts.next; curr != &se->interrupts;
	     curr = curr->next) {
		if (curr->u.i.unique == req->unique) {
			req->interrupted = 1;
			list_del_req(curr);
			fuse_chan_put(curr->ch);
			curr->ch = NULL;
			destroy_req(curr);
			return NULL;
		}
	}
	/* No match: hand back one queued interrupt for retrying */
	curr = se->interrupts.next;
	if (curr != &se->interrupts) {
		list_del_req(curr);
		list_init_req(curr);
		return curr;
	} else
		return NULL;
}
1999 
2000 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2001 {
2002 	struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
2003 
2004 	if (req->se->op.bmap)
2005 		req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
2006 	else
2007 		fuse_reply_err(req, ENOSYS);
2008 }
2009 
2010 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2011 {
2012 	struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
2013 	unsigned int flags = arg->flags;
2014 	void *in_buf = arg->in_size ? PARAM(arg) : NULL;
2015 	struct fuse_file_info fi;
2016 
2017 	if (flags & FUSE_IOCTL_DIR &&
2018 	    !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
2019 		fuse_reply_err(req, ENOTTY);
2020 		return;
2021 	}
2022 
2023 	memset(&fi, 0, sizeof(fi));
2024 	fi.fh = arg->fh;
2025 
2026 	if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
2027 	    !(flags & FUSE_IOCTL_32BIT)) {
2028 		req->ioctl_64bit = 1;
2029 	}
2030 
2031 	if (req->se->op.ioctl)
2032 		req->se->op.ioctl(req, nodeid, arg->cmd,
2033 				 (void *)(uintptr_t)arg->arg, &fi, flags,
2034 				 in_buf, arg->in_size, arg->out_size);
2035 	else
2036 		fuse_reply_err(req, ENOSYS);
2037 }
2038 
/* Release a poll handle allocated in do_poll (free(NULL) is a no-op) */
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
	free(ph);
}
2043 
2044 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2045 {
2046 	struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
2047 	struct fuse_file_info fi;
2048 
2049 	memset(&fi, 0, sizeof(fi));
2050 	fi.fh = arg->fh;
2051 	fi.poll_events = arg->events;
2052 
2053 	if (req->se->op.poll) {
2054 		struct fuse_pollhandle *ph = NULL;
2055 
2056 		if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
2057 			ph = malloc(sizeof(struct fuse_pollhandle));
2058 			if (ph == NULL) {
2059 				fuse_reply_err(req, ENOMEM);
2060 				return;
2061 			}
2062 			ph->kh = arg->kh;
2063 			ph->se = req->se;
2064 		}
2065 
2066 		req->se->op.poll(req, nodeid, &fi, ph);
2067 	} else {
2068 		fuse_reply_err(req, ENOSYS);
2069 	}
2070 }
2071 
2072 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2073 {
2074 	struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
2075 	struct fuse_file_info fi;
2076 
2077 	memset(&fi, 0, sizeof(fi));
2078 	fi.fh = arg->fh;
2079 
2080 	if (req->se->op.fallocate)
2081 		req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
2082 	else
2083 		fuse_reply_err(req, ENOSYS);
2084 }
2085 
2086 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
2087 {
2088 	struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
2089 	struct fuse_file_info fi_in, fi_out;
2090 
2091 	memset(&fi_in, 0, sizeof(fi_in));
2092 	fi_in.fh = arg->fh_in;
2093 
2094 	memset(&fi_out, 0, sizeof(fi_out));
2095 	fi_out.fh = arg->fh_out;
2096 
2097 
2098 	if (req->se->op.copy_file_range)
2099 		req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
2100 					    &fi_in, arg->nodeid_out,
2101 					    arg->off_out, &fi_out, arg->len,
2102 					    arg->flags);
2103 	else
2104 		fuse_reply_err(req, ENOSYS);
2105 }
2106 
2107 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2108 {
2109 	struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
2110 	struct fuse_file_info fi;
2111 
2112 	memset(&fi, 0, sizeof(fi));
2113 	fi.fh = arg->fh;
2114 
2115 	if (req->se->op.lseek)
2116 		req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
2117 	else
2118 		fuse_reply_err(req, ENOSYS);
2119 }
2120 
/* Prevent bogus data races (bogus since "init" is called before
 * multi-threading becomes relevant) */
static __attribute__((no_sanitize("thread")))
void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
	struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
	struct fuse_init_out outarg;
	struct fuse_session *se = req->se;
	size_t bufsize = se->bufsize;
	size_t outargsize = sizeof(outarg);
	uint64_t inargflags = 0;
	uint64_t outargflags = 0;
	(void) nodeid;
	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
		if (arg->major == 7 && arg->minor >= 6) {
			fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
			fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
				arg->max_readahead);
		}
	}
	se->conn.proto_major = arg->major;
	se->conn.proto_minor = arg->minor;
	se->conn.capable = 0;
	se->conn.want = 0;

	memset(&outarg, 0, sizeof(outarg));
	outarg.major = FUSE_KERNEL_VERSION;
	outarg.minor = FUSE_KERNEL_MINOR_VERSION;

	if (arg->major < 7) {
		fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
			arg->major, arg->minor);
		fuse_reply_err(req, EPROTO);
		return;
	}

	if (arg->major > 7) {
		/* Wait for a second INIT request with a 7.X version */
		send_reply_ok(req, &outarg, sizeof(outarg));
		return;
	}

	if (arg->minor >= 6) {
		/* Translate the kernel-advertised FUSE_* init flags into
		 * the library's FUSE_CAP_* capability bits. */
		if (arg->max_readahead < se->conn.max_readahead)
			se->conn.max_readahead = arg->max_readahead;
		inargflags = arg->flags;
		/* With FUSE_INIT_EXT the flag word is 64 bits wide, split
		 * across arg->flags (low) and arg->flags2 (high) */
		if (inargflags & FUSE_INIT_EXT)
			inargflags = inargflags | (uint64_t) arg->flags2 << 32;
		if (inargflags & FUSE_ASYNC_READ)
			se->conn.capable |= FUSE_CAP_ASYNC_READ;
		if (inargflags & FUSE_POSIX_LOCKS)
			se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
		if (inargflags & FUSE_ATOMIC_O_TRUNC)
			se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
		if (inargflags & FUSE_EXPORT_SUPPORT)
			se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
		if (inargflags & FUSE_DONT_MASK)
			se->conn.capable |= FUSE_CAP_DONT_MASK;
		if (inargflags & FUSE_FLOCK_LOCKS)
			se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
		if (inargflags & FUSE_AUTO_INVAL_DATA)
			se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
		if (inargflags & FUSE_DO_READDIRPLUS)
			se->conn.capable |= FUSE_CAP_READDIRPLUS;
		if (inargflags & FUSE_READDIRPLUS_AUTO)
			se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
		if (inargflags & FUSE_ASYNC_DIO)
			se->conn.capable |= FUSE_CAP_ASYNC_DIO;
		if (inargflags & FUSE_WRITEBACK_CACHE)
			se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
		if (inargflags & FUSE_NO_OPEN_SUPPORT)
			se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
		if (inargflags & FUSE_PARALLEL_DIROPS)
			se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
		if (inargflags & FUSE_POSIX_ACL)
			se->conn.capable |= FUSE_CAP_POSIX_ACL;
		if (inargflags & FUSE_HANDLE_KILLPRIV)
			se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
		if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
			se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2;
		if (inargflags & FUSE_CACHE_SYMLINKS)
			se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
		if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
			se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
		if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
			se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
		if (inargflags & FUSE_SETXATTR_EXT)
			se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
		/* Without FUSE_MAX_PAGES the kernel is limited to the
		 * legacy per-request page count, so cap our buffer size
		 * accordingly */
		if (!(inargflags & FUSE_MAX_PAGES)) {
			size_t max_bufsize =
				FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
				+ FUSE_BUFFER_HEADER_SIZE;
			if (bufsize > max_bufsize) {
				bufsize = max_bufsize;
			}
		}
		if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
			se->conn.capable |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
		if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
			se->conn.capable |= FUSE_CAP_EXPIRE_ONLY;
		if (inargflags & FUSE_PASSTHROUGH_UPSTREAM)
			se->conn.capable |= FUSE_CAP_PASSTHROUGH_UPSTREAM;
		/* Passthrough uses the top bit of the flag word, whose
		 * position depends on whether the extended (64-bit)
		 * flag layout is in use */
		if (inargflags & FUSE_INIT_EXT) {
			if (inargflags & (1ULL << 63))
				se->conn.capable |= FUSE_CAP_PASSTHROUGH;
		} else {
			if (inargflags & (1 << 31))
				se->conn.capable |= FUSE_CAP_PASSTHROUGH;
		}
	} else {
		se->conn.max_readahead = 0;
	}

	if (se->conn.proto_minor >= 14) {
#ifdef HAVE_SPLICE
#ifdef HAVE_VMSPLICE
		/* Splice-based output only works if either the default
		 * I/O path or a custom splice_send callback is in use */
		if ((se->io == NULL) || (se->io->splice_send != NULL)) {
			se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
		}
#endif
		if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
			se->conn.capable |= FUSE_CAP_SPLICE_READ;
		}
#endif
	}
	if (se->conn.proto_minor >= 18)
		se->conn.capable |= FUSE_CAP_IOCTL_DIR;

	/* Default settings for modern filesystems.
	 *
	 * Most of these capabilities were disabled by default in
	 * libfuse2 for backwards compatibility reasons. In libfuse3,
	 * we can finally enable them by default (as long as they're
	 * supported by the kernel).
	 */
#define LL_SET_DEFAULT(cond, cap) \
	if ((cond) && (se->conn.capable & (cap))) \
		se->conn.want |= (cap)
	LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
	LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
	LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
	LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
	LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
	LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
	LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
		       FUSE_CAP_POSIX_LOCKS);
	LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
	LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
	LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
		       FUSE_CAP_READDIRPLUS_AUTO);

	/* This could safely become default, but libfuse needs an API extension
	 * to support it
	 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
	 */

	se->conn.time_gran = 1;

	if (bufsize < FUSE_MIN_READ_BUFFER) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
			bufsize);
		bufsize = FUSE_MIN_READ_BUFFER;
	}
	se->bufsize = bufsize;

	/* Let the filesystem's init() callback adjust se->conn before the
	 * negotiated settings are validated and sent back */
	se->got_init = 1;
	if (se->op.init)
		se->op.init(se->userdata, &se->conn);

	if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
		se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;

	/* The filesystem must not request capabilities the kernel lacks */
	if (se->conn.want & (~se->conn.capable)) {
		fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
			"0x%x that are not supported by kernel, aborting.\n",
			se->conn.want & (~se->conn.capable));
		fuse_reply_err(req, EPROTO);
		se->error = -EPROTO;
		fuse_session_exit(se);
		return;
	}

	/* max_read was already fixed by the mount options; init() must not
	 * have changed it to something different */
	unsigned max_read_mo = get_max_read(se->mo);
	if (se->conn.max_read != max_read_mo) {
		fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
			"requested different maximum read size (%u vs %u)\n",
			se->conn.max_read, max_read_mo);
		fuse_reply_err(req, EPROTO);
		se->error = -EPROTO;
		fuse_session_exit(se);
		return;
	}

	/* Shrink the read buffer if init() lowered max_write */
	if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
		se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
	}
	if (arg->flags & FUSE_MAX_PAGES) {
		outarg.flags |= FUSE_MAX_PAGES;
		outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
	}
	/* Now translate the negotiated FUSE_CAP_* bits back into the
	 * FUSE_* wire flags that are reported to the kernel */
	outargflags = outarg.flags;
	/* Always enable big writes, this is superseded
	   by the max_write option */
	outargflags |= FUSE_BIG_WRITES;

	if (se->conn.want & FUSE_CAP_ASYNC_READ)
		outargflags |= FUSE_ASYNC_READ;
	if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
		outargflags |= FUSE_POSIX_LOCKS;
	if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
		outargflags |= FUSE_ATOMIC_O_TRUNC;
	if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
		outargflags |= FUSE_EXPORT_SUPPORT;
	if (se->conn.want & FUSE_CAP_DONT_MASK)
		outargflags |= FUSE_DONT_MASK;
	if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
		outargflags |= FUSE_FLOCK_LOCKS;
	if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
		outargflags |= FUSE_AUTO_INVAL_DATA;
	if (se->conn.want & FUSE_CAP_READDIRPLUS)
		outargflags |= FUSE_DO_READDIRPLUS;
	if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
		outargflags |= FUSE_READDIRPLUS_AUTO;
	if (se->conn.want & FUSE_CAP_ASYNC_DIO)
		outargflags |= FUSE_ASYNC_DIO;
	if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
		outargflags |= FUSE_WRITEBACK_CACHE;
	if (se->conn.want & FUSE_CAP_PARALLEL_DIROPS)
		outargflags |= FUSE_PARALLEL_DIROPS;
	if (se->conn.want & FUSE_CAP_POSIX_ACL)
		outargflags |= FUSE_POSIX_ACL;
	if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV)
		outargflags |= FUSE_HANDLE_KILLPRIV;
	if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV_V2)
		outargflags |= FUSE_HANDLE_KILLPRIV_V2;
	if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
		outargflags |= FUSE_CACHE_SYMLINKS;
	if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
		outargflags |= FUSE_EXPLICIT_INVAL_DATA;
	if (se->conn.want & FUSE_CAP_SETXATTR_EXT)
		outargflags |= FUSE_SETXATTR_EXT;
	if (se->conn.want & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
		outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
	if (se->conn.want & FUSE_CAP_PASSTHROUGH_UPSTREAM) {
		outargflags |= FUSE_PASSTHROUGH_UPSTREAM;
		/*
		 * outarg.max_stack_depth includes the fuse stack layer,
		 * so it is one more than max_backing_stack_depth.
		 */
		outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
	}
	if (se->conn.want & FUSE_CAP_PASSTHROUGH) {
		if (inargflags & FUSE_INIT_EXT)
			outargflags |= (1ULL << 63);
		else
			outargflags |= (1 << 31);
	}
	if (inargflags & FUSE_INIT_EXT) {
		outargflags |= FUSE_INIT_EXT;
		outarg.flags2 = outargflags >> 32;
	}

	outarg.flags = outargflags;

	outarg.max_readahead = se->conn.max_readahead;
	outarg.max_write = se->conn.max_write;
	if (se->conn.proto_minor >= 13) {
		/* max_background is a 16-bit field on the wire */
		if (se->conn.max_background >= (1 << 16))
			se->conn.max_background = (1 << 16) - 1;
		if (se->conn.congestion_threshold > se->conn.max_background)
			se->conn.congestion_threshold = se->conn.max_background;
		if (!se->conn.congestion_threshold) {
			se->conn.congestion_threshold =
				se->conn.max_background * 3 / 4;
		}

		outarg.max_background = se->conn.max_background;
		outarg.congestion_threshold = se->conn.congestion_threshold;
	}
	if (se->conn.proto_minor >= 23)
		outarg.time_gran = se->conn.time_gran;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG, "   INIT: %u.%u\n", outarg.major, outarg.minor);
		fuse_log(FUSE_LOG_DEBUG, "   flags=0x%08x\n", outarg.flags);
		fuse_log(FUSE_LOG_DEBUG, "   max_readahead=0x%08x\n",
			outarg.max_readahead);
		fuse_log(FUSE_LOG_DEBUG, "   max_write=0x%08x\n", outarg.max_write);
		fuse_log(FUSE_LOG_DEBUG, "   max_background=%i\n",
			outarg.max_background);
		fuse_log(FUSE_LOG_DEBUG, "   congestion_threshold=%i\n",
			outarg.congestion_threshold);
		fuse_log(FUSE_LOG_DEBUG, "   time_gran=%u\n",
			outarg.time_gran);
		if (se->conn.want & FUSE_CAP_PASSTHROUGH)
			fuse_log(FUSE_LOG_DEBUG, "   max_stack_depth=%u\n",
				outarg.max_stack_depth);
	}
	/* Older kernels only understand truncated versions of the reply */
	if (arg->minor < 5)
		outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
	else if (arg->minor < 23)
		outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;

	send_reply_ok(req, &outarg, outargsize);
}
2427 
2428 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2429 {
2430 	struct fuse_session *se = req->se;
2431 
2432 	(void) nodeid;
2433 	(void) inarg;
2434 
2435 	se->got_destroy = 1;
2436 	se->got_init = 0;
2437 	if (se->op.destroy)
2438 		se->op.destroy(se->userdata);
2439 
2440 	send_reply_ok(req, NULL, 0);
2441 }
2442 
2443 static void list_del_nreq(struct fuse_notify_req *nreq)
2444 {
2445 	struct fuse_notify_req *prev = nreq->prev;
2446 	struct fuse_notify_req *next = nreq->next;
2447 	prev->next = next;
2448 	next->prev = prev;
2449 }
2450 
2451 static void list_add_nreq(struct fuse_notify_req *nreq,
2452 			  struct fuse_notify_req *next)
2453 {
2454 	struct fuse_notify_req *prev = next->prev;
2455 	nreq->next = next;
2456 	nreq->prev = prev;
2457 	prev->next = nreq;
2458 	next->prev = nreq;
2459 }
2460 
2461 static void list_init_nreq(struct fuse_notify_req *nreq)
2462 {
2463 	nreq->next = nreq;
2464 	nreq->prev = nreq;
2465 }
2466 
2467 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2468 			    const void *inarg, const struct fuse_buf *buf)
2469 {
2470 	struct fuse_session *se = req->se;
2471 	struct fuse_notify_req *nreq;
2472 	struct fuse_notify_req *head;
2473 
2474 	pthread_mutex_lock(&se->lock);
2475 	head = &se->notify_list;
2476 	for (nreq = head->next; nreq != head; nreq = nreq->next) {
2477 		if (nreq->unique == req->unique) {
2478 			list_del_nreq(nreq);
2479 			break;
2480 		}
2481 	}
2482 	pthread_mutex_unlock(&se->lock);
2483 
2484 	if (nreq != head)
2485 		nreq->reply(nreq, req, nodeid, inarg, buf);
2486 }
2487 
2488 static int send_notify_iov(struct fuse_session *se, int notify_code,
2489 			   struct iovec *iov, int count)
2490 {
2491 	struct fuse_out_header out;
2492 
2493 	if (!se->got_init)
2494 		return -ENOTCONN;
2495 
2496 	out.unique = 0;
2497 	out.error = notify_code;
2498 	iov[0].iov_base = &out;
2499 	iov[0].iov_len = sizeof(struct fuse_out_header);
2500 
2501 	return fuse_send_msg(se, NULL, iov, count);
2502 }
2503 
2504 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2505 {
2506 	if (ph != NULL) {
2507 		struct fuse_notify_poll_wakeup_out outarg;
2508 		struct iovec iov[2];
2509 
2510 		outarg.kh = ph->kh;
2511 
2512 		iov[1].iov_base = &outarg;
2513 		iov[1].iov_len = sizeof(outarg);
2514 
2515 		return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2516 	} else {
2517 		return 0;
2518 	}
2519 }
2520 
2521 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2522 				     off_t off, off_t len)
2523 {
2524 	struct fuse_notify_inval_inode_out outarg;
2525 	struct iovec iov[2];
2526 
2527 	if (!se)
2528 		return -EINVAL;
2529 
2530 	if (se->conn.proto_minor < 12)
2531 		return -ENOSYS;
2532 
2533 	outarg.ino = ino;
2534 	outarg.off = off;
2535 	outarg.len = len;
2536 
2537 	iov[1].iov_base = &outarg;
2538 	iov[1].iov_len = sizeof(outarg);
2539 
2540 	return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2541 }
2542 
2543 /**
2544  * Notify parent attributes and the dentry matching parent/name
2545  *
2546  * Underlying base function for fuse_lowlevel_notify_inval_entry() and
2547  * fuse_lowlevel_notify_expire_entry().
2548  *
2549  * @warning
2550  * Only checks if fuse_lowlevel_notify_inval_entry() is supported by
2551  * the kernel. All other flags will fall back to
2552  * fuse_lowlevel_notify_inval_entry() if not supported!
2553  * DO THE PROPER CHECKS IN THE DERIVED FUNCTION!
2554  *
2555  * @param se the session object
2556  * @param parent inode number
2557  * @param name file name
2558  * @param namelen strlen() of file name
2559  * @param flags flags to control if the entry should be expired or invalidated
2560  * @return zero for success, -errno for failure
2561 */
2562 static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2563 							const char *name, size_t namelen,
2564 							enum fuse_notify_entry_flags flags)
2565 {
2566 	struct fuse_notify_inval_entry_out outarg;
2567 	struct iovec iov[3];
2568 
2569 	if (!se)
2570 		return -EINVAL;
2571 
2572 	if (se->conn.proto_minor < 12)
2573 		return -ENOSYS;
2574 
2575 	outarg.parent = parent;
2576 	outarg.namelen = namelen;
2577 	outarg.flags = 0;
2578 	if (flags & FUSE_LL_EXPIRE_ONLY)
2579 		outarg.flags |= FUSE_EXPIRE_ONLY;
2580 
2581 	iov[1].iov_base = &outarg;
2582 	iov[1].iov_len = sizeof(outarg);
2583 	iov[2].iov_base = (void *)name;
2584 	iov[2].iov_len = namelen + 1;
2585 
2586 	return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2587 }
2588 
/* Invalidate the kernel's cached dentry for parent/name.
 * Returns zero on success, -errno on failure. */
int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
						 const char *name, size_t namelen)
{
	return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
}
2594 
2595 int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2596 							const char *name, size_t namelen)
2597 {
2598 	if (!se)
2599 		return -EINVAL;
2600 
2601 	if (!(se->conn.capable & FUSE_CAP_EXPIRE_ONLY))
2602 		return -ENOSYS;
2603 
2604 	return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2605 }
2606 
2607 
2608 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2609 				fuse_ino_t parent, fuse_ino_t child,
2610 				const char *name, size_t namelen)
2611 {
2612 	struct fuse_notify_delete_out outarg;
2613 	struct iovec iov[3];
2614 
2615 	if (!se)
2616 		return -EINVAL;
2617 
2618 	if (se->conn.proto_minor < 18)
2619 		return -ENOSYS;
2620 
2621 	outarg.parent = parent;
2622 	outarg.child = child;
2623 	outarg.namelen = namelen;
2624 	outarg.padding = 0;
2625 
2626 	iov[1].iov_base = &outarg;
2627 	iov[1].iov_len = sizeof(outarg);
2628 	iov[2].iov_base = (void *)name;
2629 	iov[2].iov_len = namelen + 1;
2630 
2631 	return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2632 }
2633 
2634 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2635 			       off_t offset, struct fuse_bufvec *bufv,
2636 			       enum fuse_buf_copy_flags flags)
2637 {
2638 	struct fuse_out_header out;
2639 	struct fuse_notify_store_out outarg;
2640 	struct iovec iov[3];
2641 	size_t size = fuse_buf_size(bufv);
2642 	int res;
2643 
2644 	if (!se)
2645 		return -EINVAL;
2646 
2647 	if (se->conn.proto_minor < 15)
2648 		return -ENOSYS;
2649 
2650 	out.unique = 0;
2651 	out.error = FUSE_NOTIFY_STORE;
2652 
2653 	outarg.nodeid = ino;
2654 	outarg.offset = offset;
2655 	outarg.size = size;
2656 	outarg.padding = 0;
2657 
2658 	iov[0].iov_base = &out;
2659 	iov[0].iov_len = sizeof(out);
2660 	iov[1].iov_base = &outarg;
2661 	iov[1].iov_len = sizeof(outarg);
2662 
2663 	res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2664 	if (res > 0)
2665 		res = -res;
2666 
2667 	return res;
2668 }
2669 
/* Bookkeeping for one outstanding FUSE_NOTIFY_RETRIEVE request: the
 * generic notify-list node plus the caller-supplied cookie that is
 * handed back to the retrieve_reply() callback. */
struct fuse_retrieve_req {
	struct fuse_notify_req nreq;
	void *cookie;	/* opaque caller context, returned verbatim */
};
2674 
/* Completion callback for fuse_lowlevel_notify_retrieve(): locate the
 * retrieved payload inside the raw input buffer and forward it to the
 * filesystem's retrieve_reply() handler, then free the bookkeeping. */
static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
				   fuse_req_t req, fuse_ino_t ino,
				   const void *inarg,
				   const struct fuse_buf *ibuf)
{
	struct fuse_session *se = req->se;
	struct fuse_retrieve_req *rreq =
		container_of(nreq, struct fuse_retrieve_req, nreq);
	const struct fuse_notify_retrieve_in *arg = inarg;
	struct fuse_bufvec bufv = {
		.buf[0] = *ibuf,
		.count = 1,
	};

	/* For in-memory buffers, point directly past the argument struct */
	if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
		bufv.buf[0].mem = PARAM(arg);

	/* Strip the headers so only the payload size remains */
	bufv.buf[0].size -= sizeof(struct fuse_in_header) +
		sizeof(struct fuse_notify_retrieve_in);

	if (bufv.buf[0].size < arg->size) {
		fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
		fuse_reply_none(req);
		goto out;
	}
	bufv.buf[0].size = arg->size;

	if (se->op.retrieve_reply) {
		se->op.retrieve_reply(req, rreq->cookie, ino,
					  arg->offset, &bufv);
	} else {
		fuse_reply_none(req);
	}
out:
	free(rreq);
	/* If the payload still sits in the splice pipe (not fully
	 * consumed), drain it so the pipe can be reused */
	if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
		fuse_ll_clear_pipe(se);
}
2713 
/* Ask the kernel to send back up to @size bytes of cached data of @ino
 * starting at @offset; the data is later delivered asynchronously to
 * the filesystem's retrieve_reply() together with @cookie
 * (FUSE_NOTIFY_RETRIEVE, protocol 7.15+). */
int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
				  size_t size, off_t offset, void *cookie)
{
	struct fuse_notify_retrieve_out outarg;
	struct iovec iov[2];
	struct fuse_retrieve_req *rreq;
	int err;

	if (!se)
		return -EINVAL;

	if (se->conn.proto_minor < 15)
		return -ENOSYS;

	rreq = malloc(sizeof(*rreq));
	if (rreq == NULL)
		return -ENOMEM;

	/* Register the pending request before sending, so the kernel's
	 * reply cannot race with this registration */
	pthread_mutex_lock(&se->lock);
	rreq->cookie = cookie;
	rreq->nreq.unique = se->notify_ctr++;
	rreq->nreq.reply = fuse_ll_retrieve_reply;
	list_add_nreq(&rreq->nreq, &se->notify_list);
	pthread_mutex_unlock(&se->lock);

	outarg.notify_unique = rreq->nreq.unique;
	outarg.nodeid = ino;
	outarg.offset = offset;
	outarg.size = size;
	outarg.padding = 0;

	iov[1].iov_base = &outarg;
	iov[1].iov_len = sizeof(outarg);

	err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
	if (err) {
		/* Send failed: no reply will come, so unregister and free */
		pthread_mutex_lock(&se->lock);
		list_del_nreq(&rreq->nreq);
		pthread_mutex_unlock(&se->lock);
		free(rreq);
	}

	return err;
}
2758 
2759 void *fuse_req_userdata(fuse_req_t req)
2760 {
2761 	return req->se->userdata;
2762 }
2763 
/* Return a pointer to the context (credentials) of @req; the pointer is
 * valid for the lifetime of the request. */
const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
{
	return &req->ctx;
}
2768 
/* Install (or clear, when @func is NULL) the interrupt callback of @req.
 * req->lock is held across the whole operation so the callback cannot
 * run concurrently with interrupt delivery; the session lock protects
 * the callback fields themselves. */
void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
			     void *data)
{
	pthread_mutex_lock(&req->lock);
	pthread_mutex_lock(&req->se->lock);
	req->u.ni.func = func;
	req->u.ni.data = data;
	pthread_mutex_unlock(&req->se->lock);
	/* If an interrupt already arrived, deliver it immediately */
	if (req->interrupted && func)
		func(req, data);
	pthread_mutex_unlock(&req->lock);
}
2781 
2782 int fuse_req_interrupted(fuse_req_t req)
2783 {
2784 	int interrupted;
2785 
2786 	pthread_mutex_lock(&req->se->lock);
2787 	interrupted = req->interrupted;
2788 	pthread_mutex_unlock(&req->se->lock);
2789 
2790 	return interrupted;
2791 }
2792 
/* Main dispatch table, indexed by FUSE opcode: each entry holds the
 * request handler and a human-readable name for debug logging.  Opcodes
 * without an entry have a NULL func and are rejected with ENOSYS. */
static struct {
	void (*func)(fuse_req_t, fuse_ino_t, const void *);
	const char *name;
} fuse_ll_ops[] = {
	[FUSE_LOOKUP]	   = { do_lookup,      "LOOKUP"	     },
	[FUSE_FORGET]	   = { do_forget,      "FORGET"	     },
	[FUSE_GETATTR]	   = { do_getattr,     "GETATTR"     },
	[FUSE_SETATTR]	   = { do_setattr,     "SETATTR"     },
	[FUSE_READLINK]	   = { do_readlink,    "READLINK"    },
	[FUSE_CANONICAL_PATH] = { do_canonical_path, "CANONICAL_PATH" },
	[FUSE_SYMLINK]	   = { do_symlink,     "SYMLINK"     },
	[FUSE_MKNOD]	   = { do_mknod,       "MKNOD"	     },
	[FUSE_MKDIR]	   = { do_mkdir,       "MKDIR"	     },
	[FUSE_UNLINK]	   = { do_unlink,      "UNLINK"	     },
	[FUSE_RMDIR]	   = { do_rmdir,       "RMDIR"	     },
	[FUSE_RENAME]	   = { do_rename,      "RENAME"	     },
	[FUSE_LINK]	   = { do_link,	       "LINK"	     },
	[FUSE_OPEN]	   = { do_open,	       "OPEN"	     },
	[FUSE_READ]	   = { do_read,	       "READ"	     },
	[FUSE_WRITE]	   = { do_write,       "WRITE"	     },
	[FUSE_STATFS]	   = { do_statfs,      "STATFS"	     },
	[FUSE_RELEASE]	   = { do_release,     "RELEASE"     },
	[FUSE_FSYNC]	   = { do_fsync,       "FSYNC"	     },
	[FUSE_SETXATTR]	   = { do_setxattr,    "SETXATTR"    },
	[FUSE_GETXATTR]	   = { do_getxattr,    "GETXATTR"    },
	[FUSE_LISTXATTR]   = { do_listxattr,   "LISTXATTR"   },
	[FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
	[FUSE_FLUSH]	   = { do_flush,       "FLUSH"	     },
	[FUSE_INIT]	   = { do_init,	       "INIT"	     },
	[FUSE_OPENDIR]	   = { do_opendir,     "OPENDIR"     },
	[FUSE_READDIR]	   = { do_readdir,     "READDIR"     },
	[FUSE_RELEASEDIR]  = { do_releasedir,  "RELEASEDIR"  },
	[FUSE_FSYNCDIR]	   = { do_fsyncdir,    "FSYNCDIR"    },
	[FUSE_GETLK]	   = { do_getlk,       "GETLK"	     },
	[FUSE_SETLK]	   = { do_setlk,       "SETLK"	     },
	[FUSE_SETLKW]	   = { do_setlkw,      "SETLKW"	     },
	[FUSE_ACCESS]	   = { do_access,      "ACCESS"	     },
	[FUSE_CREATE]	   = { do_create,      "CREATE"	     },
	[FUSE_INTERRUPT]   = { do_interrupt,   "INTERRUPT"   },
	[FUSE_BMAP]	   = { do_bmap,	       "BMAP"	     },
	[FUSE_IOCTL]	   = { do_ioctl,       "IOCTL"	     },
	[FUSE_POLL]	   = { do_poll,        "POLL"	     },
	[FUSE_FALLOCATE]   = { do_fallocate,   "FALLOCATE"   },
	[FUSE_DESTROY]	   = { do_destroy,     "DESTROY"     },
	/* NOTIFY_REPLY has a different prototype and is dispatched
	 * specially; the (void *) 1 marker only means "supported" */
	[FUSE_NOTIFY_REPLY] = { (void *) 1,    "NOTIFY_REPLY" },
	[FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
	[FUSE_READDIRPLUS] = { do_readdirplus,	"READDIRPLUS"},
	[FUSE_RENAME2]     = { do_rename2,      "RENAME2"    },
	[FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
	[FUSE_LSEEK]	   = { do_lseek,       "LSEEK"	     },
	[CUSE_INIT]	   = { cuse_lowlevel_init, "CUSE_INIT"   },
};
2845 
/* FUSE BPF prefilter dispatch table (no opcodes are prefiltered yet). */
static struct {
	void (*func)( fuse_req_t, fuse_ino_t, const void *);
	const char *name;
} fuse_ll_prefilter_ops[] = {};
2850 
/* FUSE BPF postfilter dispatch table, indexed by opcode: handlers that
 * post-process a reply on behalf of the BPF filter path (they receive
 * the kernel-reported error code and the raw reply payload). */
static struct {
	void (*func)( fuse_req_t, fuse_ino_t, uint32_t, const void *, size_t size);
} fuse_ll_postfilter_ops[] = {
		[FUSE_LOOKUP] = {do_lookup_postfilter},
		[FUSE_READDIR] = {do_readdir_postfilter},
};
2857 
2858 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2859 
2860 static const char *opname(enum fuse_opcode opcode)
2861 {
2862 	if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2863 		return "???";
2864 	else
2865 		return fuse_ll_ops[opcode].name;
2866 }
2867 
2868 static const char *opfiltername(int filter)
2869 {
2870 	switch (filter) {
2871 	case 0:
2872 		return "NONE";
2873 	case FUSE_PREFILTER:
2874 		return "FUSE_PREFILTER";
2875 	case FUSE_POSTFILTER:
2876 		return "FUSE_POSTFILTER";
2877 	case FUSE_PREFILTER | FUSE_POSTFILTER:
2878 		return "FUSE_PREFILTER | FUSE_POSTFILTER";
2879 	default:
2880 		return "???";
2881 	}
2882 }
2883 
2884 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2885 				  struct fuse_bufvec *src)
2886 {
2887 	ssize_t res = fuse_buf_copy(dst, src, 0);
2888 	if (res < 0) {
2889 		fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2890 		return res;
2891 	}
2892 	if ((size_t)res < fuse_buf_size(dst)) {
2893 		fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2894 		return -1;
2895 	}
2896 	return 0;
2897 }
2898 
/* Public entry point: process one raw request from @buf on the
 * session's default channel. */
void fuse_session_process_buf(struct fuse_session *se,
			      const struct fuse_buf *buf)
{
	fuse_session_process_buf_int(se, buf, NULL);
}
2904 
/* Parse one raw request from @buf and dispatch it to the matching
 * handler in fuse_ll_ops (or the BPF pre-/postfilter tables).  @buf is
 * either an in-memory buffer or a splice pipe (FUSE_BUF_IS_FD); in the
 * pipe case the headers (and, for most opcodes, the payload) are first
 * copied into a heap buffer. */
void fuse_session_process_buf_int(struct fuse_session *se,
				  const struct fuse_buf *buf, struct fuse_chan *ch)
{
	const size_t write_header_size = sizeof(struct fuse_in_header) +
		sizeof(struct fuse_write_in);
	struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
	struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
	struct fuse_in_header *in;
	const void *inarg;
	struct fuse_req *req;
	void *mbuf = NULL;
	int err;
	int res;
	int opcode_filter;

	if (buf->flags & FUSE_BUF_IS_FD) {
		/* Data sits in a pipe: copy out at least the headers so
		 * they can be parsed */
		if (buf->size < tmpbuf.buf[0].size)
			tmpbuf.buf[0].size = buf->size;

		mbuf = malloc(tmpbuf.buf[0].size);
		if (mbuf == NULL) {
			fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
			goto clear_pipe;
		}
		tmpbuf.buf[0].mem = mbuf;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		if (res < 0)
			goto clear_pipe;

		in = mbuf;
	} else {
		in = buf->mem;
	}

	/* Cleanup opcode most significant bits used by FUSE BPF */
	opcode_filter = in->opcode & ~FUSE_OPCODE_FILTER;
	in->opcode &= FUSE_OPCODE_FILTER;

	if (se->debug) {
		fuse_log(FUSE_LOG_DEBUG,
			"unique: %llu, opcode: %s (%i), opcode filter: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
			(unsigned long long) in->unique,
			opname((enum fuse_opcode) in->opcode), in->opcode,
			opfiltername((enum fuse_opcode) opcode_filter), opcode_filter,
			(unsigned long long) in->nodeid, buf->size, in->pid);
	}

	req = fuse_ll_alloc_req(se);
	if (req == NULL) {
		/* Out of memory: answer with a stack-built ENOMEM header */
		struct fuse_out_header out = {
			.unique = in->unique,
			.error = -ENOMEM,
		};
		struct iovec iov = {
			.iov_base = &out,
			.iov_len = sizeof(struct fuse_out_header),
		};

		fuse_send_msg(se, ch, &iov, 1);
		goto clear_pipe;
	}

	req->unique = in->unique;
	req->ctx.uid = in->uid;
	req->ctx.gid = in->gid;
	req->ctx.pid = in->pid;
	req->ch = ch ? fuse_chan_get(ch) : NULL;

	/* INIT must come first and exactly once; anything else before it
	 * (or INIT after it) is a protocol violation */
	err = EIO;
	if (!se->got_init) {
		enum fuse_opcode expected;

		expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
		if (in->opcode != expected)
			goto reply_err;
	} else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
		goto reply_err;

	err = EACCES;
	/* Implement -o allow_root */
	if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
		 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
		 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
		 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
		 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
		 in->opcode != FUSE_NOTIFY_REPLY &&
		 in->opcode != FUSE_READDIRPLUS)
		goto reply_err;

	err = ENOSYS;
	if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
		goto reply_err;
	/* A newly arrived request may satisfy (or be preceded by) an
	 * INTERRUPT for it; register it and reply EAGAIN if so */
	if (in->opcode != FUSE_INTERRUPT) {
		struct fuse_req *intr;
		pthread_mutex_lock(&se->lock);
		intr = check_interrupt(se, req);
		list_add_req(req, &se->list);
		pthread_mutex_unlock(&se->lock);
		if (intr)
			fuse_reply_err(intr, EAGAIN);
	}

	/* Pipe-backed payloads must be copied into memory, except for
	 * WRITE with a write_buf() handler and NOTIFY_REPLY, which can
	 * consume the pipe directly */
	if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
	    (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
	    in->opcode != FUSE_NOTIFY_REPLY) {
		void *newmbuf;

		err = ENOMEM;
		newmbuf = realloc(mbuf, buf->size);
		if (newmbuf == NULL)
			goto reply_err;
		mbuf = newmbuf;

		tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
		tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;

		res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
		err = -res;
		if (res < 0)
			goto reply_err;

		in = mbuf;
	}

	/* Request arguments immediately follow the fixed-size header */
	inarg = (void *) &in[1];
	if (in->opcode == FUSE_WRITE && se->op.write_buf)
		do_write_buf(req, in->nodeid, inarg, buf);
	else if (in->opcode == FUSE_NOTIFY_REPLY)
		do_notify_reply(req, in->nodeid, inarg, buf);
	else if (!opcode_filter)
		fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
	else if (opcode_filter == FUSE_PREFILTER && fuse_ll_prefilter_ops[in->opcode].func)
	  fuse_ll_prefilter_ops[in->opcode].func(req, in->nodeid, inarg);
	else if (opcode_filter == FUSE_POSTFILTER
			&& fuse_ll_postfilter_ops[in->opcode].func)
		fuse_ll_postfilter_ops[in->opcode].func(
				req, in->nodeid, in->error_in, inarg,
				buf->size - sizeof(struct fuse_in_header));
	else {
		fuse_log(FUSE_LOG_ERR, "Bad opcode");
		err = ENOSYS;
		goto reply_err;
	}

out_free:
	free(mbuf);
	return;

reply_err:
	fuse_reply_err(req, err);
clear_pipe:
	if (buf->flags & FUSE_BUF_IS_FD)
		fuse_ll_clear_pipe(se);
	goto out_free;
}
3061 
/* Shorthand: an option whose value is stored directly into the named
 * member of struct fuse_session. */
#define LL_OPTION(n,o,v) \
	{ n, offsetof(struct fuse_session, o), v }

/* Options parsed by the low-level session itself. */
static const struct fuse_opt fuse_ll_opts[] = {
	LL_OPTION("debug", debug, 1),
	LL_OPTION("-d", debug, 1),
	LL_OPTION("--debug", debug, 1),
	LL_OPTION("allow_root", deny_others, 1),
	FUSE_OPT_END
};
3072 
/* Print the FUSE kernel interface version this library implements,
 * followed by the mount helper's version information. */
void fuse_lowlevel_version(void)
{
	printf("using FUSE kernel interface version %i.%i\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
	fuse_mount_version();
}
3079 
/* Print the subset of low-level options that is potentially of
 * interest to an end-user (deliberately not the full list). */
void fuse_lowlevel_help(void)
{
	printf("    -o allow_other         allow access by all users\n");
	printf("    -o allow_root          allow access by root\n");
	printf("    -o auto_unmount        auto unmount on process termination\n");
}
3089 
3090 void fuse_session_destroy(struct fuse_session *se)
3091 {
3092 	struct fuse_ll_pipe *llp;
3093 
3094 	if (se->got_init && !se->got_destroy) {
3095 		if (se->op.destroy)
3096 			se->op.destroy(se->userdata);
3097 	}
3098 	llp = pthread_getspecific(se->pipe_key);
3099 	if (llp != NULL)
3100 		fuse_ll_pipe_free(llp);
3101 	pthread_key_delete(se->pipe_key);
3102 	pthread_mutex_destroy(&se->lock);
3103 	free(se->cuse_data);
3104 	if (se->fd != -1)
3105 		close(se->fd);
3106 	if (se->io != NULL)
3107 		free(se->io);
3108 	destroy_mount_opts(se->mo);
3109 	free(se);
3110 }
3111 
3112 
/* pthread_key destructor: release the calling thread's splice pipe. */
static void fuse_ll_pipe_destructor(void *data)
{
	fuse_ll_pipe_free(data);
}
3118 
/* Public entry point: receive one request into @buf using the
 * session's default channel. */
int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
{
	return fuse_session_receive_buf_int(se, buf, NULL);
}
3123 
/* Receive one request from the fuse device into 'buf'.
 *
 * If splice support is available and negotiated, the request is spliced
 * into a per-thread pipe (zero copy); small requests are then copied
 * into buf->mem anyway, large ones are handed over as an fd-backed
 * buffer (FUSE_BUF_IS_FD). Otherwise, or on any splice setup failure,
 * falls back to a plain read() into buf->mem.
 *
 * 'ch' optionally overrides the session fd (per-thread channel).
 * Returns the number of bytes received, 0 if the session has exited,
 * or a negated errno on failure. On success buf->size is set; buf->mem
 * is allocated lazily and owned by the caller. */
int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
				 struct fuse_chan *ch)
{
	int err;
	ssize_t res;
#ifdef HAVE_SPLICE
	size_t bufsize = se->bufsize;
	struct fuse_ll_pipe *llp;
	struct fuse_buf tmpbuf;

	/* Zero-copy path requires protocol >= 7.14 and the
	   FUSE_CAP_SPLICE_READ capability */
	if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
		goto fallback;

	llp = fuse_ll_get_pipe(se);
	if (llp == NULL)
		goto fallback;

	/* The pipe must be able to hold a complete request; try to grow
	   it once, and permanently stop trying if the kernel refuses. */
	if (llp->size < bufsize) {
		if (llp->can_grow) {
			res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
			if (res == -1) {
				llp->can_grow = 0;
				res = grow_pipe_to_max(llp->pipe[0]);
				if (res > 0)
					llp->size = res;
				goto fallback;
			}
			llp->size = res;
		}
		if (llp->size < bufsize)
			goto fallback;
	}

	if (se->io != NULL && se->io->splice_receive != NULL) {
		res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
						     llp->pipe[1], NULL, bufsize, 0,
						     se->userdata);
	} else {
		res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
				 bufsize, 0);
	}
	/* Save errno immediately; later calls may overwrite it */
	err = errno;

	if (fuse_session_exited(se))
		return 0;

	if (res == -1) {
		if (err == ENODEV) {
			/* Filesystem was unmounted, or connection was aborted
			   via /sys/fs/fuse/connections */
			fuse_session_exit(se);
			return 0;
		}
		if (err != EINTR && err != EAGAIN)
			perror("fuse: splice from device");
		return -err;
	}

	if (res < sizeof(struct fuse_in_header)) {
		fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
		return -EIO;
	}

	tmpbuf = (struct fuse_buf) {
		.size = res,
		.flags = FUSE_BUF_IS_FD,
		.fd = llp->pipe[0],
	};

	/*
	 * Don't bother with zero copy for small requests.
	 * fuse_loop_mt() needs to check for FORGET, so this is more than
	 * just an optimization.
	 */
	if (res < sizeof(struct fuse_in_header) +
	    sizeof(struct fuse_write_in) + pagesize) {
		struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
		struct fuse_bufvec dst = { .count = 1 };

		if (!buf->mem) {
			buf->mem = malloc(se->bufsize);
			if (!buf->mem) {
				fuse_log(FUSE_LOG_ERR,
					"fuse: failed to allocate read buffer\n");
				return -ENOMEM;
			}
		}
		buf->size = se->bufsize;
		buf->flags = 0;
		dst.buf[0] = *buf;

		/* Drain the pipe into buf->mem */
		res = fuse_buf_copy(&dst, &src, 0);
		if (res < 0) {
			fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
				strerror(-res));
			fuse_ll_clear_pipe(se);
			return res;
		}
		if (res < tmpbuf.size) {
			fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
			fuse_ll_clear_pipe(se);
			return -EIO;
		}
		assert(res == tmpbuf.size);

	} else {
		/* Don't overwrite buf->mem, as that would cause a leak */
		buf->fd = tmpbuf.fd;
		buf->flags = tmpbuf.flags;
	}
	buf->size = tmpbuf.size;

	return res;

fallback:
#endif
	/* Plain read() path; buf->mem is allocated once and reused */
	if (!buf->mem) {
		buf->mem = malloc(se->bufsize);
		if (!buf->mem) {
			fuse_log(FUSE_LOG_ERR,
				"fuse: failed to allocate read buffer\n");
			return -ENOMEM;
		}
	}

restart:
	if (se->io != NULL) {
		/* se->io->read is never NULL if se->io is not NULL as
		specified by fuse_session_custom_io()*/
		res = se->io->read(ch ? ch->fd : se->fd, buf->mem, se->bufsize,
					 se->userdata);
	} else {
		res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
	}
	/* Save errno immediately; later calls may overwrite it */
	err = errno;

	if (fuse_session_exited(se))
		return 0;
	if (res == -1) {
		/* ENOENT means the operation was interrupted, it's safe
		   to restart */
		if (err == ENOENT)
			goto restart;

		if (err == ENODEV) {
			/* Filesystem was unmounted, or connection was aborted
			   via /sys/fs/fuse/connections */
			fuse_session_exit(se);
			return 0;
		}
		/* Errors occurring during normal operation: EINTR (read
		   interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
		   umounted) */
		if (err != EINTR && err != EAGAIN)
			perror("fuse: reading device");
		return -err;
	}
	if ((size_t) res < sizeof(struct fuse_in_header)) {
		fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
		return -EIO;
	}

	buf->size = res;

	return res;
}
3290 
/* Create a new low-level session (default symbol for FUSE >= 3.17).
 *
 * Parses session and mount options out of 'args', copies the first
 * op_size bytes of the operation table, and records the libfuse version
 * the application was compiled against (for possible run-time ABI
 * compatibility handling).
 *
 * Returns the new session, or NULL on failure. Note the asymmetric
 * cleanup: 'args' is freed only on the unknown-option error path
 * (out4); on success and on other failures it remains owned by the
 * caller. */
FUSE_SYMVER("_fuse_session_new_317", "_fuse_session_new@@FUSE_3.17")
struct fuse_session *_fuse_session_new_317(struct fuse_args *args,
					  const struct fuse_lowlevel_ops *op,
					  size_t op_size,
					  struct libfuse_version *version,
					  void *userdata)
{
	int err;
	struct fuse_session *se;
	struct mount_opts *mo;

	/* Application built against a newer libfuse than this one:
	   only copy the part of the op table we know about */
	if (sizeof(struct fuse_lowlevel_ops) < op_size) {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
		op_size = sizeof(struct fuse_lowlevel_ops);
	}

	if (args->argc == 0) {
		fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
		return NULL;
	}

	se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
	if (se == NULL) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
		goto out1;
	}
	se->fd = -1;
	se->conn.max_write = UINT_MAX;
	se->conn.max_readahead = UINT_MAX;

	/* Parse options */
	if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
		goto out2;
	if(se->deny_others) {
		/* Allowing access only by root is done by instructing
		 * kernel to allow access by everyone, and then restricting
		 * access to root and mountpoint owner in libfuse.
		 */
		// We may be adding the option a second time, but
		// that doesn't hurt.
		if(fuse_opt_add_arg(args, "-oallow_other") == -1)
			goto out2;
	}
	mo = parse_mount_opts(args);
	if (mo == NULL)
		goto out3;

	/* Anything left in argv at this point was not consumed by the
	   option parsers and is therefore unknown */
	if(args->argc == 1 &&
	   args->argv[0][0] == '-') {
		fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
			"will be ignored\n");
	} else if (args->argc != 1) {
		int i;
		fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
		for(i = 1; i < args->argc-1; i++)
			fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
		fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
		goto out4;
	}

	if (se->debug)
		fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);

	/* Largest possible request: max pages plus the request header */
	se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
		FUSE_BUFFER_HEADER_SIZE;

	list_init_req(&se->list);
	list_init_req(&se->interrupts);
	list_init_nreq(&se->notify_list);
	se->notify_ctr = 1;
	pthread_mutex_init(&se->lock, NULL);

	/* Per-thread splice pipes are cleaned up via this key's destructor */
	err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
	if (err) {
		fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
			strerror(err));
		goto out5;
	}

	memcpy(&se->op, op, op_size);
	se->owner = getuid();
	se->userdata = userdata;

	se->mo = mo;

	/* Fuse server application should pass the version it was compiled
	 * against and pass it. If a libfuse version accidentally introduces an
	 * ABI incompatibility, it might be possible to 'fix' that at run time,
	 * by checking the version numbers.
	 */
	se->version = *version;

	return se;

out5:
	pthread_mutex_destroy(&se->lock);
out4:
	fuse_opt_free_args(args);
out3:
	if (mo != NULL)
		destroy_mount_opts(mo);
out2:
	free(se);
out1:
	return NULL;
}
3397 
/* ABI-compatibility entry point for applications linked against
 * FUSE 3.0: forwards to the current implementation with a zeroed
 * (unknown) libfuse version record. */
struct fuse_session *fuse_session_new_30(struct fuse_args *args,
					  const struct fuse_lowlevel_ops *op,
					  size_t op_size,
					  void *userdata);
FUSE_SYMVER("fuse_session_new_30", "fuse_session_new@FUSE_3.0")
struct fuse_session *fuse_session_new_30(struct fuse_args *args,
					  const struct fuse_lowlevel_ops *op,
					  size_t op_size,
					  void *userdata)
{
	/* unknown version */
	struct libfuse_version version = { 0 };

	return _fuse_session_new_317(args, op, op_size, &version, userdata);
}
3413 
3414 int fuse_session_custom_io(struct fuse_session *se, const struct fuse_custom_io *io,
3415 			   int fd)
3416 {
3417 	if (fd < 0) {
3418 		fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3419 			"fuse_session_custom_io()\n", fd);
3420 		return -EBADF;
3421 	}
3422 	if (io == NULL) {
3423 		fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3424 			"fuse_session_custom_io()\n");
3425 		return -EINVAL;
3426 	} else if (io->read == NULL || io->writev == NULL) {
3427 		/* If the user provides their own file descriptor, we can't
3428 		guarantee that the default behavior of the io operations made
3429 		in libfuse will function properly. Therefore, we enforce the
3430 		user to implement these io operations when using custom io. */
3431 		fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3432 			"implement both io->read() and io->writev\n");
3433 		return -EINVAL;
3434 	}
3435 
3436 	se->io = malloc(sizeof(struct fuse_custom_io));
3437 	if (se->io == NULL) {
3438 		fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3439 			"Error: %s\n", strerror(errno));
3440 		return -errno;
3441 	}
3442 
3443 	se->fd = fd;
3444 	*se->io = *io;
3445 	return 0;
3446 }
3447 
3448 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3449 {
3450 	int fd;
3451 
3452 	/*
3453 	 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3454 	 * would ensue.
3455 	 */
3456 	do {
3457 		fd = open("/dev/null", O_RDWR);
3458 		if (fd > 2)
3459 			close(fd);
3460 	} while (fd >= 0 && fd <= 2);
3461 
3462 	/*
3463 	 * To allow FUSE daemons to run without privileges, the caller may open
3464 	 * /dev/fuse before launching the file system and pass on the file
3465 	 * descriptor by specifying /dev/fd/N as the mount point. Note that the
3466 	 * parent process takes care of performing the mount in this case.
3467 	 */
3468 	fd = fuse_mnt_parse_fuse_fd(mountpoint);
3469 	if (fd != -1) {
3470 		if (fcntl(fd, F_GETFD) == -1) {
3471 			fuse_log(FUSE_LOG_ERR,
3472 				"fuse: Invalid file descriptor /dev/fd/%u\n",
3473 				fd);
3474 			return -1;
3475 		}
3476 		se->fd = fd;
3477 		return 0;
3478 	}
3479 
3480 	/* Open channel */
3481 	fd = fuse_kern_mount(mountpoint, se->mo);
3482 	if (fd == -1)
3483 		return -1;
3484 	se->fd = fd;
3485 
3486 	/* Save mountpoint */
3487 	se->mountpoint = strdup(mountpoint);
3488 	if (se->mountpoint == NULL)
3489 		goto error_out;
3490 
3491 	return 0;
3492 
3493 error_out:
3494 	fuse_kern_unmount(mountpoint, fd);
3495 	return -1;
3496 }
3497 
/* Return the /dev/fuse (or custom) file descriptor attached to the
 * session; -1 if the session is not mounted. */
int fuse_session_fd(struct fuse_session *se)
{
	return se->fd;
}
3502 
3503 void fuse_session_unmount(struct fuse_session *se)
3504 {
3505 	if (se->mountpoint != NULL) {
3506 		fuse_kern_unmount(se->mountpoint, se->fd);
3507 		se->fd = -1;
3508 		free(se->mountpoint);
3509 		se->mountpoint = NULL;
3510 	}
3511 }
3512 
3513 #ifdef linux
3514 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3515 {
3516 	char *buf;
3517 	size_t bufsize = 1024;
3518 	char path[128];
3519 	int ret;
3520 	int fd;
3521 	unsigned long pid = req->ctx.pid;
3522 	char *s;
3523 
3524 	sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3525 
3526 retry:
3527 	buf = malloc(bufsize);
3528 	if (buf == NULL)
3529 		return -ENOMEM;
3530 
3531 	ret = -EIO;
3532 	fd = open(path, O_RDONLY);
3533 	if (fd == -1)
3534 		goto out_free;
3535 
3536 	ret = read(fd, buf, bufsize);
3537 	close(fd);
3538 	if (ret < 0) {
3539 		ret = -EIO;
3540 		goto out_free;
3541 	}
3542 
3543 	if ((size_t)ret == bufsize) {
3544 		free(buf);
3545 		bufsize *= 4;
3546 		goto retry;
3547 	}
3548 
3549 	ret = -EIO;
3550 	s = strstr(buf, "\nGroups:");
3551 	if (s == NULL)
3552 		goto out_free;
3553 
3554 	s += 8;
3555 	ret = 0;
3556 	while (1) {
3557 		char *end;
3558 		unsigned long val = strtoul(s, &end, 0);
3559 		if (end == s)
3560 			break;
3561 
3562 		s = end;
3563 		if (ret < size)
3564 			list[ret] = val;
3565 		ret++;
3566 	}
3567 
3568 out_free:
3569 	free(buf);
3570 	return ret;
3571 }
3572 #else /* linux */
3573 /*
3574  * This is currently not implemented on other than Linux...
3575  */
/* Non-Linux stub: supplementary group lookup is not implemented. */
int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
{
	(void) req; (void) size; (void) list;
	return -ENOSYS;
}
3581 #endif
3582 
3583 /* Prevent spurious data race warning - we don't care
3584  * about races for this flag */
__attribute__((no_sanitize_thread))
/* Mark the session as exited; session loops poll this flag and stop. */
void fuse_session_exit(struct fuse_session *se)
{
	se->exited = 1;
}
3590 
__attribute__((no_sanitize_thread))
/* Clear the exited flag and stored error so the session loop can be
 * run again. */
void fuse_session_reset(struct fuse_session *se)
{
	se->exited = 0;
	se->error = 0;
}
3597 
__attribute__((no_sanitize_thread))
/* Return nonzero if fuse_session_exit() has been called (or an
 * unmount/abort was detected). */
int fuse_session_exited(struct fuse_session *se)
{
	return se->exited;
}
3603