// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French ([email protected])
 *              Jeremy Allison ([email protected])
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

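/*
 * Invalidate the locally cached (fscache) data for the inode backing a
 * netfs write request.
 */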
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}

/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm...  This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime))
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

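/*
 * Clean up a netfs request: drop the reference taken on the open file
 * handle in cifs_init_request().
 */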
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

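/*
 * Clean up a subrequest: deregister any RDMA memory registration, return
 * unused credits to the server and release the xid, if one was obtained.
 */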
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

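/*
 * Convert the POSIX access mode in the open flags into the desired-access
 * bits for an SMB open.  A write-only open is promoted to read+write when
 * fscache needs to read back around partial writes (rdwr_for_fscache == 1).
 */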
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

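/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags to an SMB
 * create disposition, e.g. O_CREAT | O_EXCL becomes FILE_CREATE and a
 * bare O_TRUNC becomes FILE_OVERWRITE.
 */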
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

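/*
 * Open a file with "NT" semantics: derive the desired access, disposition
 * and create options from the POSIX open flags, send the open to the
 * server and then refresh the inode metadata from the server.
 */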
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition FILE_SUPERSEDE
 *	(ie create whether or not the file exists); O_CREAT | O_TRUNC is
 *	similar, but truncates the existing file rather than creating a
 *	new one as FILE_SUPERSEDE does (which uses the attributes /
 *	metadata passed in on the open call).
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

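/* Return true if any open instance of the inode holds byte-range locks. */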
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

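/* Take @sem for writing, polling with a short sleep instead of blocking. */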
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

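/*
 * Allocate and initialise a cifsFileInfo for a freshly opened file, link
 * it into the tcon and inode open-file lists and install it as the VFS
 * file's private data.
 */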
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

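/* Take an extra reference on the private data of an open file. */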
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

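/*
 * Final teardown once the reference count has dropped to zero: delete any
 * remaining lock records and release the tlink, dentry and superblock
 * references.
 */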
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

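/*
 * Work item used when a server-side close failed with -EBUSY or -EAGAIN:
 * retry the close a few times before giving up, then free the file info.
 */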
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);

	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);

	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

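/*
 * VFS ->open() for regular files: reuse a cached (deferred-close) handle
 * when the open flags match, otherwise open the file on the server,
 * optionally via the legacy SMB1 POSIX path, and start using the fscache
 * cookie for the inode.
 */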
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	} else {
		/* hard link on the deferred close file */
		rc = cifs_get_hardlink_path(tcon, inode, file);
		if (rc)
			cifs_close_deferred_file(CIFS_I(inode));
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

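/*
 * Reopen an invalidated file handle, e.g. after a reconnect.  If
 * @can_flush is true, write back dirty pages and refresh the inode
 * metadata from the server before byte-range locks are reacquired.
 */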
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to retry
		 * hard.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

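/*
 * Deferred-close timer expiry: take the file off the deferred-close list
 * and drop the reference that kept the handle open.
 */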
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

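/*
 * A close may be deferred only if a close timeout is configured, a lease
 * was granted that caches at least read and handle state, and no lock has
 * been taken on the file (CIFS_INO_CLOSE_ON_LOCK unset).
 */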
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

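/*
 * VFS ->release() for regular files: either defer the server-side close
 * for closetimeo, or drop the reference to the file info immediately.
 */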
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

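/* Reopen any persistent handles that were invalidated by a reconnect. */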
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

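/*
 * VFS ->release() for directories: close the search handle on the server
 * and free the network buffer held by an uncompleted readdir.
 */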
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

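/* Allocate and fill in a byte-range lock record for the current process. */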
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

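/* Wake up any lock requests that are blocked waiting on @lock. */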
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

1622 static void
cifs_lock_add(struct cifsFileInfo * cfile,struct cifsLockInfo * lock)1623 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
1624 {
1625 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1626 	cifs_down_write(&cinode->lock_sem);
1627 	list_add_tail(&lock->llist, &cfile->llist->locks);
1628 	up_write(&cinode->lock_sem);
1629 }
1630 
1631 /*
1632  * Set the byte-range lock (mandatory style). Returns:
1633  * 1) 0, if we set the lock and don't need to request to the server;
1634  * 2) 1, if no locks prevent us but we need to request to the server;
1635  * 3) -EACCES, if there is a lock that prevents us and wait is false.
1636  */
1637 static int
cifs_lock_add_if(struct cifsFileInfo * cfile,struct cifsLockInfo * lock,bool wait)1638 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
1639 		 bool wait)
1640 {
1641 	struct cifsLockInfo *conf_lock;
1642 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1643 	bool exist;
1644 	int rc = 0;
1645 
1646 try_again:
1647 	exist = false;
1648 	cifs_down_write(&cinode->lock_sem);
1649 
1650 	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
1651 					lock->type, lock->flags, &conf_lock,
1652 					CIFS_LOCK_OP);
1653 	if (!exist && cinode->can_cache_brlcks) {
1654 		list_add_tail(&lock->llist, &cfile->llist->locks);
1655 		up_write(&cinode->lock_sem);
1656 		return rc;
1657 	}
1658 
1659 	if (!exist)
1660 		rc = 1;
1661 	else if (!wait)
1662 		rc = -EACCES;
1663 	else {
1664 		list_add_tail(&lock->blist, &conf_lock->blist);
1665 		up_write(&cinode->lock_sem);
1666 		rc = wait_event_interruptible(lock->block_q,
1667 					(lock->blist.prev == &lock->blist) &&
1668 					(lock->blist.next == &lock->blist));
1669 		if (!rc)
1670 			goto try_again;
1671 		cifs_down_write(&cinode->lock_sem);
1672 		list_del_init(&lock->blist);
1673 	}
1674 
1675 	up_write(&cinode->lock_sem);
1676 	return rc;
1677 }
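
/*
 * Example caller pattern (sketch, mirroring cifs_setlk() below): the
 * three return values map onto three different actions:
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc < 0)		// -EACCES: conflict and caller won't wait
 *		kfree(lock);
 *	else if (!rc)		// 0: lock cached locally, nothing to send
 *		goto out;
 *	else			// 1: no conflict, but must ask the server
 *		rc = server->ops->mand_lock(xid, cfile, ...);
 */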
1678 
1679 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1680 /*
1681  * Check if there is another lock that prevents us from setting the lock
1682  * (posix style). If such a lock exists, update the flock structure with
1683  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1684  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1685  * send a request to the server, or 1 otherwise.
1686  */
1687 static int
1688 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1689 {
1690 	int rc = 0;
1691 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1692 	unsigned char saved_type = flock->c.flc_type;
1693 
1694 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1695 		return 1;
1696 
1697 	down_read(&cinode->lock_sem);
1698 	posix_test_lock(file, flock);
1699 
1700 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1701 		flock->c.flc_type = saved_type;
1702 		rc = 1;
1703 	}
1704 
1705 	up_read(&cinode->lock_sem);
1706 	return rc;
1707 }
1708 
1709 /*
1710  * Set the byte-range lock (posix style). Returns:
1711  * 1) <0, if the error occurs while setting the lock;
1712  * 2) 0, if we set the lock and don't need to send a request to the server;
1713  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1714  * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
1715  */
1716 static int
1717 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1718 {
1719 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1720 	int rc = FILE_LOCK_DEFERRED + 1;
1721 
1722 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1723 		return rc;
1724 
1725 	cifs_down_write(&cinode->lock_sem);
1726 	if (!cinode->can_cache_brlcks) {
1727 		up_write(&cinode->lock_sem);
1728 		return rc;
1729 	}
1730 
1731 	rc = posix_lock_file(file, flock, NULL);
1732 	up_write(&cinode->lock_sem);
1733 	return rc;
1734 }
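
/*
 * Example rc handling (sketch): cifs_setlk() below returns early for
 * anything up to FILE_LOCK_DEFERRED and only falls through to the
 * server request for FILE_LOCK_DEFERRED + 1:
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;
 *	// otherwise send CIFSSMBPosixLock() to the server
 */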
1735 
1736 int
1737 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1738 {
1739 	unsigned int xid;
1740 	int rc = 0, stored_rc;
1741 	struct cifsLockInfo *li, *tmp;
1742 	struct cifs_tcon *tcon;
1743 	unsigned int num, max_num, max_buf;
1744 	LOCKING_ANDX_RANGE *buf, *cur;
1745 	static const int types[] = {
1746 		LOCKING_ANDX_LARGE_FILES,
1747 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1748 	};
1749 	int i;
1750 
1751 	xid = get_xid();
1752 	tcon = tlink_tcon(cfile->tlink);
1753 
1754 	/*
1755 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1756 	 * and check it before using.
1757 	 */
1758 	max_buf = tcon->ses->server->maxBuf;
1759 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1760 		free_xid(xid);
1761 		return -EINVAL;
1762 	}
1763 
1764 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1765 		     PAGE_SIZE);
1766 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1767 			PAGE_SIZE);
1768 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1769 						sizeof(LOCKING_ANDX_RANGE);
1770 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1771 	if (!buf) {
1772 		free_xid(xid);
1773 		return -ENOMEM;
1774 	}
1775 
1776 	for (i = 0; i < 2; i++) {
1777 		cur = buf;
1778 		num = 0;
1779 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1780 			if (li->type != types[i])
1781 				continue;
1782 			cur->Pid = cpu_to_le16(li->pid);
1783 			cur->LengthLow = cpu_to_le32((u32)li->length);
1784 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1785 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1786 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1787 			if (++num == max_num) {
1788 				stored_rc = cifs_lockv(xid, tcon,
1789 						       cfile->fid.netfid,
1790 						       (__u8)li->type, 0, num,
1791 						       buf);
1792 				if (stored_rc)
1793 					rc = stored_rc;
1794 				cur = buf;
1795 				num = 0;
1796 			} else
1797 				cur++;
1798 		}
1799 
1800 		if (num) {
1801 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1802 					       (__u8)types[i], 0, num, buf);
1803 			if (stored_rc)
1804 				rc = stored_rc;
1805 		}
1806 	}
1807 
1808 	kfree(buf);
1809 	free_xid(xid);
1810 	return rc;
1811 }
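
/*
 * Sizing sketch for the request buffer above: the SMB header is
 * subtracted once when clamping max_buf and again when computing how
 * many ranges fit, so the packing errs on the small side:
 *
 *	max_buf = min(maxBuf - sizeof(struct smb_hdr), PAGE_SIZE);
 *	max_num = (max_buf - sizeof(struct smb_hdr)) /
 *			sizeof(LOCKING_ANDX_RANGE);
 *
 * Once num reaches max_num, a LOCKING_ANDX request is sent and the
 * buffer is reused for the next batch.
 */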
1812 
1813 static __u32
1814 hash_lockowner(fl_owner_t owner)
1815 {
1816 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1817 }
1818 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1819 
1820 struct lock_to_push {
1821 	struct list_head llist;
1822 	__u64 offset;
1823 	__u64 length;
1824 	__u32 pid;
1825 	__u16 netfid;
1826 	__u8 type;
1827 };
1828 
1829 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1830 static int
1831 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1832 {
1833 	struct inode *inode = d_inode(cfile->dentry);
1834 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1835 	struct file_lock *flock;
1836 	struct file_lock_context *flctx = locks_inode_context(inode);
1837 	unsigned int count = 0, i;
1838 	int rc = 0, xid, type;
1839 	struct list_head locks_to_send, *el;
1840 	struct lock_to_push *lck, *tmp;
1841 	__u64 length;
1842 
1843 	xid = get_xid();
1844 
1845 	if (!flctx)
1846 		goto out;
1847 
1848 	spin_lock(&flctx->flc_lock);
1849 	list_for_each(el, &flctx->flc_posix) {
1850 		count++;
1851 	}
1852 	spin_unlock(&flctx->flc_lock);
1853 
1854 	INIT_LIST_HEAD(&locks_to_send);
1855 
1856 	/*
1857 	 * Allocating count locks is enough because no FL_POSIX locks can be
1858 	 * added to the list while we are holding cinode->lock_sem that
1859 	 * protects locking operations of this inode.
1860 	 */
1861 	for (i = 0; i < count; i++) {
1862 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1863 		if (!lck) {
1864 			rc = -ENOMEM;
1865 			goto err_out;
1866 		}
1867 		list_add_tail(&lck->llist, &locks_to_send);
1868 	}
1869 
1870 	el = locks_to_send.next;
1871 	spin_lock(&flctx->flc_lock);
1872 	for_each_file_lock(flock, &flctx->flc_posix) {
1873 		unsigned char ftype = flock->c.flc_type;
1874 
1875 		if (el == &locks_to_send) {
1876 			/*
1877 			 * The list ended. We don't have enough allocated
1878 			 * structures - something is really wrong.
1879 			 */
1880 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1881 			break;
1882 		}
1883 		length = cifs_flock_len(flock);
1884 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1885 			type = CIFS_RDLCK;
1886 		else
1887 			type = CIFS_WRLCK;
1888 		lck = list_entry(el, struct lock_to_push, llist);
1889 		lck->pid = hash_lockowner(flock->c.flc_owner);
1890 		lck->netfid = cfile->fid.netfid;
1891 		lck->length = length;
1892 		lck->type = type;
1893 		lck->offset = flock->fl_start;
1894 	}
1895 	spin_unlock(&flctx->flc_lock);
1896 
1897 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1898 		int stored_rc;
1899 
1900 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1901 					     lck->offset, lck->length, NULL,
1902 					     lck->type, 0);
1903 		if (stored_rc)
1904 			rc = stored_rc;
1905 		list_del(&lck->llist);
1906 		kfree(lck);
1907 	}
1908 
1909 out:
1910 	free_xid(xid);
1911 	return rc;
1912 err_out:
1913 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1914 		list_del(&lck->llist);
1915 		kfree(lck);
1916 	}
1917 	goto out;
1918 }
1919 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1920 
1921 static int
1922 cifs_push_locks(struct cifsFileInfo *cfile)
1923 {
1924 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1925 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1926 	int rc = 0;
1927 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1928 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1929 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1930 
1931 	/* we are going to update can_cache_brlcks here - need write access */
1932 	cifs_down_write(&cinode->lock_sem);
1933 	if (!cinode->can_cache_brlcks) {
1934 		up_write(&cinode->lock_sem);
1935 		return rc;
1936 	}
1937 
1938 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1939 	if (cap_unix(tcon->ses) &&
1940 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1941 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1942 		rc = cifs_push_posix_locks(cfile);
1943 	else
1944 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1945 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1946 
1947 	cinode->can_cache_brlcks = false;
1948 	up_write(&cinode->lock_sem);
1949 	return rc;
1950 }
1951 
1952 static void
1953 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1954 		bool *wait_flag, struct TCP_Server_Info *server)
1955 {
1956 	if (flock->c.flc_flags & FL_POSIX)
1957 		cifs_dbg(FYI, "Posix\n");
1958 	if (flock->c.flc_flags & FL_FLOCK)
1959 		cifs_dbg(FYI, "Flock\n");
1960 	if (flock->c.flc_flags & FL_SLEEP) {
1961 		cifs_dbg(FYI, "Blocking lock\n");
1962 		*wait_flag = true;
1963 	}
1964 	if (flock->c.flc_flags & FL_ACCESS)
1965 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1966 	if (flock->c.flc_flags & FL_LEASE)
1967 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1968 	if (flock->c.flc_flags &
1969 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1970 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1971 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1972 		         flock->c.flc_flags);
1973 
1974 	*type = server->vals->large_lock_type;
1975 	if (lock_is_write(flock)) {
1976 		cifs_dbg(FYI, "F_WRLCK\n");
1977 		*type |= server->vals->exclusive_lock_type;
1978 		*lock = 1;
1979 	} else if (lock_is_unlock(flock)) {
1980 		cifs_dbg(FYI, "F_UNLCK\n");
1981 		*type |= server->vals->unlock_lock_type;
1982 		*unlock = 1;
1983 		/* Check if unlock includes more than one lock range */
1984 	} else if (lock_is_read(flock)) {
1985 		cifs_dbg(FYI, "F_RDLCK\n");
1986 		*type |= server->vals->shared_lock_type;
1987 		*lock = 1;
1988 	} else if (flock->c.flc_type == F_EXLCK) {
1989 		cifs_dbg(FYI, "F_EXLCK\n");
1990 		*type |= server->vals->exclusive_lock_type;
1991 		*lock = 1;
1992 	} else if (flock->c.flc_type == F_SHLCK) {
1993 		cifs_dbg(FYI, "F_SHLCK\n");
1994 		*type |= server->vals->shared_lock_type;
1995 		*lock = 1;
1996 	} else
1997 		cifs_dbg(FYI, "Unknown type of lock\n");
1998 }
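
/*
 * Worked example (illustrative): a blocking POSIX write lock, i.e.
 * flc_flags = FL_POSIX | FL_SLEEP and flc_type = F_WRLCK, comes out as
 *
 *	*wait_flag = true;
 *	*type = large_lock_type | exclusive_lock_type;
 *	*lock = 1;
 *
 * while an F_UNLCK request instead sets *unlock = 1 together with the
 * unlock_lock_type bits.
 */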
1999 
2000 static int
2001 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
2002 	   bool wait_flag, bool posix_lck, unsigned int xid)
2003 {
2004 	int rc = 0;
2005 	__u64 length = cifs_flock_len(flock);
2006 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2007 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2008 	struct TCP_Server_Info *server = tcon->ses->server;
2009 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2010 	__u16 netfid = cfile->fid.netfid;
2011 
2012 	if (posix_lck) {
2013 		int posix_lock_type;
2014 
2015 		rc = cifs_posix_lock_test(file, flock);
2016 		if (!rc)
2017 			return rc;
2018 
2019 		if (type & server->vals->shared_lock_type)
2020 			posix_lock_type = CIFS_RDLCK;
2021 		else
2022 			posix_lock_type = CIFS_WRLCK;
2023 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2024 				      hash_lockowner(flock->c.flc_owner),
2025 				      flock->fl_start, length, flock,
2026 				      posix_lock_type, wait_flag);
2027 		return rc;
2028 	}
2029 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2030 
2031 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2032 	if (!rc)
2033 		return rc;
2034 
2035 	/* BB we could chain these into one lock request BB */
2036 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2037 				    1, 0, false);
2038 	if (rc == 0) {
2039 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2040 					    type, 0, 1, false);
2041 		flock->c.flc_type = F_UNLCK;
2042 		if (rc != 0)
2043 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2044 				 rc);
2045 		return 0;
2046 	}
2047 
2048 	if (type & server->vals->shared_lock_type) {
2049 		flock->c.flc_type = F_WRLCK;
2050 		return 0;
2051 	}
2052 
2053 	type &= ~server->vals->exclusive_lock_type;
2054 
2055 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2056 				    type | server->vals->shared_lock_type,
2057 				    1, 0, false);
2058 	if (rc == 0) {
2059 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2060 			type | server->vals->shared_lock_type, 0, 1, false);
2061 		flock->c.flc_type = F_RDLCK;
2062 		if (rc != 0)
2063 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2064 				 rc);
2065 	} else
2066 		flock->c.flc_type = F_WRLCK;
2067 
2068 	return 0;
2069 }
2070 
2071 void
2072 cifs_move_llist(struct list_head *source, struct list_head *dest)
2073 {
2074 	struct list_head *li, *tmp;
2075 	list_for_each_safe(li, tmp, source)
2076 		list_move(li, dest);
2077 }
2078 
2079 int
2080 cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
2081 				struct file *file)
2082 {
2083 	struct cifsFileInfo *open_file = NULL;
2084 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2085 	int rc = 0;
2086 
2087 	spin_lock(&tcon->open_file_lock);
2088 	spin_lock(&cinode->open_file_lock);
2089 
2090 	list_for_each_entry(open_file, &cinode->openFileList, flist) {
2091 		if (file->f_flags == open_file->f_flags) {
2092 			rc = -EINVAL;
2093 			break;
2094 		}
2095 	}
2096 
2097 	spin_unlock(&cinode->open_file_lock);
2098 	spin_unlock(&tcon->open_file_lock);
2099 	return rc;
2100 }
2101 
2102 void
2103 cifs_free_llist(struct list_head *llist)
2104 {
2105 	struct cifsLockInfo *li, *tmp;
2106 	list_for_each_entry_safe(li, tmp, llist, llist) {
2107 		cifs_del_lock_waiters(li);
2108 		list_del(&li->llist);
2109 		kfree(li);
2110 	}
2111 }
2112 
2113 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2114 int
2115 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2116 		  unsigned int xid)
2117 {
2118 	int rc = 0, stored_rc;
2119 	static const int types[] = {
2120 		LOCKING_ANDX_LARGE_FILES,
2121 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2122 	};
2123 	unsigned int i;
2124 	unsigned int max_num, num, max_buf;
2125 	LOCKING_ANDX_RANGE *buf, *cur;
2126 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2127 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2128 	struct cifsLockInfo *li, *tmp;
2129 	__u64 length = cifs_flock_len(flock);
2130 	LIST_HEAD(tmp_llist);
2131 
2132 	/*
2133 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2134 	 * and check it before using.
2135 	 */
2136 	max_buf = tcon->ses->server->maxBuf;
2137 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2138 		return -EINVAL;
2139 
2140 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2141 		     PAGE_SIZE);
2142 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2143 			PAGE_SIZE);
2144 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2145 						sizeof(LOCKING_ANDX_RANGE);
2146 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2147 	if (!buf)
2148 		return -ENOMEM;
2149 
2150 	cifs_down_write(&cinode->lock_sem);
2151 	for (i = 0; i < 2; i++) {
2152 		cur = buf;
2153 		num = 0;
2154 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2155 			if (flock->fl_start > li->offset ||
2156 			    (flock->fl_start + length) <
2157 			    (li->offset + li->length))
2158 				continue;
2159 			if (current->tgid != li->pid)
2160 				continue;
2161 			if (types[i] != li->type)
2162 				continue;
2163 			if (cinode->can_cache_brlcks) {
2164 				/*
2165 				 * We can cache brlock requests - simply remove
2166 				 * a lock from the file's list.
2167 				 */
2168 				list_del(&li->llist);
2169 				cifs_del_lock_waiters(li);
2170 				kfree(li);
2171 				continue;
2172 			}
2173 			cur->Pid = cpu_to_le16(li->pid);
2174 			cur->LengthLow = cpu_to_le32((u32)li->length);
2175 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2176 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2177 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2178 			/*
2179 			 * We need to save a lock here to let us add it again to
2180 			 * the file's list if the unlock range request fails on
2181 			 * the server.
2182 			 */
2183 			list_move(&li->llist, &tmp_llist);
2184 			if (++num == max_num) {
2185 				stored_rc = cifs_lockv(xid, tcon,
2186 						       cfile->fid.netfid,
2187 						       li->type, num, 0, buf);
2188 				if (stored_rc) {
2189 					/*
2190 					 * We failed on the unlock range
2191 					 * request - add all locks from the tmp
2192 					 * list to the head of the file's list.
2193 					 */
2194 					cifs_move_llist(&tmp_llist,
2195 							&cfile->llist->locks);
2196 					rc = stored_rc;
2197 				} else
2198 					/*
2199 					 * The unlock range request succeeded -
2200 					 * free the tmp list.
2201 					 */
2202 					cifs_free_llist(&tmp_llist);
2203 				cur = buf;
2204 				num = 0;
2205 			} else
2206 				cur++;
2207 		}
2208 		if (num) {
2209 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2210 					       types[i], num, 0, buf);
2211 			if (stored_rc) {
2212 				cifs_move_llist(&tmp_llist,
2213 						&cfile->llist->locks);
2214 				rc = stored_rc;
2215 			} else
2216 				cifs_free_llist(&tmp_llist);
2217 		}
2218 	}
2219 
2220 	up_write(&cinode->lock_sem);
2221 	kfree(buf);
2222 	return rc;
2223 }
2224 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2225 
2226 static int
2227 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2228 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2229 	   unsigned int xid)
2230 {
2231 	int rc = 0;
2232 	__u64 length = cifs_flock_len(flock);
2233 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2234 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2235 	struct TCP_Server_Info *server = tcon->ses->server;
2236 	struct inode *inode = d_inode(cfile->dentry);
2237 
2238 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2239 	if (posix_lck) {
2240 		int posix_lock_type;
2241 
2242 		rc = cifs_posix_lock_set(file, flock);
2243 		if (rc <= FILE_LOCK_DEFERRED)
2244 			return rc;
2245 
2246 		if (type & server->vals->shared_lock_type)
2247 			posix_lock_type = CIFS_RDLCK;
2248 		else
2249 			posix_lock_type = CIFS_WRLCK;
2250 
2251 		if (unlock == 1)
2252 			posix_lock_type = CIFS_UNLCK;
2253 
2254 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2255 				      hash_lockowner(flock->c.flc_owner),
2256 				      flock->fl_start, length,
2257 				      NULL, posix_lock_type, wait_flag);
2258 		goto out;
2259 	}
2260 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2261 	if (lock) {
2262 		struct cifsLockInfo *lock;
2263 
2264 		lock = cifs_lock_init(flock->fl_start, length, type,
2265 				      flock->c.flc_flags);
2266 		if (!lock)
2267 			return -ENOMEM;
2268 
2269 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2270 		if (rc < 0) {
2271 			kfree(lock);
2272 			return rc;
2273 		}
2274 		if (!rc)
2275 			goto out;
2276 
2277 		/*
2278 		 * Windows 7 server can delay breaking lease from read to None
2279 		 * if we set a byte-range lock on a file - break it explicitly
2280 		 * before sending the lock to the server to be sure the next
2281 		 * read won't conflict with non-overlapping locks due to
2282 		 * page reading.
2283 		 */
2284 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2285 					CIFS_CACHE_READ(CIFS_I(inode))) {
2286 			cifs_zap_mapping(inode);
2287 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2288 				 inode);
2289 			CIFS_I(inode)->oplock = 0;
2290 		}
2291 
2292 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2293 					    type, 1, 0, wait_flag);
2294 		if (rc) {
2295 			kfree(lock);
2296 			return rc;
2297 		}
2298 
2299 		cifs_lock_add(cfile, lock);
2300 	} else if (unlock)
2301 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2302 
2303 out:
2304 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2305 		/*
2306 		 * If this is a request to remove all locks because we
2307 		 * are closing the file, it doesn't matter if the
2308 		 * unlocking failed as both cifs.ko and the SMB server
2309 		 * remove the lock on file close
2310 		 */
2311 		if (rc) {
2312 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2313 			if (!(flock->c.flc_flags & FL_CLOSE))
2314 				return rc;
2315 		}
2316 		rc = locks_lock_file_wait(file, flock);
2317 	}
2318 	return rc;
2319 }
2320 
2321 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2322 {
2323 	int rc, xid;
2324 	int lock = 0, unlock = 0;
2325 	bool wait_flag = false;
2326 	bool posix_lck = false;
2327 	struct cifs_sb_info *cifs_sb;
2328 	struct cifs_tcon *tcon;
2329 	struct cifsFileInfo *cfile;
2330 	__u32 type;
2331 
2332 	xid = get_xid();
2333 
2334 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2335 		rc = -ENOLCK;
2336 		free_xid(xid);
2337 		return rc;
2338 	}
2339 
2340 	cfile = (struct cifsFileInfo *)file->private_data;
2341 	tcon = tlink_tcon(cfile->tlink);
2342 
2343 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2344 			tcon->ses->server);
2345 	cifs_sb = CIFS_FILE_SB(file);
2346 
2347 	if (cap_unix(tcon->ses) &&
2348 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2349 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2350 		posix_lck = true;
2351 
2352 	if (!lock && !unlock) {
2353 		/*
2354 		 * if this is neither a lock nor an unlock request then there
2355 		 * is nothing to do since we do not know what it is
2356 		 */
2357 		rc = -EOPNOTSUPP;
2358 		free_xid(xid);
2359 		return rc;
2360 	}
2361 
2362 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2363 			xid);
2364 	free_xid(xid);
2365 	return rc;
2368 }
2369 
2370 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2371 {
2372 	int rc, xid;
2373 	int lock = 0, unlock = 0;
2374 	bool wait_flag = false;
2375 	bool posix_lck = false;
2376 	struct cifs_sb_info *cifs_sb;
2377 	struct cifs_tcon *tcon;
2378 	struct cifsFileInfo *cfile;
2379 	__u32 type;
2380 
2381 	rc = -EACCES;
2382 	xid = get_xid();
2383 
2384 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x flags=0x%x type=0x%x r=%lld:%lld\n", __func__, file, cmd,
2385 		 flock->c.flc_flags, flock->c.flc_type,
2386 		 (long long)flock->fl_start,
2387 		 (long long)flock->fl_end);
2388 
2389 	cfile = (struct cifsFileInfo *)file->private_data;
2390 	tcon = tlink_tcon(cfile->tlink);
2391 
2392 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2393 			tcon->ses->server);
2394 	cifs_sb = CIFS_FILE_SB(file);
2395 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2396 
2397 	if (cap_unix(tcon->ses) &&
2398 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2399 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2400 		posix_lck = true;
2401 	/*
2402 	 * BB add code here to normalize offset and length to account for
2403 	 * negative length which we can not accept over the wire.
2404 	 */
2405 	if (IS_GETLK(cmd)) {
2406 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2407 		free_xid(xid);
2408 		return rc;
2409 	}
2410 
2411 	if (!lock && !unlock) {
2412 		/*
2413 		 * if this is neither a lock nor an unlock request then there
2414 		 * is nothing to do since we do not know what it is
2415 		 */
2416 		free_xid(xid);
2417 		return -EOPNOTSUPP;
2418 	}
2419 
2420 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2421 			xid);
2422 	free_xid(xid);
2423 	return rc;
2424 }
2425 
2426 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2427 				      bool was_async)
2428 {
2429 	struct netfs_io_request *wreq = wdata->rreq;
2430 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
2431 	loff_t wrend;
2432 
2433 	if (result > 0) {
2434 		wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2435 
2436 		if (wrend > ictx->zero_point &&
2437 		    (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2438 		     wdata->rreq->origin == NETFS_DIO_WRITE))
2439 			ictx->zero_point = wrend;
2440 		if (wrend > ictx->remote_i_size)
2441 			netfs_resize_file(ictx, wrend, true);
2442 	}
2443 
2444 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2445 }
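
/*
 * Worked example (illustrative numbers): a subrequest with start = 4096
 * and transferred = 0 that completes with result = 512 gives
 * wrend = 4608; if the server previously knew a 4096-byte file,
 * remote_i_size is advanced to 4608 via netfs_resize_file().
 */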
2446 
2447 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2448 					bool fsuid_only)
2449 {
2450 	struct cifsFileInfo *open_file = NULL;
2451 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2452 
2453 	/* only filter by fsuid on multiuser mounts */
2454 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2455 		fsuid_only = false;
2456 
2457 	spin_lock(&cifs_inode->open_file_lock);
2458 	/* we could simply get the first_list_entry since write-only entries
2459 	   are always at the end of the list but since the first entry might
2460 	   have a close pending, we go through the whole list */
2461 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2462 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2463 			continue;
2464 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2465 			if (!open_file->invalidHandle) {
2466 				/* found a good file */
2467 				/* lock it so it will not be closed on us */
2468 				cifsFileInfo_get(open_file);
2469 				spin_unlock(&cifs_inode->open_file_lock);
2470 				return open_file;
2471 			} /* else might as well continue, and look for
2472 			     another, or simply have the caller reopen it
2473 			     again rather than trying to fix this handle */
2474 		} else /* write only file */
2475 			break; /* write only files are last so must be done */
2476 	}
2477 	spin_unlock(&cifs_inode->open_file_lock);
2478 	return NULL;
2479 }
2480 
2481 /* Return -EBADF if no handle is found, or a general rc otherwise */
2482 int
2483 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2484 		       struct cifsFileInfo **ret_file)
2485 {
2486 	struct cifsFileInfo *open_file, *inv_file = NULL;
2487 	struct cifs_sb_info *cifs_sb;
2488 	bool any_available = false;
2489 	int rc = -EBADF;
2490 	unsigned int refind = 0;
2491 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2492 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2493 	*ret_file = NULL;
2494 
2495 	/*
2496 	 * Having a null inode here (because mapping->host was set to zero by
2497 	 * the VFS or MM) should not happen but we had reports of an oops (due
2498 	 * to it being zero) during stress testcases so we need to check for it
2499 	 */
2500 
2501 	if (cifs_inode == NULL) {
2502 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2503 		dump_stack();
2504 		return rc;
2505 	}
2506 
2507 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2508 
2509 	/* only filter by fsuid on multiuser mounts */
2510 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2511 		fsuid_only = false;
2512 
2513 	spin_lock(&cifs_inode->open_file_lock);
2514 refind_writable:
2515 	if (refind > MAX_REOPEN_ATT) {
2516 		spin_unlock(&cifs_inode->open_file_lock);
2517 		return rc;
2518 	}
2519 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2520 		if (!any_available && open_file->pid != current->tgid)
2521 			continue;
2522 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2523 			continue;
2524 		if (with_delete && !(open_file->fid.access & DELETE))
2525 			continue;
2526 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2527 			if (!open_file->invalidHandle) {
2528 				/* found a good writable file */
2529 				cifsFileInfo_get(open_file);
2530 				spin_unlock(&cifs_inode->open_file_lock);
2531 				*ret_file = open_file;
2532 				return 0;
2533 			} else {
2534 				if (!inv_file)
2535 					inv_file = open_file;
2536 			}
2537 		}
2538 	}
2539 	/* couldn't find usable FH with same pid, try any available */
2540 	if (!any_available) {
2541 		any_available = true;
2542 		goto refind_writable;
2543 	}
2544 
2545 	if (inv_file) {
2546 		any_available = false;
2547 		cifsFileInfo_get(inv_file);
2548 	}
2549 
2550 	spin_unlock(&cifs_inode->open_file_lock);
2551 
2552 	if (inv_file) {
2553 		rc = cifs_reopen_file(inv_file, false);
2554 		if (!rc) {
2555 			*ret_file = inv_file;
2556 			return 0;
2557 		}
2558 
2559 		spin_lock(&cifs_inode->open_file_lock);
2560 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2561 		spin_unlock(&cifs_inode->open_file_lock);
2562 		cifsFileInfo_put(inv_file);
2563 		++refind;
2564 		inv_file = NULL;
2565 		spin_lock(&cifs_inode->open_file_lock);
2566 		goto refind_writable;
2567 	}
2568 
2569 	return rc;
2570 }
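
/*
 * Example lookups (sketch): the flags select how picky the search is,
 * e.g.
 *
 *	rc = cifs_get_writable_file(cinode, FIND_WR_ANY, &cfile);
 *	rc = cifs_get_writable_file(cinode, FIND_WR_FSUID_ONLY |
 *					    FIND_WR_WITH_DELETE, &cfile);
 *
 * where the second form only accepts handles owned by the current
 * fsuid that were opened with DELETE access.
 */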
2571 
2572 struct cifsFileInfo *
2573 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2574 {
2575 	struct cifsFileInfo *cfile;
2576 	int rc;
2577 
2578 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2579 	if (rc)
2580 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2581 
2582 	return cfile;
2583 }
2584 
2585 int
2586 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2587 		       int flags,
2588 		       struct cifsFileInfo **ret_file)
2589 {
2590 	struct cifsFileInfo *cfile;
2591 	void *page = alloc_dentry_path();
2592 
2593 	*ret_file = NULL;
2594 
2595 	spin_lock(&tcon->open_file_lock);
2596 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2597 		struct cifsInodeInfo *cinode;
2598 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2599 		if (IS_ERR(full_path)) {
2600 			spin_unlock(&tcon->open_file_lock);
2601 			free_dentry_path(page);
2602 			return PTR_ERR(full_path);
2603 		}
2604 		if (strcmp(full_path, name))
2605 			continue;
2606 
2607 		cinode = CIFS_I(d_inode(cfile->dentry));
2608 		spin_unlock(&tcon->open_file_lock);
2609 		free_dentry_path(page);
2610 		return cifs_get_writable_file(cinode, flags, ret_file);
2611 	}
2612 
2613 	spin_unlock(&tcon->open_file_lock);
2614 	free_dentry_path(page);
2615 	return -ENOENT;
2616 }
2617 
2618 int
2619 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2620 		       struct cifsFileInfo **ret_file)
2621 {
2622 	struct cifsFileInfo *cfile;
2623 	void *page = alloc_dentry_path();
2624 
2625 	*ret_file = NULL;
2626 
2627 	spin_lock(&tcon->open_file_lock);
2628 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2629 		struct cifsInodeInfo *cinode;
2630 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2631 		if (IS_ERR(full_path)) {
2632 			spin_unlock(&tcon->open_file_lock);
2633 			free_dentry_path(page);
2634 			return PTR_ERR(full_path);
2635 		}
2636 		if (strcmp(full_path, name))
2637 			continue;
2638 
2639 		cinode = CIFS_I(d_inode(cfile->dentry));
2640 		spin_unlock(&tcon->open_file_lock);
2641 		free_dentry_path(page);
2642 		*ret_file = find_readable_file(cinode, 0);
2643 		return *ret_file ? 0 : -ENOENT;
2644 	}
2645 
2646 	spin_unlock(&tcon->open_file_lock);
2647 	free_dentry_path(page);
2648 	return -ENOENT;
2649 }
2650 
2651 /*
2652  * Flush data on a strict file.
2653  */
2654 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2655 		      int datasync)
2656 {
2657 	unsigned int xid;
2658 	int rc = 0;
2659 	struct cifs_tcon *tcon;
2660 	struct TCP_Server_Info *server;
2661 	struct cifsFileInfo *smbfile = file->private_data;
2662 	struct inode *inode = file_inode(file);
2663 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2664 
2665 	rc = file_write_and_wait_range(file, start, end);
2666 	if (rc) {
2667 		trace_cifs_fsync_err(inode->i_ino, rc);
2668 		return rc;
2669 	}
2670 
2671 	xid = get_xid();
2672 
2673 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2674 		 file, datasync);
2675 
2676 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2677 		rc = cifs_zap_mapping(inode);
2678 		if (rc) {
2679 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2680 			rc = 0; /* don't care about it in fsync */
2681 		}
2682 	}
2683 
2684 	tcon = tlink_tcon(smbfile->tlink);
2685 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2686 		server = tcon->ses->server;
2687 		if (server->ops->flush == NULL) {
2688 			rc = -ENOSYS;
2689 			goto strict_fsync_exit;
2690 		}
2691 
2692 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2693 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2694 			if (smbfile) {
2695 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2696 				cifsFileInfo_put(smbfile);
2697 			} else
2698 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2699 		} else
2700 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2701 	}
2702 
2703 strict_fsync_exit:
2704 	free_xid(xid);
2705 	return rc;
2706 }
2707 
2708 /*
2709  * Flush data on a non-strict file.
2710  */
2711 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2712 {
2713 	unsigned int xid;
2714 	int rc = 0;
2715 	struct cifs_tcon *tcon;
2716 	struct TCP_Server_Info *server;
2717 	struct cifsFileInfo *smbfile = file->private_data;
2718 	struct inode *inode = file_inode(file);
2719 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2720 
2721 	rc = file_write_and_wait_range(file, start, end);
2722 	if (rc) {
2723 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2724 		return rc;
2725 	}
2726 
2727 	xid = get_xid();
2728 
2729 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2730 		 file, datasync);
2731 
2732 	tcon = tlink_tcon(smbfile->tlink);
2733 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2734 		server = tcon->ses->server;
2735 		if (server->ops->flush == NULL) {
2736 			rc = -ENOSYS;
2737 			goto fsync_exit;
2738 		}
2739 
2740 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2741 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2742 			if (smbfile) {
2743 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2744 				cifsFileInfo_put(smbfile);
2745 			} else
2746 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2747 		} else
2748 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2749 	}
2750 
2751 fsync_exit:
2752 	free_xid(xid);
2753 	return rc;
2754 }
2755 
2756 /*
2757  * As file closes, flush all cached write data for this inode checking
2758  * for write behind errors.
2759  */
2760 int cifs_flush(struct file *file, fl_owner_t id)
2761 {
2762 	struct inode *inode = file_inode(file);
2763 	int rc = 0;
2764 
2765 	if (file->f_mode & FMODE_WRITE)
2766 		rc = filemap_write_and_wait(inode->i_mapping);
2767 
2768 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2769 	if (rc) {
2770 		/* get more nuanced writeback errors */
2771 		rc = filemap_check_wb_err(file->f_mapping, 0);
2772 		trace_cifs_flush_err(inode->i_ino, rc);
2773 	}
2774 	return rc;
2775 }
2776 
2777 static ssize_t
2778 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2779 {
2780 	struct file *file = iocb->ki_filp;
2781 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2782 	struct inode *inode = file->f_mapping->host;
2783 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2784 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2785 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2786 	ssize_t rc;
2787 
2788 	rc = netfs_start_io_write(inode);
2789 	if (rc < 0)
2790 		return rc;
2791 
2792 	/*
2793 	 * We need to hold the sem to be sure nobody modifies the lock list
2794 	 * with a brlock that prevents writing.
2795 	 */
2796 	down_read(&cinode->lock_sem);
2797 
2798 	rc = generic_write_checks(iocb, from);
2799 	if (rc <= 0)
2800 		goto out;
2801 
2802 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2803 	    (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2804 				     server->vals->exclusive_lock_type, 0,
2805 				     NULL, CIFS_WRITE_OP))) {
2806 		rc = -EACCES;
2807 		goto out;
2808 	}
2809 
2810 	rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2811 
2812 out:
2813 	up_read(&cinode->lock_sem);
2814 	netfs_end_io_write(inode);
2815 	if (rc > 0)
2816 		rc = generic_write_sync(iocb, rc);
2817 	return rc;
2818 }
2819 
2820 ssize_t
2821 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2822 {
2823 	struct inode *inode = file_inode(iocb->ki_filp);
2824 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2825 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2826 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2827 						iocb->ki_filp->private_data;
2828 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2829 	ssize_t written;
2830 
2831 	written = cifs_get_writer(cinode);
2832 	if (written)
2833 		return written;
2834 
2835 	if (CIFS_CACHE_WRITE(cinode)) {
2836 		if (cap_unix(tcon->ses) &&
2837 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2838 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2839 			written = netfs_file_write_iter(iocb, from);
2840 			goto out;
2841 		}
2842 		written = cifs_writev(iocb, from);
2843 		goto out;
2844 	}
2845 	/*
2846 	 * For non-oplocked files in strict cache mode we need to write the data
2847 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2848 	 * affected pages because it may cause an error with mandatory locks on
2849 	 * these pages but not on the region from pos to pos+len-1.
2850 	 */
2851 	written = netfs_file_write_iter(iocb, from);
2852 	if (CIFS_CACHE_READ(cinode)) {
2853 		/*
2854 		 * We have read level caching and we have just sent a write
2855 		 * request to the server thus making data in the cache stale.
2856 		 * Zap the cache and set oplock/lease level to NONE to avoid
2857 		 * reading stale data from the cache. All subsequent read
2858 		 * operations will read new data from the server.
2859 		 */
2860 		cifs_zap_mapping(inode);
2861 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2862 			 inode);
2863 		cinode->oplock = 0;
2864 	}
2865 out:
2866 	cifs_put_writer(cinode);
2867 	return written;
2868 }
2869 
2870 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2871 {
2872 	ssize_t rc;
2873 	struct inode *inode = file_inode(iocb->ki_filp);
2874 
2875 	if (iocb->ki_flags & IOCB_DIRECT)
2876 		return netfs_unbuffered_read_iter(iocb, iter);
2877 
2878 	rc = cifs_revalidate_mapping(inode);
2879 	if (rc)
2880 		return rc;
2881 
2882 	return netfs_file_read_iter(iocb, iter);
2883 }
2884 
2885 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2886 {
2887 	struct inode *inode = file_inode(iocb->ki_filp);
2888 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2889 	ssize_t written;
2890 	int rc;
2891 
2892 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2893 		written = netfs_unbuffered_write_iter(iocb, from);
2894 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2895 			cifs_zap_mapping(inode);
2896 			cifs_dbg(FYI,
2897 				 "Set no oplock for inode=%p after a write operation\n",
2898 				 inode);
2899 			cinode->oplock = 0;
2900 		}
2901 		return written;
2902 	}
2903 
2904 	written = cifs_get_writer(cinode);
2905 	if (written)
2906 		return written;
2907 
2908 	written = netfs_file_write_iter(iocb, from);
2909 
2910 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2911 		rc = filemap_fdatawrite(inode->i_mapping);
2912 		if (rc)
2913 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2914 				 rc, inode);
2915 	}
2916 
2917 	cifs_put_writer(cinode);
2918 	return written;
2919 }
2920 
2921 ssize_t
2922 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2923 {
2924 	struct inode *inode = file_inode(iocb->ki_filp);
2925 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2926 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2927 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2928 						iocb->ki_filp->private_data;
2929 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2930 	int rc = -EACCES;
2931 
2932 	/*
2933 	 * In strict cache mode we need to read from the server all the time
2934 	 * if we don't have level II oplock because the server can delay mtime
2935 	 * change - so we can't make a decision about invalidating the inode.
2936 	 * And we can also fail with page reading if there are mandatory locks
2937 	 * on pages affected by this read but not on the region from pos to
2938 	 * pos+len-1.
2939 	 */
2940 	if (!CIFS_CACHE_READ(cinode))
2941 		return netfs_unbuffered_read_iter(iocb, to);
2942 
2943 	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2944 		if (iocb->ki_flags & IOCB_DIRECT)
2945 			return netfs_unbuffered_read_iter(iocb, to);
2946 		return netfs_buffered_read_iter(iocb, to);
2947 	}
2948 
2949 	/*
2950 	 * We need to hold the sem to be sure nobody modifies the lock list
2951 	 * with a brlock that prevents reading.
2952 	 */
2953 	if (iocb->ki_flags & IOCB_DIRECT) {
2954 		rc = netfs_start_io_direct(inode);
2955 		if (rc < 0)
2956 			goto out;
2957 		rc = -EACCES;
2958 		down_read(&cinode->lock_sem);
2959 		if (!cifs_find_lock_conflict(
2960 			    cfile, iocb->ki_pos, iov_iter_count(to),
2961 			    tcon->ses->server->vals->shared_lock_type,
2962 			    0, NULL, CIFS_READ_OP))
2963 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2964 		up_read(&cinode->lock_sem);
2965 		netfs_end_io_direct(inode);
2966 	} else {
2967 		rc = netfs_start_io_read(inode);
2968 		if (rc < 0)
2969 			goto out;
2970 		rc = -EACCES;
2971 		down_read(&cinode->lock_sem);
2972 		if (!cifs_find_lock_conflict(
2973 			    cfile, iocb->ki_pos, iov_iter_count(to),
2974 			    tcon->ses->server->vals->shared_lock_type,
2975 			    0, NULL, CIFS_READ_OP))
2976 			rc = filemap_read(iocb, to, 0);
2977 		up_read(&cinode->lock_sem);
2978 		netfs_end_io_read(inode);
2979 	}
2980 out:
2981 	return rc;
2982 }
2983 
2984 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2985 {
2986 	return netfs_page_mkwrite(vmf, NULL);
2987 }
2988 
2989 static const struct vm_operations_struct cifs_file_vm_ops = {
2990 	.fault = filemap_fault,
2991 	.map_pages = filemap_map_pages,
2992 	.page_mkwrite = cifs_page_mkwrite,
2993 };
2994 
2995 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2996 {
2997 	int xid, rc = 0;
2998 	struct inode *inode = file_inode(file);
2999 
3000 	xid = get_xid();
3001 
3002 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
3003 		rc = cifs_zap_mapping(inode);
3004 	if (!rc)
3005 		rc = generic_file_mmap(file, vma);
3006 	if (!rc)
3007 		vma->vm_ops = &cifs_file_vm_ops;
3008 
3009 	free_xid(xid);
3010 	return rc;
3011 }
3012 
3013 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3014 {
3015 	int rc, xid;
3016 
3017 	xid = get_xid();
3018 
3019 	rc = cifs_revalidate_file(file);
3020 	if (rc)
3021 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3022 			 rc);
3023 	if (!rc)
3024 		rc = generic_file_mmap(file, vma);
3025 	if (!rc)
3026 		vma->vm_ops = &cifs_file_vm_ops;
3027 
3028 	free_xid(xid);
3029 	return rc;
3030 }
3031 
3032 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3033 {
3034 	struct cifsFileInfo *open_file;
3035 
3036 	spin_lock(&cifs_inode->open_file_lock);
3037 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3038 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3039 			spin_unlock(&cifs_inode->open_file_lock);
3040 			return 1;
3041 		}
3042 	}
3043 	spin_unlock(&cifs_inode->open_file_lock);
3044 	return 0;
3045 }
3046 
3047 /* We do not want to update the file size from server for inodes
3048    open for write - to avoid races with writepage extending
3049    the file - in the future we could consider allowing
3050    refreshing the inode only on increases in the file size
3051    but this is tricky to do without racing with writebehind
3052    page caching in the current Linux kernel design */
3053 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3054 			    bool from_readdir)
3055 {
3056 	if (!cifsInode)
3057 		return true;
3058 
3059 	if (is_inode_writable(cifsInode) ||
3060 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3061 		/* This inode is open for write at least once */
3062 		struct cifs_sb_info *cifs_sb;
3063 
3064 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3065 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3066 			/* since there is no page cache to corrupt on direct
3067 			   I/O we can change the size safely */
3068 			return true;
3069 		}
3070 
3071 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3072 			return true;
3073 
3074 		return false;
3075 	} else
3076 		return true;
3077 }
3078 
3079 void cifs_oplock_break(struct work_struct *work)
3080 {
3081 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3082 						  oplock_break);
3083 	struct inode *inode = d_inode(cfile->dentry);
3084 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3085 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3086 	struct cifs_tcon *tcon;
3087 	struct TCP_Server_Info *server;
3088 	struct tcon_link *tlink;
3089 	int rc = 0;
3090 	bool purge_cache = false, oplock_break_cancelled;
3091 	__u64 persistent_fid, volatile_fid;
3092 	__u16 net_fid;
3093 
3094 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3095 			TASK_UNINTERRUPTIBLE);
3096 
3097 	tlink = cifs_sb_tlink(cifs_sb);
3098 	if (IS_ERR(tlink))
3099 		goto out;
3100 	tcon = tlink_tcon(tlink);
3101 	server = tcon->ses->server;
3102 
3103 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3104 				      cfile->oplock_epoch, &purge_cache);
3105 
3106 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3107 						cifs_has_mand_locks(cinode)) {
3108 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3109 			 inode);
3110 		cinode->oplock = 0;
3111 	}
3112 
3113 	if (inode && S_ISREG(inode->i_mode)) {
3114 		if (CIFS_CACHE_READ(cinode))
3115 			break_lease(inode, O_RDONLY);
3116 		else
3117 			break_lease(inode, O_WRONLY);
3118 		rc = filemap_fdatawrite(inode->i_mapping);
3119 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3120 			rc = filemap_fdatawait(inode->i_mapping);
3121 			mapping_set_error(inode->i_mapping, rc);
3122 			cifs_zap_mapping(inode);
3123 		}
3124 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3125 		if (CIFS_CACHE_WRITE(cinode))
3126 			goto oplock_break_ack;
3127 	}
3128 
3129 	rc = cifs_push_locks(cfile);
3130 	if (rc)
3131 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3132 
3133 oplock_break_ack:
3134 	/*
3135 	 * When an oplock break is received and there are no active file
3136 	 * handles, only cached ones, schedule the deferred close immediately
3137 	 * so that a new open will not use the cached handle.
3138 	 */
3139 
3140 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3141 		cifs_close_deferred_file(cinode);
3142 
3143 	persistent_fid = cfile->fid.persistent_fid;
3144 	volatile_fid = cfile->fid.volatile_fid;
3145 	net_fid = cfile->fid.netfid;
3146 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3147 
3148 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
3149 	/*
3150 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3151 	 * an acknowledgment to be sent when the file has already been closed.
3152 	 */
3153 	spin_lock(&cinode->open_file_lock);
3154 	/* check list empty since can race with kill_sb calling tree disconnect */
3155 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3156 		spin_unlock(&cinode->open_file_lock);
3157 		rc = server->ops->oplock_response(tcon, persistent_fid,
3158 						  volatile_fid, net_fid, cinode);
3159 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3160 	} else
3161 		spin_unlock(&cinode->open_file_lock);
3162 
3163 	cifs_put_tlink(tlink);
3164 out:
3165 	cifs_done_oplock_break(cinode);
3166 }
3167 
3168 static int cifs_swap_activate(struct swap_info_struct *sis,
3169 			      struct file *swap_file, sector_t *span)
3170 {
3171 	struct cifsFileInfo *cfile = swap_file->private_data;
3172 	struct inode *inode = swap_file->f_mapping->host;
3173 	unsigned long blocks;
3174 	long long isize;
3175 
3176 	cifs_dbg(FYI, "swap activate\n");
3177 
3178 	if (!swap_file->f_mapping->a_ops->swap_rw)
3179 		/* Cannot support swap */
3180 		return -EINVAL;
3181 
3182 	spin_lock(&inode->i_lock);
3183 	blocks = inode->i_blocks;
3184 	isize = inode->i_size;
3185 	spin_unlock(&inode->i_lock);
3186 	if (blocks*512 < isize) {
3187 		pr_warn("swap activate: swapfile has holes\n");
3188 		return -EINVAL;
3189 	}
3190 	*span = sis->pages;
3191 
3192 	pr_warn_once("Swap support over SMB3 is experimental\n");
3193 
3194 	/*
3195 	 * TODO: consider adding ACL (or documenting how) to prevent other
3196 	 * users (on this or other systems) from reading it
3197 	 */
3198 
3200 	/* TODO: add sk_set_memalloc(inet) or similar */
3201 
3202 	if (cfile)
3203 		cfile->swapfile = true;
3204 	/*
3205 	 * TODO: Since file already open, we can't open with DENY_ALL here
3206 	 * but we could add call to grab a byte range lock to prevent others
3207 	 * from reading or writing the file
3208 	 */
3209 
3210 	sis->flags |= SWP_FS_OPS;
3211 	return add_swap_extent(sis, 0, sis->max, 0);
3212 }
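
/*
 * Example of the holes check above (illustrative numbers): a 1 MiB
 * swapfile (isize = 1048576) must report i_blocks >= 2048, since
 * 2048 * 512 = 1048576; a sparse file reporting fewer blocks is
 * rejected with -EINVAL.
 */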
3213 
3214 static void cifs_swap_deactivate(struct file *file)
3215 {
3216 	struct cifsFileInfo *cfile = file->private_data;
3217 
3218 	cifs_dbg(FYI, "swap deactivate\n");
3219 
3220 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3221 
3222 	if (cfile)
3223 		cfile->swapfile = false;
3224 
3225 	/* do we need to unpin (or unlock) the file */
3226 }
3227 
3228 /**
3229  * cifs_swap_rw - SMB3 address space operation for swap I/O
3230  * @iocb: target I/O control block
3231  * @iter: I/O buffer
3232  *
3233  * Perform IO to the swap-file.  This is much like direct IO.
3234  */
3235 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3236 {
3237 	ssize_t ret;
3238 
3239 	if (iov_iter_rw(iter) == READ)
3240 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3241 	else
3242 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3243 	if (ret < 0)
3244 		return ret;
3245 	return 0;
3246 }
3247 
3248 const struct address_space_operations cifs_addr_ops = {
3249 	.read_folio	= netfs_read_folio,
3250 	.readahead	= netfs_readahead,
3251 	.writepages	= netfs_writepages,
3252 	.dirty_folio	= netfs_dirty_folio,
3253 	.release_folio	= netfs_release_folio,
3254 	.direct_IO	= noop_direct_IO,
3255 	.invalidate_folio = netfs_invalidate_folio,
3256 	.migrate_folio	= filemap_migrate_folio,
3257 	/*
3258 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3259 	 * helper if needed
3260 	 */
3261 	.swap_activate	= cifs_swap_activate,
3262 	.swap_deactivate = cifs_swap_deactivate,
3263 	.swap_rw = cifs_swap_rw,
3264 };
3265 
3266 /*
3267  * cifs_readahead requires the server to support a buffer large enough to
3268  * contain the header plus one complete page of data.  Otherwise, we need
3269  * to leave cifs_readahead out of the address space operations.
3270  */
3271 const struct address_space_operations cifs_addr_ops_smallbuf = {
3272 	.read_folio	= netfs_read_folio,
3273 	.writepages	= netfs_writepages,
3274 	.dirty_folio	= netfs_dirty_folio,
3275 	.release_folio	= netfs_release_folio,
3276 	.invalidate_folio = netfs_invalidate_folio,
3277 	.migrate_folio	= filemap_migrate_folio,
3278 };
3279