Lines matching full:files (identifier search results for "files" in the kernel's file-descriptor table code, fs/file.c). Each entry gives the original source line number, the matching line, and the enclosing function; "argument" and "local" tag declarations.
116 * space if any. This does not copy the file pointers. Called with the files
134 * clear the extra space. Called with the files spinlock held for write.
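The two fragments above (source lines 116 and 134) belong to the copy helpers used during a table resize; for each bitmap the pattern is: copy the words in use, zero the fresh tail. A minimal sketch for one bitmap follows. This and all later sketches assume the fs/file.c context (types and helpers from linux/fdtable.h) and are condensed paraphrases for reading, not drop-in code; the real helper repeats this for open_fds, close_on_exec and the full_fds_bits summary map, and newer releases spell it with bitmap helpers:

    /* Sketch: carry one bitmap over into a larger, freshly allocated table. */
    unsigned int cpy = copy_words * sizeof(long);                      /* bytes in use */
    unsigned int set = (fdt_words(nfdt) - copy_words) * sizeof(long);  /* fresh tail */

    memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
    memset((char *)nfdt->open_fds + cpy, 0, set);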
169 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab in alloc_fdtable()
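Source line 169 is part of the alloc_fdtable() sizing comment: growth is keyed to the size of the struct file * array, starting at 1024 bytes worth of pointers and doubling from there, capped by the fs.nr_open sysctl. The policy boils down to roughly the following; this is an illustrative paraphrase (the helper name is hypothetical and the rounding details are simplified, not the upstream arithmetic):

    /* Hypothetical helper: how many slots should the new fdtable carry? */
    static unsigned int fdtable_slots(unsigned int slots_wanted)
    {
        /* Start at 1024 bytes worth of struct file pointers ... */
        unsigned int nr = 1024 / sizeof(struct file *);

        /* ... and double until the request fits ... */
        while (nr < slots_wanted)
            nr *= 2;

        /* ... never exceeding the fs.nr_open ceiling. */
        return min(nr, sysctl_nr_open);
    }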
230 * The files->file_lock should be held on entry, and will be held on exit.
232 static int expand_fdtable(struct files_struct *files, unsigned int nr) in expand_fdtable() argument
233 __releases(files->file_lock) in expand_fdtable()
234 __acquires(files->file_lock) in expand_fdtable()
238 spin_unlock(&files->file_lock); in expand_fdtable()
244 if (atomic_read(&files->count) > 1) in expand_fdtable()
247 spin_lock(&files->file_lock); in expand_fdtable()
250 cur_fdt = files_fdtable(files); in expand_fdtable()
253 rcu_assign_pointer(files->fdt, new_fdt); in expand_fdtable()
254 if (cur_fdt != &files->fdtab) in expand_fdtable()
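Source lines 232-254 outline the resize itself: the spinlock is dropped around the (possibly sleeping) allocation, the new table is published with rcu_assign_pointer(), and the old one is freed only after an RCU grace period. A condensed paraphrase, with error handling and the bitmap copy elided:

    static int expand_fdtable(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
    {
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);     /* allocation may sleep */
        new_fdt = alloc_fdtable(nr);

        /*
         * Make sure every concurrent fd_install() has either observed
         * resize_in_progress or left its rcu_read_lock_sched() section.
         */
        if (atomic_read(&files->count) > 1)
            synchronize_rcu();

        spin_lock(&files->file_lock);
        cur_fdt = files_fdtable(files);
        copy_fdtable(new_fdt, cur_fdt);
        rcu_assign_pointer(files->fdt, new_fdt);        /* publish */
        if (cur_fdt != &files->fdtab)                   /* keep the embedded table */
            call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
        /* coupled with smp_rmb() in fd_install() */
        smp_wmb();
        return 1;
    }

The cur_fdt != &files->fdtab test (line 254, and again at line 492) exists because the first fdtable is embedded in the files_struct itself and must never be freed on its own.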
262 * Expand files.
266 * The files->file_lock should be held on entry, and will be held on exit.
268 static int expand_files(struct files_struct *files, unsigned int nr) in expand_files() argument
269 __releases(files->file_lock) in expand_files()
270 __acquires(files->file_lock) in expand_files()
276 fdt = files_fdtable(files); in expand_files()
282 if (unlikely(files->resize_in_progress)) { in expand_files()
283 spin_unlock(&files->file_lock); in expand_files()
284 wait_event(files->resize_wait, !files->resize_in_progress); in expand_files()
285 spin_lock(&files->file_lock); in expand_files()
294 files->resize_in_progress = true; in expand_files()
295 error = expand_fdtable(files, nr); in expand_files()
296 files->resize_in_progress = false; in expand_files()
298 wake_up_all(&files->resize_wait); in expand_files()
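expand_files() (source lines 268-298) is the only caller, and lines 282-298 show how concurrent growers are serialized without holding the spinlock across the whole resize: a single resize_in_progress flag plus a waitqueue. Roughly:

    static int expand_files(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
    {
        struct fdtable *fdt;
        int expanded = 0;

    repeat:
        fdt = files_fdtable(files);
        if (nr < fdt->max_fds)
            return expanded;            /* already big enough */
        if (nr >= sysctl_nr_open)
            return -EMFILE;             /* hard ceiling */

        if (unlikely(files->resize_in_progress)) {
            /* Another thread is resizing: sleep, then re-evaluate. */
            spin_unlock(&files->file_lock);
            expanded = 1;               /* table may change while we sleep */
            wait_event(files->resize_wait, !files->resize_in_progress);
            spin_lock(&files->file_lock);
            goto repeat;
        }

        files->resize_in_progress = true;
        expanded = expand_fdtable(files, nr);   /* drops and retakes file_lock */
        files->resize_in_progress = false;

        wake_up_all(&files->resize_wait);
        return expanded;
    }

Returning non-zero after merely waiting is deliberate: the caller must re-read the fdtable pointer, since another thread may have installed a new one in the meantime.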
429 * At the same time we know no files will disappear as all other in dup_fd()
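Source line 429 sits in dup_fd()'s copy loop. Holding the source table's file_lock pins every installed file (all close paths take that lock), so bumping each refcount is safe even while sibling threads keep allocating descriptors. The loop, approximately (bitmaps already carried over by copy_fd_bitmaps()):

    struct file __rcu **old_fds = old_fdt->fd, **new_fds = new_fdt->fd;
    unsigned int i;

    for (i = open_files; i != 0; i--) {
        struct file *f = *old_fds++;

        if (f) {
            get_file(f);    /* safe: every closer takes oldf->file_lock */
        } else {
            /*
             * A racing alloc_fd() may have reserved the slot (bit set)
             * without installing a file yet; present it as free in
             * the copy.
             */
            __clear_bit(open_files - i, new_fdt->open_fds);
        }
        rcu_assign_pointer(*new_fds++, f);
    }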
454 static struct fdtable *close_files(struct files_struct * files) in close_files() argument
459 * files structure. in close_files()
461 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in close_files()
474 filp_close(file, files); in close_files()
486 void put_files_struct(struct files_struct *files) in put_files_struct() argument
488 if (atomic_dec_and_test(&files->count)) { in put_files_struct()
489 struct fdtable *fdt = close_files(files); in put_files_struct()
492 if (fdt != &files->fdtab) in put_files_struct()
494 kmem_cache_free(files_cachep, files); in put_files_struct()
500 struct files_struct * files = tsk->files; in exit_files() local
502 if (files) { in exit_files()
504 tsk->files = NULL; in exit_files()
506 put_files_struct(files); in exit_files()
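Source lines 454-506 form the teardown path: exit_files() detaches the table from the task under task_lock(), put_files_struct() drops the shared reference, and close_files() (which may run lock-free, being the last user) filp_close()es whatever is still open. Stitched together, slightly simplified:

    void exit_files(struct task_struct *tsk)
    {
        struct files_struct *files = tsk->files;

        if (files) {
            task_lock(tsk);         /* vs. cross-task fget_task() et al. */
            tsk->files = NULL;
            task_unlock(tsk);
            put_files_struct(files);
        }
    }

    void put_files_struct(struct files_struct *files)
    {
        if (atomic_dec_and_test(&files->count)) {         /* last reference? */
            struct fdtable *fdt = close_files(files);     /* filp_close() all open fds */

            if (fdt != &files->fdtab)   /* free only a non-embedded table */
                __free_fdtable(fdt);
            kmem_cache_free(files_cachep, files);
        }
    }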
552 struct files_struct *files = current->files; in alloc_fd() local
557 spin_lock(&files->file_lock); in alloc_fd()
559 fdt = files_fdtable(files); in alloc_fd()
561 if (fd < files->next_fd) in alloc_fd()
562 fd = files->next_fd; in alloc_fd()
568 * N.B. For clone tasks sharing a files structure, this test in alloc_fd()
569 * will limit the total number of files that can be opened. in alloc_fd()
576 error = expand_files(files, fd); in alloc_fd()
583 if (start <= files->next_fd) in alloc_fd()
584 files->next_fd = fd + 1; in alloc_fd()
590 spin_unlock(&files->file_lock); in alloc_fd()
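alloc_fd() (source lines 552-590) backs get_unused_fd_flags(): scan the open-fds bitmap from the cached next_fd, grow the table when the chosen slot is out of range, and remember where to start next time. A condensed sketch of the body; the bit twiddling is spelled with raw bitops because the upstream helpers (and their argument lists) have shifted across releases, and the full_fds_bits bookkeeping is elided:

    /* Body of alloc_fd(start, end, flags), condensed: */
    struct files_struct *files = current->files;
    struct fdtable *fdt;
    unsigned int fd;
    int error;

    spin_lock(&files->file_lock);
repeat:
    fdt = files_fdtable(files);
    fd = start;
    if (fd < files->next_fd)
        fd = files->next_fd;            /* cached lower bound for free slots */

    if (fd < fdt->max_fds)
        fd = find_next_fd(fdt, fd);     /* two-level bitmap scan */

    error = -EMFILE;
    if (fd >= end)                      /* end: RLIMIT_NOFILE for plain opens */
        goto out;

    if (fd >= fdt->max_fds) {
        error = expand_files(files, fd);
        if (error < 0)
            goto out;
        goto repeat;                    /* table may have been swapped: rescan */
    }

    if (start <= files->next_fd)
        files->next_fd = fd + 1;

    __set_bit(fd, fdt->open_fds);       /* reserve the slot */
    if (flags & O_CLOEXEC)
        __set_bit(fd, fdt->close_on_exec);
    else
        __clear_bit(fd, fdt->close_on_exec);
    error = fd;
out:
    spin_unlock(&files->file_lock);
    return error;

Note the slot is only reserved here; the file pointer is published later by fd_install(), which is exactly the window the dup_fd() bitmap fix-up above and do_dup2()'s EBUSY check below deal with.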
605 static void __put_unused_fd(struct files_struct *files, unsigned int fd) in __put_unused_fd() argument
607 struct fdtable *fdt = files_fdtable(files); in __put_unused_fd()
609 if (fd < files->next_fd) in __put_unused_fd()
610 files->next_fd = fd; in __put_unused_fd()
615 struct files_struct *files = current->files; in put_unused_fd() local
616 spin_lock(&files->file_lock); in put_unused_fd()
617 __put_unused_fd(files, fd); in put_unused_fd()
618 spin_unlock(&files->file_lock); in put_unused_fd()
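Releasing a descriptor (source lines 605-618) is the mirror image: clear the bit and pull next_fd back so the freed slot is the first candidate for the next allocation:

    static void __put_unused_fd(struct files_struct *files, unsigned int fd)
    {
        struct fdtable *fdt = files_fdtable(files);

        __clear_open_fd(fd, fdt);       /* clear the open-fds bit(s) for this slot */
        if (fd < files->next_fd)
            files->next_fd = fd;        /* keep allocation lowest-fd-first */
    }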
626 * The VFS is full of places where we drop the files lock between
641 struct files_struct *files = current->files; in fd_install() local
649 if (unlikely(files->resize_in_progress)) { in fd_install()
651 spin_lock(&files->file_lock); in fd_install()
652 fdt = files_fdtable(files); in fd_install()
655 spin_unlock(&files->file_lock); in fd_install()
660 fdt = rcu_dereference_sched(files->fdt); in fd_install()
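fd_install() (source lines 641-660) publishes a file into an already-reserved slot without taking the spinlock. The only writer it can race with is a table resize, hence the resize_in_progress check and locked fallback; the memory barriers pair with expand_fdtable() above. Condensed, with sanity checks elided:

    void fd_install(unsigned int fd, struct file *file)
    {
        struct files_struct *files = current->files;
        struct fdtable *fdt;

        rcu_read_lock_sched();  /* vs. synchronize_rcu() in expand_fdtable() */

        if (unlikely(files->resize_in_progress)) {
            /* Resize under way: go through the lock to hit the live table. */
            rcu_read_unlock_sched();
            spin_lock(&files->file_lock);
            fdt = files_fdtable(files);
            rcu_assign_pointer(fdt->fd[fd], file);
            spin_unlock(&files->file_lock);
            return;
        }
        /* coupled with smp_wmb() in expand_fdtable() */
        smp_rmb();
        fdt = rcu_dereference_sched(files->fdt);
        rcu_assign_pointer(fdt->fd[fd], file);
        rcu_read_unlock_sched();
    }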
670 * @files: file struct to retrieve file from
679 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) in file_close_fd_locked() argument
681 struct fdtable *fdt = files_fdtable(files); in file_close_fd_locked()
684 lockdep_assert_held(&files->file_lock); in file_close_fd_locked()
693 __put_unused_fd(files, fd); in file_close_fd_locked()
700 struct files_struct *files = current->files; in close_fd() local
703 spin_lock(&files->file_lock); in close_fd()
704 file = file_close_fd_locked(files, fd); in close_fd()
705 spin_unlock(&files->file_lock); in close_fd()
709 return filp_close(file, files); in close_fd()
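file_close_fd_locked() and close_fd() (source lines 679-709) split descriptor teardown in two: detach under the lock, then do the blocking flush outside it. Approximately, with speculation hardening elided:

    struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
    {
        struct fdtable *fdt = files_fdtable(files);
        struct file *file;

        lockdep_assert_held(&files->file_lock);

        if (fd >= fdt->max_fds)
            return NULL;

        file = rcu_dereference_raw(fdt->fd[fd]);
        if (file) {
            rcu_assign_pointer(fdt->fd[fd], NULL);
            __put_unused_fd(files, fd);     /* slot reusable right away */
        }
        return file;
    }

    int close_fd(unsigned fd)
    {
        struct files_struct *files = current->files;
        struct file *file;

        spin_lock(&files->file_lock);
        file = file_close_fd_locked(files, fd);
        spin_unlock(&files->file_lock);
        if (!file)
            return -EBADF;

        return filp_close(file, files);     /* may block: flush, fput */
    }

file_close_fd() at source lines 839-844 below is the same sequence minus the filp_close(), for callers that want the detached file back.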
740 static inline void __range_close(struct files_struct *files, unsigned int fd, in __range_close() argument
746 spin_lock(&files->file_lock); in __range_close()
747 n = last_fd(files_fdtable(files)); in __range_close()
751 file = file_close_fd_locked(files, fd); in __range_close()
753 spin_unlock(&files->file_lock); in __range_close()
754 filp_close(file, files); in __range_close()
756 spin_lock(&files->file_lock); in __range_close()
758 spin_unlock(&files->file_lock); in __range_close()
760 spin_lock(&files->file_lock); in __range_close()
763 spin_unlock(&files->file_lock); in __range_close()
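__range_close() (source lines 740-763) is why the locked helper exists: the lock is dropped around every filp_close(), and cond_resched() keeps huge ranges preemptible. Roughly:

    static inline void __range_close(struct files_struct *files, unsigned int fd,
                                     unsigned int max_fd)
    {
        struct file *file;
        unsigned n;

        spin_lock(&files->file_lock);
        n = last_fd(files_fdtable(files));
        max_fd = min(max_fd, n);

        for (; fd <= max_fd; fd++) {
            file = file_close_fd_locked(files, fd);
            if (file) {
                spin_unlock(&files->file_lock);
                filp_close(file, files);    /* blocking work outside the lock */
                cond_resched();
                spin_lock(&files->file_lock);
            } else if (need_resched()) {
                spin_unlock(&files->file_lock);
                cond_resched();
                spin_lock(&files->file_lock);
            }
        }
        spin_unlock(&files->file_lock);
    }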
781 struct files_struct *cur_fds = me->files, *fds = NULL; in SYSCALL_DEFINE3()
817 * We're done closing the files we were supposed to. Time to install in SYSCALL_DEFINE3()
821 me->files = cur_fds; in SYSCALL_DEFINE3()
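The close_range() syscall body around source lines 781-821: with CLOSE_RANGE_UNSHARE the closes run against a private copy of the table, which is only installed once they are done, so sharers never observe a half-closed range. A heavily simplified sketch of the syscall body; flag validation, the CLOSE_RANGE_CLOEXEC variant, and the exact shape of the table-copy call (which has changed across releases, hence the hypothetical name) are elided:

    struct task_struct *me = current;
    struct files_struct *cur_fds = me->files, *fds = NULL;

    if (flags & CLOSE_RANGE_UNSHARE) {
        fds = copy_of_fd_table(cur_fds);    /* hypothetical name for the dup step */
        swap(cur_fds, fds);                 /* operate on the private copy */
    }

    __range_close(cur_fds, fd, max_fd);

    /*
     * We're done closing the files we were supposed to. Time to install
     * the trimmed table and drop the old, shared one.
     */
    if (fds) {
        task_lock(me);
        me->files = cur_fds;
        task_unlock(me);
        put_files_struct(fds);
    }
    return 0;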
839 struct files_struct *files = current->files; in file_close_fd() local
842 spin_lock(&files->file_lock); in file_close_fd()
843 file = file_close_fd_locked(files, fd); in file_close_fd()
844 spin_unlock(&files->file_lock); in file_close_fd()
849 void do_close_on_exec(struct files_struct *files) in do_close_on_exec() argument
855 spin_lock(&files->file_lock); in do_close_on_exec()
859 fdt = files_fdtable(files); in do_close_on_exec()
874 __put_unused_fd(files, fd); in do_close_on_exec()
875 spin_unlock(&files->file_lock); in do_close_on_exec()
876 filp_close(file, files); in do_close_on_exec()
878 spin_lock(&files->file_lock); in do_close_on_exec()
882 spin_unlock(&files->file_lock); in do_close_on_exec()
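do_close_on_exec() (source lines 849-882) runs at execve(): it consumes the close_on_exec bitmap a word at a time, so 64 untouched descriptors cost a single test, and like the range code it drops the lock around each filp_close(). In outline:

    void do_close_on_exec(struct files_struct *files)
    {
        unsigned i;
        struct fdtable *fdt;

        /* exec has already unshared the table */
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
            unsigned long set;
            unsigned fd = i * BITS_PER_LONG;

            fdt = files_fdtable(files);     /* re-read: lock was dropped below */
            if (fd >= fdt->max_fds)
                break;
            set = fdt->close_on_exec[i];
            if (!set)
                continue;                   /* whole word clear: skip 64 fds */
            fdt->close_on_exec[i] = 0;
            for ( ; set ; fd++, set >>= 1) {
                struct file *file;

                if (!(set & 1))
                    continue;
                file = fdt->fd[fd];
                if (!file)
                    continue;
                rcu_assign_pointer(fdt->fd[fd], NULL);
                __put_unused_fd(files, fd);
                spin_unlock(&files->file_lock);
                filp_close(file, files);
                cond_resched();
                spin_lock(&files->file_lock);
            }
        }
        spin_unlock(&files->file_lock);
    }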
976 static inline struct file *__fget_files_rcu(struct files_struct *files, in __fget_files_rcu() argument
981 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in __fget_files_rcu()
1030 unlikely(rcu_dereference_raw(files->fdt) != fdt)) { in __fget_files_rcu()
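__fget_files_rcu() (source lines 976-1030) is the lockless lookup under every fget(): load the table, load the slot, try to take a reference, then verify that neither the slot nor the whole table changed meanwhile; on any race, drop the reference and retry. A condensed version as of the release this listing reflects (the refcounting helper has been reworked since):

    static inline struct file *__fget_files_rcu(struct files_struct *files,
                                                unsigned int fd, fmode_t mask)
    {
        for (;;) {
            struct file *file;
            struct fdtable *fdt = rcu_dereference_raw(files->fdt);
            struct file __rcu **fdentry;

            if (unlikely(fd >= fdt->max_fds))
                return NULL;

            fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
            file = rcu_dereference_raw(*fdentry);
            if (unlikely(!file))
                return NULL;

            if (unlikely(file->f_mode & mask))
                return NULL;

            /* Fails if the file is already on its way to being freed. */
            if (unlikely(!get_file_rcu(file)))
                continue;

            /* The slot or the whole table may have changed under us. */
            if (unlikely(rcu_dereference_raw(*fdentry) != file) ||
                unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
                fput(file);
                continue;
            }
            return file;
        }
    }

The next entries show __fget_files() wrapping this in rcu_read_lock()/rcu_read_unlock() and __fget() pointing it at current->files.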
1052 static struct file *__fget_files(struct files_struct *files, unsigned int fd, in __fget_files() argument
1058 file = __fget_files_rcu(files, fd, mask); in __fget_files()
1066 return __fget_files(current->files, fd, mask); in __fget()
1086 if (task->files) in fget_task()
1087 file = __fget_files(task->files, fd, 0); in fget_task()
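fget_task() (source lines 1086-1087) is the cross-task variant, and it explains why exit_files() NULLs tsk->files under task_lock(): lookups in another task take the same lock, so they see either a live table or NULL, never a stale pointer:

    struct file *fget_task(struct task_struct *task, unsigned int fd)
    {
        struct file *file = NULL;

        task_lock(task);                /* serializes with exit_files() */
        if (task->files)
            file = __fget_files(task->files, fd, 0);
        task_unlock(task);

        return file;
    }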
1096 struct files_struct *files; in fget_task_next() local
1101 files = task->files; in fget_task_next()
1102 if (files) { in fget_task_next()
1104 for (; fd < files_fdtable(files)->max_fds; fd++) { in fget_task_next()
1105 file = __fget_files_rcu(files, fd, 0); in fget_task_next()
1142 struct files_struct *files = current->files; in __fget_light() local
1154 if (likely(atomic_read_acquire(&files->count) == 1)) { in __fget_light()
1155 file = files_lookup_fd_raw(files, fd); in __fget_light()
1160 file = __fget_files(files, fd, mask); in __fget_light()
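__fget_light() (source lines 1142-1160) is the fdget() fast path: when current->files has exactly one user, no other thread can close the descriptor while we use it, so a borrowed pointer is returned with no refcount traffic; otherwise it falls back to the full RCU lookup and flags the caller to fput(). Roughly:

    static unsigned long __fget_light(unsigned int fd, fmode_t mask)
    {
        struct files_struct *files = current->files;
        struct file *file;

        /* pairs with atomic_dec_and_test() in put_files_struct() */
        if (likely(atomic_read_acquire(&files->count) == 1)) {
            /* Single user: borrow the pointer, no refcount traffic. */
            file = files_lookup_fd_raw(files, fd);
            if (!file || unlikely(file->f_mode & mask))
                return 0;
            return (unsigned long)file;
        } else {
            /* Shared table: take a real reference via the RCU path. */
            file = __fget_files(files, fd, mask);
            if (!file)
                return 0;
            return FDPUT_FPUT | (unsigned long)file;
        }
    }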
1218 struct files_struct *files = current->files; in set_close_on_exec() local
1219 spin_lock(&files->file_lock); in set_close_on_exec()
1220 __set_close_on_exec(fd, files_fdtable(files), flag); in set_close_on_exec()
1221 spin_unlock(&files->file_lock); in set_close_on_exec()
1228 res = close_on_exec(fd, current->files); in get_close_on_exec()
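The close-on-exec accessors (source lines 1218-1228) show the asymmetry one more time: writing a bitmap bit takes file_lock because a resize may swap the bitmap out underneath, while reading needs only RCU:

    void set_close_on_exec(unsigned int fd, int flag)
    {
        struct files_struct *files = current->files;

        spin_lock(&files->file_lock);
        __set_close_on_exec(fd, files_fdtable(files), flag);
        spin_unlock(&files->file_lock);
    }

    bool get_close_on_exec(unsigned int fd)
    {
        bool res;

        rcu_read_lock();
        res = close_on_exec(fd, current->files);
        rcu_read_unlock();
        return res;
    }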
1233 static int do_dup2(struct files_struct *files, in do_dup2() argument
1235 __releases(&files->file_lock) in do_dup2()
1246 fdt = files_fdtable(files); in do_dup2()
1254 spin_unlock(&files->file_lock); in do_dup2()
1257 filp_close(tofree, files); in do_dup2()
1262 spin_unlock(&files->file_lock); in do_dup2()
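do_dup2() (source lines 1233-1262) installs the replacement before dealing with the displaced file, and the displaced filp_close() again happens after the unlock; note the function is entered with file_lock held and always exits without it. Sketch, with the bitops spelled raw as above:

    static int do_dup2(struct files_struct *files,
                       struct file *file, unsigned fd, unsigned flags)
        __releases(&files->file_lock)
    {
        struct file *tofree;
        struct fdtable *fdt;

        fdt = files_fdtable(files);
        tofree = fdt->fd[fd];
        if (!tofree && fd_is_open(fd, fdt))
            goto Ebusy;     /* reserved by an in-flight open(), not yet installed */

        get_file(file);
        rcu_assign_pointer(fdt->fd[fd], file);
        __set_bit(fd, fdt->open_fds);
        if (flags & O_CLOEXEC)
            __set_bit(fd, fdt->close_on_exec);
        else
            __clear_bit(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
            filp_close(tofree, files);      /* blocking close outside the lock */

        return fd;

    Ebusy:
        spin_unlock(&files->file_lock);
        return -EBUSY;
    }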
1269 struct files_struct *files = current->files; in replace_fd() local
1277 spin_lock(&files->file_lock); in replace_fd()
1278 err = expand_files(files, fd); in replace_fd()
1281 return do_dup2(files, file, fd, flags); in replace_fd()
1284 spin_unlock(&files->file_lock); in replace_fd()
1348 struct files_struct *files = current->files; in ksys_dup3() local
1359 spin_lock(&files->file_lock); in ksys_dup3()
1360 err = expand_files(files, newfd); in ksys_dup3()
1361 file = files_lookup_fd_locked(files, oldfd); in ksys_dup3()
1369 return do_dup2(files, file, newfd, flags); in ksys_dup3()
1374 spin_unlock(&files->file_lock); in ksys_dup3()
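ksys_dup3() (source lines 1348-1374) lines the pieces up: the table expansion and the oldfd lookup happen under one lock acquisition, so oldfd cannot vanish between the check and the do_dup2() install; replace_fd() (lines 1269-1284) is the in-kernel cousin with the same shape. Roughly:

    /* Body of ksys_dup3(oldfd, newfd, flags), condensed: */
    struct files_struct *files = current->files;
    struct file *file;
    int err;

    if ((flags & ~O_CLOEXEC) != 0)
        return -EINVAL;
    if (unlikely(oldfd == newfd))
        return -EINVAL;
    if (newfd >= rlimit(RLIMIT_NOFILE))
        return -EBADF;

    spin_lock(&files->file_lock);
    err = expand_files(files, newfd);
    file = files_lookup_fd_locked(files, oldfd);
    if (unlikely(!file))
        goto Ebadf;
    if (unlikely(err < 0)) {
        if (err == -EMFILE)
            goto Ebadf;
        goto out_unlock;
    }
    return do_dup2(files, file, newfd, flags);  /* drops file_lock */

Ebadf:
    err = -EBADF;
out_unlock:
    spin_unlock(&files->file_lock);
    return err;

The dup2() wrapper below (the RCU lookup at source line 1391) handles the oldfd == newfd corner case it rejects: it merely validates that oldfd is open and returns it unchanged.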
1386 struct files_struct *files = current->files; in SYSCALL_DEFINE2() local
1391 f = __fget_files_rcu(files, oldfd, 0); in SYSCALL_DEFINE2()
1431 int iterate_fd(struct files_struct *files, unsigned n, in iterate_fd() argument
1437 if (!files) in iterate_fd()
1439 spin_lock(&files->file_lock); in iterate_fd()
1440 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { in iterate_fd()
1442 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()
1449 spin_unlock(&files->file_lock); in iterate_fd()
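iterate_fd() (source lines 1431-1449) is the exported walker: the whole scan runs under file_lock, so the callback must not sleep, and the first non-zero return stops the iteration. Condensed:

    int iterate_fd(struct files_struct *files, unsigned n,
                   int (*f)(const void *, struct file *, unsigned),
                   const void *p)
    {
        struct fdtable *fdt;
        int res = 0;

        if (!files)
            return 0;

        spin_lock(&files->file_lock);
        for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
            struct file *file;

            file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
            if (!file)
                continue;
            res = f(p, file, n);    /* first non-zero result ends the walk */
            if (res)
                break;
        }
        spin_unlock(&files->file_lock);
        return res;
    }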