// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
#include <linux/io_uring.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

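/*
 * A worked example (illustrative, not from the original source): on a
 * 64-bit box, ~(size_t)0 / sizeof(void *) is about 2^61, so __const_min()
 * picks INT_MAX; masking with -BITS_PER_LONG then rounds down to a
 * multiple of 64, giving 2147483584 as the ceiling for nr_open.
 */
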
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

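/*
 * Sizing sketch (example values, not part of the original file): with
 * 64-bit longs and nr = 1024, BITS_TO_LONGS(1024) = 16 words of open-fd
 * bitmap, and BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1, so BITBIT_SIZE(1024)
 * is 8 bytes: one second-level bit summarizes one first-level word.
 */
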
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in the caller; it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

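/*
 * Sizing example (illustrative numbers, not from the source): with 8-byte
 * file pointers, 1024 / sizeof(struct file *) is 128, so a request for
 * nr = 300 becomes 300/128 = 2, roundup_pow_of_two(3) = 4, then
 * 4 * 128 = 512 slots - a 4096-byte fd array, i.e. exactly one page.
 */
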
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * the caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

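/*
 * Bitmap layout sketch (example values): closing fd 70 clears bit 70 of
 * open_fds and, because word 70/64 = 1 can no longer be full, bit 1 of
 * full_fds_bits.  Conversely, once __set_open_fd() fills all 64 fds of
 * word 1 (fds 64-127), !~fdt->open_fds[1] becomes true and bit 1 of
 * full_fds_bits is set, so the allocator can skip the whole word.
 */
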
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return min(count, max_fds);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table: it
		 * may have grown a bigger fd table in the meantime, and we
		 * need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		io_uring_files_cancel(files);
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

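/*
 * Two-level search sketch (example values): with start = 70, bitbit begins
 * at word 1; if fds 64-127 are all open, bit 1 of full_fds_bits is set and
 * the search jumps straight to word 2 (fd 128) before scanning open_fds
 * bit by bit - one bit test instead of 64.
 */
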
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

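/*
 * Typical caller pattern - a sketch of how open(2)-style code pairs the
 * helpers in this file (the dentry_open() call and variable names are
 * illustrative, not code from this file):
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = dentry_open(&path, O_RDONLY, current_cred());
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);	- publishes file and consumes its refcount
 *	return fd;
 */
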
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct file *file = NULL;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);

out_unlock:
	spin_unlock(&files->file_lock);
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	file = pick_file(files, fd);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	if (fd > max_fd)
		return;

	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
				 unsigned int max_fd)
{
	while (fd <= max_fd) {
		struct file *file;

		file = pick_file(cur_fds, fd++);
		if (!file)
			continue;

		filp_close(file, cur_fds);
		cond_resched();
	}
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	unsigned int cur_max;
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	rcu_read_lock();
	cur_max = files_fdtable(cur_fds)->max_fds;
	rcu_read_unlock();

	/* cap to last valid index into fdtable */
	cur_max--;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the requested range is greater than the current maximum,
		 * we're closing everything so only copy all file descriptors
		 * beneath the lowest file descriptor.
		 */
		if (max_fd >= cur_max)
			max_unshare_fds = fd;

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	max_fd = min(max_fd, cur_max);

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to
		 * install the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

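/*
 * Userspace view (an illustrative sketch, not part of this file):
 * close_range(3, ~0U, CLOSE_RANGE_CLOEXEC) marks every fd from 3 upward
 * close-on-exec in one call, while close_range(3, ~0U, CLOSE_RANGE_UNSHARE)
 * first unshares the table so a CLONE_FILES sibling keeps its descriptors.
 */
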
/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file,
 * followed by an fput().
 */
int close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return 0;

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask, unsigned int refs)
{
	struct file *file;

	rcu_read_lock();
loop:
	file = files_lookup_fd_rcu(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
				  unsigned int refs)
{
	return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0, 1);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

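/*
 * Caller-side sketch (the standard fdget()/fdput() pairing, not code from
 * this file): the flag bits returned by __fget_light() travel inside
 * struct fd and tell fdput() whether a reference was actually taken:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	... use f.file for the duration of the syscall ...
 *	fdput(f);
 */
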
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * still not finished descriptor.  NB: OpenBSD avoids that at the
	 * price of extra work in their equivalent of fget() - they insert
	 * struct file immediately after grabbing descriptor, mark it larval
	 * if more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 *
 * @fd: fd to install into (if negative, a new fd will be allocated)
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd, or a negative errno on error.
 */
int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	if (fd < 0) {
		new_fd = get_unused_fd_flags(o_flags);
		if (new_fd < 0)
			return new_fd;
	} else {
		new_fd = fd;
	}

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			if (fd < 0)
				put_unused_fd(new_fd);
			return error;
		}
	}

	if (fd < 0) {
		fd_install(new_fd, get_file(file));
	} else {
		error = replace_fd(new_fd, file, o_flags);
		if (error)
			return error;
	}

	/* Bump the sock usage counts, if any. */
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;

	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);