/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/shmem_fs.h>
#include <linux/compat.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

 out:
	return error;
}
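/*
 * Example (userspace sketch, assuming an already-open descriptor "fd";
 * needs <fcntl.h>): the usual way the setfl() path above is exercised:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags != -1)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * Only the bits in SETFL_MASK are honoured; clearing O_APPEND on an
 * append-only inode, or setting O_NOATIME without ownership, fails with
 * -EPERM as checked above.
 */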
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int who = arg, ret = 0;

	type = PIDTYPE_PID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
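/*
 * Example (userspace sketch, hypothetical descriptor "fd"; needs <fcntl.h>
 * and <unistd.h>): F_SETOWN stores a pid (or, if negative, a process group)
 * via f_setown() above, and F_GETOWN reads it back through f_getown(), which
 * returns a negated value for process groups:
 *
 *	fcntl(fd, F_SETOWN, getpid());		// deliver SIGIO/SIGURG to us
 *	pid_t owner = fcntl(fd, F_GETOWN);	// negative => process group
 */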
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type, ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;
	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;
	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;
	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();
	return ret;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;
	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;
	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
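/*
 * Example (userspace sketch; needs <fcntl.h>): the *_EX variants handled by
 * f_setown_ex()/f_getown_ex() above avoid the sign ambiguity of F_GETOWN by
 * carrying an explicit owner type:
 *
 *	struct f_owner_ex own = {
 *		.type = F_OWNER_TID,
 *		.pid  = gettid(),	// or syscall(SYS_gettid) on older libcs
 *	};
 *	fcntl(fd, F_SETOWN_EX, &own);
 *	fcntl(fd, F_GETOWN_EX, &own);	// own.type / own.pid filled in
 */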
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);
	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif
static bool rw_hint_valid(enum rw_hint hint)
{
	switch (hint) {
	case RWF_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}
static long fcntl_rw_hint(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	enum rw_hint hint;
	u64 h;

	switch (cmd) {
	case F_GET_FILE_RW_HINT:
		h = file_write_hint(file);
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_FILE_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		spin_lock(&file->f_lock);
		file->f_write_hint = hint;
		spin_unlock(&file->f_lock);
		return 0;
	case F_GET_RW_HINT:
		h = inode->i_write_hint;
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		inode_lock(inode);
		inode->i_write_hint = hint;
		inode_unlock(inode);
		return 0;
	default:
		return -EINVAL;
	}
}
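/*
 * Example (userspace sketch; needs <fcntl.h>, plus the F_*_RW_HINT / RWH_*
 * constants from <linux/fcntl.h> on older libcs): the hint is passed by
 * pointer as a u64, matching the copy_from_user() of "h" above:
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *	fcntl(fd, F_SET_RW_HINT, &hint);	// per-inode hint
 *	fcntl(fd, F_GET_RW_HINT, &hint);	// read it back
 */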
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = shmem_fcntl(filp, cmd, arg);
		break;
	case F_GET_RW_HINT:
	case F_SET_RW_HINT:
	case F_GET_FILE_RW_HINT:
	case F_SET_FILE_RW_HINT:
		err = fcntl_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}
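/*
 * Example (userspace sketch; needs <fcntl.h> and <string.h>): the F_GETLK /
 * F_SETLK arms of do_fcntl() above take a struct flock by pointer, copied in
 * and out with copy_from_user()/copy_to_user():
 *
 *	struct flock fl;
 *	memset(&fl, 0, sizeof(fl));
 *	fl.l_type   = F_WRLCK;		// whole-file write lock
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start  = 0;
 *	fl.l_len    = 0;
 *	if (fcntl(fd, F_SETLK, &fl) == -1)
 *		fcntl(fd, F_GETLK, &fl);	// fl now describes a blocker
 */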
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif
#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields
static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway.
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}
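/*
 * Worked example of the truncation above: if a 64-bit task holds a lock with
 * l_start = 0 and l_len = 1ULL << 40, a compat F_GETLK caller sees l_len
 * clamped to COMPAT_OFF_T_MAX; only an l_start beyond COMPAT_OFF_T_MAX makes
 * the call fail with -EOVERFLOW.
 */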
COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return compat_sys_fcntl64(fd, cmd, arg);
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd = fd;
			if (!do_send_sig_info(signum, &si, p, group))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}
struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}
/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}
/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
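/*
 * Example (sketch of the usual driver-side pattern; "struct scull_dev" and
 * its "async_queue" field are hypothetical): a character driver's
 * file_operations ->fasync method normally just forwards to fasync_helper():
 *
 *	static int scull_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct scull_dev *dev = filp->private_data;
 *
 *		return fasync_helper(fd, filp, on, &dev->async_queue);
 *	}
 */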
/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);
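/*
 * Example (sketch, same hypothetical driver as above): when new data arrives,
 * the driver notifies its F_SETOWN/FASYNC subscribers by kicking the list
 * that fasync_helper() built; kill_fasync() then funnels into send_sigio():
 *
 *	if (dev->async_queue)
 *		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 */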
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)