/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}
static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}
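
/*
 * The two helpers above back the F_SETFD/F_GETFD fcntl commands.  A
 * minimal sketch of the userspace view (illustrative only):
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 *
 * after which fd is closed automatically across execve().
 */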
/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */
static int locate_fd(struct files_struct *files,
			    struct file *file, unsigned int orig_start)
{
	unsigned int newfd;
	unsigned int start;
	int error;
	struct fdtable *fdt;

	error = -EINVAL;
	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

repeat:
	fdt = files_fdtable(files);
	/*
	 * Someone might have closed fd's in the range
	 * orig_start..fdt->next_fd
	 */
	start = orig_start;
	if (start < files->next_fd)
		start = files->next_fd;

	newfd = start;
	if (start < fdt->max_fdset) {
		newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
			fdt->max_fdset, start);
	}

	error = -EMFILE;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out;
	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	/*
	 * We reacquired files_lock, so we are safe as long as
	 * we reacquire the fdtable pointer and use it while holding
	 * the lock, no one can free it during that time.
	 */
	if (start <= files->next_fd)
		files->next_fd = newfd + 1;

	error = newfd;
out:
	return error;
}
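
/*
 * Note on the repeat loop above: expand_files() returns a negative
 * errno on failure, 0 if the fd arrays were already large enough, and
 * a positive value if it had to grow them (which may drop and retake
 * file_lock while allocating) - in that last case the bitmap search
 * must be redone from scratch, hence the goto repeat.
 */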
static int dupfd(struct file *file, unsigned int start)
{
	struct files_struct * files = current->files;
	struct fdtable *fdt;
	int fd;

	spin_lock(&files->file_lock);
	fd = locate_fd(files, file, start);
	if (fd >= 0) {
		/* locate_fd() may have expanded fdtable, load the ptr */
		fdt = files_fdtable(files);
		FD_SET(fd, fdt->open_fds);
		FD_CLR(fd, fdt->close_on_exec);
		spin_unlock(&files->file_lock);
		fd_install(fd, file);
	} else {
		spin_unlock(&files->file_lock);
		fput(file);
	}
	return fd;
}
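
/*
 * dupfd() backs both sys_dup() and the F_DUPFD fcntl command; note
 * that it consumes the caller's file reference on failure.  Userspace
 * sketch (illustrative): duplicate fd onto the lowest free descriptor
 * numbered 10 or above:
 *
 *	int newfd = fcntl(fd, F_DUPFD, 10);
 */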
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	int err = -EBADF;
	struct file * file, *tofree;
	struct files_struct * files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	if (!(file = fcheck(oldfd)))
		goto out_unlock;
	err = newfd;
	if (newfd == oldfd)
		goto out_unlock;
	err = -EBADF;
	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_unlock;
	get_file(file);			/* We are now finished with oldfd */

	err = expand_files(files, newfd);
	if (err < 0)
		goto out_fput;

	/* To avoid races with open() and dup(), we will mark the fd as
	 * in-use in the open-file bitmap throughout the entire dup2()
	 * process.  This is quite safe: do_close() uses the fd array
	 * entry, not the bitmap, to decide what work needs to be
	 * done.  --sct */
	/* Doesn't work. open() might be there first. --AV */

	/* Yes. It's a race. In user space. Nothing sane to do */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_fput;

	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);
	err = newfd;
out:
	return err;
out_unlock:
	spin_unlock(&files->file_lock);
	goto out;
out_fput:
	spin_unlock(&files->file_lock);
	fput(file);
	goto out;
}
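
/*
 * The classic userspace use of this syscall is shell-style redirection
 * (illustrative sketch; error handling omitted):
 *
 *	int fd = open("log.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	dup2(fd, STDOUT_FILENO);
 *	close(fd);
 */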
asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file * file = fget(fildes);

	if (file)
		ret = dupfd(file, 0);
	return ret;
}
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	lock_kernel();
	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	unlock_kernel();
	return error;
}
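
/*
 * setfl() implements F_SETFL; only the bits in SETFL_MASK may change.
 * Userspace sketch (illustrative) for switching a descriptor to
 * non-blocking mode:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */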
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}
int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, current->uid, current->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_pid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}
pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_nr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
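
/*
 * Typical userspace sequence to receive SIGIO on a descriptor
 * (illustrative sketch; error handling omitted):
 *
 *	signal(SIGIO, sigio_handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * A negative F_SETOWN argument selects a whole process group, which
 * is why f_getown() above negates the pid for PIDTYPE_PGID.
 */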
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
		case F_DUPFD:
			get_file(filp);
			err = dupfd(filp, arg);
			break;
		case F_GETFD:
			err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
			break;
		case F_SETFD:
			err = 0;
			set_close_on_exec(fd, arg & FD_CLOEXEC);
			break;
		case F_GETFL:
			err = filp->f_flags;
			break;
		case F_SETFL:
			err = setfl(fd, filp, arg);
			break;
		case F_GETLK:
			err = fcntl_getlk(filp, (struct flock __user *) arg);
			break;
		case F_SETLK:
		case F_SETLKW:
			err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
			break;
		case F_GETOWN:
			/*
			 * XXX If f_owner is a process group, the
			 * negative return value will get converted
			 * into an error.  Oops.  If we keep the
			 * current syscall conventions, the only way
			 * to fix this will be in libc.
			 */
			err = f_getown(filp);
			force_successful_syscall_return();
			break;
		case F_SETOWN:
			err = f_setown(filp, arg, 1);
			break;
		case F_GETSIG:
			err = filp->f_owner.signum;
			break;
		case F_SETSIG:
			/* arg == 0 restores default behaviour. */
			if (!valid_signal(arg)) {
				break;
			}
			err = 0;
			filp->f_owner.signum = arg;
			break;
		case F_GETLEASE:
			err = fcntl_getlease(filp);
			break;
		case F_SETLEASE:
			err = fcntl_setlease(fd, filp, arg);
			break;
		case F_NOTIFY:
			err = fcntl_dirnotify(fd, filp, arg);
			break;
		default:
			break;
	}
	return err;
}
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}
#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file * filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	switch (cmd) {
		case F_GETLK64:
			err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
			break;
		case F_SETLK64:
		case F_SETLKW64:
			err = fcntl_setlk64(fd, filp, cmd,
					(struct flock64 __user *) arg);
			break;
		default:
			err = do_fcntl(fd, cmd, arg, filp);
			break;
	}
	fput(filp);
out:
	return err;
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
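
/*
 * When a process selects a realtime signal via F_SETSIG, the POLL_*
 * reason supplied by the driver is translated through band_table into
 * the si_band bits its handler sees.  Userspace sketch (illustrative;
 * handler installed with sigaction and SA_SIGINFO):
 *
 *	void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_band & POLLIN)
 *			;	// descriptor si->si_fd is readable
 *	}
 */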
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = fown->signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!group_send_sig_info(fown->signum, &si, p))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
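
/*
 * send_sigurg() is called from the socket layer when TCP urgent
 * (out-of-band) data arrives.  Userspace opts in with (illustrative
 * sketch):
 *
 *	signal(SIGURG, sigurg_handler);
 *	fcntl(sock, F_SETOWN, getpid());
 *
 * No O_ASYNC flag is needed; SIGURG only requires descriptor ownership.
 */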
static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
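
/*
 * Sketch of how a character driver typically wires this up (the
 * my_dev structure and its async_queue field are hypothetical):
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct my_dev *dev = filp->private_data;
 *		return fasync_helper(fd, filp, on, &dev->async_queue);
 *	}
 *
 * The driver should also call fasync_helper(-1, filp, 0, ...) from its
 * release method so the entry is dropped when the file is closed.
 */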
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct * fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);
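
/*
 * A driver with readers registered on the queue above would then
 * notify them when new data arrives (hypothetical my_dev as before):
 *
 *	kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 *
 * and pass POLL_OUT instead when the device becomes writable again.
 */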
static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
	return 0;
}

module_init(fasync_init)