// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;
/*
 * Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
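
/*
 * Worked example (editorial note, assuming the usual values of
 * PIPE_DEF_BUFFERS == 16 and INR_OPEN_CUR == 1024): the default soft
 * limit is 16 * 1024 = 16384 pages per user, i.e. 64 MiB with 4 KiB
 * pages. The hard limit of 0 means "no hard limit".
 */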
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	/* lock the two pipes in address order to avoid ABBA deadlocks */
	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}
static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}
	return 1;
}
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf:	the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}
static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}
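
/*
 * Illustrative sketch (userspace view, not kernel code): merging means
 * that several small write()s can land in the same page-sized buffer,
 * so a pipe can hold more short messages than it has buffer slots.
 * Assuming a freshly created pipe:
 *
 *	char a[100], b[100];
 *	int fds[2];
 *	pipe(fds);
 *	write(fds[1], a, sizeof(a));	// starts a new buffer
 *	write(fds[1], b, sizeof(b));	// merged into the same page,
 *					// since 100 + 100 <= PAGE_SIZE
 *
 * Once pipe_buf_mark_unmergeable() has run on the last buffer (e.g.
 * after its page became shared through the splice machinery), the
 * second write would get a fresh page instead.
 */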
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
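
/*
 * Usage sketch (userspace, illustrative): opening a pipe with O_DIRECT
 * creates a "packet mode" pipe, where each write() becomes one packet
 * and each read() returns at most one packet; this is what the
 * PIPE_BUF_FLAG_PACKET path in pipe_read() above implements.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_DIRECT) == 0) {
 *		char buf[16];
 *		write(fds[1], "ab", 2);		// packet 1
 *		write(fds[1], "cd", 2);		// packet 2
 *		read(fds[0], buf, sizeof(buf));	// returns 2 ("ab"), not 4
 *	}
 */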
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = copied;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			if (!iov_iter_count(from))
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	__pipe_unlock(pipe);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			__pipe_lock(pipe);
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			__pipe_unlock(pipe);

			return put_user(count, (int __user *)arg);
		default:
			return -ENOIOCTLCMD;
	}
}
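
/*
 * Usage sketch (userspace, illustrative): FIONREAD is the documented way
 * to ask how many bytes are currently buffered in the pipe:
 *
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *
 *	int nbytes;
 *	if (ioctl(fds[0], FIONREAD, &nbytes) == 0)
 *		printf("%d bytes buffered\n", nbytes);
 */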
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
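
/*
 * Usage sketch (userspace, illustrative): the mask computed above is what
 * a poll() caller observes; POLLIN while data is buffered, POLLOUT while
 * there is room, POLLHUP on the read end once the last writer is gone:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fds[0], .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// a read() will not block now
 */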
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
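
/*
 * Usage sketch (userspace, illustrative): the fasync lists maintained
 * above back the classic SIGIO setup:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	fcntl(fds[0], F_SETOWN, getpid());		// deliver SIGIO to us
 *	fcntl(fds[0], F_SETFL,
 *	      fcntl(fds[0], F_GETFL) | O_ASYNC);	// ends up in pipe_fasync()
 */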
static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->buffers = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}
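
/*
 * Illustrative sketch (userspace): the name generated here is what shows
 * up when a pipe fd is resolved through procfs:
 *
 *	#include <unistd.h>
 *
 *	char name[64];
 *	ssize_t n = readlink("/proc/self/fd/3", name, sizeof(name) - 1);
 *	if (n > 0)
 *		name[n] = '\0';		// e.g. "pipe:[91852]"
 *
 * (fd number 3 and the inode value are just examples.)
 */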
static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	return 0;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
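
/*
 * Usage sketch (userspace, illustrative): the calling convention that
 * do_pipe2() implements:
 *
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	if (pipe(fds) == 0) {
 *		char buf[2];
 *		// fds[0] is the read end, fds[1] the write end
 *		write(fds[1], "hi", 2);
 *		read(fds[0], buf, 2);
 *	}
 */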
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
};
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
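
/*
 * Worked example (editorial note): with 4 KiB pages, round_pipe_size(1)
 * returns 4096 (the POSIX minimum), round_pipe_size(4097) returns 8192,
 * and round_pipe_size(1 << 20) returns 1 MiB unchanged, since that is
 * already a power of two. Anything above 2^31 yields 0, i.e. an error.
 */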
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_pages;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_pages = size >> PAGE_SHIFT;

	if (!nr_pages)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_pages > pipe->buffers &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);

	if (nr_pages > pipe->buffers &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_pages, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
	return ret;
}
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
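
/*
 * Usage sketch (userspace, illustrative): resizing a pipe from userspace
 * goes through this function:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	long sz = fcntl(fds[1], F_SETPIPE_SZ, 1 << 20);	// ask for 1 MiB
 *	long cur = fcntl(fds[1], F_GETPIPE_SZ);		// read capacity back
 *
 * Note that the kernel may round the request up (see round_pipe_size()),
 * so the return value, not the argument, is the authoritative size.
 */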
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);