// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The max size that a non-root user is allowed to grow the pipe to. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

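/*
 * Illustrative sketch (not part of this file): root can raise this cap
 * at runtime from userspace, e.g.
 *
 *	# sysctl -w fs.pipe-max-size=4194304
 *
 * after which unprivileged F_SETPIPE_SZ requests up to 4 MiB succeed.
 */
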
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

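/*
 * Illustrative sketch of the unmasked-index scheme described above
 * (assumed values, not part of this file): with ring_size = 16 the mask
 * is 15 and head/tail keep incrementing without wrapping:
 *
 *	head = 258, tail = 250
 *	occupancy = head - tail = 8 buffers
 *	head slot = head & 15  = 2
 *	tail slot = tail & 15  = 10
 *
 * Because the indices are only masked at dereference time, head == tail
 * unambiguously means "empty" and an occupancy equal to the ring size
 * means "full"; no ring slot has to be sacrificed to tell the two
 * states apart.
 */
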
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->files)
		mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->files)
		mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}

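/*
 * Note for callers (descriptive, not part of the original file): the
 * pipe lock must be held on entry, and a return from pipe_wait() only
 * means "something happened", so callers re-test their condition in a
 * loop; wait_for_partner() below is the canonical example.
 */
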
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		memcg_kmem_uncharge(page, 0);
		__SetPageLocked(page);
		return 0;
	}

	return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static const struct pipe_buf_operations packet_pipe_buf_ops = {
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf: the buffer to mark
 *
 * Description:
 *	This function ensures that no future writes will be merged into the
 *	given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *	share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
	if (buf->ops == &anon_pipe_buf_ops)
		buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
	return buf->ops == &anon_pipe_buf_ops;
}

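/*
 * Illustrative example (assumed sizes, not part of this file): with an
 * already non-empty pipe, two consecutive 100-byte write()s land in the
 * same page, because the most recently written buffer is mergeable and
 * has room, so the ring holds one 200-byte buffer rather than two
 * mostly-empty pages. Buffers marked by pipe_buf_mark_unmergeable()
 * and packet-mode buffers never take this path.
 */
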
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->wait.lock);
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (was_full) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	}
	__pipe_unlock(pipe);

	if (was_full) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}

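/*
 * Illustrative userspace sketch (not part of this file): opening a pipe
 * with O_DIRECT selects packet mode, where each write() forms a packet
 * that a single read() consumes whole:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	char buf[16];
 *	pipe2(fds, O_DIRECT);
 *	write(fds[1], "ab", 2);
 *	write(fds[1], "cd", 2);
 *	read(fds[0], buf, sizeof(buf));	// returns 2 ("ab"), not 4
 */
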
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/*
	 * Only wake up if the pipe started out empty, since
	 * otherwise there should be no readers waiting.
	 *
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			buf->flags = 0;
			if (is_packetized(filp)) {
				buf->ops = &packet_pipe_buf_ops;
				buf->flags = PIPE_BUF_FLAG_PACKET;
			}
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * became empty while we dropped the lock.
		 */
		if (was_empty) {
			wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
		pipe_wait(pipe);
		was_empty = pipe_empty(head, pipe->tail);
	}
out:
	__pipe_unlock(pipe);

	/*
	 * If we do do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs.
	 */
	if (was_empty) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

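/*
 * Illustrative userspace sketch (not part of this file): FIONREAD
 * reports how many bytes are currently buffered in the pipe:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int fds[2], n;
 *	pipe(fds);
 *	write(fds[1], "hello", 5);
 *	ioctl(fds[0], FIONREAD, &n);	// n == 5
 */
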
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/*
	 * Reading only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	poll_wait(filp, &pipe->wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	if (pipe->readers || pipe->writers) {
		wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
					  unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}

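/*
 * Both limits are runtime-tunable by root (illustrative, from userspace):
 *
 *	# sysctl -w fs.pipe-user-pages-soft=16384
 *	# sysctl -w fs.pipe-user-pages-hard=32768
 *
 * Once an unprivileged user crosses the soft limit, new pipes are
 * clamped to a single buffer; crossing the hard limit makes pipe
 * creation fail outright.
 */
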
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
		pipe_bufs = 1;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	(void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;

	if (!inode)
		return -ENFILE;

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}

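/*
 * Illustrative userspace sketch (not part of this file): the classic
 * fork-and-pipe pattern these syscalls exist to serve:
 *
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);		// fds[0] = read end, fds[1] = write end
 *	if (fork() == 0) {
 *		char c;
 *		close(fds[1]);
 *		while (read(fds[0], &c, 1) == 1)
 *			;	// consume until the writer closes
 *		_exit(0);
 *	}
 *	close(fds[0]);
 *	write(fds[1], "x", 1);
 *	close(fds[1]);		// reader now sees EOF
 */
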
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}

static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}

const struct file_operations pipefifo_fops = {
	.open = fifo_open,
	.llseek = no_llseek,
	.read_iter = pipe_read,
	.write_iter = pipe_write,
	.poll = pipe_poll,
	.unlocked_ioctl = pipe_ioctl,
	.release = pipe_release,
	.fasync = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}

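/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096):
 *
 *	round_pipe_size(0)              -> 4096    (clamped up to one page)
 *	round_pipe_size(100000)         -> 131072  (next power of two, 32 pages)
 *	round_pipe_size((1UL << 31)+1)  -> 0       (larger than 2^31 fails)
 */
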
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	struct pipe_buffer *bufs;
	unsigned int size, nr_slots, head, tail, mask, n;
	unsigned long user_bufs;
	long ret = 0;

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->ring_size &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);

	if (nr_slots > pipe->ring_size &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	/*
	 * We can shrink the pipe, if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n) {
		ret = -EBUSY;
		goto out_revert_acct;
	}

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs)) {
		ret = -ENOMEM;
		goto out_revert_acct;
	}

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;
	wake_up_interruptible_all(&pipe->wait);
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (pipe == NULL)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}

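/*
 * Illustrative userspace sketch (not part of this file): resizing a
 * pipe and reading back its capacity via fcntl():
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fds[2];
 *	pipe(fds);
 *	fcntl(fds[1], F_SETPIPE_SZ, 1 << 20);	// request 1 MiB
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ);	// rounded-up capacity
 *
 * The kernel may round the request up to a power-of-two page count, so
 * callers should trust F_GETPIPE_SZ rather than the requested value.
 */
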
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}

static struct file_system_type pipe_fs_type = {
	.name = "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);