// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"
/*
 * New pipe buffers will be restricted to this size while the user is exceeding
 * their pipe buffer quota. The general pipe use case needs at least two
 * buffers: one for data yet to be read, and one for new data. If this is less
 * than two, then a write to a non-empty pipe may block even if the pipe is not
 * full. This can occur with GNU make jobserver or similar uses of pipes as
 * semaphores: multiple processes may be waiting to write tokens back to the
 * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/.
 *
 * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their
 * own risk, namely: pipe writes to non-full pipes may block until the pipe is
 * full.
 */
#define PIPE_MIN_DEF_BUFFERS 2
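
/*
 * Userspace sketch of the semaphore-style use described above (jobserver
 * protocol simplified; fd names are illustrative):
 *
 *	char token;
 *	read(jobserver_rfd, &token, 1);		// take a job slot
 *	// ... run the job ...
 *	write(jobserver_wfd, &token, 1);	// put the slot back
 *
 * With fewer than two buffers, the token write-back could block on a pipe
 * that is non-empty but not full.
 */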
/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally. This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
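
/*
 * Sketch of the unmasked-index scheme (illustrative values, not kernel API):
 * indices wrap naturally modulo 2^32 and are only masked when dereferencing
 * into the ring, so occupancy is always head - tail, even across wraparound:
 *
 *	unsigned int head = 0x80000001, tail = 0x7fffffff;
 *	unsigned int mask = pipe->ring_size - 1;	// ring_size is 2^n
 *	struct pipe_buffer *buf = &pipe->bufs[tail & mask];
 *	unsigned int occupancy = head - tail;		// == 2, no dead slot
 */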
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
	mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
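
/*
 * Taking the two locks in a fixed order (lowest address first, as the branch
 * above does) is what prevents an AB-BA deadlock when two tasks double-lock
 * the same pair of pipes concurrently, e.g. for tee(2)-style buffer
 * duplication (sketch):
 *
 *	pipe_double_lock(ipipe, opipe);
 *	// ... move or duplicate buffers between the two rings ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */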
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		put_page(page);
}
static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) != 1)
		return false;
	memcg_kmem_uncharge_page(page, 0);
	__SetPageLocked(page);
	return true;
}
/**
 * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns %true and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(generic_pipe_buf_try_steal);
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.release	= anon_pipe_buf_release,
	.try_steal	= anon_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int writers = READ_ONCE(pipe->writers);

	return !pipe_empty(head, tail) || !writers;
}
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	bool was_full, wake_next_reader = false;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	ret = 0;
	__pipe_lock(pipe);

	/*
	 * We only wake up writers if the pipe was full when we started
	 * reading in order to avoid unnecessary wakeups.
	 *
	 * But when we do wake up writers, we do so using a sync wakeup
	 * (WF_SYNC), because we want them to get going and generate more
	 * data for us.
	 */
	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
	for (;;) {
		unsigned int head = pipe->head;
		unsigned int tail = pipe->tail;
		unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
		if (pipe->note_loss) {
			struct watch_notification n;

			if (total_len < 8) {
				if (ret == 0)
					ret = -ENOBUFS;
				break;
			}

			n.type = WATCH_TYPE_META;
			n.subtype = WATCH_META_LOSS_NOTIFICATION;
			n.info = watch_sizeof(n);
			if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}
			ret += sizeof(n);
			total_len -= sizeof(n);
			pipe->note_loss = false;
		}
#endif

		if (!pipe_empty(head, tail)) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len) {
				if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
					if (ret == 0)
						ret = -ENOBUFS;
					break;
				}
				chars = total_len;
			}

			error = pipe_buf_confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				pipe_buf_release(pipe, buf);
				spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
				if (buf->flags & PIPE_BUF_FLAG_LOSS)
					pipe->note_loss = true;
#endif
				tail++;
				pipe->tail = tail;
				spin_unlock_irq(&pipe->rd_wait.lock);
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
			if (!pipe_empty(head, tail))	/* More to do? */
				continue;
		}

		if (!pipe->writers)
			break;
		if (ret)
			break;
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		__pipe_unlock(pipe);

		/*
		 * We only get here if we didn't actually read anything.
		 *
		 * However, we could have seen (and removed) a zero-sized
		 * pipe buffer, and might have made space in the buffers
		 * that way.
		 *
		 * You can't make zero-sized pipe buffers by doing an empty
		 * write (not even in packet mode), but they can happen if
		 * the writer gets an EFAULT when trying to fill a buffer
		 * that already got allocated and inserted in the buffer
		 * array.
		 *
		 * So we still need to wake up any pending writers in the
		 * _very_ unlikely case that the pipe was full, but we got
		 * no data.
		 */
		if (unlikely(was_full))
			wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		/*
		 * But because we didn't read anything, at this point we can
		 * just return directly with -ERESTARTSYS if we're interrupted,
		 * since we've done any required wakeups and there's no need
		 * to mark anything accessed. And we've dropped the lock.
		 */
		if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
			return -ERESTARTSYS;

		__pipe_lock(pipe);
		was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
		wake_next_reader = true;
	}
	if (pipe_empty(pipe->head, pipe->tail))
		wake_next_reader = false;
	__pipe_unlock(pipe);

	if (was_full)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (wake_next_reader)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static inline int is_packetized(struct file *file)
{
	return (file->f_flags & O_DIRECT) != 0;
}
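
/*
 * Packet mode is requested from userspace by opening the pipe with O_DIRECT
 * (sketch; error handling omitted):
 *
 *	int fds[2];
 *	if (pipe2(fds, O_DIRECT) == 0) {
 *		write(fds[1], "msg", 3);	// each write is one packet
 *		char buf[PIPE_BUF];
 *		read(fds[0], buf, sizeof(buf));	// returns 3; boundary kept
 *	}
 */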
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
	unsigned int head = READ_ONCE(pipe->head);
	unsigned int tail = READ_ONCE(pipe->tail);
	unsigned int max_usage = READ_ONCE(pipe->max_usage);

	return !pipe_full(head, tail, max_usage) ||
	       !READ_ONCE(pipe->readers);
}
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head;
	ssize_t ret = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;
	bool was_empty = false;
	bool wake_next_writer = false;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		ret = -EXDEV;
		goto out;
	}
#endif

	/*
	 * If it wasn't empty we try to merge new data into
	 * the last buffer.
	 *
	 * That naturally merges small writes, but it also
	 * page-aligns the rest of the writes for large writes
	 * spanning multiple pages.
	 */
	head = pipe->head;
	was_empty = pipe_empty(head, pipe->tail);
	chars = total_len & (PAGE_SIZE-1);
	if (chars && !was_empty) {
		unsigned int mask = pipe->ring_size - 1;
		struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
		int offset = buf->offset + buf->len;

		if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
		    offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}

			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		head = pipe->head;
		if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
			unsigned int mask = pipe->ring_size - 1;
			struct pipe_buffer *buf = &pipe->bufs[head & mask];
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}

			/* Allocate a slot in the ring in advance and attach an
			 * empty buffer. If we fault or otherwise fail to use
			 * it, either the reader will consume it or it'll still
			 * be there for the next write.
			 */
			spin_lock_irq(&pipe->rd_wait.lock);

			head = pipe->head;
			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
				spin_unlock_irq(&pipe->rd_wait.lock);
				continue;
			}

			pipe->head = head + 1;
			spin_unlock_irq(&pipe->rd_wait.lock);

			/* Insert it into the buffer array */
			buf = &pipe->bufs[head & mask];
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = 0;
			if (is_packetized(filp))
				buf->flags = PIPE_BUF_FLAG_PACKET;
			else
				buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
			pipe->tmp_page = NULL;

			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += copied;
			buf->offset = 0;
			buf->len = copied;

			if (!iov_iter_count(from))
				break;
		}

		if (!pipe_full(head, pipe->tail, pipe->max_usage))
			continue;

		/* Wait for buffer space to become available. */
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/*
		 * We're going to release the pipe lock and wait for more
		 * space. We wake up any readers if necessary, and then
		 * after waiting we need to re-check whether the pipe
		 * become empty while we dropped the lock.
		 */
		__pipe_unlock(pipe);
		if (was_empty)
			wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
		__pipe_lock(pipe);
		was_empty = pipe_empty(pipe->head, pipe->tail);
		wake_next_writer = true;
	}
out:
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		wake_next_writer = false;
	__pipe_unlock(pipe);

	/*
	 * If we do do a wakeup event, we do a 'sync' wakeup, because we
	 * want the reader to start processing things asap, rather than
	 * leave the data pending.
	 *
	 * This is particularly important for small writes, because of
	 * how (for example) the GNU make jobserver uses small writes to
	 * wake up pending jobs
	 *
	 * Epoll nonsensically wants a wakeup whether the pipe
	 * was already empty or not.
	 */
	if (was_empty || pipe->poll_usage)
		wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	if (wake_next_writer)
		wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
	if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
		int err = file_update_time(filp);
		if (err)
			ret = err;
		sb_end_write(file_inode(filp)->i_sb);
	}
	return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
	__poll_t mask;
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int head, tail;

	/* Epoll has some historical nasty semantics, this enables them */
	pipe->poll_usage = 1;

	/*
	 * Reading pipe state only -- no need for acquiring the semaphore.
	 *
	 * But because this is racy, the code has to add the
	 * entry to the poll table _first_ ..
	 */
	if (filp->f_mode & FMODE_READ)
		poll_wait(filp, &pipe->rd_wait, wait);
	if (filp->f_mode & FMODE_WRITE)
		poll_wait(filp, &pipe->wr_wait, wait);

	/*
	 * .. and only then can you do the racy tests. That way,
	 * if something changes and you got it wrong, the poll
	 * table entry will wake you up and fix it.
	 */
	head = READ_ONCE(pipe->head);
	tail = READ_ONCE(pipe->tail);

	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		if (!pipe_empty(head, tail))
			mask |= EPOLLIN | EPOLLRDNORM;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= EPOLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (!pipe_full(head, tail, pipe->max_usage))
			mask |= EPOLLOUT | EPOLLWRNORM;
		/*
		 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= EPOLLERR;
	}

	return mask;
}
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
	int kill = 0;

	spin_lock(&inode->i_lock);
	if (!--pipe->files) {
		inode->i_pipe = NULL;
		kill = 1;
	}
	spin_unlock(&inode->i_lock);

	if (kill)
		free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	__pipe_lock(pipe);
	if (file->f_mode & FMODE_READ)
		pipe->readers--;
	if (file->f_mode & FMODE_WRITE)
		pipe->writers--;

	/* Was that the last reader or writer, but not the other side? */
	if (!pipe->readers != !pipe->writers) {
		wake_up_interruptible_all(&pipe->rd_wait);
		wake_up_interruptible_all(&pipe->wr_wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
	struct pipe_inode_info *pipe = filp->private_data;
	int retval = 0;

	__pipe_lock(pipe);
	if (filp->f_mode & FMODE_READ)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0 && (filp->f_mode & FMODE_READ))
			/* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	__pipe_unlock(pipe);
	return retval;
}
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new)
{
	return atomic_long_add_return(new - old, &user->pipe_bufs);
}
bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

	return soft_limit && user_bufs > soft_limit;
}

bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

	return hard_limit && user_bufs > hard_limit;
}
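
/*
 * Both limits are observable and tunable from userspace (sketch; values
 * illustrative, the soft default is PIPE_DEF_BUFFERS * INR_OPEN_CUR pages):
 *
 *	# cat /proc/sys/fs/pipe-user-pages-soft
 *	16384
 *	# echo 32768 > /proc/sys/fs/pipe-user-pages-soft
 *
 * A value of 0 disables the corresponding check, which is the default for
 * the hard limit.
 */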
bool pipe_is_unprivileged_user(void)
{
	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
struct pipe_inode_info *alloc_pipe_info(void)
{
	struct pipe_inode_info *pipe;
	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
	struct user_struct *user = get_current_user();
	unsigned long user_bufs;
	unsigned int max_size = READ_ONCE(pipe_max_size);

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
	if (pipe == NULL)
		goto out_free_uid;

	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
		pipe_bufs = max_size >> PAGE_SHIFT;

	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

	if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
		user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
		pipe_bufs = PIPE_MIN_DEF_BUFFERS;
	}

	if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
		goto out_revert_acct;

	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
			     GFP_KERNEL_ACCOUNT);

	if (pipe->bufs) {
		init_waitqueue_head(&pipe->rd_wait);
		init_waitqueue_head(&pipe->wr_wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->max_usage = pipe_bufs;
		pipe->ring_size = pipe_bufs;
		pipe->nr_accounted = pipe_bufs;
		pipe->user = user;
		mutex_init(&pipe->mutex);
		return pipe;
	}

out_revert_acct:
	(void) account_pipe_buffers(user, pipe_bufs, 0);
	kfree(pipe);
out_free_uid:
	free_uid(user);
	return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue) {
		watch_queue_clear(pipe->watch_queue);
		put_watch_queue(pipe->watch_queue);
	}
#endif

	(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
	free_uid(pipe->user);
	for (i = 0; i < pipe->ring_size; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info();
	if (!pipe)
		goto fail_iput;

	inode->i_pipe = pipe;
	pipe->files = 1;
	pipe->readers = pipe->writers = 1;
	inode->i_fop = &pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
	struct inode *inode = get_pipe_inode();
	struct file *f;
	int error;

	if (!inode)
		return -ENFILE;

	if (flags & O_NOTIFICATION_PIPE) {
		error = watch_queue_init(inode->i_pipe);
		if (error) {
			free_pipe_info(inode->i_pipe);
			iput(inode);
			return error;
		}
	}

	f = alloc_file_pseudo(inode, pipe_mnt, "",
				O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
				&pipefifo_fops);
	if (IS_ERR(f)) {
		free_pipe_info(inode->i_pipe);
		iput(inode);
		return PTR_ERR(f);
	}

	f->private_data = inode->i_pipe;

	res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
				  &pipefifo_fops);
	if (IS_ERR(res[0])) {
		put_pipe_info(inode, inode->i_pipe);
		fput(f);
		return PTR_ERR(res[0]);
	}
	res[0]->private_data = inode->i_pipe;
	res[1] = f;
	stream_open(inode, res[0]);
	stream_open(inode, res[1]);
	return 0;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	error = create_pipe_files(files, flags);
	if (error)
		return error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd[0] = fdr;
	fd[1] = fdw;
	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	fput(files[0]);
	fput(files[1]);
	return error;
}
int do_pipe_flags(int *fd, int flags)
{
	struct file *files[2];
	int error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		fd_install(fd[0], files[0]);
		fd_install(fd[1], files[1]);
	}
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
	struct file *files[2];
	int fd[2];
	int error;

	error = __do_pipe_flags(fd, files, flags);
	if (!error) {
		if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
			fput(files[0]);
			fput(files[1]);
			put_unused_fd(fd[0]);
			put_unused_fd(fd[1]);
			error = -EFAULT;
		} else {
			fd_install(fd[0], files[0]);
			fd_install(fd[1], files[1]);
		}
	}
	return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return do_pipe2(fildes, 0);
}
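
/*
 * Typical userspace use of the syscalls above (sketch; error handling
 * abbreviated):
 *
 *	int fds[2];
 *
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
 *		return -1;
 *	write(fds[1], "hi", 2);
 *	char buf[2];
 *	read(fds[0], buf, sizeof(buf));
 */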
/*
 * This is the stupid "wait for pipe to be readable or writable"
 * model.
 *
 * See pipe_read/write() for the proper kind of exclusive wait,
 * but that requires that we wake up any other readers/writers
 * if we then do not end up reading everything (ie the whole
 * "wake_next_reader/writer" logic in pipe_read/write()).
 */
void pipe_wait_readable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe));
	pipe_lock(pipe);
}

void pipe_wait_writable(struct pipe_inode_info *pipe)
{
	pipe_unlock(pipe);
	wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe));
	pipe_lock(pipe);
}
/*
 * This depends on both the wait (here) and the wakeup (wake_up_partner)
 * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot
 * race with the count check and waitqueue prep.
 *
 * Normally in order to avoid races, you'd do the prepare_to_wait() first,
 * then check the condition you're waiting for, and only then sleep. But
 * because of the pipe lock, we can check the condition before being on
 * the wait queue.
 *
 * We use the 'rd_wait' waitqueue for pipe partner waiting.
 */
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
	DEFINE_WAIT(rdwait);
	int cur = *cnt;

	while (cur == *cnt) {
		prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
		pipe_unlock(pipe);
		schedule();
		finish_wait(&pipe->rd_wait, &rdwait);
		pipe_lock(pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
	wake_up_interruptible_all(&pipe->rd_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
	int ret;

	filp->f_version = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_pipe) {
		pipe = inode->i_pipe;
		pipe->files++;
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;
		pipe->files = 1;
		spin_lock(&inode->i_lock);
		if (unlikely(inode->i_pipe)) {
			inode->i_pipe->files++;
			spin_unlock(&inode->i_lock);
			free_pipe_info(pipe);
			pipe = inode->i_pipe;
		} else {
			inode->i_pipe = pipe;
			spin_unlock(&inode->i_lock);
		}
	}
	filp->private_data = pipe;
	/* OK, we have a pipe and it's pinned down */

	__pipe_lock(pipe);

	/* We can only do regular read/write on fifos */
	stream_open(inode, filp);

	switch (filp->f_mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress EPOLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				if (wait_for_partner(pipe, &pipe->w_counter))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(pipe);

		if (!is_pipe && !pipe->readers) {
			if (wait_for_partner(pipe, &pipe->r_counter))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(pipe);
		break;

	default:
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	__pipe_unlock(pipe);
	return 0;

err_rd:
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wr_wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	if (!--pipe->writers)
		wake_up_interruptible_all(&pipe->rd_wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	__pipe_unlock(pipe);

	put_pipe_info(inode, pipe);
	return ret;
}
const struct file_operations pipefifo_fops = {
	.open		= fifo_open,
	.llseek		= no_llseek,
	.read_iter	= pipe_read,
	.write_iter	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.release	= pipe_release,
	.fasync		= pipe_fasync,
	.splice_write	= iter_file_splice_write,
};
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
	if (size > (1U << 31))
		return 0;

	/* Minimum pipe size, as required by POSIX */
	if (size < PAGE_SIZE)
		return PAGE_SIZE;

	return roundup_pow_of_two(size);
}
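
/*
 * The rounding is user-visible through fcntl() (sketch; the returned size
 * depends on PAGE_SIZE and the configured limits):
 *
 *	fcntl(fd, F_SETPIPE_SZ, 100 * 1024);	// ask for 100 KiB
 *	long sz = fcntl(fd, F_GETPIPE_SZ);	// 131072 with 4 KiB pages
 */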
/*
 * Resize the pipe ring to a number of slots.
 */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
	struct pipe_buffer *bufs;
	unsigned int head, tail, mask, n;

	/*
	 * We can shrink the pipe, if arg is greater than the ring occupancy.
	 * Since we don't expect a lot of shrink+grow operations, just free and
	 * allocate again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	n = pipe_occupancy(pipe->head, pipe->tail);
	if (nr_slots < n)
		return -EBUSY;

	bufs = kcalloc(nr_slots, sizeof(*bufs),
		       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indices.
	 */
	if (n > 0) {
		unsigned int h = head & mask;
		unsigned int t = tail & mask;
		if (h > t) {
			memcpy(bufs, pipe->bufs + t,
			       n * sizeof(struct pipe_buffer));
		} else {
			unsigned int tsize = pipe->ring_size - t;
			if (h > 0)
				memcpy(bufs + tsize, pipe->bufs,
				       h * sizeof(struct pipe_buffer));
			memcpy(bufs, pipe->bufs + t,
			       tsize * sizeof(struct pipe_buffer));
		}
	}

	head = n;
	tail = 0;

	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->ring_size = nr_slots;
	if (pipe->max_usage > nr_slots)
		pipe->max_usage = nr_slots;
	pipe->tail = tail;
	pipe->head = head;

	/* This might have made more room for writers */
	wake_up_interruptible(&pipe->wr_wait);
	return 0;
}
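
/*
 * Worked example of the copy above (illustrative values): ring_size = 8,
 * tail = 6, head = 10, so n = 4 and the occupied slots are 6, 7, 0, 1.
 * Then t = 6, h = 2, and since h <= t the wrap branch runs: tsize = 2,
 * old slots [6,7] move to bufs[0..1] and old slots [0,1] to bufs[2..3].
 * Afterwards tail = 0 and head = 4 index the new, linear ring.
 */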
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or return -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
	unsigned long user_bufs;
	unsigned int nr_slots, size;
	long ret = 0;

#ifdef CONFIG_WATCH_QUEUE
	if (pipe->watch_queue)
		return -EBUSY;
#endif

	size = round_pipe_size(arg);
	nr_slots = size >> PAGE_SHIFT;

	if (!nr_slots)
		return -EINVAL;

	/*
	 * If trying to increase the pipe capacity, check that an
	 * unprivileged user is not trying to exceed various limits
	 * (soft limit check here, hard limit check just below).
	 * Decreasing the pipe capacity is always permitted, even
	 * if the user is currently over a limit.
	 */
	if (nr_slots > pipe->max_usage &&
			size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);

	if (nr_slots > pipe->max_usage &&
			(too_many_pipe_buffers_hard(user_bufs) ||
			 too_many_pipe_buffers_soft(user_bufs)) &&
			pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto out_revert_acct;
	}

	ret = pipe_resize_ring(pipe, nr_slots);
	if (ret < 0)
		goto out_revert_acct;

	pipe->max_usage = nr_slots;
	pipe->nr_accounted = nr_slots;
	return pipe->max_usage * PAGE_SIZE;

out_revert_acct:
	(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
	return ret;
}
/*
 * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
 * not enough to verify that this is a pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
	struct pipe_inode_info *pipe = file->private_data;

	if (file->f_op != &pipefifo_fops || !pipe)
		return NULL;
#ifdef CONFIG_WATCH_QUEUE
	if (for_splice && pipe->watch_queue)
		return NULL;
#endif
	return pipe;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file, false);
	if (!pipe)
		return -EBADF;

	__pipe_lock(pipe);

	switch (cmd) {
	case F_SETPIPE_SZ:
		ret = pipe_set_size(pipe, arg);
		break;
	case F_GETPIPE_SZ:
		ret = pipe->max_usage * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	__pipe_unlock(pipe);
	return ret;
}
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->ops = &pipefs_ops;
	ctx->dops = &pipefs_dentry_operations;
	return 0;
}
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.init_fs_context = pipefs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);