pipe: Use head and tail pointers for the ring, not cursor and length
[platform/kernel/linux-starfive.git] / fs / pipe.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include "internal.h"

/*
 * The maximum size to which a non-root user is allowed to grow a pipe.
 * Can be set by root in /proc/sys/fs/pipe-max-size.
 */
unsigned int pipe_max_size = 1048576;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;

/*
 * We use head and tail indices that aren't masked off, except at the point of
 * dereference, but rather they're allowed to wrap naturally.  This means there
 * isn't a dead spot in the buffer, but the ring has to be a power of two and
 * <= 2^31.
 * -- David Howells 2019-09-23.
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
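
/*
 * Illustrative sketch (added; not part of the original file): the
 * helpers from <linux/pipe_fs_i.h> operate on the unmasked indices,
 * and only a dereference applies the power-of-two mask.  Assuming a
 * ring of 16 slots whose indices have wrapped around 2^32:
 *
 *      unsigned int head = 0x00000002, tail = 0xfffffffe;
 *
 *      pipe_empty(head, tail);         // head == tail        -> false
 *      pipe_occupancy(head, tail);     // head - tail         -> 4
 *      pipe_full(head, tail, 16);      // occupancy >= size   -> false
 *      buf = &pipe->bufs[tail & (16 - 1)];     // mask applied here only
 */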

static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
        if (pipe->files)
                mutex_lock_nested(&pipe->mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
        /*
         * pipe_lock() nests non-pipe inode locks (for writing to a file)
         */
        pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
        if (pipe->files)
                mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);

static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
        mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}

static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
        mutex_unlock(&pipe->mutex);
}

void pipe_double_lock(struct pipe_inode_info *pipe1,
                      struct pipe_inode_info *pipe2)
{
        BUG_ON(pipe1 == pipe2);

        if (pipe1 < pipe2) {
                pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                pipe_lock_nested(pipe2, I_MUTEX_CHILD);
        } else {
                pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                pipe_lock_nested(pipe1, I_MUTEX_CHILD);
        }
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
        DEFINE_WAIT(wait);

        /*
         * Pipes are system-local resources, so sleeping on them
         * is considered a noninteractive wait:
         */
        prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
        pipe_unlock(pipe);
        schedule();
        finish_wait(&pipe->wait, &wait);
        pipe_lock(pipe);
}

static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * If nobody else uses this page, and we don't already have a
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
        if (page_count(page) == 1 && !pipe->tmp_page)
                pipe->tmp_page = page;
        else
                put_page(page);
}

static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        if (page_count(page) == 1) {
                memcg_kmem_uncharge(page, 0);
                __SetPageLocked(page);
                return 0;
        }
        return 1;
}

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to attempt to steal
 *
 * Description:
 *      This function attempts to steal the &struct page attached to
 *      @buf. If successful, this function returns 0 and returns with
 *      the page locked. The caller may then reuse the page for whatever
 *      he wishes; the typical use is insertion into a different file
 *      page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
                           struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        /*
         * A reference count of one is golden: it means that the owner of
         * this page is the only one holding a reference to it. Lock the
         * page and return OK.
         */
        if (page_count(page) == 1) {
                lock_page(page);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
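
/*
 * Illustrative caller contract (a sketch added here, not code from the
 * original file): a consumer that wants to keep the page rather than
 * copy its contents would do roughly the following, via the
 * pipe_buf_steal() wrapper:
 *
 *      if (pipe_buf_steal(pipe, buf) == 0) {
 *              // 0: the page came back locked and is exclusively ours,
 *              // e.g. it may be inserted into a file's page cache
 *      } else {
 *              // non-zero: the page is still shared; copy the data
 *      }
 */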

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to get a reference to
 *
 * Description:
 *      This function grabs an extra reference to @buf. It's used in
 *      the tee() system call, when we duplicate the buffers in one
 *      pipe into another.
 */
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
        return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:       the pipe that the buffer belongs to
 * @buf:        the buffer to confirm
 *
 * Description:
 *      This function does nothing, because the generic pipe code uses
 *      pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
                             struct pipe_buffer *buf)
{
        return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:       the pipe that the buffer belongs to
 * @buf:        the buffer to put a reference to
 *
 * Description:
 *      This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        put_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = anon_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

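/*
 * Note (added for clarity): deliberately identical to anon_pipe_buf_ops.
 * pipe_buf_can_merge() compares the ops pointer, so giving a buffer this
 * distinct ops address is what makes it unmergeable.
 */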
static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = anon_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

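/* Likewise identical; the distinct address marks O_DIRECT packet buffers. */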
static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = anon_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

/**
 * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
 * @buf:        the buffer to mark
 *
 * Description:
 *      This function ensures that no future writes will be merged into the
 *      given &struct pipe_buffer. This is necessary when multiple pipe buffers
 *      share the same backing page.
 */
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
{
        if (buf->ops == &anon_pipe_buf_ops)
                buf->ops = &anon_pipe_buf_nomerge_ops;
}

static bool pipe_buf_can_merge(struct pipe_buffer *buf)
{
        return buf->ops == &anon_pipe_buf_ops;
}

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
        size_t total_len = iov_iter_count(to);
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        int do_wakeup;
        ssize_t ret;

        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);
        for (;;) {
                unsigned int head = pipe->head;
                unsigned int tail = pipe->tail;
                unsigned int mask = pipe->ring_size - 1;

                if (!pipe_empty(head, tail)) {
                        struct pipe_buffer *buf = &pipe->bufs[tail & mask];
                        size_t chars = buf->len;
                        size_t written;
                        int error;

                        if (chars > total_len)
                                chars = total_len;

                        error = pipe_buf_confirm(pipe, buf);
                        if (error) {
                                if (!ret)
                                        ret = error;
                                break;
                        }

                        written = copy_page_to_iter(buf->page, buf->offset, chars, to);
                        if (unlikely(written < chars)) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += chars;
                        buf->offset += chars;
                        buf->len -= chars;

                        /* Was it a packet buffer? Clean up and exit */
                        if (buf->flags & PIPE_BUF_FLAG_PACKET) {
                                total_len = chars;
                                buf->len = 0;
                        }

                        if (!buf->len) {
                                pipe_buf_release(pipe, buf);
                                tail++;
                                pipe->tail = tail;
                                do_wakeup = 1;
                        }
                        total_len -= chars;
                        if (!total_len)
                                break;  /* common path: read succeeded */
                        if (!pipe_empty(head, tail))    /* More to do? */
                                continue;
                }

                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        /* syscall merging: Usually we must not sleep
                         * if O_NONBLOCK is set, or if we got some data.
                         * But if a writer sleeps in kernel space, then
                         * we can wait for that data without violating POSIX.
                         */
                        if (ret)
                                break;
                        if (filp->f_flags & O_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                }
                pipe_wait(pipe);
        }
        __pipe_unlock(pipe);

        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
                file_accessed(filp);
        return ret;
}

static inline int is_packetized(struct file *file)
{
        return (file->f_flags & O_DIRECT) != 0;
}
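
/*
 * Userspace view (illustrative sketch, not part of the original file):
 * a pipe created with O_DIRECT is packetized, so each write() produces
 * one packet and a read() returns at most one packet, discarding any
 * unread remainder (see the PIPE_BUF_FLAG_PACKET handling in
 * pipe_read() above):
 *
 *      int fds[2];
 *      pipe2(fds, O_DIRECT);
 *      write(fds[1], "abcdef", 6);
 *      char buf[4];
 *      read(fds[0], buf, 4);   // returns 4; "ef" is discarded
 */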

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *filp = iocb->ki_filp;
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head, tail, max_usage, mask;
        ssize_t ret = 0;
        int do_wakeup = 0;
        size_t total_len = iov_iter_count(from);
        ssize_t chars;

        /* Null write succeeds. */
        if (unlikely(total_len == 0))
                return 0;

        __pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
        }

        tail = pipe->tail;
        head = pipe->head;
        max_usage = pipe->ring_size;
        mask = pipe->ring_size - 1;

        /* We try to merge small writes */
        chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
        if (!pipe_empty(head, tail) && chars != 0) {
                struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
                int offset = buf->offset + buf->len;

                if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
                        ret = pipe_buf_confirm(pipe, buf);
                        if (ret)
                                goto out;

                        ret = copy_page_from_iter(buf->page, offset, chars, from);
                        if (unlikely(ret < chars)) {
                                ret = -EFAULT;
                                goto out;
                        }
                        do_wakeup = 1;
                        buf->len += ret;
                        if (!iov_iter_count(from))
                                goto out;
                }
        }

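        /*
         * Illustration (comment added for clarity): two back-to-back
         * write(fd, "x", 1) calls land in the same page via the merge
         * path above, consuming one ring slot rather than two.  Packet
         * (O_DIRECT) buffers carry a different ops pointer and never
         * merge.
         */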
        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                tail = pipe->tail;
                if (!pipe_full(head, tail, max_usage)) {
                        struct pipe_buffer *buf = &pipe->bufs[head & mask];
                        struct page *page = pipe->tmp_page;
                        int copied;

                        if (!page) {
                                page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
                                }
                                pipe->tmp_page = page;
                        }
                        /* Always wake up, even if the copy fails. Otherwise
                         * we lock up (O_NONBLOCK-)readers that sleep due to
                         * syscall merging.
                         * FIXME! Is this really true?
                         */
                        do_wakeup = 1;
                        copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
                        if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
                                if (!ret)
                                        ret = -EFAULT;
                                break;
                        }
                        ret += copied;

                        /* Insert it into the buffer array */
                        buf->page = page;
                        buf->ops = &anon_pipe_buf_ops;
                        buf->offset = 0;
                        buf->len = copied;
                        buf->flags = 0;
                        if (is_packetized(filp)) {
                                buf->ops = &packet_pipe_buf_ops;
                                buf->flags = PIPE_BUF_FLAG_PACKET;
                        }

                        head++;
                        pipe->head = head;
                        pipe->tmp_page = NULL;

                        if (!iov_iter_count(from))
                                break;
                }

                if (!pipe_full(head, tail, max_usage))
                        continue;

                /* Wait for buffer space to become available. */
                if (filp->f_flags & O_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }
                if (do_wakeup) {
                        wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
out:
        __pipe_unlock(pipe);
        if (do_wakeup) {
                wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
        if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
                int err = file_update_time(filp);
                if (err)
                        ret = err;
                sb_end_write(file_inode(filp)->i_sb);
        }
        return ret;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int count;
        unsigned int head, tail, mask;

        switch (cmd) {
                case FIONREAD:
                        __pipe_lock(pipe);
                        count = 0;
                        head = pipe->head;
                        tail = pipe->tail;
                        mask = pipe->ring_size - 1;

                        while (tail != head) {
                                count += pipe->bufs[tail & mask].len;
                                tail++;
                        }
                        __pipe_unlock(pipe);

                        return put_user(count, (int __user *)arg);
                default:
                        return -ENOIOCTLCMD;
        }
}

/* No kernel lock held - fine */
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
        __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
        unsigned int head = READ_ONCE(pipe->head);
        unsigned int tail = READ_ONCE(pipe->tail);

        poll_wait(filp, &pipe->wait, wait);

        BUG_ON(pipe_occupancy(head, tail) > pipe->ring_size);

        /* Reading only -- no need for acquiring the semaphore.  */
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                if (!pipe_empty(head, tail))
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
                        mask |= EPOLLHUP;
        }

        if (filp->f_mode & FMODE_WRITE) {
                if (!pipe_full(head, tail, pipe->ring_size))
                        mask |= EPOLLOUT | EPOLLWRNORM;
                /*
                 * Most Unices do not set EPOLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
                if (!pipe->readers)
                        mask |= EPOLLERR;
        }

        return mask;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
        int kill = 0;

        spin_lock(&inode->i_lock);
        if (!--pipe->files) {
                inode->i_pipe = NULL;
                kill = 1;
        }
        spin_unlock(&inode->i_lock);

        if (kill)
                free_pipe_info(pipe);
}

static int
pipe_release(struct inode *inode, struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
                pipe->readers--;
        if (file->f_mode & FMODE_WRITE)
                pipe->writers--;

        if (pipe->readers || pipe->writers) {
                wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return 0;
}

static int
pipe_fasync(int fd, struct file *filp, int on)
{
        struct pipe_inode_info *pipe = filp->private_data;
        int retval = 0;

        __pipe_lock(pipe);
        if (filp->f_mode & FMODE_READ)
                retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
        if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
                if (retval < 0 && (filp->f_mode & FMODE_READ))
                        /* this can happen only if on == T */
                        fasync_helper(-1, filp, 0, &pipe->fasync_readers);
        }
        __pipe_unlock(pipe);
        return retval;
}

static unsigned long account_pipe_buffers(struct user_struct *user,
                                 unsigned long old, unsigned long new)
{
        return atomic_long_add_return(new - old, &user->pipe_bufs);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
        unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);

        return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
        unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);

        return hard_limit && user_bufs > hard_limit;
}

static bool is_unprivileged_user(void)
{
        return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
}
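
/*
 * Worked numbers (illustrative, assuming 4 KiB pages): the default soft
 * limit is PIPE_DEF_BUFFERS * INR_OPEN_CUR = 16 * 1024 = 16384 pages,
 * i.e. 64 MiB of pipe buffers per user before new unprivileged pipes
 * are cut back to a single slot; the hard limit defaults to 0, meaning
 * no hard cap.
 */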

struct pipe_inode_info *alloc_pipe_info(void)
{
        struct pipe_inode_info *pipe;
        unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
        struct user_struct *user = get_current_user();
        unsigned long user_bufs;
        unsigned int max_size = READ_ONCE(pipe_max_size);

        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
        if (pipe == NULL)
                goto out_free_uid;

        if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
                pipe_bufs = max_size >> PAGE_SHIFT;

        user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

        if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
                user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
                pipe_bufs = 1;
        }

        if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
                goto out_revert_acct;

        pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                             GFP_KERNEL_ACCOUNT);

        if (pipe->bufs) {
                init_waitqueue_head(&pipe->wait);
                pipe->r_counter = pipe->w_counter = 1;
                pipe->ring_size = pipe_bufs;
                pipe->user = user;
                mutex_init(&pipe->mutex);
                return pipe;
        }

out_revert_acct:
        (void) account_pipe_buffers(user, pipe_bufs, 0);
        kfree(pipe);
out_free_uid:
        free_uid(user);
        return NULL;
}

void free_pipe_info(struct pipe_inode_info *pipe)
{
        int i;

        (void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->ring_size; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
                if (buf->ops)
                        pipe_buf_release(pipe, buf);
        }
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                                d_inode(dentry)->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
        struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
        struct pipe_inode_info *pipe;

        if (!inode)
                goto fail_inode;

        inode->i_ino = get_next_ino();

        pipe = alloc_pipe_info();
        if (!pipe)
                goto fail_iput;

        inode->i_pipe = pipe;
        pipe->files = 2;
        pipe->readers = pipe->writers = 1;
        inode->i_fop = &pipefifo_fops;

        /*
         * Mark the inode dirty from the very beginning,
         * that way it will never be moved to the dirty
         * list because "mark_inode_dirty()" will think
         * that it already _is_ on the dirty list.
         */
        inode->i_state = I_DIRTY;
        inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

        return inode;

fail_iput:
        iput(inode);

fail_inode:
        return NULL;
}

int create_pipe_files(struct file **res, int flags)
{
        struct inode *inode = get_pipe_inode();
        struct file *f;

        if (!inode)
                return -ENFILE;

        f = alloc_file_pseudo(inode, pipe_mnt, "",
                                O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
                                &pipefifo_fops);
        if (IS_ERR(f)) {
                free_pipe_info(inode->i_pipe);
                iput(inode);
                return PTR_ERR(f);
        }

        f->private_data = inode->i_pipe;

        res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
                                  &pipefifo_fops);
        if (IS_ERR(res[0])) {
                put_pipe_info(inode, inode->i_pipe);
                fput(f);
                return PTR_ERR(res[0]);
        }
        res[0]->private_data = inode->i_pipe;
        res[1] = f;
        return 0;
}

static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
        int error;
        int fdw, fdr;

        if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
                return -EINVAL;

        error = create_pipe_files(files, flags);
        if (error)
                return error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_read_pipe;
        fdr = error;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                goto err_fdr;
        fdw = error;

        audit_fd_pair(fdr, fdw);
        fd[0] = fdr;
        fd[1] = fdw;
        return 0;

 err_fdr:
        put_unused_fd(fdr);
 err_read_pipe:
        fput(files[0]);
        fput(files[1]);
        return error;
}

int do_pipe_flags(int *fd, int flags)
{
        struct file *files[2];
        int error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                fd_install(fd[0], files[0]);
                fd_install(fd[1], files[1]);
        }
        return error;
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
static int do_pipe2(int __user *fildes, int flags)
{
        struct file *files[2];
        int fd[2];
        int error;

        error = __do_pipe_flags(fd, files, flags);
        if (!error) {
                if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
                        fput(files[0]);
                        fput(files[1]);
                        put_unused_fd(fd[0]);
                        put_unused_fd(fd[1]);
                        error = -EFAULT;
                } else {
                        fd_install(fd[0], files[0]);
                        fd_install(fd[1], files[1]);
                }
        }
        return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
        return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
        return do_pipe2(fildes, 0);
}

static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
        int cur = *cnt;

        while (cur == *cnt) {
                pipe_wait(pipe);
                if (signal_pending(current))
                        break;
        }
        return cur == *cnt ? -ERESTARTSYS : 0;
}
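
/*
 * Illustrative note (added): a blocking FIFO reader in fifo_open()
 * below waits via wait_for_partner(pipe, &pipe->w_counter).  Every
 * writer's open bumps w_counter and wakes the queue, so the changed
 * counter releases the reader even if that writer has already gone
 * away again.
 */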

static void wake_up_partner(struct pipe_inode_info *pipe)
{
        wake_up_interruptible(&pipe->wait);
}

static int fifo_open(struct inode *inode, struct file *filp)
{
        struct pipe_inode_info *pipe;
        bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
        int ret;

        filp->f_version = 0;

        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
                pipe->files++;
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
                pipe->files = 1;
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
                        inode->i_pipe->files++;
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
                } else {
                        inode->i_pipe = pipe;
                        spin_unlock(&inode->i_lock);
                }
        }
        filp->private_data = pipe;
        /* OK, we have a pipe and it's pinned down */

        __pipe_lock(pipe);

        /* We can only do regular read/write on fifos */
        filp->f_mode &= (FMODE_READ | FMODE_WRITE);

        switch (filp->f_mode) {
        case FMODE_READ:
        /*
         *  O_RDONLY
         *  POSIX.1 says that O_NONBLOCK means return with the FIFO
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
                if (pipe->readers++ == 0)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->writers) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress EPOLLHUP until we have
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
                                if (wait_for_partner(pipe, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
                break;

        case FMODE_WRITE:
        /*
         *  O_WRONLY
         *  POSIX.1 says that O_NONBLOCK means return -1 with
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
                if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
                        goto err;

                pipe->w_counter++;
                if (!pipe->writers++)
                        wake_up_partner(pipe);

                if (!is_pipe && !pipe->readers) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
                break;

        case FMODE_READ | FMODE_WRITE:
        /*
         *  O_RDWR
         *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
         *  This implementation will NEVER block on an O_RDWR open, since
         *  the process can at least talk to itself.
         */

                pipe->readers++;
                pipe->writers++;
                pipe->r_counter++;
                pipe->w_counter++;
                if (pipe->readers == 1 || pipe->writers == 1)
                        wake_up_partner(pipe);
                break;

        default:
                ret = -EINVAL;
                goto err;
        }

        /* Ok! */
        __pipe_unlock(pipe);
        return 0;

err_rd:
        if (!--pipe->readers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err_wr:
        if (!--pipe->writers)
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;

err:
        __pipe_unlock(pipe);

        put_pipe_info(inode, pipe);
        return ret;
}

const struct file_operations pipefifo_fops = {
        .open           = fifo_open,
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
        .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
};

/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
unsigned int round_pipe_size(unsigned long size)
{
        if (size > (1U << 31))
                return 0;

        /* Minimum pipe size, as required by POSIX */
        if (size < PAGE_SIZE)
                return PAGE_SIZE;

        return roundup_pow_of_two(size);
}
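
/*
 * Worked examples (illustrative, assuming PAGE_SIZE == 4096):
 *
 *      round_pipe_size(0)              -> 4096 (POSIX minimum)
 *      round_pipe_size(4097)           -> 8192 (next power of two)
 *      round_pipe_size(1 << 20)        -> 1048576 (already a power of two)
 *      round_pipe_size(1UL << 32)      -> 0 (too large)
 */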

/*
 * Allocate a new array of pipe buffers and copy the info over. Returns
 * the pipe size if successful, or -ERROR on failure.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
        struct pipe_buffer *bufs;
        unsigned int size, nr_slots, head, tail, mask, n;
        unsigned long user_bufs;
        long ret = 0;

        size = round_pipe_size(arg);
        nr_slots = size >> PAGE_SHIFT;

        if (!nr_slots)
                return -EINVAL;

        /*
         * If trying to increase the pipe capacity, check that an
         * unprivileged user is not trying to exceed various limits
         * (soft limit check here, hard limit check just below).
         * Decreasing the pipe capacity is always permitted, even
         * if the user is currently over a limit.
         */
        if (nr_slots > pipe->ring_size &&
                        size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);

        if (nr_slots > pipe->ring_size &&
                        (too_many_pipe_buffers_hard(user_bufs) ||
                         too_many_pipe_buffers_soft(user_bufs)) &&
                        is_unprivileged_user()) {
                ret = -EPERM;
                goto out_revert_acct;
        }

        /*
         * We can shrink the pipe, if arg is greater than the ring occupancy.
         * Since we don't expect a lot of shrink+grow operations, just free and
         * allocate again like we would do for growing.  If the pipe currently
         * contains more buffers than arg, then return busy.
         */
        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
        n = pipe_occupancy(pipe->head, pipe->tail);
        if (nr_slots < n) {
                ret = -EBUSY;
                goto out_revert_acct;
        }

        bufs = kcalloc(nr_slots, sizeof(*bufs),
                       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs)) {
                ret = -ENOMEM;
                goto out_revert_acct;
        }

        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indices.
         */
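        /*
         * Worked example (illustrative): ring_size = 16, tail = 14 and
         * head = 18, so t = 14, h = 2 and n = 4.  Then tsize = 2: slots
         * 14..15 are copied to bufs[0..1], slots 0..1 to bufs[2..3], and
         * the ring restarts below with tail = 0, head = 4.
         */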
        if (n > 0) {
                unsigned int h = head & mask;
                unsigned int t = tail & mask;
                if (h > t) {
                        memcpy(bufs, pipe->bufs + t,
                               n * sizeof(struct pipe_buffer));
                } else {
                        unsigned int tsize = pipe->ring_size - t;
                        if (h > 0)
                                memcpy(bufs + tsize, pipe->bufs,
                                       h * sizeof(struct pipe_buffer));
                        memcpy(bufs, pipe->bufs + t,
                               tsize * sizeof(struct pipe_buffer));
                }
        }

        head = n;
        tail = 0;

        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->ring_size = nr_slots;
        pipe->tail = tail;
        pipe->head = head;
        return pipe->ring_size * PAGE_SIZE;

out_revert_acct:
        (void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
        return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
        return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct pipe_inode_info *pipe;
        long ret;

        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;

        __pipe_lock(pipe);

        switch (cmd) {
        case F_SETPIPE_SZ:
                ret = pipe_set_size(pipe, arg);
                break;
        case F_GETPIPE_SZ:
                ret = pipe->ring_size * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        __pipe_unlock(pipe);
        return ret;
}
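
/*
 * Userspace view (illustrative, assuming _GNU_SOURCE and <fcntl.h>):
 *
 *      fcntl(fd, F_SETPIPE_SZ, 1 << 20);       // request 1 MiB; the kernel
 *                                              // rounds via round_pipe_size()
 *      long sz = fcntl(fd, F_GETPIPE_SZ);      // ring capacity in bytes
 */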

static const struct super_operations pipefs_ops = {
        .destroy_inode = free_inode_nonrcu,
        .statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */

static int pipefs_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        ctx->ops = &pipefs_ops;
        ctx->dops = &pipefs_dentry_operations;
        return 0;
}

static struct file_system_type pipe_fs_type = {
        .name           = "pipefs",
        .init_fs_context = pipefs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
        int err = register_filesystem(&pipe_fs_type);

        if (!err) {
                pipe_mnt = kern_mount(&pipe_fs_type);
                if (IS_ERR(pipe_mnt)) {
                        err = PTR_ERR(pipe_mnt);
                        unregister_filesystem(&pipe_fs_type);
                }
        }
        return err;
}

fs_initcall(init_pipe_fs);