pipe: Increase the writer-wakeup threshold to reduce context-switch count
fs/pipe.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/pipe.c
4  *
5  *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
6  */
7
8 #include <linux/mm.h>
9 #include <linux/file.h>
10 #include <linux/poll.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/fs.h>
15 #include <linux/log2.h>
16 #include <linux/mount.h>
17 #include <linux/pseudo_fs.h>
18 #include <linux/magic.h>
19 #include <linux/pipe_fs_i.h>
20 #include <linux/uio.h>
21 #include <linux/highmem.h>
22 #include <linux/pagemap.h>
23 #include <linux/audit.h>
24 #include <linux/syscalls.h>
25 #include <linux/fcntl.h>
26 #include <linux/memcontrol.h>
27
28 #include <linux/uaccess.h>
29 #include <asm/ioctls.h>
30
31 #include "internal.h"
32
33 /*
34  * The max size to which a non-root user is allowed to grow a pipe. Can
35  * be set by root in /proc/sys/fs/pipe-max-size.
36  */
37 unsigned int pipe_max_size = 1048576;
38
39 /* Maximum allocatable pages per user. The hard limit is unset by default; the
40  * soft limit matches the defaults (PIPE_DEF_BUFFERS * INR_OPEN_CUR).
41  */
42 unsigned long pipe_user_pages_hard;
43 unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
44
45 /*
46  * We use head and tail indices that aren't masked off, except at the point of
47  * dereference, but rather they're allowed to wrap naturally.  This means there
48  * isn't a dead spot in the buffer, but the ring has to be a power of two and
49  * <= 2^31.
50  * -- David Howells 2019-09-23.
51  *
52  * Reads with count = 0 should always return 0.
53  * -- Julian Bradfield 1999-06-07.
54  *
55  * FIFOs and Pipes now generate SIGIO for both readers and writers.
56  * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
57  *
58  * pipe_read & write cleanup
59  * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
60  */
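/*
 * Illustrative sketch (not part of this file): the ring helpers used below,
 * such as pipe_empty(), pipe_full() and pipe_occupancy(), are declared in
 * <linux/pipe_fs_i.h> and reduce to plain unsigned arithmetic on the
 * unmasked indices; a slot is only masked at the point of dereference:
 *
 *	occupancy = head - tail;                  (unsigned wrap-around is fine)
 *	empty     = (head == tail);
 *	full      = (head - tail >= max_usage);
 *	slot      = &pipe->bufs[tail & (pipe->ring_size - 1)];   (2^n ring)
 */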
61
62 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
63 {
64         if (pipe->files)
65                 mutex_lock_nested(&pipe->mutex, subclass);
66 }
67
68 void pipe_lock(struct pipe_inode_info *pipe)
69 {
70         /*
71          * pipe_lock() nests non-pipe inode locks (for writing to a file)
72          */
73         pipe_lock_nested(pipe, I_MUTEX_PARENT);
74 }
75 EXPORT_SYMBOL(pipe_lock);
76
77 void pipe_unlock(struct pipe_inode_info *pipe)
78 {
79         if (pipe->files)
80                 mutex_unlock(&pipe->mutex);
81 }
82 EXPORT_SYMBOL(pipe_unlock);
83
84 static inline void __pipe_lock(struct pipe_inode_info *pipe)
85 {
86         mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
87 }
88
89 static inline void __pipe_unlock(struct pipe_inode_info *pipe)
90 {
91         mutex_unlock(&pipe->mutex);
92 }
93
94 void pipe_double_lock(struct pipe_inode_info *pipe1,
95                       struct pipe_inode_info *pipe2)
96 {
97         BUG_ON(pipe1 == pipe2);
98
99         if (pipe1 < pipe2) {
100                 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
101                 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
102         } else {
103                 pipe_lock_nested(pipe2, I_MUTEX_PARENT);
104                 pipe_lock_nested(pipe1, I_MUTEX_CHILD);
105         }
106 }
107
108 /* Drop the inode semaphore and wait for a pipe event, atomically */
109 void pipe_wait(struct pipe_inode_info *pipe)
110 {
111         DEFINE_WAIT(wait);
112
113         /*
114          * Pipes are system-local resources, so sleeping on them
115          * is considered a noninteractive wait:
116          */
117         prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
118         pipe_unlock(pipe);
119         schedule();
120         finish_wait(&pipe->wait, &wait);
121         pipe_lock(pipe);
122 }
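/*
 * Note on pipe_wait() above: callers hold the pipe mutex on entry; the task
 * is queued on pipe->wait and marked TASK_INTERRUPTIBLE by prepare_to_wait()
 * before the mutex is dropped, so a wakeup that arrives between pipe_unlock()
 * and schedule() is not lost, and the mutex is re-taken before returning.
 */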
123
124 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
125                                   struct pipe_buffer *buf)
126 {
127         struct page *page = buf->page;
128
129         /*
130          * If nobody else uses this page, and we don't already have a
131          * temporary page, let's keep track of it as a one-deep
132          * allocation cache. (Otherwise just release our reference to it)
133          */
134         if (page_count(page) == 1 && !pipe->tmp_page)
135                 pipe->tmp_page = page;
136         else
137                 put_page(page);
138 }
139
140 static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
141                                struct pipe_buffer *buf)
142 {
143         struct page *page = buf->page;
144
145         if (page_count(page) == 1) {
146                 memcg_kmem_uncharge(page, 0);
147                 __SetPageLocked(page);
148                 return 0;
149         }
150         return 1;
151 }
152
153 /**
154  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
155  * @pipe:       the pipe that the buffer belongs to
156  * @buf:        the buffer to attempt to steal
157  *
158  * Description:
159  *      This function attempts to steal the &struct page attached to
160  *      @buf. If successful, this function returns 0 and returns with
161  *      the page locked. The caller may then reuse the page for whatever
162  *      he wishes; the typical use is insertion into a different file
163  *      page cache.
164  */
165 int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
166                            struct pipe_buffer *buf)
167 {
168         struct page *page = buf->page;
169
170         /*
171          * A reference count of one is golden: it means the owner of this
172          * page is the only one holding a reference to it. Lock the page
173          * and return OK.
174          */
175         if (page_count(page) == 1) {
176                 lock_page(page);
177                 return 0;
178         }
179
180         return 1;
181 }
182 EXPORT_SYMBOL(generic_pipe_buf_steal);
183
184 /**
185  * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
186  * @pipe:       the pipe that the buffer belongs to
187  * @buf:        the buffer to get a reference to
188  *
189  * Description:
190  *      This function grabs an extra reference to @buf. It's used in
191  *      the tee() system call, when we duplicate the buffers in one
192  *      pipe into another.
193  */
194 bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
195 {
196         return try_get_page(buf->page);
197 }
198 EXPORT_SYMBOL(generic_pipe_buf_get);
199
200 /**
201  * generic_pipe_buf_confirm - verify contents of the pipe buffer
202  * @info:       the pipe that the buffer belongs to
203  * @buf:        the buffer to confirm
204  *
205  * Description:
206  *      This function does nothing, because the generic pipe code uses
207  *      pages that are always good when inserted into the pipe.
208  */
209 int generic_pipe_buf_confirm(struct pipe_inode_info *info,
210                              struct pipe_buffer *buf)
211 {
212         return 0;
213 }
214 EXPORT_SYMBOL(generic_pipe_buf_confirm);
215
216 /**
217  * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
218  * @pipe:       the pipe that the buffer belongs to
219  * @buf:        the buffer to put a reference to
220  *
221  * Description:
222  *      This function releases a reference to @buf.
223  */
224 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
225                               struct pipe_buffer *buf)
226 {
227         put_page(buf->page);
228 }
229 EXPORT_SYMBOL(generic_pipe_buf_release);
230
231 /* New data written to a pipe may be appended to a buffer with this type. */
232 static const struct pipe_buf_operations anon_pipe_buf_ops = {
233         .confirm = generic_pipe_buf_confirm,
234         .release = anon_pipe_buf_release,
235         .steal = anon_pipe_buf_steal,
236         .get = generic_pipe_buf_get,
237 };
238
239 static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
240         .confirm = generic_pipe_buf_confirm,
241         .release = anon_pipe_buf_release,
242         .steal = anon_pipe_buf_steal,
243         .get = generic_pipe_buf_get,
244 };
245
246 static const struct pipe_buf_operations packet_pipe_buf_ops = {
247         .confirm = generic_pipe_buf_confirm,
248         .release = anon_pipe_buf_release,
249         .steal = anon_pipe_buf_steal,
250         .get = generic_pipe_buf_get,
251 };
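/*
 * The three ops tables above are intentionally identical; only their
 * addresses differ.  pipe_buf_can_merge() below treats a buffer as
 * appendable only while buf->ops == &anon_pipe_buf_ops, so switching a
 * buffer to the nomerge or packet table is what stops later writes from
 * being merged into it.
 */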
252
253 /**
254  * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
255  * @buf:        the buffer to mark
256  *
257  * Description:
258  *      This function ensures that no future writes will be merged into the
259  *      given &struct pipe_buffer. This is necessary when multiple pipe buffers
260  *      share the same backing page.
261  */
262 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
263 {
264         if (buf->ops == &anon_pipe_buf_ops)
265                 buf->ops = &anon_pipe_buf_nomerge_ops;
266 }
267
268 static bool pipe_buf_can_merge(struct pipe_buffer *buf)
269 {
270         return buf->ops == &anon_pipe_buf_ops;
271 }
272
273 static ssize_t
274 pipe_read(struct kiocb *iocb, struct iov_iter *to)
275 {
276         size_t total_len = iov_iter_count(to);
277         struct file *filp = iocb->ki_filp;
278         struct pipe_inode_info *pipe = filp->private_data;
279         int do_wakeup;
280         ssize_t ret;
281
282         /* Null read succeeds. */
283         if (unlikely(total_len == 0))
284                 return 0;
285
286         do_wakeup = 0;
287         ret = 0;
288         __pipe_lock(pipe);
289         for (;;) {
290                 unsigned int head = pipe->head;
291                 unsigned int tail = pipe->tail;
292                 unsigned int mask = pipe->ring_size - 1;
293
294                 if (!pipe_empty(head, tail)) {
295                         struct pipe_buffer *buf = &pipe->bufs[tail & mask];
296                         size_t chars = buf->len;
297                         size_t written;
298                         int error;
299
300                         if (chars > total_len)
301                                 chars = total_len;
302
303                         error = pipe_buf_confirm(pipe, buf);
304                         if (error) {
305                                 if (!ret)
306                                         ret = error;
307                                 break;
308                         }
309
310                         written = copy_page_to_iter(buf->page, buf->offset, chars, to);
311                         if (unlikely(written < chars)) {
312                                 if (!ret)
313                                         ret = -EFAULT;
314                                 break;
315                         }
316                         ret += chars;
317                         buf->offset += chars;
318                         buf->len -= chars;
319
320                         /* Was it a packet buffer? Clean up and exit */
321                         if (buf->flags & PIPE_BUF_FLAG_PACKET) {
322                                 total_len = chars;
323                                 buf->len = 0;
324                         }
325
326                         if (!buf->len) {
327                                 bool wake;
328                                 pipe_buf_release(pipe, buf);
329                                 spin_lock_irq(&pipe->wait.lock);
330                                 tail++;
331                                 pipe->tail = tail;
332                                 do_wakeup = 1;
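                /*
                 * Writer-wakeup threshold (the point of this patch):
                 * head - (tail - 1) is the occupancy the ring had before this
                 * slot was freed, so a sleeping writer is woken only by the
                 * read that drains the pipe to just under half of max_usage,
                 * rather than once per freed slot.  Batching the wakeups this
                 * way cuts the number of reader/writer context switches.
                 */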
333                                 wake = head - (tail - 1) == pipe->max_usage / 2;
334                                 if (wake)
335                                         wake_up_interruptible_sync_poll_locked(
336                                                 &pipe->wait, EPOLLOUT | EPOLLWRNORM);
337                                 spin_unlock_irq(&pipe->wait.lock);
338                                 if (wake)
339                                         kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
340                         }
341                         total_len -= chars;
342                         if (!total_len)
343                                 break;  /* common path: read succeeded */
344                         if (!pipe_empty(head, tail))    /* More to do? */
345                                 continue;
346                 }
347
348                 if (!pipe->writers)
349                         break;
350                 if (!pipe->waiting_writers) {
351                         /* syscall merging: Usually we must not sleep
352                          * if O_NONBLOCK is set, or if we got some data.
353                          * But if a writer sleeps in kernel space, then
354                          * we can wait for that data without violating POSIX.
355                          */
356                         if (ret)
357                                 break;
358                         if (filp->f_flags & O_NONBLOCK) {
359                                 ret = -EAGAIN;
360                                 break;
361                         }
362                 }
363                 if (signal_pending(current)) {
364                         if (!ret)
365                                 ret = -ERESTARTSYS;
366                         break;
367                 }
368                 pipe_wait(pipe);
369         }
370         __pipe_unlock(pipe);
371
372         /* Signal writers asynchronously that there is more room. */
373         if (do_wakeup) {
374                 wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
375                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
376         }
377         if (ret > 0)
378                 file_accessed(filp);
379         return ret;
380 }
381
382 static inline int is_packetized(struct file *file)
383 {
384         return (file->f_flags & O_DIRECT) != 0;
385 }
386
387 static ssize_t
388 pipe_write(struct kiocb *iocb, struct iov_iter *from)
389 {
390         struct file *filp = iocb->ki_filp;
391         struct pipe_inode_info *pipe = filp->private_data;
392         unsigned int head, max_usage, mask;
393         ssize_t ret = 0;
394         int do_wakeup = 0;
395         size_t total_len = iov_iter_count(from);
396         ssize_t chars;
397
398         /* Null write succeeds. */
399         if (unlikely(total_len == 0))
400                 return 0;
401
402         __pipe_lock(pipe);
403
404         if (!pipe->readers) {
405                 send_sig(SIGPIPE, current, 0);
406                 ret = -EPIPE;
407                 goto out;
408         }
409
410         head = pipe->head;
411         max_usage = pipe->max_usage;
412         mask = pipe->ring_size - 1;
413
414         /* We try to merge small writes */
415         chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
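        /*
         * Example of the masking above (assuming 4 KiB pages): a 5000-byte
         * write gives chars = 5000 & 4095 = 904, so 904 bytes of this write
         * may be merged into the tail buffer's page if they fit; a write
         * that is an exact multiple of PAGE_SIZE gives chars == 0 and the
         * merge path is skipped.
         */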
416         if (!pipe_empty(head, pipe->tail) && chars != 0) {
417                 struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
418                 int offset = buf->offset + buf->len;
419
420                 if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
421                         ret = pipe_buf_confirm(pipe, buf);
422                         if (ret)
423                                 goto out;
424
425                         ret = copy_page_from_iter(buf->page, offset, chars, from);
426                         if (unlikely(ret < chars)) {
427                                 ret = -EFAULT;
428                                 goto out;
429                         }
430                         do_wakeup = 1;
431                         buf->len += ret;
432                         if (!iov_iter_count(from))
433                                 goto out;
434                 }
435         }
436
437         for (;;) {
438                 if (!pipe->readers) {
439                         send_sig(SIGPIPE, current, 0);
440                         if (!ret)
441                                 ret = -EPIPE;
442                         break;
443                 }
444
445                 head = pipe->head;
446                 if (!pipe_full(head, pipe->tail, max_usage)) {
447                         struct pipe_buffer *buf = &pipe->bufs[head & mask];
448                         struct page *page = pipe->tmp_page;
449                         int copied;
450
451                         if (!page) {
452                                 page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
453                                 if (unlikely(!page)) {
454                                         ret = ret ? : -ENOMEM;
455                                         break;
456                                 }
457                                 pipe->tmp_page = page;
458                         }
459
460                         /* Allocate a slot in the ring in advance and attach an
461                          * empty buffer.  If we fault or otherwise fail to use
462                          * it, either the reader will consume it or it'll still
463                          * be there for the next write.
464                          */
465                         spin_lock_irq(&pipe->wait.lock);
466
467                         head = pipe->head;
468                         if (pipe_full(head, pipe->tail, max_usage)) {
469                                 spin_unlock_irq(&pipe->wait.lock);
470                                 continue;
471                         }
472
473                         pipe->head = head + 1;
474
475                         /* Always wake up, even if the copy fails. Otherwise
476                          * we lock up (O_NONBLOCK-)readers that sleep due to
477                          * syscall merging.
478                          * FIXME! Is this really true?
479                          */
480                         wake_up_interruptible_sync_poll_locked(
481                                 &pipe->wait, EPOLLIN | EPOLLRDNORM);
482
483                         spin_unlock_irq(&pipe->wait.lock);
484                         kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
485
486                         /* Insert it into the buffer array */
487                         buf = &pipe->bufs[head & mask];
488                         buf->page = page;
489                         buf->ops = &anon_pipe_buf_ops;
490                         buf->offset = 0;
491                         buf->len = 0;
492                         buf->flags = 0;
493                         if (is_packetized(filp)) {
494                                 buf->ops = &packet_pipe_buf_ops;
495                                 buf->flags = PIPE_BUF_FLAG_PACKET;
496                         }
497                         pipe->tmp_page = NULL;
498
499                         copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
500                         if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
501                                 if (!ret)
502                                         ret = -EFAULT;
503                                 break;
504                         }
505                         ret += copied;
506                         buf->offset = 0;
507                         buf->len = copied;
508
509                         if (!iov_iter_count(from))
510                                 break;
511                 }
512
513                 if (!pipe_full(head, pipe->tail, max_usage))
514                         continue;
515
516                 /* Wait for buffer space to become available. */
517                 if (filp->f_flags & O_NONBLOCK) {
518                         if (!ret)
519                                 ret = -EAGAIN;
520                         break;
521                 }
522                 if (signal_pending(current)) {
523                         if (!ret)
524                                 ret = -ERESTARTSYS;
525                         break;
526                 }
527                 pipe->waiting_writers++;
528                 pipe_wait(pipe);
529                 pipe->waiting_writers--;
530         }
531 out:
532         __pipe_unlock(pipe);
533         if (do_wakeup) {
534                 wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
535                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
536         }
537         if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
538                 int err = file_update_time(filp);
539                 if (err)
540                         ret = err;
541                 sb_end_write(file_inode(filp)->i_sb);
542         }
543         return ret;
544 }
545
546 static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
547 {
548         struct pipe_inode_info *pipe = filp->private_data;
549         int count, head, tail, mask;
550
551         switch (cmd) {
552                 case FIONREAD:
553                         __pipe_lock(pipe);
554                         count = 0;
555                         head = pipe->head;
556                         tail = pipe->tail;
557                         mask = pipe->ring_size - 1;
558
559                         while (tail != head) {
560                                 count += pipe->bufs[tail & mask].len;
561                                 tail++;
562                         }
563                         __pipe_unlock(pipe);
564
565                         return put_user(count, (int __user *)arg);
566                 default:
567                         return -ENOIOCTLCMD;
568         }
569 }
570
571 /* No kernel lock held - fine */
572 static __poll_t
573 pipe_poll(struct file *filp, poll_table *wait)
574 {
575         __poll_t mask;
576         struct pipe_inode_info *pipe = filp->private_data;
577         unsigned int head = READ_ONCE(pipe->head);
578         unsigned int tail = READ_ONCE(pipe->tail);
579
580         poll_wait(filp, &pipe->wait, wait);
581
582         BUG_ON(pipe_occupancy(head, tail) > pipe->ring_size);
583
584         /* Reading only -- no need for acquiring the semaphore.  */
585         mask = 0;
586         if (filp->f_mode & FMODE_READ) {
587                 if (!pipe_empty(head, tail))
588                         mask |= EPOLLIN | EPOLLRDNORM;
589                 if (!pipe->writers && filp->f_version != pipe->w_counter)
590                         mask |= EPOLLHUP;
591         }
592
593         if (filp->f_mode & FMODE_WRITE) {
594                 if (!pipe_full(head, tail, pipe->max_usage))
595                         mask |= EPOLLOUT | EPOLLWRNORM;
596                 /*
597                  * Most Unices do not set EPOLLERR for FIFOs but on Linux they
598                  * behave exactly like pipes for poll().
599                  */
600                 if (!pipe->readers)
601                         mask |= EPOLLERR;
602         }
603
604         return mask;
605 }
606
607 static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
608 {
609         int kill = 0;
610
611         spin_lock(&inode->i_lock);
612         if (!--pipe->files) {
613                 inode->i_pipe = NULL;
614                 kill = 1;
615         }
616         spin_unlock(&inode->i_lock);
617
618         if (kill)
619                 free_pipe_info(pipe);
620 }
621
622 static int
623 pipe_release(struct inode *inode, struct file *file)
624 {
625         struct pipe_inode_info *pipe = file->private_data;
626
627         __pipe_lock(pipe);
628         if (file->f_mode & FMODE_READ)
629                 pipe->readers--;
630         if (file->f_mode & FMODE_WRITE)
631                 pipe->writers--;
632
633         if (pipe->readers || pipe->writers) {
634                 wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
635                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
636                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
637         }
638         __pipe_unlock(pipe);
639
640         put_pipe_info(inode, pipe);
641         return 0;
642 }
643
644 static int
645 pipe_fasync(int fd, struct file *filp, int on)
646 {
647         struct pipe_inode_info *pipe = filp->private_data;
648         int retval = 0;
649
650         __pipe_lock(pipe);
651         if (filp->f_mode & FMODE_READ)
652                 retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
653         if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
654                 retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
655                 if (retval < 0 && (filp->f_mode & FMODE_READ))
656                         /* this can happen only if on == T */
657                         fasync_helper(-1, filp, 0, &pipe->fasync_readers);
658         }
659         __pipe_unlock(pipe);
660         return retval;
661 }
662
663 static unsigned long account_pipe_buffers(struct user_struct *user,
664                                  unsigned long old, unsigned long new)
665 {
666         return atomic_long_add_return(new - old, &user->pipe_bufs);
667 }
668
669 static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
670 {
671         unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);
672
673         return soft_limit && user_bufs > soft_limit;
674 }
675
676 static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
677 {
678         unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);
679
680         return hard_limit && user_bufs > hard_limit;
681 }
682
683 static bool is_unprivileged_user(void)
684 {
685         return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
686 }
687
688 struct pipe_inode_info *alloc_pipe_info(void)
689 {
690         struct pipe_inode_info *pipe;
691         unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
692         struct user_struct *user = get_current_user();
693         unsigned long user_bufs;
694         unsigned int max_size = READ_ONCE(pipe_max_size);
695
696         pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
697         if (pipe == NULL)
698                 goto out_free_uid;
699
700         if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
701                 pipe_bufs = max_size >> PAGE_SHIFT;
702
703         user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
704
705         if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
706                 user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
707                 pipe_bufs = 1;
708         }
709
710         if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
711                 goto out_revert_acct;
712
713         pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
714                              GFP_KERNEL_ACCOUNT);
715
716         if (pipe->bufs) {
717                 init_waitqueue_head(&pipe->wait);
718                 pipe->r_counter = pipe->w_counter = 1;
719                 pipe->max_usage = pipe_bufs;
720                 pipe->ring_size = pipe_bufs;
721                 pipe->user = user;
722                 mutex_init(&pipe->mutex);
723                 return pipe;
724         }
725
726 out_revert_acct:
727         (void) account_pipe_buffers(user, pipe_bufs, 0);
728         kfree(pipe);
729 out_free_uid:
730         free_uid(user);
731         return NULL;
732 }
733
734 void free_pipe_info(struct pipe_inode_info *pipe)
735 {
736         int i;
737
738         (void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
739         free_uid(pipe->user);
740         for (i = 0; i < pipe->ring_size; i++) {
741                 struct pipe_buffer *buf = pipe->bufs + i;
742                 if (buf->ops)
743                         pipe_buf_release(pipe, buf);
744         }
745         if (pipe->tmp_page)
746                 __free_page(pipe->tmp_page);
747         kfree(pipe->bufs);
748         kfree(pipe);
749 }
750
751 static struct vfsmount *pipe_mnt __read_mostly;
752
753 /*
754  * pipefs_dname() is called from d_path().
755  */
756 static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
757 {
758         return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
759                                 d_inode(dentry)->i_ino);
760 }
761
762 static const struct dentry_operations pipefs_dentry_operations = {
763         .d_dname        = pipefs_dname,
764 };
765
766 static struct inode * get_pipe_inode(void)
767 {
768         struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
769         struct pipe_inode_info *pipe;
770
771         if (!inode)
772                 goto fail_inode;
773
774         inode->i_ino = get_next_ino();
775
776         pipe = alloc_pipe_info();
777         if (!pipe)
778                 goto fail_iput;
779
780         inode->i_pipe = pipe;
781         pipe->files = 2;
782         pipe->readers = pipe->writers = 1;
783         inode->i_fop = &pipefifo_fops;
784
785         /*
786          * Mark the inode dirty from the very beginning,
787          * that way it will never be moved to the dirty
788          * list because "mark_inode_dirty()" will think
789          * that it already _is_ on the dirty list.
790          */
791         inode->i_state = I_DIRTY;
792         inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
793         inode->i_uid = current_fsuid();
794         inode->i_gid = current_fsgid();
795         inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
796
797         return inode;
798
799 fail_iput:
800         iput(inode);
801
802 fail_inode:
803         return NULL;
804 }
805
806 int create_pipe_files(struct file **res, int flags)
807 {
808         struct inode *inode = get_pipe_inode();
809         struct file *f;
810
811         if (!inode)
812                 return -ENFILE;
813
814         f = alloc_file_pseudo(inode, pipe_mnt, "",
815                                 O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
816                                 &pipefifo_fops);
817         if (IS_ERR(f)) {
818                 free_pipe_info(inode->i_pipe);
819                 iput(inode);
820                 return PTR_ERR(f);
821         }
822
823         f->private_data = inode->i_pipe;
824
825         res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
826                                   &pipefifo_fops);
827         if (IS_ERR(res[0])) {
828                 put_pipe_info(inode, inode->i_pipe);
829                 fput(f);
830                 return PTR_ERR(res[0]);
831         }
832         res[0]->private_data = inode->i_pipe;
833         res[1] = f;
834         return 0;
835 }
836
837 static int __do_pipe_flags(int *fd, struct file **files, int flags)
838 {
839         int error;
840         int fdw, fdr;
841
842         if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
843                 return -EINVAL;
844
845         error = create_pipe_files(files, flags);
846         if (error)
847                 return error;
848
849         error = get_unused_fd_flags(flags);
850         if (error < 0)
851                 goto err_read_pipe;
852         fdr = error;
853
854         error = get_unused_fd_flags(flags);
855         if (error < 0)
856                 goto err_fdr;
857         fdw = error;
858
859         audit_fd_pair(fdr, fdw);
860         fd[0] = fdr;
861         fd[1] = fdw;
862         return 0;
863
864  err_fdr:
865         put_unused_fd(fdr);
866  err_read_pipe:
867         fput(files[0]);
868         fput(files[1]);
869         return error;
870 }
871
872 int do_pipe_flags(int *fd, int flags)
873 {
874         struct file *files[2];
875         int error = __do_pipe_flags(fd, files, flags);
876         if (!error) {
877                 fd_install(fd[0], files[0]);
878                 fd_install(fd[1], files[1]);
879         }
880         return error;
881 }
882
883 /*
884  * sys_pipe() is the normal C calling convention for creating
885  * a pipe. It's not the way Unix traditionally does this, though.
886  */
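/*
 * Userspace view, for illustration only (assumes the glibc wrappers):
 * fd[0] is the read end and fd[1] the write end.
 *
 *	int fd[2];
 *
 *	if (pipe2(fd, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		write(fd[1], "hi", 2);
 *		...
 *	}
 */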
887 static int do_pipe2(int __user *fildes, int flags)
888 {
889         struct file *files[2];
890         int fd[2];
891         int error;
892
893         error = __do_pipe_flags(fd, files, flags);
894         if (!error) {
895                 if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
896                         fput(files[0]);
897                         fput(files[1]);
898                         put_unused_fd(fd[0]);
899                         put_unused_fd(fd[1]);
900                         error = -EFAULT;
901                 } else {
902                         fd_install(fd[0], files[0]);
903                         fd_install(fd[1], files[1]);
904                 }
905         }
906         return error;
907 }
908
909 SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
910 {
911         return do_pipe2(fildes, flags);
912 }
913
914 SYSCALL_DEFINE1(pipe, int __user *, fildes)
915 {
916         return do_pipe2(fildes, 0);
917 }
918
919 static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
920 {
921         int cur = *cnt;
922
923         while (cur == *cnt) {
924                 pipe_wait(pipe);
925                 if (signal_pending(current))
926                         break;
927         }
928         return cur == *cnt ? -ERESTARTSYS : 0;
929 }
930
931 static void wake_up_partner(struct pipe_inode_info *pipe)
932 {
933         wake_up_interruptible(&pipe->wait);
934 }
935
936 static int fifo_open(struct inode *inode, struct file *filp)
937 {
938         struct pipe_inode_info *pipe;
939         bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
940         int ret;
941
942         filp->f_version = 0;
943
944         spin_lock(&inode->i_lock);
945         if (inode->i_pipe) {
946                 pipe = inode->i_pipe;
947                 pipe->files++;
948                 spin_unlock(&inode->i_lock);
949         } else {
950                 spin_unlock(&inode->i_lock);
951                 pipe = alloc_pipe_info();
952                 if (!pipe)
953                         return -ENOMEM;
954                 pipe->files = 1;
955                 spin_lock(&inode->i_lock);
956                 if (unlikely(inode->i_pipe)) {
957                         inode->i_pipe->files++;
958                         spin_unlock(&inode->i_lock);
959                         free_pipe_info(pipe);
960                         pipe = inode->i_pipe;
961                 } else {
962                         inode->i_pipe = pipe;
963                         spin_unlock(&inode->i_lock);
964                 }
965         }
966         filp->private_data = pipe;
967         /* OK, we have a pipe and it's pinned down */
968
969         __pipe_lock(pipe);
970
971         /* We can only do regular read/write on fifos */
972         filp->f_mode &= (FMODE_READ | FMODE_WRITE);
973
974         switch (filp->f_mode) {
975         case FMODE_READ:
976         /*
977          *  O_RDONLY
978          *  POSIX.1 says that O_NONBLOCK means return with the FIFO
979          *  opened, even when there is no process writing the FIFO.
980          */
981                 pipe->r_counter++;
982                 if (pipe->readers++ == 0)
983                         wake_up_partner(pipe);
984
985                 if (!is_pipe && !pipe->writers) {
986                         if ((filp->f_flags & O_NONBLOCK)) {
987                                 /* suppress EPOLLHUP until we have
988                                  * seen a writer */
989                                 filp->f_version = pipe->w_counter;
990                         } else {
991                                 if (wait_for_partner(pipe, &pipe->w_counter))
992                                         goto err_rd;
993                         }
994                 }
995                 break;
996
997         case FMODE_WRITE:
998         /*
999          *  O_WRONLY
1000          *  POSIX.1 says that O_NONBLOCK means return -1 with
1001          *  errno=ENXIO when there is no process reading the FIFO.
1002          */
1003                 ret = -ENXIO;
1004                 if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
1005                         goto err;
1006
1007                 pipe->w_counter++;
1008                 if (!pipe->writers++)
1009                         wake_up_partner(pipe);
1010
1011                 if (!is_pipe && !pipe->readers) {
1012                         if (wait_for_partner(pipe, &pipe->r_counter))
1013                                 goto err_wr;
1014                 }
1015                 break;
1016
1017         case FMODE_READ | FMODE_WRITE:
1018         /*
1019          *  O_RDWR
1020          *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
1021          *  This implementation will NEVER block on a O_RDWR open, since
1022          *  the process can at least talk to itself.
1023          */
1024
1025                 pipe->readers++;
1026                 pipe->writers++;
1027                 pipe->r_counter++;
1028                 pipe->w_counter++;
1029                 if (pipe->readers == 1 || pipe->writers == 1)
1030                         wake_up_partner(pipe);
1031                 break;
1032
1033         default:
1034                 ret = -EINVAL;
1035                 goto err;
1036         }
1037
1038         /* Ok! */
1039         __pipe_unlock(pipe);
1040         return 0;
1041
1042 err_rd:
1043         if (!--pipe->readers)
1044                 wake_up_interruptible(&pipe->wait);
1045         ret = -ERESTARTSYS;
1046         goto err;
1047
1048 err_wr:
1049         if (!--pipe->writers)
1050                 wake_up_interruptible(&pipe->wait);
1051         ret = -ERESTARTSYS;
1052         goto err;
1053
1054 err:
1055         __pipe_unlock(pipe);
1056
1057         put_pipe_info(inode, pipe);
1058         return ret;
1059 }
1060
1061 const struct file_operations pipefifo_fops = {
1062         .open           = fifo_open,
1063         .llseek         = no_llseek,
1064         .read_iter      = pipe_read,
1065         .write_iter     = pipe_write,
1066         .poll           = pipe_poll,
1067         .unlocked_ioctl = pipe_ioctl,
1068         .release        = pipe_release,
1069         .fasync         = pipe_fasync,
1070 };
1071
1072 /*
1073  * Currently we rely on the pipe array holding a power-of-2 number
1074  * of pages. Returns 0 on error.
1075  */
1076 unsigned int round_pipe_size(unsigned long size)
1077 {
1078         if (size > (1U << 31))
1079                 return 0;
1080
1081         /* Minimum pipe size, as required by POSIX */
1082         if (size < PAGE_SIZE)
1083                 return PAGE_SIZE;
1084
1085         return roundup_pow_of_two(size);
1086 }
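/*
 * For example (assuming 4 KiB pages): round_pipe_size(1) returns 4096,
 * round_pipe_size(100000) returns 131072 (32 pages), and anything strictly
 * above 2^31 is rejected by returning 0.
 */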
1087
1088 /*
1089  * Allocate a new array of pipe buffers and copy the info over. Returns the
1090  * pipe size if successful, or return -ERROR on error.
1091  */
1092 static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
1093 {
1094         struct pipe_buffer *bufs;
1095         unsigned int size, nr_slots, head, tail, mask, n;
1096         unsigned long user_bufs;
1097         long ret = 0;
1098
1099         size = round_pipe_size(arg);
1100         nr_slots = size >> PAGE_SHIFT;
1101
1102         if (!nr_slots)
1103                 return -EINVAL;
1104
1105         /*
1106          * If trying to increase the pipe capacity, check that an
1107          * unprivileged user is not trying to exceed various limits
1108          * (soft limit check here, hard limit check just below).
1109          * Decreasing the pipe capacity is always permitted, even
1110          * if the user is currently over a limit.
1111          */
1112         if (nr_slots > pipe->ring_size &&
1113                         size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
1114                 return -EPERM;
1115
1116         user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);
1117
1118         if (nr_slots > pipe->ring_size &&
1119                         (too_many_pipe_buffers_hard(user_bufs) ||
1120                          too_many_pipe_buffers_soft(user_bufs)) &&
1121                         is_unprivileged_user()) {
1122                 ret = -EPERM;
1123                 goto out_revert_acct;
1124         }
1125
1126         /*
1127          * We can shrink the pipe provided the new slot count still covers the
1128          * ring occupancy.  Since we don't expect a lot of shrink+grow operations,
1129          * just free and allocate again like we would do for growing.  If the pipe
1130          * currently holds more buffers than the new size allows, return -EBUSY.
1131          */
1132         mask = pipe->ring_size - 1;
1133         head = pipe->head;
1134         tail = pipe->tail;
1135         n = pipe_occupancy(pipe->head, pipe->tail);
1136         if (nr_slots < n) {
1137                 ret = -EBUSY;
1138                 goto out_revert_acct;
1139         }
1140
1141         bufs = kcalloc(nr_slots, sizeof(*bufs),
1142                        GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
1143         if (unlikely(!bufs)) {
1144                 ret = -ENOMEM;
1145                 goto out_revert_acct;
1146         }
1147
1148         /*
1149          * The pipe array wraps around, so just start the new one at zero
1150          * and adjust the indices.
1151          */
1152         if (n > 0) {
1153                 unsigned int h = head & mask;
1154                 unsigned int t = tail & mask;
1155                 if (h > t) {
1156                         memcpy(bufs, pipe->bufs + t,
1157                                n * sizeof(struct pipe_buffer));
1158                 } else {
1159                         unsigned int tsize = pipe->ring_size - t;
1160                         if (h > 0)
1161                                 memcpy(bufs + tsize, pipe->bufs,
1162                                        h * sizeof(struct pipe_buffer));
1163                         memcpy(bufs, pipe->bufs + t,
1164                                tsize * sizeof(struct pipe_buffer));
1165                 }
1166         }
1167
1168         head = n;
1169         tail = 0;
1170
1171         kfree(pipe->bufs);
1172         pipe->bufs = bufs;
1173         pipe->ring_size = nr_slots;
1174         pipe->max_usage = nr_slots;
1175         pipe->tail = tail;
1176         pipe->head = head;
1177         return pipe->max_usage * PAGE_SIZE;
1178
1179 out_revert_acct:
1180         (void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
1181         return ret;
1182 }
1183
1184 /*
1185  * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1186  * location, so checking ->i_pipe is not enough to verify that this is a
1187  * pipe.
1188  */
1189 struct pipe_inode_info *get_pipe_info(struct file *file)
1190 {
1191         return file->f_op == &pipefifo_fops ? file->private_data : NULL;
1192 }
1193
1194 long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1195 {
1196         struct pipe_inode_info *pipe;
1197         long ret;
1198
1199         pipe = get_pipe_info(file);
1200         if (!pipe)
1201                 return -EBADF;
1202
1203         __pipe_lock(pipe);
1204
1205         switch (cmd) {
1206         case F_SETPIPE_SZ:
1207                 ret = pipe_set_size(pipe, arg);
1208                 break;
1209         case F_GETPIPE_SZ:
1210                 ret = pipe->max_usage * PAGE_SIZE;
1211                 break;
1212         default:
1213                 ret = -EINVAL;
1214                 break;
1215         }
1216
1217         __pipe_unlock(pipe);
1218         return ret;
1219 }
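/*
 * Illustrative userspace use of the two commands handled above (assumes the
 * glibc fcntl() wrapper; the F_*PIPE_SZ constants are Linux-specific):
 *
 *	long cur = fcntl(pfd, F_GETPIPE_SZ);
 *	long got = fcntl(pfd, F_SETPIPE_SZ, 1 << 20);	(request 1 MiB)
 *
 * The returned value is the resulting capacity in bytes, which may exceed
 * the request because the size is rounded up to a power-of-two number of
 * pages.
 */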
1220
1221 static const struct super_operations pipefs_ops = {
1222         .destroy_inode = free_inode_nonrcu,
1223         .statfs = simple_statfs,
1224 };
1225
1226 /*
1227  * pipefs should _never_ be mounted by userland - too much of a security hassle,
1228  * no real gain from having the whole whorehouse mounted. So we don't need
1229  * any operations on the root directory. However, we need a non-trivial
1230  * d_name - pipe: will go nicely and kill the special-casing in procfs.
1231  */
1232
1233 static int pipefs_init_fs_context(struct fs_context *fc)
1234 {
1235         struct pseudo_fs_context *ctx = init_pseudo(fc, PIPEFS_MAGIC);
1236         if (!ctx)
1237                 return -ENOMEM;
1238         ctx->ops = &pipefs_ops;
1239         ctx->dops = &pipefs_dentry_operations;
1240         return 0;
1241 }
1242
1243 static struct file_system_type pipe_fs_type = {
1244         .name           = "pipefs",
1245         .init_fs_context = pipefs_init_fs_context,
1246         .kill_sb        = kill_anon_super,
1247 };
1248
1249 static int __init init_pipe_fs(void)
1250 {
1251         int err = register_filesystem(&pipe_fs_type);
1252
1253         if (!err) {
1254                 pipe_mnt = kern_mount(&pipe_fs_type);
1255                 if (IS_ERR(pipe_mnt)) {
1256                         err = PTR_ERR(pipe_mnt);
1257                         unregister_filesystem(&pipe_fs_type);
1258                 }
1259         }
1260         return err;
1261 }
1262
1263 fs_initcall(init_pipe_fs);