diff --git a/fs/pipe.c b/fs/pipe.c
index 6d4342b..e08f0fe 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -252,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
         */
        was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
        for (;;) {
-               unsigned int head = pipe->head;
+               /* Read ->head with a barrier vs post_one_notification() */
+               unsigned int head = smp_load_acquire(&pipe->head);
                unsigned int tail = pipe->tail;
                unsigned int mask = pipe->ring_size - 1;
 
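The smp_load_acquire() above pairs with a release store of ->head on the
notification-posting side; without it, a reader could observe the advanced
head before the slot it covers has been filled in. A minimal sketch of the
producer half, modelled on post_one_notification() in kernel/watch_queue.c
(abridged, not the verbatim kernel code):

        /* Fill the slot first, then publish it: the release store orders
         * the buffer writes before the head update, pairing with the
         * smp_load_acquire() in pipe_read() above. */
        buf = &pipe->bufs[head & mask];
        buf->page = page;
        buf->ops = &watch_queue_pipe_buf_ops;
        buf->offset = offset;
        buf->len = len;
        smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */
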
@@ -651,7 +652,7 @@ pipe_poll(struct file *filp, poll_table *wait)
        unsigned int head, tail;
 
        /* Epoll has some historical nasty semantics, this enables them */
-       pipe->poll_usage = 1;
+       WRITE_ONCE(pipe->poll_usage, true);
 
        /*
         * Reading pipe state only -- no need for acquiring the semaphore.
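pipe_poll() publishes poll_usage locklessly (no pipe->mutex, as the comment
above notes), so the store is annotated with WRITE_ONCE(); the matching load
on the consumer side wants the same treatment. A hedged sketch of that
pairing, assuming the reader sits in pipe_write()'s wakeup logic (not the
exact kernel lines):

        /* Consumer side (sketch): annotate the lockless load as well,
         * keeping the data race benign and visible to tooling like KCSAN. */
        if (was_empty || READ_ONCE(pipe->poll_usage))
                wake_up_interruptible_sync_poll(&pipe->rd_wait,
                                                EPOLLIN | EPOLLRDNORM);
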
@@ -830,10 +831,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
        int i;
 
 #ifdef CONFIG_WATCH_QUEUE
-       if (pipe->watch_queue) {
+       if (pipe->watch_queue)
                watch_queue_clear(pipe->watch_queue);
-               put_watch_queue(pipe->watch_queue);
-       }
 #endif
 
        (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
@@ -843,6 +842,10 @@ void free_pipe_info(struct pipe_inode_info *pipe)
                if (buf->ops)
                        pipe_buf_release(pipe, buf);
        }
+#ifdef CONFIG_WATCH_QUEUE
+       if (pipe->watch_queue)
+               put_watch_queue(pipe->watch_queue);
+#endif
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
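Taken together, this hunk and the one above it move put_watch_queue() from
before the buffer-release loop to after it: pipe_buf_release() on a
notification buffer can still reach into the watch_queue, so the final
reference must be dropped last. The resulting teardown order, condensed
into a sketch (not the full free_pipe_info()):

        /* 1. Detach the watch queue so nothing new can be posted. */
        watch_queue_clear(pipe->watch_queue);
        /* 2. Release every occupied slot; notification buffers may
         *    still dereference the watch_queue at this point. */
        for (i = 0; i < pipe->ring_size; i++)
                if (pipe->bufs[i].ops)
                        pipe_buf_release(pipe, &pipe->bufs[i]);
        /* 3. Only now drop what may be the last reference. */
        put_watch_queue(pipe->watch_queue);
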
@@ -1241,30 +1244,33 @@ unsigned int round_pipe_size(unsigned long size)
 
 /*
  * Resize the pipe ring to a number of slots.
+ *
+ * Note the pipe can be reduced in capacity, but only if the current
+ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
+ * returned instead.
  */
 int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
 {
        struct pipe_buffer *bufs;
        unsigned int head, tail, mask, n;
 
-       /*
-        * We can shrink the pipe, if arg is greater than the ring occupancy.
-        * Since we don't expect a lot of shrink+grow operations, just free and
-        * allocate again like we would do for growing.  If the pipe currently
-        * contains more buffers than arg, then return busy.
-        */
-       mask = pipe->ring_size - 1;
-       head = pipe->head;
-       tail = pipe->tail;
-       n = pipe_occupancy(pipe->head, pipe->tail);
-       if (nr_slots < n)
-               return -EBUSY;
-
        bufs = kcalloc(nr_slots, sizeof(*bufs),
                       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs))
                return -ENOMEM;
 
+       spin_lock_irq(&pipe->rd_wait.lock);
+       mask = pipe->ring_size - 1;
+       head = pipe->head;
+       tail = pipe->tail;
+
+       n = pipe_occupancy(head, tail);
+       if (nr_slots < n) {
+               spin_unlock_irq(&pipe->rd_wait.lock);
+               kfree(bufs);
+               return -EBUSY;
+       }
+
        /*
         * The pipe array wraps around, so just start the new one at zero
         * and adjust the indices.
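The occupancy check moves inside pipe->rd_wait.lock because pipe->mutex
alone does not exclude the other writer of this ring:
post_one_notification() in kernel/watch_queue.c inserts notification
messages under that spinlock and never takes the mutex. The sleeping
kcalloc() stays outside the critical section, and head/tail/mask are
re-read once the lock is held. A sketch of the racing poster (abridged):

        /* Poster side (sketch): serialized only by rd_wait.lock, so a
         * resize that swapped pipe->bufs without holding this lock could
         * leave the poster writing into a freed ring. */
        spin_lock_irq(&pipe->rd_wait.lock);
        head = pipe->head;
        buf = &pipe->bufs[head & (pipe->ring_size - 1)];
        /* ... fill the slot and advance pipe->head ... */
        spin_unlock_irq(&pipe->rd_wait.lock);
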
@@ -1296,6 +1302,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
        pipe->tail = tail;
        pipe->head = head;
 
+       spin_unlock_irq(&pipe->rd_wait.lock);
+
        /* This might have made more room for writers */
        wake_up_interruptible(&pipe->wr_wait);
        return 0;
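
From userspace, the shrink semantics spelled out in the new comment are
visible through fcntl(F_SETPIPE_SZ). A hypothetical stand-alone demo (not
part of the patch; assumes Linux with page-sized pipe slots):

        #define _GNU_SOURCE
        #include <errno.h>
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                int fds[2];
                char buf[8192];

                if (pipe(fds))
                        return 1;
                memset(buf, 'x', sizeof(buf));
                /* Occupy two page-sized ring slots... */
                if (write(fds[1], buf, sizeof(buf)) != sizeof(buf))
                        return 1;
                /* ...then ask for a one-slot ring: pipe_resize_ring()
                 * refuses rather than discard data. */
                if (fcntl(fds[1], F_SETPIPE_SZ, 4096) < 0)
                        printf("shrink refused: %s\n", strerror(errno));
                return 0;
        }

Expected output on such a system: "shrink refused: Device or resource busy"
(EBUSY), since two slots are occupied and only one was requested.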