eventfd: use wait_event_interruptible_locked_irq() helper
authorWen Yang <wenyang.linux@foxmail.com>
Wed, 5 Apr 2023 19:20:02 +0000 (03:20 +0800)
committerChristian Brauner <brauner@kernel.org>
Thu, 6 Apr 2023 08:01:50 +0000 (10:01 +0200)
wait_event_interruptible_locked_irq() was introduced by commit 22c43c81a51e
("wait_event_interruptible_locked() interface"), but older code such as
eventfd_{write,read} still carries an open-coded implementation of the same
wait loop. Inspired by commit 8120a8aadb20
("fs/timerfd.c: make use of wait_event_interruptible_locked_irq()"), this
patch replaces the open-coded loops with a single macro call each.

No functional change intended.
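
For reference, a rough, illustrative sketch of the pattern the helper stands
in for is below. The macro name and body are an approximation written for
this description, not the actual <linux/wait.h> definition; they mirror the
open-coded loop being removed from eventfd_read():

/*
 * Sketch only: the caller holds wq.lock via spin_lock_irq().  Sleep in
 * TASK_INTERRUPTIBLE until cond is true, dropping wq.lock around
 * schedule(), and return 0 on success or -ERESTARTSYS on a signal.
 */
#define sketch_wait_event_interruptible_locked_irq(wq, cond)		\
({									\
	int __ret = 0;							\
	DECLARE_WAITQUEUE(__wait, current);				\
									\
	__add_wait_queue(&(wq), &__wait);				\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&(wq).lock);				\
		schedule();						\
		spin_lock_irq(&(wq).lock);				\
	}								\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})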

Signed-off-by: Wen Yang <wenyang.linux@foxmail.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dylan Yudaken <dylany@fb.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Fu Wei <wefu@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Michal Nazarewicz <m.nazarewicz@samsung.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Message-Id: <tencent_16F9553E8354D950D704214D6EA407315F0A@qq.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/eventfd.c

index 249ca6c..95850a1 100644
@@ -228,7 +228,6 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
        struct file *file = iocb->ki_filp;
        struct eventfd_ctx *ctx = file->private_data;
        __u64 ucnt = 0;
-       DECLARE_WAITQUEUE(wait, current);
 
        if (iov_iter_count(to) < sizeof(ucnt))
                return -EINVAL;
@@ -239,23 +238,11 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
                        spin_unlock_irq(&ctx->wqh.lock);
                        return -EAGAIN;
                }
-               __add_wait_queue(&ctx->wqh, &wait);
-               for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (ctx->count)
-                               break;
-                       if (signal_pending(current)) {
-                               __remove_wait_queue(&ctx->wqh, &wait);
-                               __set_current_state(TASK_RUNNING);
-                               spin_unlock_irq(&ctx->wqh.lock);
-                               return -ERESTARTSYS;
-                       }
+
+               if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
                        spin_unlock_irq(&ctx->wqh.lock);
-                       schedule();
-                       spin_lock_irq(&ctx->wqh.lock);
+                       return -ERESTARTSYS;
                }
-               __remove_wait_queue(&ctx->wqh, &wait);
-               __set_current_state(TASK_RUNNING);
        }
        eventfd_ctx_do_read(ctx, &ucnt);
        current->in_eventfd = 1;
@@ -275,7 +262,6 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
        struct eventfd_ctx *ctx = file->private_data;
        ssize_t res;
        __u64 ucnt;
-       DECLARE_WAITQUEUE(wait, current);
 
        if (count < sizeof(ucnt))
                return -EINVAL;
@@ -288,23 +274,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
        if (ULLONG_MAX - ctx->count > ucnt)
                res = sizeof(ucnt);
        else if (!(file->f_flags & O_NONBLOCK)) {
-               __add_wait_queue(&ctx->wqh, &wait);
-               for (res = 0;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (ULLONG_MAX - ctx->count > ucnt) {
-                               res = sizeof(ucnt);
-                               break;
-                       }
-                       if (signal_pending(current)) {
-                               res = -ERESTARTSYS;
-                               break;
-                       }
-                       spin_unlock_irq(&ctx->wqh.lock);
-                       schedule();
-                       spin_lock_irq(&ctx->wqh.lock);
-               }
-               __remove_wait_queue(&ctx->wqh, &wait);
-               __set_current_state(TASK_RUNNING);
+               res = wait_event_interruptible_locked_irq(ctx->wqh,
+                               ULLONG_MAX - ctx->count > ucnt);
+               if (!res)
+                       res = sizeof(ucnt);
        }
        if (likely(res > 0)) {
                ctx->count += ucnt;