io_uring: use fget/fput consistently
[platform/kernel/linux-starfive.git] / io_uring / io_uring.c
index bb8880d..ea772a0 100644 (file)
@@ -1339,7 +1339,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
                nr_tw = nr_tw_prev + 1;
                /* Large enough to fail the nr_wait comparison below */
                if (!(flags & IOU_F_TWQ_LAZY_WAKE))
-                       nr_tw = -1U;
+                       nr_tw = INT_MAX;
 
                req->nr_tw = nr_tw;
                req->io_task_work.node.next = first;
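
The sentinel choice above matters because req->nr_tw feeds the nr_tw_prev + 1 computation on the next queueing: the magic value has to survive an increment. With the old -1U, a lazy add following a non-lazy one computed nr_tw_prev + 1 == 0, which is small enough to pass the nr_wait comparison and suppress a wakeup; INT_MAX stays enormous even after + 1. A standalone demo of the unsigned wrap being avoided (plain C, nothing io_uring-specific):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned nr_tw_prev = -1U;       /* old sentinel left by a non-lazy add */
            unsigned nr_tw = nr_tw_prev + 1; /* wraps to 0: reads as "nothing queued" */

            printf("-1U + 1     = %u\n", nr_tw);                 /* 0 */
            printf("INT_MAX + 1 = %u\n", (unsigned)INT_MAX + 1); /* 2147483648, still huge */
            return 0;
    }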
@@ -1891,7 +1891,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
                        io_req_complete_defer(req);
                else
                        io_req_complete_post(req, issue_flags);
-       } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
+
+               return 0;
+       }
+
+       if (ret != IOU_ISSUE_SKIP_COMPLETE)
                return ret;
 
        /* If the op doesn't have a file, we're not polling for it */
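
Beyond flattening the else-if ladder, this changes the success path: a request that completed inline (IOU_OK) now returns before reaching the IOPOLL bookkeeping at the bottom of the function, which is only meaningful for requests still in flight; only the IOU_ISSUE_SKIP_COMPLETE path falls through to io_iopoll_req_issued(). A toy model of the reordered returns (hypothetical names and error values):

    #include <stdio.h>

    enum { TOY_OK = 0, TOY_SKIP = -2, TOY_ERR = -5 };

    static int toy_issue(int ret)
    {
            if (ret == TOY_OK) {
                    puts("complete inline");
                    return 0;            /* early return: skip poll bookkeeping */
            }
            if (ret != TOY_SKIP)
                    return ret;          /* propagate hard errors */
            puts("track for IOPOLL");    /* only in-flight requests get here */
            return 0;
    }

    int main(void)
    {
            toy_issue(TOY_OK);
            toy_issue(TOY_SKIP);
            printf("error path: %d\n", toy_issue(TOY_ERR));
            return 0;
    }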
@@ -2626,8 +2630,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                __set_current_state(TASK_RUNNING);
                atomic_set(&ctx->cq_wait_nr, 0);
 
-               if (ret < 0)
-                       break;
                /*
                 * Run task_work after scheduling and before io_should_wake().
                 * If we got woken because of task_work being processed, run it
@@ -2637,6 +2639,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                if (!llist_empty(&ctx->work_llist))
                        io_run_local_work(ctx);
 
+               /*
+                * Non-local task_work will be run on exit to userspace, but
+                * if we're using DEFER_TASKRUN, then we could have waited
+                * with a timeout for a number of requests. If the timeout
+                * hits, we could have some requests ready to process. Ensure
+                * this break is _after_ we have run task_work, to avoid
+                * deferring running potentially pending requests until the
+                * next time we wait for events.
+                */
+               if (ret < 0)
+                       break;
+
                check_cq = READ_ONCE(ctx->check_cq);
                if (unlikely(check_cq)) {
                        /* let the caller flush overflows, retry */
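
The block comment carries the rationale; the shape to take away is "drain, then decide". A toy version of the loop (hypothetical names; the first wait "times out" to exercise the moved break):

    #include <stdio.h>

    int main(void)
    {
            int pending = 2;                        /* work queued while we slept */

            for (int iter = 0; ; iter++) {
                    int ret = (iter == 0) ? -1 : 1; /* schedule(); -1 = timeout */

                    while (pending)                 /* io_run_local_work() stand-in */
                            printf("completed request %d\n", pending--);

                    if (ret < 0)                    /* break only after draining */
                            break;
            }
            return 0;
    }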
@@ -3607,7 +3621,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                size_t, argsz)
 {
        struct io_ring_ctx *ctx;
-       struct fd f;
+       struct file *file;
        long ret;
 
        if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
@@ -3625,20 +3639,19 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
                        return -EINVAL;
                fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
-               f.file = tctx->registered_rings[fd];
-               f.flags = 0;
-               if (unlikely(!f.file))
+               file = tctx->registered_rings[fd];
+               if (unlikely(!file))
                        return -EBADF;
        } else {
-               f = fdget(fd);
-               if (unlikely(!f.file))
+               file = fget(fd);
+               if (unlikely(!file))
                        return -EBADF;
                ret = -EOPNOTSUPP;
-               if (unlikely(!io_is_uring_fops(f.file)))
+               if (unlikely(!io_is_uring_fops(file)))
                        goto out;
        }
 
-       ctx = f.file->private_data;
+       ctx = file->private_data;
        ret = -EBADFD;
        if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
                goto out;
@@ -3732,7 +3745,8 @@ iopoll_locked:
                }
        }
 out:
-       fdput(f);
+       if (!(flags & IORING_ENTER_REGISTERED_RING))
+               fput(file);
        return ret;
 }
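
The struct fd carrier packed a file pointer with a flags word that told fdput() whether a reference was actually held; the rewrite makes the ownership rule explicit instead: fget() takes a counted reference for a plain descriptor, while tctx->registered_rings[] lends a pointer that the registration itself keeps pinned, so only the former may be fput(). A compressed sketch of the rule both call sites now follow (kernel-context pseudocode, hypothetical helper names):

    struct file;                    /* opaque here; fget()/fput() as in the kernel */
    struct file *fget(unsigned int fd);
    void fput(struct file *file);

    static struct file *ring_file_get(struct file **registered, unsigned int idx,
                                      unsigned int max, bool use_registered)
    {
            if (use_registered)     /* borrowed: the table holds the long-lived ref */
                    return idx < max ? registered[idx] : NULL;
            return fget(idx);       /* counted: caller owes an fput() */
    }

    static void ring_file_put(struct file *file, bool use_registered)
    {
            if (!use_registered)
                    fput(file);     /* drop only the reference we took */
    }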
 
@@ -4573,7 +4587,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 {
        struct io_ring_ctx *ctx;
        long ret = -EBADF;
-       struct fd f;
+       struct file *file;
        bool use_registered_ring;
 
        use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
@@ -4592,27 +4606,27 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
                if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
                        return -EINVAL;
                fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
-               f.file = tctx->registered_rings[fd];
-               f.flags = 0;
-               if (unlikely(!f.file))
+               file = tctx->registered_rings[fd];
+               if (unlikely(!file))
                        return -EBADF;
        } else {
-               f = fdget(fd);
-               if (unlikely(!f.file))
+               file = fget(fd);
+               if (unlikely(!file))
                        return -EBADF;
                ret = -EOPNOTSUPP;
-               if (!io_is_uring_fops(f.file))
+               if (!io_is_uring_fops(file))
                        goto out_fput;
        }
 
-       ctx = f.file->private_data;
+       ctx = file->private_data;
 
        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
        mutex_unlock(&ctx->uring_lock);
        trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
 out_fput:
-       fdput(f);
+       if (!use_registered_ring)
+               fput(file);
        return ret;
 }
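
io_uring_register() now follows the identical discipline, with use_registered_ring captured up front so the out_fput label can tell a borrowed pointer from a counted one. From userspace, the borrowed path is selected by ORing IORING_REGISTER_USE_REGISTERED_RING into the opcode once the ring fd has been mapped to an index via IORING_REGISTER_RING_FDS. A hedged sketch against the raw syscall (index 0 assumed to be already registered; IORING_REGISTER_ENABLE_RINGS picked only as an example opcode):

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* address the ring by registered index, not by file descriptor */
    static int register_by_index(unsigned int ring_index)
    {
            return (int)syscall(__NR_io_uring_register, ring_index,
                                IORING_REGISTER_ENABLE_RINGS |
                                IORING_REGISTER_USE_REGISTERED_RING,
                                NULL, 0);
    }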