Merge tag 'for-6.4/io_uring-2023-05-07' of git://git.kernel.dk/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 7 May 2023 17:00:09 +0000 (10:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 7 May 2023 17:00:09 +0000 (10:00 -0700)
Pull more io_uring updates from Jens Axboe:
 "Nothing major in here, just two different parts:

   - A small series from Breno that enables passing the full SQE down
     for ->uring_cmd().

     This is a prerequisite for enabling full network socket operations.
     It was queued up a bit late because of some stylistic concerns
     that have since been resolved; it would be nice to have this in
     6.4-rc1 so the dependent work is easier to handle for 6.5 (a
     short sketch of the new driver-side pattern follows this
     message).

   - Fix for the huge page coalescing, which was a regression introduced
     in the 6.3 kernel release (Tobias)"
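
For reference, a minimal sketch of the driver-side pattern the SQE-passing
change enables. The driver name, command layout and handler below are
hypothetical; only io_uring_sqe_cmd(), IO_URING_F_SQE128 and the READ_ONCE()
copies mirror the ublk conversion in the diff further down.

    #include <linux/types.h>
    #include <linux/io_uring.h>

    /* Hypothetical 32-byte driver-private payload carried in the SQE cmd area. */
    struct mydrv_cmd {
            __u16   q_id;
            __u16   tag;
            __u32   flags;
            __u64   addr;
            __u64   len;
            __u64   result;
    };

    static int mydrv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
    {
            const struct mydrv_cmd *src;
            struct mydrv_cmd c;

            /* 32 bytes do not fit the 16-byte cmd area of a normal SQE */
            if (!(issue_flags & IO_URING_F_SQE128))
                    return -EINVAL;

            /*
             * io_uring_sqe_cmd() points at the command area of the full SQE
             * that is now passed down; copy the fields once so an async
             * retry never re-reads userspace-visible SQE memory.
             */
            src = io_uring_sqe_cmd(cmd->sqe);
            c.q_id = READ_ONCE(src->q_id);
            c.tag = READ_ONCE(src->tag);
            c.addr = READ_ONCE(src->addr);
            c.len = READ_ONCE(src->len);

            /* ... queue up the work described by 'c' ... */
            return -EIOCBQUEUED;
    }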

* tag 'for-6.4/io_uring-2023-05-07' of git://git.kernel.dk/linux:
  io_uring: Remove unnecessary BUILD_BUG_ON
  io_uring: Pass whole sqe to commands
  io_uring: Create a helper to return the SQE size
  io_uring/rsrc: check for nonconsecutive pages

drivers/block/ublk_drv.c
io_uring/rsrc.c

diff --combined drivers/block/ublk_drv.c
@@@ -53,8 -53,7 +53,8 @@@
                | UBLK_F_NEED_GET_DATA \
                | UBLK_F_USER_RECOVERY \
                | UBLK_F_USER_RECOVERY_REISSUE \
 -              | UBLK_F_UNPRIVILEGED_DEV)
 +              | UBLK_F_UNPRIVILEGED_DEV \
 +              | UBLK_F_CMD_IOCTL_ENCODE)
  
  /* All UBLK_PARAM_TYPE_* should be included here */
  #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
@@@ -129,7 -128,6 +129,7 @@@ struct ublk_queue 
        unsigned long io_addr;  /* mapped vm address */
        unsigned int max_io_sz;
        bool force_abort;
 +      bool timeout;
        unsigned short nr_io_ready;     /* how many ios setup */
        struct ublk_device *dev;
        struct ublk_io ios[];
@@@ -248,7 -246,7 +248,7 @@@ static int ublk_validate_params(const s
        if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
                const struct ublk_param_basic *p = &ub->params.basic;
  
 -              if (p->logical_bs_shift > PAGE_SHIFT)
 +              if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
                        return -EINVAL;
  
                if (p->logical_bs_shift > p->physical_bs_shift)
@@@ -300,7 -298,9 +300,7 @@@ static inline bool ublk_can_use_task_wo
  
  static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
  {
 -      if (ubq->flags & UBLK_F_NEED_GET_DATA)
 -              return true;
 -      return false;
 +      return ubq->flags & UBLK_F_NEED_GET_DATA;
  }
  
  static struct ublk_device *ublk_get_device(struct ublk_device *ub)
@@@ -349,19 -349,25 +349,19 @@@ static inline int ublk_queue_cmd_buf_si
  static inline bool ublk_queue_can_use_recovery_reissue(
                struct ublk_queue *ubq)
  {
 -      if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
 -                      (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
 -              return true;
 -      return false;
 +      return (ubq->flags & UBLK_F_USER_RECOVERY) &&
 +                      (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
  }
  
  static inline bool ublk_queue_can_use_recovery(
                struct ublk_queue *ubq)
  {
 -      if (ubq->flags & UBLK_F_USER_RECOVERY)
 -              return true;
 -      return false;
 +      return ubq->flags & UBLK_F_USER_RECOVERY;
  }
  
  static inline bool ublk_can_use_recovery(struct ublk_device *ub)
  {
 -      if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
 -              return true;
 -      return false;
 +      return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
  }
  
  static void ublk_free_disk(struct gendisk *disk)
@@@ -422,9 -428,10 +422,9 @@@ static const struct block_device_operat
  #define UBLK_MAX_PIN_PAGES    32
  
  struct ublk_map_data {
 -      const struct ublk_queue *ubq;
        const struct request *rq;
 -      const struct ublk_io *io;
 -      unsigned max_bytes;
 +      unsigned long   ubuf;
 +      unsigned int    len;
  };
  
  struct ublk_io_iter {
@@@ -481,17 -488,18 +481,17 @@@ static inline unsigned ublk_copy_io_pag
        return done;
  }
  
 -static inline int ublk_copy_user_pages(struct ublk_map_data *data,
 -              bool to_vm)
 +static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
  {
        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
 -      const unsigned long start_vm = data->io->addr;
 +      const unsigned long start_vm = data->ubuf;
        unsigned int done = 0;
        struct ublk_io_iter iter = {
                .pg_off = start_vm & (PAGE_SIZE - 1),
                .bio    = data->rq->bio,
                .iter   = data->rq->bio->bi_iter,
        };
 -      const unsigned int nr_pages = round_up(data->max_bytes +
 +      const unsigned int nr_pages = round_up(data->len +
                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
  
        while (done < nr_pages) {
                                iter.pages);
                if (iter.nr_pages <= 0)
                        return done == 0 ? iter.nr_pages : done;
 -              len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
 +              len = ublk_copy_io_pages(&iter, data->len, to_vm);
                for (i = 0; i < iter.nr_pages; i++) {
                        if (to_vm)
                                set_page_dirty(iter.pages[i]);
                        put_page(iter.pages[i]);
                }
 -              data->max_bytes -= len;
 +              data->len -= len;
                done += iter.nr_pages;
        }
  
        return done;
  }
  
 +static inline bool ublk_need_map_req(const struct request *req)
 +{
 +      return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
 +}
 +
 +static inline bool ublk_need_unmap_req(const struct request *req)
 +{
 +      return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
 +}
 +
  static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
                struct ublk_io *io)
  {
        const unsigned int rq_bytes = blk_rq_bytes(req);
 +
        /*
         * no zero copy, we delay copy WRITE request data into ublksrv
         * context and the big benefit is that pinning pages in current
         * context is pretty fast, see ublk_pin_user_pages
         */
 -      if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
 -              return rq_bytes;
 -
 -      if (ublk_rq_has_data(req)) {
 +      if (ublk_need_map_req(req)) {
                struct ublk_map_data data = {
 -                      .ubq    =       ubq,
                        .rq     =       req,
 -                      .io     =       io,
 -                      .max_bytes =    rq_bytes,
 +                      .ubuf   =       io->addr,
 +                      .len    =       rq_bytes,
                };
  
                ublk_copy_user_pages(&data, true);
  
 -              return rq_bytes - data.max_bytes;
 +              return rq_bytes - data.len;
        }
        return rq_bytes;
  }
@@@ -557,18 -558,19 +557,18 @@@ static int ublk_unmap_io(const struct u
  {
        const unsigned int rq_bytes = blk_rq_bytes(req);
  
 -      if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
 +      if (ublk_need_unmap_req(req)) {
                struct ublk_map_data data = {
 -                      .ubq    =       ubq,
                        .rq     =       req,
 -                      .io     =       io,
 -                      .max_bytes =    io->res,
 +                      .ubuf   =       io->addr,
 +                      .len    =       io->res,
                };
  
                WARN_ON_ONCE(io->res > rq_bytes);
  
                ublk_copy_user_pages(&data, false);
  
 -              return io->res - data.max_bytes;
 +              return io->res - data.len;
        }
        return rq_bytes;
  }
@@@ -653,15 -655,14 +653,15 @@@ static void ublk_complete_rq(struct req
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
        struct ublk_io *io = &ubq->ios[req->tag];
        unsigned int unmapped_bytes;
 +      blk_status_t res = BLK_STS_OK;
  
        /* failed read IO if nothing is read */
        if (!io->res && req_op(req) == REQ_OP_READ)
                io->res = -EIO;
  
        if (io->res < 0) {
 -              blk_mq_end_request(req, errno_to_blk_status(io->res));
 -              return;
 +              res = errno_to_blk_status(io->res);
 +              goto exit;
        }
  
        /*
         *
         * Both the two needn't unmap.
         */
 -      if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
 -              blk_mq_end_request(req, BLK_STS_OK);
 -              return;
 -      }
 +      if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
 +              goto exit;
  
        /* for READ request, writing data in iod->addr to rq buffers */
        unmapped_bytes = ublk_unmap_io(ubq, req, io);
                blk_mq_requeue_request(req, true);
        else
                __blk_mq_end_request(req, BLK_STS_OK);
 +
 +      return;
 +exit:
 +      blk_mq_end_request(req, res);
  }
  
  /*
@@@ -772,7 -771,9 +772,7 @@@ static inline void __ublk_rq_task_work(
                return;
        }
  
 -      if (ublk_need_get_data(ubq) &&
 -                      (req_op(req) == REQ_OP_WRITE ||
 -                      req_op(req) == REQ_OP_FLUSH)) {
 +      if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
                /*
                 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
                 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
@@@ -899,22 -900,6 +899,22 @@@ static void ublk_queue_cmd(struct ublk_
        }
  }
  
 +static enum blk_eh_timer_return ublk_timeout(struct request *rq)
 +{
 +      struct ublk_queue *ubq = rq->mq_hctx->driver_data;
 +
 +      if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
 +              if (!ubq->timeout) {
 +                      send_sig(SIGKILL, ubq->ubq_daemon, 0);
 +                      ubq->timeout = true;
 +              }
 +
 +              return BLK_EH_DONE;
 +      }
 +
 +      return BLK_EH_RESET_TIMER;
 +}
 +
  static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
  {
@@@ -974,7 -959,6 +974,7 @@@ static const struct blk_mq_ops ublk_mq_
        .queue_rq       = ublk_queue_rq,
        .init_hctx      = ublk_init_hctx,
        .init_request   = ublk_init_rq,
 +      .timeout        = ublk_timeout,
  };
  
  static int ublk_ch_open(struct inode *inode, struct file *filp)
@@@ -1035,7 -1019,7 +1035,7 @@@ static int ublk_ch_mmap(struct file *fi
  }
  
  static void ublk_commit_completion(struct ublk_device *ub,
-               struct ublksrv_io_cmd *ub_cmd)
+               const struct ublksrv_io_cmd *ub_cmd)
  {
        u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
        struct ublk_queue *ubq = ublk_get_queue(ub, qid);
@@@ -1277,23 -1261,9 +1277,23 @@@ static void ublk_handle_need_get_data(s
        ublk_queue_cmd(ubq, req);
  }
  
 -static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 +static inline int ublk_check_cmd_op(u32 cmd_op)
 +{
 +      u32 ioc_type = _IOC_TYPE(cmd_op);
 +
 +      if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
 +              return -EOPNOTSUPP;
 +
 +      if (ioc_type != 'u' && ioc_type != 0)
 +              return -EOPNOTSUPP;
 +
 +      return 0;
 +}
 +
 +static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 +                             unsigned int issue_flags,
-                              struct ublksrv_io_cmd *ub_cmd)
++                             const struct ublksrv_io_cmd *ub_cmd)
  {
 -      const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe);
        struct ublk_device *ub = cmd->file->private_data;
        struct ublk_queue *ubq;
        struct ublk_io *io;
          * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
         */
        if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
 -                      ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
 +                      ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
 +              goto out;
 +
 +      ret = ublk_check_cmd_op(cmd_op);
 +      if (ret)
                goto out;
  
 -      switch (cmd_op) {
 +      ret = -EINVAL;
 +      switch (_IOC_NR(cmd_op)) {
        case UBLK_IO_FETCH_REQ:
                /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
                if (ublk_queue_ready(ubq)) {
        return -EIOCBQUEUED;
  }
  
-       struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
-       struct ublksrv_io_cmd ub_cmd;
 +static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 +{
-       ub_cmd.q_id = READ_ONCE(ub_src->q_id);
-       ub_cmd.tag = READ_ONCE(ub_src->tag);
-       ub_cmd.result = READ_ONCE(ub_src->result);
-       ub_cmd.addr = READ_ONCE(ub_src->addr);
 +      /*
 +       * Not necessary for async retry, but let's keep it simple and always
 +       * copy the values to avoid any potential reuse.
 +       */
++      const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
++      const struct ublksrv_io_cmd ub_cmd = {
++              .q_id = READ_ONCE(ub_src->q_id),
++              .tag = READ_ONCE(ub_src->tag),
++              .result = READ_ONCE(ub_src->result),
++              .addr = READ_ONCE(ub_src->addr)
++      };
 +
 +      return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
 +}
 +
  static const struct file_operations ublk_ch_fops = {
        .owner = THIS_MODULE,
        .open = ublk_ch_open,
@@@ -1619,7 -1567,7 +1619,7 @@@ static struct ublk_device *ublk_get_dev
  
  static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        int ublksrv_pid = (int)header->data[0];
        struct gendisk *disk;
        int ret = -EINVAL;
@@@ -1682,7 -1630,7 +1682,7 @@@ out_unlock
  static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        void __user *argp = (void __user *)(unsigned long)header->addr;
        cpumask_var_t cpumask;
        unsigned long queue;
@@@ -1733,7 -1681,7 +1733,7 @@@ static inline void ublk_dump_dev_info(s
  
  static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        void __user *argp = (void __user *)(unsigned long)header->addr;
        struct ublksrv_ctrl_dev_info info;
        struct ublk_device *ub;
        else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
                return -EPERM;
  
 +      /*
 +       * unprivileged device can't be trusted, but RECOVERY and
 +       * RECOVERY_REISSUE still may hang error handling, so can't
 +       * support recovery features for unprivileged ublk now
 +       *
 +       * TODO: provide forward progress for RECOVERY handler, so that
 +       * unprivileged device can benefit from it
 +       */
 +      if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
 +              info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
 +                              UBLK_F_USER_RECOVERY);
 +
        /* the created device is always owned by current user */
        ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
  
        if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
                ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
  
 +      ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
 +
        /* We are not ready to support zero copy */
        ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
  
@@@ -1910,7 -1844,7 +1910,7 @@@ static int ublk_ctrl_del_dev(struct ubl
  
  static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
  
        pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
                        __func__, cmd->cmd_op, header->dev_id, header->queue_id,
@@@ -1929,7 -1863,7 +1929,7 @@@ static int ublk_ctrl_stop_dev(struct ub
  static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        void __user *argp = (void __user *)(unsigned long)header->addr;
  
        if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
@@@ -1960,7 -1894,7 +1960,7 @@@ static void ublk_ctrl_fill_params_devt(
  static int ublk_ctrl_get_params(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        void __user *argp = (void __user *)(unsigned long)header->addr;
        struct ublk_params_header ph;
        int ret;
  static int ublk_ctrl_set_params(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        void __user *argp = (void __user *)(unsigned long)header->addr;
        struct ublk_params_header ph;
        int ret = -EFAULT;
                /* clear all we don't support yet */
                ub->params.types &= UBLK_PARAM_TYPE_ALL;
                ret = ublk_validate_params(ub);
 +              if (ret)
 +                      ub->params.types = 0;
        }
        mutex_unlock(&ub->mutex);
  
@@@ -2037,7 -1969,6 +2037,7 @@@ static void ublk_queue_reinit(struct ub
        put_task_struct(ubq->ubq_daemon);
        /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
        ubq->ubq_daemon = NULL;
 +      ubq->timeout = false;
  
        for (i = 0; i < ubq->q_depth; i++) {
                struct ublk_io *io = &ubq->ios[i];
  static int ublk_ctrl_start_recovery(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        int ret = -EINVAL;
        int i;
  
  static int ublk_ctrl_end_recovery(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        int ublksrv_pid = (int)header->data[0];
        int ret = -EINVAL;
  
@@@ -2161,7 -2092,7 +2161,7 @@@ exit
  static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
                struct io_uring_cmd *cmd)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
        bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
        void __user *argp = (void __user *)(unsigned long)header->addr;
        char *dev_path = NULL;
                 * know if the specified device is created as unprivileged
                 * mode.
                 */
 -              if (cmd->cmd_op != UBLK_CMD_GET_DEV_INFO2)
 +              if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
                        return 0;
        }
  
        dev_path[header->dev_path_len] = 0;
  
        ret = -EINVAL;
 -      switch (cmd->cmd_op) {
 +      switch (_IOC_NR(cmd->cmd_op)) {
        case UBLK_CMD_GET_DEV_INFO:
        case UBLK_CMD_GET_DEV_INFO2:
        case UBLK_CMD_GET_QUEUE_AFFINITY:
@@@ -2240,9 -2171,8 +2240,9 @@@ exit
  static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
                unsigned int issue_flags)
  {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
        struct ublk_device *ub = NULL;
 +      u32 cmd_op = cmd->cmd_op;
        int ret = -EINVAL;
  
        if (issue_flags & IO_URING_F_NONBLOCK)
        if (!(issue_flags & IO_URING_F_SQE128))
                goto out;
  
 -      if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
 +      ret = ublk_check_cmd_op(cmd_op);
 +      if (ret)
 +              goto out;
 +
 +      if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
                ret = -ENODEV;
                ub = ublk_get_device_from_id(header->dev_id);
                if (!ub)
                        goto out;
  
                ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
 -      } else {
 -              /* ADD_DEV permission check is done in command handler */
 -              ret = 0;
 +              if (ret)
 +                      goto put_dev;
        }
  
 -      if (ret)
 -              goto put_dev;
 -
 -      switch (cmd->cmd_op) {
 +      switch (_IOC_NR(cmd_op)) {
        case UBLK_CMD_START_DEV:
                ret = ublk_ctrl_start_dev(ub, cmd);
                break;
@@@ -2342,7 -2272,7 +2342,7 @@@ static int __init ublk_init(void
        if (ret)
                goto unregister_mis;
  
 -      ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
 +      ublk_chr_class = class_create("ublk-char");
        if (IS_ERR(ublk_chr_class)) {
                ret = PTR_ERR(ublk_chr_class);
                goto free_chrdev_region;
diff --combined io_uring/rsrc.c
@@@ -760,7 -760,8 +760,7 @@@ int __io_scm_file_account(struct io_rin
  
                UNIXCB(skb).fp = fpl;
                skb->sk = sk;
 -              skb->scm_io_uring = 1;
 -              skb->destructor = unix_destruct_scm;
 +              skb->destructor = io_uring_destruct_scm;
                refcount_add(skb->truesize, &sk->sk_wmem_alloc);
        }
  
@@@ -1116,7 -1117,12 +1116,12 @@@ static int io_sqe_buffer_register(struc
        if (nr_pages > 1) {
                folio = page_folio(pages[0]);
                for (i = 1; i < nr_pages; i++) {
-                       if (page_folio(pages[i]) != folio) {
+                       /*
+                        * Pages must be consecutive and on the same folio for
+                        * this to work
+                        */
+                       if (page_folio(pages[i]) != folio ||
+                           pages[i] != pages[i - 1] + 1) {
                                folio = NULL;
                                break;
                        }
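
As a standalone restatement (a sketch only, not the in-tree function) of the
tightened coalescing rule in the rsrc.c hunk above: pinned pages may only be
folded into a single contiguous region when they share a folio and their
struct page pointers are consecutive; same-folio alone is not enough, e.g.
when the same huge page is mapped more than once into the registered buffer.

    #include <linux/mm.h>

    /* True only if the pinned pages form one physically contiguous run. */
    static bool pages_are_coalescable(struct page **pages, int nr_pages)
    {
            struct folio *folio = page_folio(pages[0]);
            int i;

            for (i = 1; i < nr_pages; i++) {
                    if (page_folio(pages[i]) != folio ||
                        pages[i] != pages[i - 1] + 1)
                            return false;
            }
            return true;
    }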