return res;
}
-static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
+static void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
- unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+ unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
req->cqe.flags |= io_put_kbuf(req, issue_flags);
}
- io_req_task_complete(req, locked);
+ io_req_task_complete(req, ts);
}
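
The hunk above converts the rw task-work callback from taking a bare "bool *locked" to taking a "struct io_tw_state *", so the state handed to task-work callbacks can grow without touching every callback signature again. Below is a minimal user-space sketch of that pattern only; it is not kernel code, and the tw_state/run_task_work names are illustrative, not io_uring APIs.

/*
 * Illustrative sketch: callbacks receive a pointer to a small state
 * struct instead of a bare "bool *locked", so more per-run state can
 * be added later without changing every callback signature.
 */
#include <stdbool.h>
#include <stdio.h>

struct tw_state {
	bool locked;	/* stands in for io_tw_state.locked */
};

typedef void (*tw_func)(void *data, struct tw_state *ts);

static void complete_cb(void *data, struct tw_state *ts)
{
	/* mirrors the issue_flags selection in io_req_rw_complete() above */
	unsigned int flags = ts->locked ? 0 : 1;

	printf("%s completed, flags=%u\n", (const char *)data, flags);
}

static void run_task_work(tw_func fn, void *data, bool locked)
{
	struct tw_state ts = { .locked = locked };

	fn(data, &ts);
}

int main(void)
{
	run_task_work(complete_cb, "req0", true);
	run_task_work(complete_cb, "req1", false);
	return 0;
}
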
static void io_complete_rw(struct kiocb *kiocb, long res)
return;
io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_rw_complete;
- io_req_task_work_add(req);
+ __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
rw->len = sqe_len;
}
- ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
+ ret = import_ubuf(ddir, buf, sqe_len, iter);
if (ret)
return ERR_PTR(ret);
return NULL;
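
For context on the import_single_range() to import_ubuf() switch above: both describe a single user buffer, but the old helper populated a one-entry iovec (s->fast_iov) first, while the new one takes the (buf, len) pair directly. A rough user-space analogy, not the kernel iov_iter API, is the difference between readv() with a one-element vector and a plain read():

/*
 * User-space analogy only (the kernel helpers build a struct iov_iter,
 * they do not read from a file descriptor): a single buffer can be
 * described via a one-element iovec, or passed directly as (buf, len).
 */
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	/* one-element vector, akin to the old import_single_range() shape */
	ssize_t n1 = readv(STDIN_FILENO, &iov, 1);

	/* direct (buf, len) pair, akin to the new import_ubuf() shape */
	ssize_t n2 = read(STDIN_FILENO, buf, sizeof(buf));

	printf("readv: %zd, read: %zd\n", n1, n2);
	return 0;
}
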
unsigned int issue_flags)
{
*iovec = __io_import_iovec(ddir, req, s, issue_flags);
- if (unlikely(IS_ERR(*iovec)))
+ if (IS_ERR(*iovec))
return PTR_ERR(*iovec);
iov_iter_save_state(&s->iter, &s->iter_state);
struct iovec iovec;
ssize_t nr;
- if (!iov_iter_is_bvec(iter)) {
+ if (iter_is_ubuf(iter)) {
+ iovec.iov_base = iter->ubuf + iter->iov_offset;
+ iovec.iov_len = iov_iter_count(iter);
+ } else if (!iov_iter_is_bvec(iter)) {
iovec = iov_iter_iovec(iter);
} else {
iovec.iov_base = u64_to_user_ptr(rw->addr);
io->free_iovec = iovec;
io->bytes_done = 0;
/* can only be fixed buffers, no need to do anything */
- if (iov_iter_is_bvec(iter))
+ if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
return;
if (!iovec) {
unsigned iov_off = 0;
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
struct io_rw_state *s, bool force)
{
- if (!force && !io_op_defs[req->opcode].prep_async)
+ if (!force && !io_cold_defs[req->opcode].prep_async)
return 0;
if (!req_has_async_data(req)) {
struct io_async_rw *iorw;