 		void __user		*buf;
 	};
 	int			msg_flags;
-	int			bgid;
 	size_t			len;
 	size_t			done_io;
 	unsigned int		flags;
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
 	req->rw.flags = READ_ONCE(sqe->rw_flags);
+	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 	return 0;
 }
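
The comment added to the read/write prep path is worth dwelling on: req->buf_index does double duty. For IORING_OP_READ_FIXED and IORING_OP_WRITE_FIXED it carries sqe->buf_index, an index into the table of registered buffers; for IOSQE_BUFFER_SELECT requests it is overwritten with sqe->buf_group at init time (see the io_init_req() hunk at the end). A minimal userspace sketch with liburing shows the fixed-buffer role; the helper name and the caller-supplied registration are illustrative, not part of this patch:

#include <liburing.h>

/* Illustrative helper: queue a fixed read. `idx` selects one of the
 * buffers previously registered with io_uring_register_buffers(); the
 * kernel picks it up via the req->buf_index read patched above.
 */
static void prep_fixed_read(struct io_uring *ring, int fd,
			    struct iovec *regbufs, int idx)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_read_fixed(sqe, fd, regbufs[idx].iov_base,
				 regbufs[idx].iov_len, 0, idx);
}
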
 }
 static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-				     int bgid, unsigned int issue_flags)
+				     unsigned int issue_flags)
 {
 	struct io_buffer *kbuf = req->kbuf;
 	struct io_ring_ctx *ctx = req->ctx;
 	io_ring_submit_lock(req->ctx, issue_flags);
-	bl = io_buffer_get_list(ctx, bgid);
+	bl = io_buffer_get_list(ctx, req->buf_index);
 	if (bl && !list_empty(&bl->buf_list)) {
 		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
 		list_del(&kbuf->list);
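
The list io_buffer_select() pops from is now keyed purely by req->buf_index; the group id is no longer threaded through as a parameter. For context, buffers land on that per-group list when the application provides them with IORING_OP_PROVIDE_BUFFERS. A sketch using liburing (the helper name and the synchronous wait are illustrative):

#include <liburing.h>

/* Illustrative helper: hand `nr` buffers of `buf_len` bytes each,
 * carved out of `base`, to group `bgid`. Buffer ids start at 0.
 * Returns cqe->res: the number of buffers added, or a negative error.
 */
static int provide_buffers(struct io_uring *ring, void *base,
			   int buf_len, int nr, int bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_provide_buffers(sqe, base, buf_len, nr, bgid, 0);
	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
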
 		return -EINVAL;
 	len = clen;
-	buf = io_buffer_select(req, &len, req->buf_index, issue_flags);
+	buf = io_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
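
This hunk sits in the compat import helper, which parses a 32-bit userspace iovec: the length is read into a signed compat type, so the negative check above fires before the value is widened into the native size_t len. For reference, the layout being parsed (from include/linux/compat.h, not part of this patch):

struct compat_iovec {
	compat_uptr_t	iov_base;	/* 32-bit user pointer */
	compat_size_t	iov_len;
};
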
 	len = iov[0].iov_len;
 	if (len < 0)
 		return -EINVAL;
-	buf = io_buffer_select(req, &len, req->buf_index, issue_flags);
+	buf = io_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
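
Both import helpers - the compat one above and this native one - get the same mechanical change: the group comes from req->buf_index instead of a parameter. From userspace, a vectored read with buffer selection passes exactly one iovec whose iov_len caps how much of the selected buffer may be used; iov_base is ignored and replaced with the picked buffer. A sketch (ring setup and the group id are assumed done elsewhere):

/* Illustrative: READV + IOSQE_BUFFER_SELECT takes a single iovec whose
 * iov_len is the size cap; the kernel fills in the real address.
 */
static void prep_readv_select(struct io_uring *ring, int fd,
			      unsigned short bgid, size_t max_len)
{
	static struct iovec iov;	/* must stay live until completion */
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	iov.iov_base = NULL;
	iov.iov_len = max_len;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
	sqe->buf_group = bgid;
}
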
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
 		if (req->flags & REQ_F_BUFFER_SELECT) {
-			buf = io_buffer_select(req, &sqe_len, req->buf_index,
-					       issue_flags);
+			buf = io_buffer_select(req, &sqe_len, issue_flags);
 			if (IS_ERR(buf))
 				return ERR_CAST(buf);
 			req->rw.len = sqe_len;
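
On the non-vectored READ/WRITE path, io_buffer_select() may shrink sqe_len to the size of the buffer it picked, which is why req->rw.len is rewritten afterwards. On the completion side the application learns which buffer was consumed from the CQE flags; roughly:

#include <liburing.h>

/* Illustrative: recover the buffer id a completed request consumed.
 * Returns -1 if no provided buffer was used.
 */
static int buffer_id_of(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return -1;
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}
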
 	sr->flags = READ_ONCE(sqe->addr2);
 	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
 		return -EINVAL;
-	sr->bgid = READ_ONCE(sqe->buf_group);
 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
 	if (sr->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
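
With sr->bgid gone, the send/recv prep handler no longer reads sqe->buf_group itself. The reason a single req-wide field can replace the per-opcode copy is visible in the uapi: buf_index and buf_group are two names for the same 16 bits of the SQE (from include/uapi/linux/io_uring.h):

	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
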
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		void __user *buf;
-		buf = io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
+		buf = io_buffer_select(req, &sr->len, issue_flags);
 		if (IS_ERR(buf))
 			return PTR_ERR(buf);
 		kmsg->fast_iov[0].iov_base = buf;
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		void __user *buf;
-		buf = io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
+		buf = io_buffer_select(req, &sr->len, issue_flags);
 		if (IS_ERR(buf))
 			return PTR_ERR(buf);
 		sr->buf = buf;
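
These two hunks are the recvmsg and recv issue paths respectively; both now pull the group from req->buf_index inside io_buffer_select(). The userspace pattern that exercises them is the same for either opcode; a liburing sketch (helper name illustrative):

#include <liburing.h>

/* Illustrative: queue a recv that picks its buffer from group `bgid`
 * at completion time instead of pinning one up front.
 */
static void prep_recv_select(struct io_uring *ring, int sockfd,
			     unsigned short bgid, unsigned int max_len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_recv(sqe, sockfd, NULL, max_len, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
	sqe->buf_group = bgid;
}
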
 	/* enforce forwards compatibility on users */
 	if (sqe_flags & ~SQE_VALID_FLAGS)
 		return -EINVAL;
-	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-	    !io_op_defs[opcode].buffer_select)
-		return -EOPNOTSUPP;
+	if (sqe_flags & IOSQE_BUFFER_SELECT) {
+		if (!io_op_defs[opcode].buffer_select)
+			return -EOPNOTSUPP;
+		req->buf_index = READ_ONCE(sqe->buf_group);
+	}
 	if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
 		ctx->drain_disabled = true;
 	if (sqe_flags & IOSQE_IO_DRAIN) {
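
This final hunk is where the consolidation pays off: the group id is read from the SQE exactly once, in io_init_req(), for every opcode that sets IOSQE_BUFFER_SELECT, and opcodes without buffer_select support are still rejected before their prep handlers run. A sketch of the resulting contract as seen from userspace (fsync chosen only as an example of an opcode without buffer-select support):

#include <assert.h>
#include <liburing.h>

/* Illustrative: IOSQE_BUFFER_SELECT on an unsupported opcode fails the
 * request at init time, surfacing -EOPNOTSUPP in its CQE.
 */
static void check_unsupported(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_fsync(sqe, fd, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_BUFFER_SELECT);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	assert(cqe->res == -EOPNOTSUPP);
	io_uring_cqe_seen(ring, cqe);
}
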