return false;
}
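+/*
+ * A read/write may have moved some bytes before failing or going async;
+ * io_async_rw::bytes_done remembers them. Fold them back into the result
+ * so the posted CQE reflects the total bytes transferred.
+ */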
+static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
+{
+ struct io_async_rw *io = req->async_data;
+
+ /* add previously done IO, if any */
+ if (io && io->bytes_done > 0) {
+ if (res < 0)
+ res = io->bytes_done;
+ else
+ res += io->bytes_done;
+ }
+ return res;
+}
+
static void io_req_task_complete(struct io_kiocb *req, bool *locked)
{
unsigned int cflags = io_put_rw_kbuf(req);
static void __io_complete_rw(struct io_kiocb *req, long res, long res2, unsigned int issue_flags)
{
if (__io_complete_rw_common(req, res))
return;
- __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
+ __io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req));
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

if (__io_complete_rw_common(req, res))
return;
- req->result = res;
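+ /* fix up the result now; io_req_task_complete() reports req->result */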
+ req->result = io_fixup_rw_res(req, res);
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
}
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, unsigned int issue_flags)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- struct io_async_rw *io = req->async_data;
-
- /* add previously done IO, if any */
- if (io && io->bytes_done > 0) {
- if (ret < 0)
- ret = io->bytes_done;
- else
- ret += io->bytes_done;
- }
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
unsigned int cflags = io_put_rw_kbuf(req);
struct io_ring_ctx *ctx = req->ctx;
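+ /* fold in bytes done by an earlier partial attempt before failing */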
+ ret = io_fixup_rw_res(req, ret);
req_set_fail(req);
if (!(issue_flags & IO_URING_F_NONBLOCK)) {
mutex_lock(&ctx->uring_lock);
return -EAGAIN;
}
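+ /* record how many bytes the retry still expects to read */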
+ req->result = iov_iter_count(iter);
/*
 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 * we get -EIOCBQUEUED, then we'll get a notification when the
 * desired page gets unlocked. We can also get a partial read
 * here, and if we do, then just retry at the new offset.
 */
async_msg = req->async_data;
req->flags |= REQ_F_NEED_CLEANUP;
memcpy(async_msg, kmsg, sizeof(*kmsg));
- async_msg->msg.msg_name = &async_msg->addr;
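+ /* only reseat msg_name to the stable copy if the caller supplied one */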
+ if (async_msg->msg.msg_name)
+ async_msg->msg.msg_name = &async_msg->addr;
/* if we're using fast_iov, set it to the new one */
if (!async_msg->free_iov)
async_msg->msg.msg_iter.iov = async_msg->fast_iov;
};
#define IO_POLL_CANCEL_FLAG BIT(31)
-#define IO_POLL_REF_MASK ((1u << 20)-1)
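+/* the low 31 bits count poll references; bit 31 is IO_POLL_CANCEL_FLAG */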
+#define IO_POLL_REF_MASK GENMASK(30, 0)
/*
 * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
 * bump it and acquire ownership. A request must not be modified without
 * owning the refs; that prevents races between queueing task_work and
 * between arming poll and wakeups.
 */
static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
{
- struct wait_queue_head *head = poll->head;
+ struct wait_queue_head *head = smp_load_acquire(&poll->head);
- spin_lock_irq(&head->lock);
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
- spin_unlock_irq(&head->lock);
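+ /*
+ * ->head is cleared with smp_store_release() in the POLLFREE path;
+ * the paired load-acquire above means a non-NULL head is still safe
+ * to lock.
+ */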
+ if (head) {
+ spin_lock_irq(&head->lock);
+ list_del_init(&poll->wait.entry);
+ poll->head = NULL;
+ spin_unlock_irq(&head->lock);
+ }
}
static void io_poll_remove_entries(struct io_kiocb *req)
{
struct io_poll_iocb *poll = io_poll_get_single(req);
struct io_poll_iocb *poll_double = io_poll_get_double(req);
- if (poll->head)
- io_poll_remove_entry(poll);
- if (poll_double && poll_double->head)
+ /*
+ * While we hold the waitqueue lock and the waitqueue is nonempty,
+ * wake_up_pollfree() will wait for us. However, taking the waitqueue
+ * lock in the first place can race with the waitqueue being freed.
+ *
+ * We solve this as eventpoll does: by taking advantage of the fact that
+ * all users of wake_up_pollfree() will RCU-delay the actual free. If
+ * we enter rcu_read_lock() and see that the pointer to the queue is
+ * non-NULL, we can then lock it without the memory being freed out from
+ * under us.
+ *
+ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+ * case the caller deletes the entry from the queue, leaving it empty.
+ * In that case, only RCU prevents the queue memory from being freed.
+ */
+ rcu_read_lock();
+ io_poll_remove_entry(poll);
+ if (poll_double)
io_poll_remove_entry(poll_double);
+ rcu_read_unlock();
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 void *key)
{
 struct io_kiocb *req = wait->private;
 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, wait);
__poll_t mask = key_to_poll(key);
+ if (unlikely(mask & POLLFREE)) {
+ io_poll_mark_cancelled(req);
+ /* we have to kick task_work in case it's not already running */
+ io_poll_execute(req, 0);
+
+ /*
+ * If the waitqueue is being freed early but someone already holds
+ * ownership over it, we have to tear down the request as best we can.
+ * That means immediately removing the request from its waitqueue and
+ * preventing all further accesses to the waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step. As soon as poll->head is
+ * NULL'ed out, the request can be completed and freed, because the
+ * poll completion task_work no longer needs to take the waitqueue
+ * lock.
+ */
+ smp_store_release(&poll->head, NULL);
+ return 1;
+ }
+
/* for instances that support it, check for an event match first */
if (mask && !(mask & poll->events))
return 0;
if (mask) {
/* can't multishot if failed, just queue the event we've got */
- if (unlikely(ipt->error || !ipt->nr_entries))
+ if (unlikely(ipt->error || !ipt->nr_entries)) {
poll->events |= EPOLLONESHOT;
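+ /* the event is posted below by __io_poll_execute(), so swallow the error */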
+ ipt->error = 0;
+ }
__io_poll_execute(req, mask);
return 0;
}
ipt.pt._qproc = io_poll_queue_proc;
ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
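+ /* arming hit an error, fail the request before completing with it */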
+ if (!ret && ipt.error)
+ req_set_fail(req);
ret = ret ?: ipt.error;
if (ret)
__io_req_complete(req, issue_flags, ret, 0);
}
skb->sk = sk;
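+ /* flag the skb so the unix GC defers these files to io_uring's release */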
+ skb->scm_io_uring = 1;
nr_files = 0;
fpl->user = get_uid(current_user());
{
io_sq_thread_finish(ctx);
- if (ctx->mm_account) {
- mmdrop(ctx->mm_account);
- ctx->mm_account = NULL;
- }
-
/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
io_wait_rsrc_data(ctx->buf_data);
io_wait_rsrc_data(ctx->file_data);
#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
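+ /*
+ * Only now that rsrc cleanup is done is it safe to drop ->mm_account;
+ * unregistering buffers unaccounts pinned memory through it.
+ */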
+ if (ctx->mm_account) {
+ mmdrop(ctx->mm_account);
+ ctx->mm_account = NULL;
+ }
+
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);