// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>
struct io_poll_update {
        bool                            update_user_data;

struct io_poll_table {
        struct poll_table_struct pt;
        /* output value, set only if arm poll returns >0 */
#define IO_POLL_CANCEL_FLAG     BIT(31)
#define IO_POLL_REF_MASK        GENMASK(30, 0)

#define IO_WQE_F_DOUBLE         1
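
/*
 * The low bit of wqe->private distinguishes the second ("double") poll
 * entry from the first; wqe_to_req() below masks it off to recover the
 * io_kiocb pointer, so the request pointer must be at least 2-byte aligned.
 */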
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;
        return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;
        return priv & IO_WQE_F_DOUBLE;
}
/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying a request while not
 * owning it is disallowed; that prevents races when enqueueing task_work and
 * between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
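
/*
 * Ownership taken here is dropped by subtracting the same references back:
 * see the atomic_sub_return() loop exit in io_poll_check_events() and the
 * atomic_dec_return() at the end of __io_arm_poll_handler().
 */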
static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}
static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req, struct io_poll);
        return &req->apoll->poll;
}
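
/*
 * Hash the request into the ctx's cancel table, bucketed by cqe.user_data,
 * so poll remove/cancel can find it later.
 */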
static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        spin_lock(&hb->lock);
        hlist_add_head(&req->hash_node, &hb->list);
        spin_unlock(&hb->lock);
}
static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        spinlock_t *lock = &table->hbs[index].lock;

        spin_lock(lock);
        hash_del(&req->hash_node);
        spin_unlock(lock);
}
static void io_poll_req_insert_locked(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table_locked;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&req->ctx->uring_lock);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}
static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;

        if (req->flags & REQ_F_HASH_LOCKED) {
                /*
                 * ->cancel_table_locked is protected by ->uring_lock in
                 * contrast to per bucket spinlocks. Likely, tctx_task_work()
                 * already grabbed the mutex for us, but there is a chance it
                 * didn't.
                 */
                io_tw_lock(ctx, locked);
                hash_del(&req->hash_node);
                req->flags &= ~REQ_F_HASH_LOCKED;
        } else {
                io_poll_req_delete(req, ctx);
        }
}
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
                              wait_queue_func_t wake_func)
{
        poll->head = NULL;
#define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, wake_func);
}
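
/*
 * Unhook a single poll entry from its waitqueue, if it is still queued.
 * poll->head is read with smp_load_acquire() to pair with the
 * smp_store_release() in the POLLFREE path.
 */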
static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                spin_unlock_irq(&head->lock);
        }
}
static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags is set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, which happens on a spurious wakeup or when a multishot CQE
 * has already been served. IOU_POLL_DONE when it's done with the request;
 * the mask is then stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES
 * indicates to remove the multishot poll and that the result is stored in
 * req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;

        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))

        v = atomic_read(&req->poll_refs);

        /* tw handler should be the owner, and so have some references */
        if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                return IOU_POLL_DONE;
        if (v & IO_POLL_CANCEL_FLAG)

        /* the mask was stashed in __io_poll_execute */
        struct poll_table_struct pt = { ._key = req->apoll_events };
        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;

        if (unlikely(!req->cqe.res))
        if (req->apoll_events & EPOLLONESHOT)
                return IOU_POLL_DONE;

        /* multishot, just fill a CQE and proceed */
        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                __poll_t mask = mangle_poll(req->cqe.res &
                                            req->apoll_events);

                if (!io_post_aux_cqe(ctx, req->cqe.user_data,
                                     mask, IORING_CQE_F_MORE, false)) {
                        io_req_set_res(req, mask, 0);
                        return IOU_POLL_REMOVE_POLL_USE_RES;

        ret = io_poll_issue(req, locked);
        if (ret == IOU_STOP_MULTISHOT)
                return IOU_POLL_REMOVE_POLL_USE_RES;

        /*
         * Release all references, retry if someone tried to restart
         * task_work while we were executing it.
         */
        } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

        return IOU_POLL_NO_ACTION;
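
/* task_work completion handler for IORING_OP_POLL_ADD requests */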
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        ret = io_poll_check_events(req, locked);
        if (ret == IOU_POLL_NO_ACTION)

        if (ret == IOU_POLL_DONE) {
                struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);

                req->cqe.res = mangle_poll(req->cqe.res & poll->events);
        } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        io_req_set_res(req, req->cqe.res, 0);
        io_req_task_complete(req, locked);
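
/*
 * task_work handler for internally armed ("apoll") poll: on a real event the
 * original request is re-issued instead of posting a poll completion.
 */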
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
        ret = io_poll_check_events(req, locked);
        if (ret == IOU_POLL_NO_ACTION)
                return;

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                io_req_complete_post(req);
        else if (ret == IOU_POLL_DONE)
                io_req_task_submit(req, locked);
        else
                io_req_complete_failed(req, ret);
}
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
        io_req_set_res(req, mask, 0);

        /*
         * This is useful for poll that is armed on behalf of another
         * request, and where the wakeup path could be on a different
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
                req->io_task_work.func = io_apoll_task_func;

        trace_io_uring_task_add(req, mask);
        io_req_task_work_add(req);
}
static inline void io_poll_execute(struct io_kiocb *req, int res)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON    (EPOLLONESHOT | EPOLLPRI)
static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
        io_poll_mark_cancelled(req);
        /* we have to kick tw in case it's not already */
        io_poll_execute(req, 0);

        /*
         * If the waitqueue is being freed early but someone already holds
         * ownership over it, we have to tear down the request as best we
         * can. That means immediately removing the request from its
         * waitqueue and preventing all further accesses to the waitqueue
         * via the request.
         */
        list_del_init(&poll->wait.entry);

        /*
         * Careful: this *must* be the last step, since as soon as
         * poll->head is NULL'ed out, the request can be completed and
         * freed, since the poll task_work will no longer need to take
         * the waitqueue lock.
         */
        smp_store_release(&poll->head, NULL);
        return 1;
}
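
/*
 * Waitqueue wakeup callback installed by io_init_poll_iocb(). It runs in the
 * waker's context, so it only grabs ownership and defers the real work to
 * task_work via __io_poll_execute().
 */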
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE))
                return io_pollfree_wake(req, poll);

        /* for instances that support it check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask);
        }
        return 1;
}
/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
        struct wait_queue_head *head;
        struct io_poll *poll = io_poll_get_single(req);

        /* head is RCU protected, see io_poll_remove_entries() comments */
        rcu_read_lock();
        head = smp_load_acquire(&poll->head);
        /*
         * poll arm might not hold ownership and so race for req->flags with
         * io_poll_wake(). There is only one poll entry queued, serialise with
         * it by taking its head lock. As we're still arming, the tw handler
         * is not going to be run, so there are no races with it.
         */
        if (head) {
                spin_lock_irq(&head->lock);
                req->flags |= REQ_F_DOUBLE_POLL;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
                spin_unlock_irq(&head->lock);
        }
        rcu_read_unlock();
        return !!head;
}
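
/*
 * poll_table callback invoked from vfs_poll(): queue a poll entry on the
 * waitqueue the file hands us, allocating a second ("double") entry when the
 * file uses more than one waitqueue.
 */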
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Setup a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                /* already have a 2nd entry, fail a third attempt */
                if ((*poll_ptr)->head == head)

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);

                /* mark as double wq entry */
                wqe_private |= IO_WQE_F_DOUBLE;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                if (!io_poll_double_prepare(req)) {
                        /* the request is completing, just back off */

        /* fine to modify, there is no poll queued to race with us */
        req->flags |= REQ_F_SINGLE_POLL;

        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
        else
                add_wait_queue(head, &poll->wait);
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}
static bool io_poll_can_finish_inline(struct io_kiocb *req,
                                      struct io_poll_table *pt)
{
        return pt->owning || io_poll_get_ownership(req);
}
/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
        struct io_ring_ctx *ctx = req->ctx;

        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
        req->apoll_events = poll->events;

        /*
         * Polling is either completed here or via task_work, so if we're in the
         * task context we're naturally serialised with tw by merit of running
         * the same task. When it's io-wq, take the ownership to prevent tw
         * from running. However, when we're in the task context, skip taking
         * it as an optimisation.
         *
         * Note: even though the request won't be completed/freed, without
         * ownership we still can race with io_poll_wake().
         * io_poll_can_finish_inline() tries to deal with that.
         */
        ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
        atomic_set(&req->poll_refs, (int)ipt->owning);

        /* io-wq doesn't hold uring_lock */
        if (issue_flags & IO_URING_F_UNLOCKED)
                req->flags &= ~REQ_F_HASH_LOCKED;

        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);

                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_mark_cancelled(req);
                } else if (mask && (poll->events & EPOLLET)) {
                        ipt->result_mask = mask;
                return ipt->error ?: -EINVAL;

        if (mask &&
            ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                if (!io_poll_can_finish_inline(req, ipt))
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */

        if (req->flags & REQ_F_HASH_LOCKED)
                io_poll_req_insert_locked(req);
        else
                io_poll_req_insert(req);

        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
                __io_poll_execute(req, mask);

        /*
         * Release ownership. If someone tried to queue a tw while it was
         * locked, kick it off for them.
         */
        v = atomic_dec_return(&req->poll_refs);
        if (unlikely(v & IO_POLL_REF_MASK))
                __io_poll_execute(req, 0);
static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
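
/*
 * Get an async_poll entry for the request: reuse the existing one if the
 * request was polled before (REQ_F_POLLED), otherwise try the ctx apoll
 * cache (only when uring_lock is held), and fall back to kmalloc().
 */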
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
                                             unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;
        struct async_poll *apoll;

        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
                   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
                apoll = container_of(entry, struct async_poll, cache);
        } else {
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return NULL;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        return apoll;
}
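
/*
 * Arm poll-driven retry for a request whose file isn't ready yet: returns
 * IO_APOLL_ABORTED when it can't (or shouldn't) be armed and IO_APOLL_READY
 * when vfs_poll() reported the file as already ready.
 */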
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR | EPOLLET;
        int ret;

        /*
         * apoll requests already grab the mutex to complete in the tw handler,
         * so removal from the mutex-backed hash is free, use it by default.
         */
        req->flags |= REQ_F_HASH_LOCKED;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!file_can_poll(req->file))
                return IO_APOLL_ABORTED;
        if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;

        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
        if (ret)
                return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
        return IO_APOLL_OK;
}
static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
                                            struct io_hash_table *table,
                                            bool cancel_all)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return found;
}
/* Returns true if we found and killed one or more poll requests */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
        __must_hold(&ctx->uring_lock)
{
        bool ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
        ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
        return ret;
}
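
/*
 * Look up a hashed poll request by user_data. On success the matching
 * bucket's lock is left held and returned via *out_bucket so the caller can
 * disarm or cancel the request before unlocking.
 */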
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd,
                                     struct io_hash_table *table,
                                     struct io_hash_bucket **out_bucket)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        *out_bucket = NULL;

        spin_lock(&hb->lock);
        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                }
                *out_bucket = hb;
                return req;
        }
        spin_unlock(&hb->lock);
        return NULL;
}
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd,
                                          struct io_hash_table *table,
                                          struct io_hash_bucket **out_bucket)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct io_kiocb *req;
        int i;

        *out_bucket = NULL;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
                            req->file != cd->file)
                                continue;
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                        *out_bucket = hb;
                        return req;
                }
                spin_unlock(&hb->lock);
        }
        return NULL;
}
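
/*
 * Take ownership of a hashed poll request and remove it from its waitqueues
 * and the cancel hash, so the caller can cancel or re-arm it.
 */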
static int io_poll_disarm(struct io_kiocb *req)
{
        if (!io_poll_get_ownership(req))
                return -EBUSY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}
static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                            struct io_hash_table *table)
{
        struct io_hash_bucket *bucket;
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd, table, &bucket);
        else
                req = io_poll_find(ctx, false, cd, table, &bucket);

        if (req)
                io_poll_cancel_req(req);
        if (bucket)
                spin_unlock(&bucket->lock);
        return req ? 0 : -ENOENT;
}
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
        if (ret != -ENOENT)
                return ret;
        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
               (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
        unsigned flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;
        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        struct io_poll_table ipt;

        ipt.pt._qproc = io_poll_queue_proc;

        /*
         * If sqpoll or single issuer, there is no contention for ->uring_lock
         * and we'll end up holding it in tw handlers anyway.
         */
        if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
                req->flags |= REQ_F_HASH_LOCKED;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
        io_req_set_res(req, ipt.result_mask, 0);
        return ret ?: IOU_ISSUE_SKIP_COMPLETE;
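
/*
 * IORING_OP_POLL_REMOVE: look up the original poll request by user_data,
 * disarm it, and then either cancel it or re-arm it with updated events
 * and/or user_data.
 */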
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
        struct io_cancel_data cd = { .data = poll_update->old_user_data, };
        struct io_ring_ctx *ctx = req->ctx;
        struct io_hash_bucket *bucket;
        struct io_kiocb *preq;

        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
        ret2 = io_poll_disarm(preq);
        spin_unlock(&bucket->lock);
        if (ret2 != -ENOENT) {

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
        ret2 = io_poll_disarm(preq);
        spin_unlock(&bucket->lock);
        io_ring_submit_unlock(ctx, issue_flags);

        if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the event mask, keep the behavior flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)

        io_req_set_res(preq, -ECANCELED, 0);
        locked = !(issue_flags & IO_URING_F_UNLOCKED);
        io_req_task_complete(preq, &locked);

        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
void io_apoll_cache_free(struct io_cache_entry *entry)
{
        kfree(container_of(entry, struct async_poll, cache));
}