// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"
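
/*
 * Poll support for io_uring: implements the IORING_OP_POLL_ADD/POLL_REMOVE
 * opcodes as well as the internal "apoll" machinery used to retry other
 * requests once their file signals readiness.
 */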

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
};
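
/*
 * Bit 31 of ->poll_refs marks a cancelled request, the low 31 bits hold the
 * ownership reference count.
 */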
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. It's disallowed to modify a
 * request while not owning it, which prevents races when enqueueing task_work
 * and between arming the poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}
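
/*
 * Requests are tracked in one of two cancellation hash tables: ->cancel_table
 * uses per bucket spinlocks, while ->cancel_table_locked relies on
 * ->uring_lock being held (see io_poll_tw_hash_eject() below).
 */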

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * didn't, so take it here if needed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. >0 when no action is required, which
 * means either a spurious wakeup or a multishot CQE was already posted.
 * 0 when it's done with the request; the mask is then stored in req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return 0;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		/* spurious wakeup: drop our refs and wait for the next one */
		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return 0;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE))
				return -ECANCELED;
		} else {
			ret = io_poll_issue(req, locked);
			if (ret)
				return ret;
		}

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return 1;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	if (!ret) {
		struct io_poll *poll = io_kiocb_to_cmd(req);

		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);
	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}
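
/*
 * Unlike io_poll_task_func(), the apoll variant below doesn't post a CQE
 * itself: on success the original request is re-issued from task_work, on
 * error it is failed.
 */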
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (!ret)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask,
			      __poll_t __maybe_unused events)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res,
		__poll_t events)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res, events);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0, 0);
}

#define wqe_to_req(wait)	((void *)((unsigned long) (wait)->private & ~1))
#define wqe_is_double(wait)	((unsigned long) (wait)->private & 1)
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
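
/*
 * The wait entry's ->private stashes the io_kiocb pointer, with the lowest
 * bit used to tag entries that belong to the second ("double") poll entry.
 */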

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE)) {
		io_poll_mark_cancelled(req);
		/* we have to kick tw in case it's not already */
		io_poll_execute(req, 0, poll->events);

		/*
		 * If the waitqueue is being freed early but someone else
		 * already holds ownership over it, we have to tear down the
		 * request as best we can. That means immediately removing
		 * the request from its waitqueue and preventing all further
		 * accesses to the waitqueue via the request.
		 */
		list_del_init(&poll->wait.entry);

		/*
		 * Careful: this *must* be the last step, since as soon
		 * as req->head is NULL'ed out, the request can be
		 * completed and freed, since aio_poll_complete_work()
		 * will no longer need to take the waitqueue lock.
		 */
		smp_store_release(&poll->head, NULL);
		return 1;
	}

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask, poll->events);
	}
	return 1;
}
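
/*
 * A request holds at most two waitqueue entries: the primary one embedded in
 * the command (or the apoll), plus an extra entry allocated on demand when
 * the file uses separate waitqueues, e.g. one for read and one for write.
 */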
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= 1;
		req->flags |= REQ_F_DOUBLE_POLL;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		*poll_ptr = poll;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
	}

	req->flags |= REQ_F_SINGLE_POLL;
	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}
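
/*
 * Arm the poll against ->file. A non-zero return means the request completed
 * inline and the value is the ready poll mask; 0 means arming succeeded (or
 * completion was punted to task_work) and the caller should consult
 * ipt->error.
 */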
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;

	/*
	 * Take the ownership to delay any tw execution up until we're done
	 * with poll arming. see io_poll_get_ownership().
	 */
	atomic_set(&req->poll_refs, 1);
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		io_poll_remove_entries(req);
		/* no one else has access to the req, forget about the ref */
		return mask;
	}

	if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);
		if (!ipt->error)
			ipt->error = -EINVAL;
		return 0;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET)) {
		/* can't multishot if failed, just queue the event we've got */
		if (unlikely(ipt->error || !ipt->nr_entries)) {
			poll->events |= EPOLLONESHOT;
			req->apoll_events |= EPOLLONESHOT;
			ipt->error = 0;
		}
		__io_poll_execute(req, mask, poll->events);
		return 0;
	}

	/*
	 * Release ownership. If someone tried to queue a tw while it was
	 * locked, kick it off for them.
	 */
	v = atomic_dec_return(&req->poll_refs);
	if (unlikely(v & IO_POLL_REF_MASK))
		__io_poll_execute(req, 0, poll->events);
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
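
/*
 * Try to arm poll based retry for a request. IO_APOLL_OK means the request
 * now waits for the file to become ready, IO_APOLL_READY means it should be
 * retried immediately, and IO_APOLL_ABORTED tells the caller to fall back to
 * the normal async punt.
 */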
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;
	else
		req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
					 poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return IO_APOLL_ABORTED;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
	if (ret || ipt.error)
		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

	trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
				mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	return io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all) |
	       io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
}
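
/*
 * The lookup helpers below return a matching request with its hash bucket
 * lock still held (reported via *out_bucket); the caller must unlock the
 * bucket once it's done disarming the request.
 */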
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}
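
/*
 * Translate the poll events from the SQE. Behaviour flags (EPOLLEXCLUSIVE,
 * EPOLLONESHOT, EPOLLET) are passed through as-is, the userspace event bits
 * go through demangle_poll().
 */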
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req);
	unsigned int flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}
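
/*
 * Userspace arming sketch (illustrative only, assuming liburing):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	sqe->user_data = 0xcafe;
 *	io_uring_submit(&ring);
 *
 * Each readiness event then posts a CQE carrying IORING_CQE_F_MORE until the
 * poll is cancelled, updated or terminates with an error.
 */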
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (req->ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_SINGLE_ISSUER)))
		req->flags |= REQ_F_HASH_LOCKED;
	else
		req->flags &= ~REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
	if (ret) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}
	if (ipt.error) {
		req_set_fail(req);
		return ipt.error;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0)
		req_set_fail(req);
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}