[platform/kernel/linux-rpi.git]
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 648ed77..cf326c5 100644 (file)
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1686,8 +1686,8 @@ static int ep_send_events(struct eventpoll *ep,
                if (!revents)
                        continue;
 
-               if (__put_user(revents, &events->events) ||
-                   __put_user(epi->event.data, &events->data)) {
+               events = epoll_put_uevent(revents, epi->event.data, events);
+               if (!events) {
                        list_add(&epi->rdllink, &txlist);
                        ep_pm_stay_awake(epi);
                        if (!res)
@@ -1695,7 +1695,6 @@ static int ep_send_events(struct eventpoll *ep,
                        break;
                }
                res++;
-               events++;
                if (epi->event.events & EPOLLONESHOT)
                        epi->event.events &= EP_PRIVATE_BITS;
                else if (!(epi->event.events & EPOLLET)) {
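
For context, epoll_put_uevent() folds the two __put_user() copies and the user-pointer
increment into one helper that returns NULL on a fault, which is why the separate
"events++" above can be dropped. A rough sketch of the generic helper, roughly as it is
defined in include/linux/eventpoll.h (some architectures override it):

static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
                 struct epoll_event __user *uevent)
{
        /* Copy the ready mask and user data out to the current slot ... */
        if (__put_user(revents, &uevent->events) ||
            __put_user(data, &uevent->data))
                return NULL;            /* ... report a fault to the caller ... */

        /* ... and advance to the next user-space slot on success. */
        return uevent + 1;
}
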
@@ -1741,6 +1740,21 @@ static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
        return to;
 }
 
+/*
+ * autoremove_wake_function, but remove even on failure to wake up, because we
+ * know that default_wake_function/ttwu will only fail if the thread is already
+ * woken, and in that case the ep_poll loop will remove the entry anyway, not
+ * try to reuse it.
+ */
+static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
+                                      unsigned int mode, int sync, void *key)
+{
+       int ret = default_wake_function(wq_entry, mode, sync, key);
+
+       list_del_init(&wq_entry->entry);
+       return ret;
+}
+
 /**
  * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
  *           event buffer.
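
For comparison, the stock autoremove_wake_function() in kernel/sched/wait.c only dequeues
the entry when the wakeup actually succeeds, roughly:

int autoremove_wake_function(struct wait_queue_entry *wq_entry,
                             unsigned int mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        /* Remove the waiter only if the task was really woken. */
        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}

ep_autoremove_wake_function() drops the "if (ret)" check because ep_poll() never reuses the
wait entry across loop iterations, so an entry left queued after a failed wakeup would only
have to be removed later anyway.
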
@@ -1822,8 +1836,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                 * normal wakeup path no need to call __remove_wait_queue()
                 * explicitly, thus ep->lock is not taken, which halts the
                 * event delivery.
+                *
+                * In fact, we now use an even more aggressive function that
+                * unconditionally removes, because we don't reuse the wait
+                * entry between loop iterations. This lets us also avoid the
+                * performance issue if a process is killed, causing all of its
+                * threads to wake up without being removed normally.
                 */
                init_wait(&wait);
+               wait.func = ep_autoremove_wake_function;
 
                write_lock_irq(&ep->lock);
                /*
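
Overriding wait.func right after init_wait() works because init_wait() merely seeds the
entry with the current task and the default autoremove callback; roughly, per
include/linux/wait.h:

/*
 * init_wait() fills in a wait_queue_entry for the current task and installs
 * autoremove_wake_function as the wake callback:
 */
#define init_wait(wait)                                                 \
        do {                                                            \
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->entry);                         \
                (wait)->flags = 0;                                      \
        } while (0)

The assignment therefore just swaps in the more aggressive callback before the entry is
queued on ep->wq under ep->lock.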