diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ee2ecc0..e83c321 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -189,6 +189,7 @@ static void group_init(struct psi_group *group)
        INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
        mutex_init(&group->avgs_lock);
        /* Init trigger-related members */
+       atomic_set(&group->poll_scheduled, 0);
        mutex_init(&group->trigger_lock);
        INIT_LIST_HEAD(&group->triggers);
        group->poll_min_period = U32_MAX;
@@ -539,10 +540,12 @@ static u64 update_triggers(struct psi_group *group, u64 now)
 
                        /* Calculate growth since last update */
                        growth = window_update(&t->win, now, total[t->state]);
-                       if (growth < t->threshold)
-                               continue;
+                       if (!t->pending_event) {
+                               if (growth < t->threshold)
+                                       continue;
 
-                       t->pending_event = true;
+                               t->pending_event = true;
+                       }
                }
                /* Limit event signaling to once per window */
                if (now < t->last_event_time + t->win.size)
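
The hunk above reworks update_triggers() so that a threshold crossing is latched in t->pending_event instead of being re-derived from the current growth on every pass; an event that could not be reported yet (signaling is rate-limited to once per window) is no longer lost if growth has since dropped back below the threshold. A stand-alone sketch of that idea, with hypothetical names and simplified types rather than the kernel's, might look like:

/* Hypothetical, simplified model of the pending_event logic above. */
#include <stdbool.h>
#include <stdint.h>

struct trigger {
	uint64_t threshold;       /* growth needed to arm an event */
	uint64_t win_size;        /* rate-limit window, in ns */
	uint64_t last_event_time; /* when we last reported an event */
	bool pending_event;       /* armed but not yet reported */
};

/* Returns true if an event should be reported at time 'now'. */
static bool trigger_update(struct trigger *t, uint64_t growth, uint64_t now)
{
	/* Arm once the threshold is crossed; stay armed until reported. */
	if (!t->pending_event) {
		if (growth < t->threshold)
			return false;
		t->pending_event = true;
	}

	/* Limit event signaling to once per window. */
	if (now < t->last_event_time + t->win_size)
		return false;

	t->last_event_time = now;
	t->pending_event = false;
	return true;
}

With this shape, a crossing observed early in a rate-limited window is still reported once the window expires, which is the intent of the pending_event change.
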
@@ -563,18 +566,17 @@ static u64 update_triggers(struct psi_group *group, u64 now)
        return now + group->poll_min_period;
 }
 
-/* Schedule polling if it's not already scheduled. */
-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+/* Schedule polling if it's not already scheduled or forced. */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
+                                  bool force)
 {
        struct task_struct *task;
 
        /*
-        * Do not reschedule if already scheduled.
-        * Possible race with a timer scheduled after this check but before
-        * mod_timer below can be tolerated because group->polling_next_update
-        * will keep updates on schedule.
+        * atomic_xchg should be called even when !force to provide a
+        * full memory barrier (see the comment inside psi_poll_work).
         */
-       if (timer_pending(&group->poll_timer))
+       if (atomic_xchg(&group->poll_scheduled, 1) && !force)
                return;
 
        rcu_read_lock();
@@ -586,12 +588,15 @@ static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
         */
        if (likely(task))
                mod_timer(&group->poll_timer, jiffies + delay);
+       else
+               atomic_set(&group->poll_scheduled, 0);
 
        rcu_read_unlock();
 }
 
 static void psi_poll_work(struct psi_group *group)
 {
+       bool force_reschedule = false;
        u32 changed_states;
        u64 now;
 
@@ -599,6 +604,43 @@ static void psi_poll_work(struct psi_group *group)
 
        now = sched_clock();
 
+       if (now > group->polling_until) {
+               /*
+                * We are either about to start or might stop polling if no
+                * state change was recorded. Resetting poll_scheduled leaves
+                * a small window for psi_group_change to sneak in and schedule
+                * an immediate poll_work before we get to rescheduling. One
+                * potential extra wakeup at the end of the polling window
+                * should be negligible and polling_next_update still keeps
+                * updates correctly on schedule.
+                */
+               atomic_set(&group->poll_scheduled, 0);
+               /*
+                * A task change can race with the poll worker that is supposed to
+                * report on it. To avoid missing events, ensure ordering between
+                * poll_scheduled and the task state accesses, such that if the poll
+                * worker misses the state update, the task change is guaranteed to
+                * reschedule the poll worker:
+                *
+                * poll worker:
+                *   atomic_set(poll_scheduled, 0)
+                *   smp_mb()
+                *   LOAD states
+                *
+                * task change:
+                *   STORE states
+                *   if atomic_xchg(poll_scheduled, 1) == 0:
+                *     schedule poll worker
+                *
+                * The atomic_xchg() implies a full barrier.
+                */
+               smp_mb();
+       } else {
+               /* Polling window is not over, keep rescheduling */
+               force_reschedule = true;
+       }
+
+
        collect_percpu_times(group, PSI_POLL, &changed_states);
 
        if (changed_states & group->poll_states) {
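
The barrier pairing spelled out in the comment above can be modeled outside the kernel with C11 atomics; atomic_exchange() and atomic_thread_fence() stand in for the kernel's atomic_xchg() and smp_mb(). This is a hypothetical illustration of the handshake, not kernel code:

/* Hypothetical C11 model of the poll_scheduled handshake above. */
#include <stdatomic.h>

static atomic_int poll_scheduled;  /* models group->poll_scheduled */
static atomic_int task_states;     /* models the recorded task states */

/* Task change side: publish the state, then try to schedule the worker. */
static void task_change(int new_states)
{
	atomic_store(&task_states, new_states);          /* STORE states */

	/* atomic_exchange is a full barrier; only the first caller schedules. */
	if (atomic_exchange(&poll_scheduled, 1) == 0) {
		/* schedule_poll_worker(); -- hypothetical scheduling hook */
	}
}

/* Poll worker side: drop the flag, then (re)read the state. */
static int poll_worker(void)
{
	atomic_store(&poll_scheduled, 0);                /* atomic_set(..., 0) */
	atomic_thread_fence(memory_order_seq_cst);       /* smp_mb() */

	/*
	 * If this load misses a state written before our store above, the
	 * writer's exchange must have observed poll_scheduled == 0 and will
	 * reschedule the worker, so the event is not lost.
	 */
	return atomic_load(&task_states);                /* LOAD states */
}
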
@@ -624,7 +666,8 @@ static void psi_poll_work(struct psi_group *group)
                group->polling_next_update = update_triggers(group, now);
 
        psi_schedule_poll_work(group,
-               nsecs_to_jiffies(group->polling_next_update - now) + 1);
+               nsecs_to_jiffies(group->polling_next_update - now) + 1,
+               force_reschedule);
 
 out:
        mutex_unlock(&group->trigger_lock);
@@ -785,7 +828,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
        write_seqcount_end(&groupc->seq);
 
        if (state_mask & group->poll_states)
-               psi_schedule_poll_work(group, 1);
+               psi_schedule_poll_work(group, 1, false);
 
        if (wake_clock && !delayed_work_pending(&group->avgs_work))
                schedule_delayed_work(&group->avgs_work, PSI_FREQ);
@@ -939,7 +982,7 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
                write_seqcount_end(&groupc->seq);
 
                if (group->poll_states & (1 << PSI_IRQ_FULL))
-                       psi_schedule_poll_work(group, 1);
+                       psi_schedule_poll_work(group, 1, false);
        } while ((group = group->parent));
 }
 #endif
@@ -1276,10 +1319,11 @@ void psi_trigger_destroy(struct psi_trigger *t)
 
        group = t->group;
        /*
-        * Wakeup waiters to stop polling. Can happen if cgroup is deleted
-        * from under a polling process.
+        * Wakeup waiters to stop polling and clear the queue to prevent it from
+        * being accessed later. Can happen if cgroup is deleted from under a
+        * polling process.
         */
-       wake_up_interruptible(&t->event_wait);
+       wake_up_pollfree(&t->event_wait);
 
        mutex_lock(&group->trigger_lock);
 
@@ -1325,6 +1369,7 @@ void psi_trigger_destroy(struct psi_trigger *t)
                 * can no longer be found through group->poll_task.
                 */
                kthread_stop(task_to_destroy);
+               atomic_set(&group->poll_scheduled, 0);
        }
        kfree(t);
 }
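
For reference, the trigger machinery these hunks harden is driven from userspace as described in Documentation/accounting/psi.rst: write a "<some|full> <stall threshold us> <window us>" spec to a pressure file and poll() it for POLLPRI; POLLERR indicates the event source is gone, e.g. the cgroup-deleted-under-a-poller case the wake_up_pollfree() hunk above guards against. A minimal consumer along those documented lines:

/* Minimal PSI trigger consumer, per Documentation/accounting/psi.rst. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Trigger when "some" memory stall exceeds 150ms within a 1s window. */
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;
	int n;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0) {
		fprintf(stderr, "open: %s\n", strerror(errno));
		return 1;
	}
	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
		fprintf(stderr, "write: %s\n", strerror(errno));
		return 1;
	}
	fds.events = POLLPRI;

	for (;;) {
		n = poll(&fds, 1, -1);
		if (n < 0) {
			fprintf(stderr, "poll: %s\n", strerror(errno));
			return 1;
		}
		if (fds.revents & POLLERR) {
			fprintf(stderr, "event source is gone\n");
			return 0;
		}
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
}
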