irqchip/irq-sifive-plic: Add syscore callbacks for hibernation
author	mason.huo <mason.huo@starfivetech.com>
Tue, 3 Jan 2023 05:39:30 +0000 (13:39 +0800)
committer	mason.huo <mason.huo@starfivetech.com>
Thu, 5 Jan 2023 06:27:34 +0000 (14:27 +0800)
The PLIC priority and enable registers are reset during the hibernation
power cycle in poweroff mode, so add syscore callbacks to save and
restore those registers.
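
For reference, the callbacks follow the standard syscore_ops pattern.
A minimal sketch of that pattern is below (the my_irqchip_* names are
placeholders, not this driver's symbols; the actual implementation is
in the diff that follows):

  #include <linux/syscore_ops.h>

  /* syscore callbacks run late, on one CPU, with interrupts disabled */
  static int my_irqchip_suspend(void)
  {
          /* save the registers that the power cycle will clobber into RAM */
          return 0;
  }

  static void my_irqchip_resume(void)
  {
          /* write the saved values back after the power cycle */
  }

  static struct syscore_ops my_irqchip_syscore_ops = {
          .suspend = my_irqchip_suspend,
          .resume  = my_irqchip_resume,
  };

  static void __init my_irqchip_pm_init(void)
  {
          /* one-time registration, e.g. at the end of the init path */
          register_syscore_ops(&my_irqchip_syscore_ops);
  }

In this patch the equivalent registration is done in plic_irq_pm_init(),
called once at the end of plic_init().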

Signed-off-by: mason.huo <mason.huo@starfivetech.com>
drivers/irqchip/irq-sifive-plic.c [changed mode: 0755->0644]

diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
old mode 100755
new mode 100644
index 259065d..92ec59b
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -17,6 +17,7 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 #include <asm/smp.h>
 
 /*
@@ -64,6 +65,8 @@ struct plic_priv {
        struct cpumask lmask;
        struct irq_domain *irqdomain;
        void __iomem *regs;
+       unsigned int nr_irqs;
+       u32 *priority_reg;
 };
 
 struct plic_handler {
@@ -76,10 +79,13 @@ struct plic_handler {
        raw_spinlock_t          enable_lock;
        void __iomem            *enable_base;
        struct plic_priv        *priv;
+       /* To record interrupts that are enabled before suspend. */
+       u32 enable_reg[MAX_DEVICES / 32];
 };
 static int plic_parent_irq __ro_after_init;
 static bool plic_cpuhp_setup_done __ro_after_init;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
+static struct plic_priv *priv_data;
 
 static inline void plic_toggle(struct plic_handler *handler,
                                int hwirq, int enable)
@@ -182,6 +188,78 @@ static struct irq_chip plic_chip = {
 #endif
 };
 
+static void plic_irq_resume(void)
+{
+       unsigned int i, cpu;
+       u32 __iomem *reg;
+
+       for (i = 0; i < priv_data->nr_irqs; i++)
+               writel(priv_data->priority_reg[i],
+                               priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+       for_each_cpu(cpu, cpu_present_mask) {
+               struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+               if (!handler->present)
+                       continue;
+
+               for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+                       reg = handler->enable_base + i * sizeof(u32);
+                       raw_spin_lock(&handler->enable_lock);
+                       writel(handler->enable_reg[i], reg);
+                       raw_spin_unlock(&handler->enable_lock);
+               }
+       }
+}
+
+static int plic_irq_suspend(void)
+{
+       unsigned int i, cpu;
+       u32 __iomem *reg;
+
+       for (i = 0; i < priv_data->nr_irqs; i++)
+               priv_data->priority_reg[i] =
+                       readl(priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+       for_each_cpu(cpu, cpu_present_mask) {
+               struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+               if (!handler->present)
+                       continue;
+
+               for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+                       reg = handler->enable_base + i * sizeof(u32);
+                       raw_spin_lock(&handler->enable_lock);
+                       handler->enable_reg[i] = readl(reg);
+                       raw_spin_unlock(&handler->enable_lock);
+               }
+       }
+
+       return 0;
+}
+
+static struct syscore_ops plic_irq_syscore_ops = {
+       .suspend        = plic_irq_suspend,
+       .resume         = plic_irq_resume,
+};
+
+static void plic_irq_pm_init(void)
+{
+       unsigned int cpu;
+
+       for_each_cpu(cpu, cpu_present_mask) {
+               struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+               if (!handler->present)
+                       continue;
+
+               memset(&handler->enable_reg[0], 0,
+                       sizeof(handler->enable_reg));
+       }
+
+       register_syscore_ops(&plic_irq_syscore_ops);
+}
+
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
 {
@@ -295,20 +373,27 @@ static int __init plic_init(struct device_node *node,
                goto out_free_priv;
        }
 
+       priv_data = priv;
+
        error = -EINVAL;
        of_property_read_u32(node, "riscv,ndev", &nr_irqs);
        if (WARN_ON(!nr_irqs))
                goto out_iounmap;
+       priv->nr_irqs = nr_irqs;
+
+       priv->priority_reg = kcalloc(nr_irqs, sizeof(u32), GFP_KERNEL);
+       if (!priv->priority_reg)
+               goto out_free_priority_reg;
 
        nr_contexts = of_irq_count(node);
        if (WARN_ON(!nr_contexts))
-               goto out_iounmap;
+               goto out_free_priority_reg;
 
        error = -ENOMEM;
        priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
                        &plic_irqdomain_ops, priv);
        if (WARN_ON(!priv->irqdomain))
-               goto out_iounmap;
+               goto out_free_priority_reg;
 
        for (i = 0; i < nr_contexts; i++) {
                struct of_phandle_args parent;
@@ -385,10 +470,14 @@ done:
                plic_cpuhp_setup_done = true;
        }
 
+       plic_irq_pm_init();
+
        pr_info("%pOFP: mapped %d interrupts with %d handlers for"
                " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
        return 0;
 
+out_free_priority_reg:
+       kfree(priv->priority_reg);
 out_iounmap:
        iounmap(priv->regs);
 out_free_priv: