#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#include <asm/smp.h>
/*
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
+ /* Number of external interrupt sources reported by "riscv,ndev". */
+ unsigned int nr_irqs;
+ /* Priority register values saved at suspend and restored at resume. */
+ u32 *priority_reg;
};
struct plic_handler {
raw_spinlock_t enable_lock;
void __iomem *enable_base;
struct plic_priv *priv;
+ /* Enable-register values saved at suspend and restored at resume. */
+ u32 enable_reg[MAX_DEVICES / 32];
};
static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
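+/* The PLIC instance whose state is saved and restored by the syscore ops. */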
+static struct plic_priv *priv_data;
static inline void plic_toggle(struct plic_handler *handler,
int hwirq, int enable)
#endif
};
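+/*
+ * Syscore resume: write back the priority registers and each present
+ * context's enable bits from the values captured by plic_irq_suspend().
+ */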
+static void plic_irq_resume(void)
+{
+ unsigned int i, cpu;
+ u32 __iomem *reg;
+
+ for (i = 0; i < priv_data->nr_irqs; i++)
+ writel(priv_data->priority_reg[i],
+ priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (!handler->present)
+ continue;
+
+ for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+ reg = handler->enable_base + i * sizeof(u32);
+ raw_spin_lock(&handler->enable_lock);
+ writel(handler->enable_reg[i], reg);
+ raw_spin_unlock(&handler->enable_lock);
+ }
+ }
+}
+
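+/*
+ * Syscore suspend: capture the priority registers and each present
+ * context's enable bits so plic_irq_resume() can restore them after the
+ * PLIC loses state across suspend or hibernation.
+ */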
+static int plic_irq_suspend(void)
+{
+ unsigned int i, cpu;
+ u32 __iomem *reg;
+
+ for (i = 0; i < priv_data->nr_irqs; i++)
+ priv_data->priority_reg[i] =
+ readl(priv_data->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (!handler->present)
+ continue;
+
+ for (i = 0; i < DIV_ROUND_UP(priv_data->nr_irqs, 32); i++) {
+ reg = handler->enable_base + i * sizeof(u32);
+ raw_spin_lock(&handler->enable_lock);
+ handler->enable_reg[i] = readl(reg);
+ raw_spin_unlock(&handler->enable_lock);
+ }
+ }
+
+ return 0;
+}
+
+static struct syscore_ops plic_irq_syscore_ops = {
+ .suspend = plic_irq_suspend,
+ .resume = plic_irq_resume,
+};
+
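+/* Clear the saved enable-register snapshots and register the syscore ops. */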
+static void plic_irq_pm_init(void)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cpu_present_mask) {
+ struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
+
+ if (!handler->present)
+ continue;
+
+ memset(&handler->enable_reg[0], 0,
+ sizeof(handler->enable_reg));
+ }
+
+ register_syscore_ops(&plic_irq_syscore_ops);
+}
+
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
goto out_free_priv;
}
error = -EINVAL;
of_property_read_u32(node, "riscv,ndev", &nr_irqs);
if (WARN_ON(!nr_irqs))
goto out_iounmap;
+ priv->nr_irqs = nr_irqs;
+
+ priv->priority_reg = kcalloc(nr_irqs, sizeof(u32), GFP_KERNEL);
+ if (!priv->priority_reg) {
+ error = -ENOMEM;
+ goto out_iounmap;
+ }
nr_contexts = of_irq_count(node);
if (WARN_ON(!nr_contexts))
- goto out_iounmap;
+ goto out_free_priority_reg;
error = -ENOMEM;
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
- goto out_iounmap;
+ goto out_free_priority_reg;
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
plic_cpuhp_setup_done = true;
}
+ priv_data = priv;
+ plic_irq_pm_init();
+
pr_info("%pOFP: mapped %d interrupts with %d handlers for"
" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
return 0;
+out_free_priority_reg:
+ kfree(priv->priority_reg);
out_iounmap:
iounmap(priv->regs);
out_free_priv: