/*
 * Copyright (C) 2017 Marvell
 *
 * Hanna Hawa <hannah@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <dt-bindings/interrupt-controller/mvebu-icu.h>

#include "irq-mvebu-gicp.h"

/* ICU registers */
#define ICU_SETSPI_NSR_AL	0x10
#define ICU_SETSPI_NSR_AH	0x14
#define ICU_CLRSPI_NSR_AL	0x18
#define ICU_CLRSPI_NSR_AH	0x1c
#define ICU_INT_CFG(x)		(0x100 + 4 * (x))
#define ICU_INT_ENABLE		BIT(24)
#define ICU_IS_EDGE		BIT(28)
#define ICU_GROUP_SHIFT		29
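
/*
 * Layout of ICU_INT_CFG(x) as programmed by this driver (see
 * mvebu_icu_write_msg()): the low bits carry the interrupt identifier
 * provided by the parent GICP domain in msg->data, ICU_INT_ENABLE
 * (bit 24) arms the entry, ICU_IS_EDGE (bit 28) selects edge semantics
 * and bits [31:29] hold the ICU group. As an illustrative example, a
 * level interrupt in group ICU_GRP_NSR with msg->data == 0x20 is
 * programmed as:
 *
 *	0x20 | ICU_INT_ENABLE | (ICU_GRP_NSR << ICU_GROUP_SHIFT)
 */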

/* ICU definitions */
#define ICU_MAX_IRQS		207
#define ICU_SATA0_ICU_ID	109
#define ICU_SATA1_ICU_ID	107

struct mvebu_icu {
	struct irq_chip irq_chip;
	void __iomem *base;
	struct irq_domain *domain;
	struct device *dev;
};

struct mvebu_icu_irq_data {
	struct mvebu_icu *icu;
	unsigned int icu_group;
	unsigned int type;
};
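
/*
 * mvebu_icu_write_msg() is the platform MSI write_msg callback of the
 * device domain created in mvebu_icu_probe(). The parent GICP domain
 * composes the message: a non-zero doorbell address means the interrupt
 * was just allocated and the ICU entry must be programmed, while a
 * zeroed message (on teardown) means the entry must be de-configured.
 */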
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct irq_data *d = irq_get_irq_data(desc->irq);
	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
	struct mvebu_icu *icu = icu_irqd->icu;
	unsigned int icu_int;

	if (msg->address_lo || msg->address_hi) {
		/* Configure the ICU with irq number & type */
		icu_int = msg->data | ICU_INT_ENABLE;
		if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
			icu_int |= ICU_IS_EDGE;
		icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
	} else {
		/* De-configure the ICU */
		icu_int = 0;
	}

	writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));

	/*
	 * The SATA unit has 2 ports, and a dedicated ICU entry per
	 * port. The ahci sata driver supports only one irq interrupt
	 * per SATA unit. To solve this conflict, we configure the 2
	 * SATA wired interrupts in the south bridge into 1 GIC
	 * interrupt in the north bridge. Even if only a single port
	 * is enabled, if the sata node is enabled, both interrupts are
	 * configured (regardless of which port is actually in use).
	 */
	if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
		writel_relaxed(icu_int,
			       icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
		writel_relaxed(icu_int,
			       icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
	}
}

static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
			       unsigned long *hwirq, unsigned int *type)
{
	struct mvebu_icu *icu = d->host_data;
	unsigned int icu_group;

	/* Check the count of the parameters in dt */
	if (WARN_ON(fwspec->param_count < 3)) {
		dev_err(icu->dev, "wrong ICU parameter count %d\n",
			fwspec->param_count);
		return -EINVAL;
	}

	/* Only ICU group type is handled */
	icu_group = fwspec->param[0];
	if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
	    icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
		dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
		return -EINVAL;
	}

	*hwirq = fwspec->param[1];
	if (*hwirq >= ICU_MAX_IRQS) {
		dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
		return -EINVAL;
	}

	/* Mask the type to prevent wrong DT configuration */
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
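
/*
 * For reference, consumers are expected to use the three-cell specifier
 * decoded above: <ICU group, ICU interrupt number, IRQ type>. A
 * hypothetical CP110 peripheral node (labels and interrupt number are
 * purely illustrative, not taken from a real board) would look like:
 *
 *	some_device: device@440000 {
 *		interrupt-parent = <&cp0_icu>;
 *		interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */
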
static int
mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *args)
{
	int err;
	unsigned long hwirq;
	struct irq_fwspec *fwspec = args;
	struct mvebu_icu *icu = platform_msi_get_host_data(domain);
	struct mvebu_icu_irq_data *icu_irqd;

	icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
	if (!icu_irqd)
		return -ENOMEM;

	err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
					     &icu_irqd->type);
	if (err) {
		dev_err(icu->dev, "failed to translate ICU parameters\n");
		goto free_irqd;
	}

	icu_irqd->icu_group = fwspec->param[0];
	icu_irqd->icu = icu;

	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
	if (err) {
		dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
		goto free_irqd;
	}

	/* Make sure there is no interrupt left pending by the firmware */
	err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
	if (err)
		goto free_msi;

	err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					    &icu->irq_chip, icu_irqd);
	if (err) {
		dev_err(icu->dev, "failed to set the data to IRQ domain\n");
		goto free_msi;
	}

	return 0;

free_msi:
	platform_msi_domain_free(domain, virq, nr_irqs);
free_irqd:
	kfree(icu_irqd);
	return err;
}

static void
mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs)
{
	struct irq_data *d = irq_get_irq_data(virq);
	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;

	kfree(icu_irqd);

	platform_msi_domain_free(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mvebu_icu_domain_ops = {
	.translate = mvebu_icu_irq_domain_translate,
	.alloc     = mvebu_icu_irq_domain_alloc,
	.free      = mvebu_icu_irq_domain_free,
};

static int mvebu_icu_probe(struct platform_device *pdev)
{
	struct mvebu_icu *icu;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *gicp_dn;
	struct resource *res;
	phys_addr_t setspi, clrspi;
	u32 i, icu_int;
	int ret;

	icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
			   GFP_KERNEL);
	if (!icu)
		return -ENOMEM;

	icu->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	icu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(icu->base)) {
		dev_err(&pdev->dev, "Failed to map icu base address.\n");
		return PTR_ERR(icu->base);
	}

	icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
					    "ICU.%x",
					    (unsigned int)res->start);
	if (!icu->irq_chip.name)
		return -ENOMEM;
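
	/*
	 * All runtime operations (mask/unmask, EOI, trigger type, affinity)
	 * are forwarded to the parent (GICP/GIC) irqchip; the ICU entry
	 * itself is only (de)programmed through mvebu_icu_write_msg().
	 */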
	icu->irq_chip.irq_mask = irq_chip_mask_parent;
	icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
	icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
	icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
#ifdef CONFIG_SMP
	icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
#endif

	/*
	 * We're probed after MSI domains have been resolved, so force
	 * resolution here.
	 */
	pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
						 DOMAIN_BUS_PLATFORM_MSI);
	if (!pdev->dev.msi_domain)
		return -EPROBE_DEFER;

	gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
	if (!gicp_dn)
		return -ENODEV;

	ret = mvebu_gicp_get_doorbells(gicp_dn, &setspi, &clrspi);
	if (ret)
		return ret;
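
	/*
	 * setspi/clrspi are the physical addresses of the GICP doorbells in
	 * the AP: the ICU signals a wired interrupt by posting a memory
	 * write to the "set" doorbell and, for level interrupts, clears it
	 * through the "clear" doorbell, which the GICP translates into GIC
	 * SPIs.
	 */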
	/* Set Clear/Set ICU SPI message address in AP */
	writel_relaxed(upper_32_bits(setspi), icu->base + ICU_SETSPI_NSR_AH);
	writel_relaxed(lower_32_bits(setspi), icu->base + ICU_SETSPI_NSR_AL);
	writel_relaxed(upper_32_bits(clrspi), icu->base + ICU_CLRSPI_NSR_AH);
	writel_relaxed(lower_32_bits(clrspi), icu->base + ICU_CLRSPI_NSR_AL);

	/*
	 * Clean all ICU interrupts with type SPI_NSR, required to
	 * avoid unpredictable SPI assignments done by firmware.
	 */
	for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
		icu_int = readl(icu->base + ICU_INT_CFG(i));
		if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
			writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
	}

	icu->domain =
		platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
						  mvebu_icu_write_msg,
						  &mvebu_icu_domain_ops, icu);
	if (!icu->domain) {
		dev_err(&pdev->dev, "Failed to create ICU domain\n");
		return -ENOMEM;
	}

	return 0;
}

static const struct of_device_id mvebu_icu_of_match[] = {
	{ .compatible = "marvell,cp110-icu", },
	{},
};

static struct platform_driver mvebu_icu_driver = {
	.probe  = mvebu_icu_probe,
	.driver = {
		.name = "mvebu-icu",
		.of_match_table = mvebu_icu_of_match,
	},
};
builtin_platform_driver(mvebu_icu_driver);