// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)
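
/*
 * Each EIOINTC controller instance is described by one eiointc_priv;
 * nr_pics counts the instances registered so far.
 */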
static int nr_pics;

struct eiointc_priv {
	u32			node;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
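
/*
 * Each 32-bit ROUTE register holds the routing bytes of four vectors.
 * A routing byte carries a core bitmap in its low nibble and the target
 * EIO node in its high nibble; data_mask leaves only the byte selected
 * by pos unmasked so csr_any_send() updates just that vector's entry.
 */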
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);
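
/*
 * Pick the first online CPU covered by this controller from the requested
 * mask, mask the vector while its routing entry is rewritten, then
 * re-enable it. affinity_lock serialises concurrent route updates.
 */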
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}
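
/*
 * Per-node hardware setup, run on the first core of each EIO node: enable
 * the extended I/O interrupt feature, then program the NODEMAP, IPMAP,
 * ROUTE and ENABLE/BOUNCE register banks for all 256 vectors.
 */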
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node); /* signed, so the failure check below works */

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = EIOINTC_ALL_ENABLE;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}
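
/*
 * Chained handler for the cascade interrupt: scan the four 64-bit ISR
 * registers, write the pending bits back to acknowledge them, then call
 * the mapped handler for each pending vector.
 */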
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < VEC_REG_COUNT; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}
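
/*
 * Pending vectors are acknowledged by the ISR write-back in
 * eiointc_irq_dispatch() and enables are programmed globally in
 * eiointc_router_init(), so these per-irq callbacks are empty stubs.
 */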
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};
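
/*
 * Linear irq_domain callbacks: hwirqs are translated one-cell and every
 * mapped vector uses handle_edge_irq with the controller's private data
 * as chip data.
 */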
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
					priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};
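
/*
 * pch_group/msi_group record which irq_domain serves as parent for the
 * PCH PIC and PCH MSI controllers on each node; the helpers below fill
 * in and look up those tables.
 */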
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	if (cpu_has_flatmode)
		node = cpu_to_node(node * CORES_PER_EIO_NODE);

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}
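
/*
 * MADT callbacks for the downstream controllers. The PCH PIC's node is
 * taken from bits 44-47 of its register base address; PCH MSI hangs off
 * the most recently registered EIOINTC instance.
 */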
static int __init
pch_pic_parse_madt(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return -EINVAL;
}

static int __init
pch_msi_parse_madt(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return -EINVAL;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
			      pch_pic_parse_madt, 0);
	acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
			      pch_msi_parse_madt, 1);
	return 0;
}
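
/*
 * ACPI probe path: allocate the per-controller state, build the node and
 * CPU span masks from the MADT entry, create the linear IRQ domain, hook
 * the cascade interrupt on the parent (CPU) controller, register a CPU
 * hotplug callback for the router setup, and finally probe the downstream
 * PCH PIC/MSI controllers.
 */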
int __init eiointc_acpi_init(struct irq_domain *parent,
				     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int i, parent_irq;
	unsigned long node_map;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->node = acpi_eiointc->node;
	node_map = acpi_eiointc->node_map ? : -1ULL;

	for_each_possible_cpu(i) {
		if (node_map & (1ULL << cpu_to_eio_node(i))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
		}
	}

	/* Setup IRQ domain */
	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
					&eiointc_domain_ops, priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-eiointc: cannot add IRQ domain\n");
		goto out_free_handle;
	}

	eiointc_priv[nr_pics++] = priv;

	eiointc_router_init(0);

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
				  "irqchip/loongarch/intc:starting",
				  eiointc_router_init, NULL);

	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
	acpi_cascade_irqdomain_init();

	return 0;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}