/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>
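
/*
 * An ARConnect operation is a command issued via __mcip_cmd()/__mcip_cmd_data(),
 * optionally followed by a read of ARC_REG_MCIP_READBACK; mcip_lock keeps
 * such sequences atomic across cores.
 */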
static DEFINE_RAW_SPINLOCK(mcip_lock);

static char smp_cpuinfo_buf[128];

/*
 * Set mask to halt GFRC if any online core in SMP cluster is halted.
 * Only works for ARC HS v3.0+, on earlier versions has no effect.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
	 * GFRC version 0x3, so bail out on older hardware.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
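
/*
 * Per-cpu bringup: hook up the IPI and self-IPI (softirq) lines and, if a
 * Global Free Running Counter is present, add this core to the GFRC halt
 * mask.
 */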
static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as new CPU came online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);
}
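
/*
 * Raise an IPI: cross-core interrupts go through ARConnect
 * (CMD_INTRPT_GENERATE_IRQ); a self-IPI uses the core-local softirq line
 * since ARConnect cannot interrupt the sending core itself.
 */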
static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If receiver already has a pending interrupt, elide sending this one.
	 * Linux cross core calling works well with concurrent IPIs coalesced
	 * into one; see arch/arc/kernel/smp.c: ipi_send_msg_one()
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
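
/*
 * Acknowledge an incoming IPI: a self-IPI is cleared on the softirq line,
 * anything else is acked towards ARConnect per sending core.
 */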
static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare case, multiple concurrent IPIs sent to same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to typical single bit,
	 * hence the ack loop below.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
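
/*
 * Probe the ARConnect build register once at boot: report which sub-blocks
 * (IPI, IDU, DEBUG, GFRC) are present and record GFRC availability for
 * cpuinfo.
 */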
static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;

	if (mp.dbg) {
		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
	}
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
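
/*
 * Program the per-IRQ mode word: trigger type in the lvl field and
 * distribution policy (fixed destination vs Round Robin) in the distr field.
 */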
static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
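
/*
 * Affinity handler: trim the requested mask to online CPUs, program it as
 * the DEST mask, and pick the distribution mode: a single destination bit
 * selects fixed delivery (IDU_M_DISTRI_DEST), multiple bits select Round
 * Robin (IDU_M_DISTRI_RR).
 */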
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* errout if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 *   1. When the kernel is not configured with support of SMP.
	 *   2. When the kernel is configured with support of SMP but upper
	 *      interrupt controllers do not support setting the affinity
	 *      and cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_enable		= idu_irq_enable,
	.irq_set_affinity	= idu_irq_set_affinity,
};
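
/*
 * Chained handler for the core intc line a common IRQ is cascaded through:
 * translate the core hwirq back into the IDU domain and handle the mapped
 * Linux irq.
 */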
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
	chained_irq_exit(core_chip, desc);
}
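
/*
 * Domain .map callback: common IRQs are level triggered (handle_level_irq)
 * and IRQ_MOVE_PCNTXT lets an affinity change be applied directly from
 * process context rather than deferred to the next interrupt.
 */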
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onecell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
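
/*
 * For example, with C = 8 common IRQs the IDU hwirqs 0..7 cascade into
 * core-intc lines 24..31 (FIRST_EXT_IRQ + hwirq), which is exactly the
 * mapping idu_cascade_isr() inverts.
 */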
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);