1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/efi.h>
15 #include <linux/interrupt.h>
16 #include <linux/iommu.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqdomain.h>
19 #include <linux/list.h>
20 #include <linux/log2.h>
21 #include <linux/memblock.h>
23 #include <linux/msi.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_pci.h>
28 #include <linux/of_platform.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31 #include <linux/syscore_ops.h>
33 #include <linux/irqchip.h>
34 #include <linux/irqchip/arm-gic-v3.h>
35 #include <linux/irqchip/arm-gic-v4.h>
37 #include <asm/cputype.h>
38 #include <asm/exception.h>
40 #include "irq-gic-common.h"
42 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
45 #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
47 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
48 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
49 #define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
51 #define RD_LOCAL_LPI_ENABLED BIT(0)
52 #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
53 #define RD_LOCAL_MEMRESERVE_DONE BIT(2)
55 static u32 lpi_id_bits;
58 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
59 * deal with (one configuration byte per interrupt). PENDBASE has to
60 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
62 #define LPI_NRBITS lpi_id_bits
63 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
64 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
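/*
 * For example, with lpi_id_bits == 16 (the usual ITS_MAX_LPI_NRBITS cap):
 *
 *   LPI_PROPBASE_SZ = ALIGN(BIT(16), SZ_64K)     = 64kB (one byte per LPI)
 *   LPI_PENDBASE_SZ = ALIGN(BIT(16) / 8, SZ_64K) = 64kB (one bit per LPI)
 */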
66 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
69 * Collection structure - just an ID, and a redistributor address to
70 * ping. We use one per CPU as a bag of interrupts assigned to this CPU.
73 struct its_collection {
79 * The ITS_BASER structure - contains memory information, cached
80 * value of BASER register configuration and ITS page size.
92 * The ITS structure - contains most of the infrastructure, with the
93 * top-level MSI domain, the command queue, the collections, and the
94 * list of devices writing to it.
96 * dev_alloc_lock has to be taken for device allocations, while the
97 * spinlock must be taken to parse data structures such as the device list.
102 struct mutex dev_alloc_lock;
103 struct list_head entry;
105 void __iomem *sgir_base;
106 phys_addr_t phys_base;
107 struct its_cmd_block *cmd_base;
108 struct its_cmd_block *cmd_write;
109 struct its_baser tables[GITS_BASER_NR_REGS];
110 struct its_collection *collections;
111 struct fwnode_handle *fwnode_handle;
112 u64 (*get_msi_base)(struct its_device *its_dev);
117 struct list_head its_device_list;
119 unsigned long list_nr;
121 unsigned int msi_domain_flags;
122 u32 pre_its_base; /* for Socionext Synquacer */
123 int vlpi_redist_offset;
126 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
127 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
128 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
130 #define ITS_ITT_ALIGN SZ_256
132 /* The maximum number of VPEID bits supported by VLPI commands */
133 #define ITS_MAX_VPEID_BITS \
136 if (gic_rdists->has_rvpeid && \
137 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
138 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
143 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
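/*
 * Without RVPEID (or with GICD_TYPER2.VIL clear), ITS_MAX_VPEID_BITS
 * evaluates to its default of 16, so ITS_MAX_VPEID is 65536 vPEs.
 */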
145 /* Convert page order to size in bytes */
146 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
148 struct event_lpi_map {
149 unsigned long *lpi_map;
151 irq_hw_number_t lpi_base;
153 raw_spinlock_t vlpi_lock;
155 struct its_vlpi_map *vlpi_maps;
160 * The ITS view of a device - belongs to an ITS, owns an interrupt
161 * translation table, and a list of interrupts. If some of its
162 * LPIs are injected into a guest (GICv4), the event_map.vm field
163 * indicates which one.
166 struct list_head entry;
167 struct its_node *its;
168 struct event_lpi_map event_map;
177 struct its_device *dev;
178 struct its_vpe **vpes;
182 struct cpu_lpi_count {
187 static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
189 static LIST_HEAD(its_nodes);
190 static DEFINE_RAW_SPINLOCK(its_lock);
191 static struct rdists *gic_rdists;
192 static struct irq_domain *its_parent;
194 static unsigned long its_list_map;
195 static u16 vmovp_seq_num;
196 static DEFINE_RAW_SPINLOCK(vmovp_lock);
198 static DEFINE_IDA(its_vpeid_ida);
200 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
201 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
202 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
203 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
206 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
207 * always have vSGIs mapped.
209 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
211 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
214 static u16 get_its_list(struct its_vm *vm)
216 struct its_node *its;
217 unsigned long its_list = 0;
219 list_for_each_entry(its, &its_nodes, entry) {
223 if (require_its_list_vmovp(vm, its))
224 __set_bit(its->list_nr, &its_list);
227 return (u16)its_list;
230 static inline u32 its_get_event_id(struct irq_data *d)
232 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
233 return d->hwirq - its_dev->event_map.lpi_base;
236 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
239 struct its_node *its = its_dev->its;
241 return its->collections + its_dev->event_map.col_map[event];
244 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
247 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
250 return &its_dev->event_map.vlpi_maps[event];
253 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
255 if (irqd_is_forwarded_to_vcpu(d)) {
256 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
257 u32 event = its_get_event_id(d);
259 return dev_event_to_vlpi_map(its_dev, event);
265 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
267 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
271 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
273 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
276 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
278 struct its_vlpi_map *map = get_vlpi_map(d);
282 cpu = vpe_to_cpuid_lock(map->vpe, flags);
284 /* Physical LPIs are already locked via the irq_desc lock */
285 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
286 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
287 /* Keep GCC quiet... */
294 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
296 struct its_vlpi_map *map = get_vlpi_map(d);
299 vpe_to_cpuid_unlock(map->vpe, flags);
302 static struct its_collection *valid_col(struct its_collection *col)
304 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
310 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
312 if (valid_col(its->collections + vpe->col_idx))
319 * ITS command descriptors - parameters to be encoded in a command block.
322 struct its_cmd_desc {
325 struct its_device *dev;
330 struct its_device *dev;
335 struct its_device *dev;
340 struct its_device *dev;
345 struct its_collection *col;
350 struct its_device *dev;
356 struct its_device *dev;
357 struct its_collection *col;
362 struct its_device *dev;
367 struct its_collection *col;
376 struct its_collection *col;
382 struct its_device *dev;
390 struct its_device *dev;
397 struct its_collection *col;
418 * The ITS command block, which is what the ITS actually parses.
420 struct its_cmd_block {
423 __le64 raw_cmd_le[4];
427 #define ITS_CMD_QUEUE_SZ SZ_64K
428 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
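/*
 * A command is four 64-bit words (32 bytes), so a 64kB queue holds 2048
 * entries. The queue is considered full when the write index is one slot
 * behind the read index modulo the ring size, e.g. widx == 2047 and
 * ridx == 0 gives (2047 + 1) % 2048 == ridx in its_queue_full() below.
 */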
430 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
431 struct its_cmd_block *,
432 struct its_cmd_desc *);
434 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
435 struct its_cmd_block *,
436 struct its_cmd_desc *);
438 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
440 u64 mask = GENMASK_ULL(h, l);
442 *raw_cmd |= (val << l) & mask;
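/*
 * Each its_encode_*() helper below is a thin wrapper around this field
 * packing (commands are zeroed at allocation time, so OR-ing the shifted
 * value in is enough). For instance, its_encode_devid(cmd, devid) boils
 * down to:
 *
 *   cmd->raw_cmd[0] |= ((u64)devid << 32) & GENMASK_ULL(63, 32);
 */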
445 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
447 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
450 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
452 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
455 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
457 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
460 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
462 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
465 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
467 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
470 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
472 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
475 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
477 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
480 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
482 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
485 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
487 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
490 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
492 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
495 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
497 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
500 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
502 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
505 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
507 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
510 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
512 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
515 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
517 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
520 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
522 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
525 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
527 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
530 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
532 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
535 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
537 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
540 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
542 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
545 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
548 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
551 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
554 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
557 static void its_encode_db(struct its_cmd_block *cmd, bool db)
559 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
562 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
564 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
567 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
569 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
572 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
574 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
577 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
579 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
582 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
584 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
587 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
589 /* Let's fixup BE commands */
590 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
591 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
592 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
593 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
596 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
597 struct its_cmd_block *cmd,
598 struct its_cmd_desc *desc)
600 unsigned long itt_addr;
601 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
603 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
604 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
606 its_encode_cmd(cmd, GITS_CMD_MAPD);
607 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
608 its_encode_size(cmd, size - 1);
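/*
 * The MAPD Size field is the number of EventID bits minus one: a device
 * with nr_ites == 32 yields ilog2(32) == 5, so 4 is encoded and the ITT
 * covers events 0..31.
 */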
609 its_encode_itt(cmd, itt_addr);
610 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
617 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
618 struct its_cmd_block *cmd,
619 struct its_cmd_desc *desc)
621 its_encode_cmd(cmd, GITS_CMD_MAPC);
622 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
623 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
624 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
628 return desc->its_mapc_cmd.col;
631 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
632 struct its_cmd_block *cmd,
633 struct its_cmd_desc *desc)
635 struct its_collection *col;
637 col = dev_event_to_col(desc->its_mapti_cmd.dev,
638 desc->its_mapti_cmd.event_id);
640 its_encode_cmd(cmd, GITS_CMD_MAPTI);
641 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
642 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
643 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
644 its_encode_collection(cmd, col->col_id);
648 return valid_col(col);
651 static struct its_collection *its_build_movi_cmd(struct its_node *its,
652 struct its_cmd_block *cmd,
653 struct its_cmd_desc *desc)
655 struct its_collection *col;
657 col = dev_event_to_col(desc->its_movi_cmd.dev,
658 desc->its_movi_cmd.event_id);
660 its_encode_cmd(cmd, GITS_CMD_MOVI);
661 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
662 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
663 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
667 return valid_col(col);
670 static struct its_collection *its_build_discard_cmd(struct its_node *its,
671 struct its_cmd_block *cmd,
672 struct its_cmd_desc *desc)
674 struct its_collection *col;
676 col = dev_event_to_col(desc->its_discard_cmd.dev,
677 desc->its_discard_cmd.event_id);
679 its_encode_cmd(cmd, GITS_CMD_DISCARD);
680 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
681 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
685 return valid_col(col);
688 static struct its_collection *its_build_inv_cmd(struct its_node *its,
689 struct its_cmd_block *cmd,
690 struct its_cmd_desc *desc)
692 struct its_collection *col;
694 col = dev_event_to_col(desc->its_inv_cmd.dev,
695 desc->its_inv_cmd.event_id);
697 its_encode_cmd(cmd, GITS_CMD_INV);
698 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
699 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
703 return valid_col(col);
706 static struct its_collection *its_build_int_cmd(struct its_node *its,
707 struct its_cmd_block *cmd,
708 struct its_cmd_desc *desc)
710 struct its_collection *col;
712 col = dev_event_to_col(desc->its_int_cmd.dev,
713 desc->its_int_cmd.event_id);
715 its_encode_cmd(cmd, GITS_CMD_INT);
716 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
717 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
721 return valid_col(col);
724 static struct its_collection *its_build_clear_cmd(struct its_node *its,
725 struct its_cmd_block *cmd,
726 struct its_cmd_desc *desc)
728 struct its_collection *col;
730 col = dev_event_to_col(desc->its_clear_cmd.dev,
731 desc->its_clear_cmd.event_id);
733 its_encode_cmd(cmd, GITS_CMD_CLEAR);
734 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
735 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
739 return valid_col(col);
742 static struct its_collection *its_build_invall_cmd(struct its_node *its,
743 struct its_cmd_block *cmd,
744 struct its_cmd_desc *desc)
746 its_encode_cmd(cmd, GITS_CMD_INVALL);
747 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
751 return desc->its_invall_cmd.col;
754 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
755 struct its_cmd_block *cmd,
756 struct its_cmd_desc *desc)
758 its_encode_cmd(cmd, GITS_CMD_VINVALL);
759 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
763 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
766 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
767 struct its_cmd_block *cmd,
768 struct its_cmd_desc *desc)
770 unsigned long vpt_addr, vconf_addr;
774 its_encode_cmd(cmd, GITS_CMD_VMAPP);
775 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
776 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
778 if (!desc->its_vmapp_cmd.valid) {
780 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
781 its_encode_alloc(cmd, alloc);
787 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
788 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
790 its_encode_target(cmd, target);
791 its_encode_vpt_addr(cmd, vpt_addr);
792 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
797 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
799 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
801 its_encode_alloc(cmd, alloc);
804 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
805 * to be unmapped first, and in this case, we may remap the vPE
806 * back while the VPT is not empty. So we can't assume that the
807 * VPT is empty on map. This is why we never advertise PTZ.
809 its_encode_ptz(cmd, false);
810 its_encode_vconf_addr(cmd, vconf_addr);
811 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
816 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
819 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
820 struct its_cmd_block *cmd,
821 struct its_cmd_desc *desc)
825 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
826 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
830 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
831 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
832 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
833 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
834 its_encode_db_phys_id(cmd, db);
835 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
839 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
842 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
843 struct its_cmd_block *cmd,
844 struct its_cmd_desc *desc)
848 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
849 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
853 its_encode_cmd(cmd, GITS_CMD_VMOVI);
854 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
855 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
856 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
857 its_encode_db_phys_id(cmd, db);
858 its_encode_db_valid(cmd, true);
862 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
865 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
866 struct its_cmd_block *cmd,
867 struct its_cmd_desc *desc)
871 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
872 its_encode_cmd(cmd, GITS_CMD_VMOVP);
873 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
874 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
875 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
876 its_encode_target(cmd, target);
879 its_encode_db(cmd, true);
880 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
885 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
888 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
889 struct its_cmd_block *cmd,
890 struct its_cmd_desc *desc)
892 struct its_vlpi_map *map;
894 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
895 desc->its_inv_cmd.event_id);
897 its_encode_cmd(cmd, GITS_CMD_INV);
898 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
899 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
903 return valid_vpe(its, map->vpe);
906 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
907 struct its_cmd_block *cmd,
908 struct its_cmd_desc *desc)
910 struct its_vlpi_map *map;
912 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
913 desc->its_int_cmd.event_id);
915 its_encode_cmd(cmd, GITS_CMD_INT);
916 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
917 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
921 return valid_vpe(its, map->vpe);
924 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
925 struct its_cmd_block *cmd,
926 struct its_cmd_desc *desc)
928 struct its_vlpi_map *map;
930 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
931 desc->its_clear_cmd.event_id);
933 its_encode_cmd(cmd, GITS_CMD_CLEAR);
934 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
935 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
939 return valid_vpe(its, map->vpe);
942 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
943 struct its_cmd_block *cmd,
944 struct its_cmd_desc *desc)
946 if (WARN_ON(!is_v4_1(its)))
949 its_encode_cmd(cmd, GITS_CMD_INVDB);
950 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
954 return valid_vpe(its, desc->its_invdb_cmd.vpe);
957 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
958 struct its_cmd_block *cmd,
959 struct its_cmd_desc *desc)
961 if (WARN_ON(!is_v4_1(its)))
964 its_encode_cmd(cmd, GITS_CMD_VSGI);
965 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
966 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
967 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
968 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
969 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
970 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
974 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
977 static u64 its_cmd_ptr_to_offset(struct its_node *its,
978 struct its_cmd_block *ptr)
980 return (ptr - its->cmd_base) * sizeof(*ptr);
983 static int its_queue_full(struct its_node *its)
988 widx = its->cmd_write - its->cmd_base;
989 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
991 /* This is incredibly unlikely to happen, unless the ITS locks up. */
992 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
998 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1000 struct its_cmd_block *cmd;
1001 u32 count = 1000000; /* 1s! */
1003 while (its_queue_full(its)) {
1006 pr_err_ratelimited("ITS queue not draining\n");
1013 cmd = its->cmd_write++;
1015 /* Handle queue wrapping */
1016 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1017 its->cmd_write = its->cmd_base;
1020 cmd->raw_cmd[0] = 0;
1021 cmd->raw_cmd[1] = 0;
1022 cmd->raw_cmd[2] = 0;
1023 cmd->raw_cmd[3] = 0;
1028 static struct its_cmd_block *its_post_commands(struct its_node *its)
1030 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1032 writel_relaxed(wr, its->base + GITS_CWRITER);
1034 return its->cmd_write;
1037 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1040 * Make sure the commands written to memory are observable by
1043 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1044 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1049 static int its_wait_for_range_completion(struct its_node *its,
1051 struct its_cmd_block *to)
1053 u64 rd_idx, to_idx, linear_idx;
1054 u32 count = 1000000; /* 1s! */
1056 /* Linearize to_idx if the command set has wrapped around */
1057 to_idx = its_cmd_ptr_to_offset(its, to);
1058 if (to_idx < prev_idx)
1059 to_idx += ITS_CMD_QUEUE_SZ;
1061 linear_idx = prev_idx;
1066 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1069 * Compute the read pointer progress, taking the
1070 * potential wrap-around into account.
1072 delta = rd_idx - prev_idx;
1073 if (rd_idx < prev_idx)
1074 delta += ITS_CMD_QUEUE_SZ;
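/*
 * Example of the wrap-around handling: with prev_idx == 65408 and a
 * fresh rd_idx == 128, delta becomes 128 - 65408 + 65536 == 256, i.e.
 * the read pointer has consumed eight more 32-byte commands.
 */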
1076 linear_idx += delta;
1077 if (linear_idx >= to_idx)
1082 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1083 to_idx, linear_idx);
1094 /* Warning, macro hell follows */
1095 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1096 void name(struct its_node *its, \
1097 buildtype builder, \
1098 struct its_cmd_desc *desc) \
1100 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1101 synctype *sync_obj; \
1102 unsigned long flags; \
1105 raw_spin_lock_irqsave(&its->lock, flags); \
1107 cmd = its_allocate_entry(its); \
1108 if (!cmd) { /* We're soooooo screwed... */ \
1109 raw_spin_unlock_irqrestore(&its->lock, flags); \
1112 sync_obj = builder(its, cmd, desc); \
1113 its_flush_cmd(its, cmd); \
1116 sync_cmd = its_allocate_entry(its); \
1120 buildfn(its, sync_cmd, sync_obj); \
1121 its_flush_cmd(its, sync_cmd); \
1125 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1126 next_cmd = its_post_commands(its); \
1127 raw_spin_unlock_irqrestore(&its->lock, flags); \
1129 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1130 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1133 static void its_build_sync_cmd(struct its_node *its,
1134 struct its_cmd_block *sync_cmd,
1135 struct its_collection *sync_col)
1137 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1138 its_encode_target(sync_cmd, sync_col->target_address);
1140 its_fixup_cmd(sync_cmd);
1143 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1144 struct its_collection, its_build_sync_cmd)
1146 static void its_build_vsync_cmd(struct its_node *its,
1147 struct its_cmd_block *sync_cmd,
1148 struct its_vpe *sync_vpe)
1150 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1151 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1153 its_fixup_cmd(sync_cmd);
1156 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1157 struct its_vpe, its_build_vsync_cmd)
1159 static void its_send_int(struct its_device *dev, u32 event_id)
1161 struct its_cmd_desc desc;
1163 desc.its_int_cmd.dev = dev;
1164 desc.its_int_cmd.event_id = event_id;
1166 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1169 static void its_send_clear(struct its_device *dev, u32 event_id)
1171 struct its_cmd_desc desc;
1173 desc.its_clear_cmd.dev = dev;
1174 desc.its_clear_cmd.event_id = event_id;
1176 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1179 static void its_send_inv(struct its_device *dev, u32 event_id)
1181 struct its_cmd_desc desc;
1183 desc.its_inv_cmd.dev = dev;
1184 desc.its_inv_cmd.event_id = event_id;
1186 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1189 static void its_send_mapd(struct its_device *dev, int valid)
1191 struct its_cmd_desc desc;
1193 desc.its_mapd_cmd.dev = dev;
1194 desc.its_mapd_cmd.valid = !!valid;
1196 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1199 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1202 struct its_cmd_desc desc;
1204 desc.its_mapc_cmd.col = col;
1205 desc.its_mapc_cmd.valid = !!valid;
1207 its_send_single_command(its, its_build_mapc_cmd, &desc);
1210 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1212 struct its_cmd_desc desc;
1214 desc.its_mapti_cmd.dev = dev;
1215 desc.its_mapti_cmd.phys_id = irq_id;
1216 desc.its_mapti_cmd.event_id = id;
1218 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1221 static void its_send_movi(struct its_device *dev,
1222 struct its_collection *col, u32 id)
1224 struct its_cmd_desc desc;
1226 desc.its_movi_cmd.dev = dev;
1227 desc.its_movi_cmd.col = col;
1228 desc.its_movi_cmd.event_id = id;
1230 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1233 static void its_send_discard(struct its_device *dev, u32 id)
1235 struct its_cmd_desc desc;
1237 desc.its_discard_cmd.dev = dev;
1238 desc.its_discard_cmd.event_id = id;
1240 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1243 static void its_send_invall(struct its_node *its, struct its_collection *col)
1245 struct its_cmd_desc desc;
1247 desc.its_invall_cmd.col = col;
1249 its_send_single_command(its, its_build_invall_cmd, &desc);
1252 static void its_send_vmapti(struct its_device *dev, u32 id)
1254 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1255 struct its_cmd_desc desc;
1257 desc.its_vmapti_cmd.vpe = map->vpe;
1258 desc.its_vmapti_cmd.dev = dev;
1259 desc.its_vmapti_cmd.virt_id = map->vintid;
1260 desc.its_vmapti_cmd.event_id = id;
1261 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1263 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1266 static void its_send_vmovi(struct its_device *dev, u32 id)
1268 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1269 struct its_cmd_desc desc;
1271 desc.its_vmovi_cmd.vpe = map->vpe;
1272 desc.its_vmovi_cmd.dev = dev;
1273 desc.its_vmovi_cmd.event_id = id;
1274 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1276 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1279 static void its_send_vmapp(struct its_node *its,
1280 struct its_vpe *vpe, bool valid)
1282 struct its_cmd_desc desc;
1284 desc.its_vmapp_cmd.vpe = vpe;
1285 desc.its_vmapp_cmd.valid = valid;
1286 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1288 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1291 static void its_send_vmovp(struct its_vpe *vpe)
1293 struct its_cmd_desc desc = {};
1294 struct its_node *its;
1295 unsigned long flags;
1296 int col_id = vpe->col_idx;
1298 desc.its_vmovp_cmd.vpe = vpe;
1300 if (!its_list_map) {
1301 its = list_first_entry(&its_nodes, struct its_node, entry);
1302 desc.its_vmovp_cmd.col = &its->collections[col_id];
1303 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1308 * Yet another marvel of the architecture. If using the
1309 * its_list "feature", we need to make sure that all ITSs
1310 * receive all VMOVP commands in the same order. The only way
1311 * to guarantee this is to make vmovp a serialization point.
1315 raw_spin_lock_irqsave(&vmovp_lock, flags);
1317 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1318 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1321 list_for_each_entry(its, &its_nodes, entry) {
1325 if (!require_its_list_vmovp(vpe->its_vm, its))
1328 desc.its_vmovp_cmd.col = &its->collections[col_id];
1329 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1332 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1335 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1337 struct its_cmd_desc desc;
1339 desc.its_vinvall_cmd.vpe = vpe;
1340 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1343 static void its_send_vinv(struct its_device *dev, u32 event_id)
1345 struct its_cmd_desc desc;
1348 * There is no real VINV command. This is just a normal INV,
1349 * with a VSYNC instead of a SYNC.
1351 desc.its_inv_cmd.dev = dev;
1352 desc.its_inv_cmd.event_id = event_id;
1354 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1357 static void its_send_vint(struct its_device *dev, u32 event_id)
1359 struct its_cmd_desc desc;
1362 * There is no real VINT command. This is just a normal INT,
1363 * with a VSYNC instead of a SYNC.
1365 desc.its_int_cmd.dev = dev;
1366 desc.its_int_cmd.event_id = event_id;
1368 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1371 static void its_send_vclear(struct its_device *dev, u32 event_id)
1373 struct its_cmd_desc desc;
1376 * There is no real VCLEAR command. This is just a normal CLEAR,
1377 * with a VSYNC instead of a SYNC.
1379 desc.its_clear_cmd.dev = dev;
1380 desc.its_clear_cmd.event_id = event_id;
1382 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1385 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1387 struct its_cmd_desc desc;
1389 desc.its_invdb_cmd.vpe = vpe;
1390 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1394 * irqchip functions - assumes MSI, mostly.
1396 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1398 struct its_vlpi_map *map = get_vlpi_map(d);
1399 irq_hw_number_t hwirq;
1404 va = page_address(map->vm->vprop_page);
1405 hwirq = map->vintid;
1407 /* Remember the updated property */
1408 map->properties &= ~clr;
1409 map->properties |= set | LPI_PROP_GROUP1;
1411 va = gic_rdists->prop_table_va;
1415 cfg = va + hwirq - 8192;
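/*
 * LPI INTIDs start at 8192, so the configuration byte for an LPI lives
 * at offset (hwirq - 8192) in the (v)prop table: hwirq 8200 maps to
 * byte 8, for example.
 */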
1417 *cfg |= set | LPI_PROP_GROUP1;
1420 * Make the above write visible to the redistributors.
1421 * And yes, we're flushing exactly: One. Single. Byte.
1424 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1425 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1430 static void wait_for_syncr(void __iomem *rdbase)
1432 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1436 static void direct_lpi_inv(struct irq_data *d)
1438 struct its_vlpi_map *map = get_vlpi_map(d);
1439 void __iomem *rdbase;
1440 unsigned long flags;
1445 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1447 WARN_ON(!is_v4_1(its_dev->its));
1449 val = GICR_INVLPIR_V;
1450 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1451 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1456 /* Target the redistributor this LPI is currently routed to */
1457 cpu = irq_to_cpuid_lock(d, &flags);
1458 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1459 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1460 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1462 wait_for_syncr(rdbase);
1463 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1464 irq_to_cpuid_unlock(d, flags);
1467 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1469 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1471 lpi_write_config(d, clr, set);
1472 if (gic_rdists->has_direct_lpi &&
1473 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1475 else if (!irqd_is_forwarded_to_vcpu(d))
1476 its_send_inv(its_dev, its_get_event_id(d));
1478 its_send_vinv(its_dev, its_get_event_id(d));
1481 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1483 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1484 u32 event = its_get_event_id(d);
1485 struct its_vlpi_map *map;
1488 * GICv4.1 does away with the per-LPI nonsense, nothing to do here.
1491 if (is_v4_1(its_dev->its))
1494 map = dev_event_to_vlpi_map(its_dev, event);
1496 if (map->db_enabled == enable)
1499 map->db_enabled = enable;
1502 * More fun with the architecture:
1504 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1505 * value or to 1023, depending on the enable bit. But that
1506 * would be issuing a mapping for an /existing/ DevID+EventID
1507 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1508 * to the /same/ vPE, using this opportunity to adjust the
1509 * doorbell. Mouahahahaha. We loves it, Precious.
1511 its_send_vmovi(its_dev, event);
1514 static void its_mask_irq(struct irq_data *d)
1516 if (irqd_is_forwarded_to_vcpu(d))
1517 its_vlpi_set_doorbell(d, false);
1519 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1522 static void its_unmask_irq(struct irq_data *d)
1524 if (irqd_is_forwarded_to_vcpu(d))
1525 its_vlpi_set_doorbell(d, true);
1527 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1530 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1532 if (irqd_affinity_is_managed(d))
1533 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1535 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1538 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1540 if (irqd_affinity_is_managed(d))
1541 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1543 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1546 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1548 if (irqd_affinity_is_managed(d))
1549 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1551 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1554 static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
1555 const struct cpumask *cpu_mask)
1557 unsigned int cpu = nr_cpu_ids, tmp;
1558 int count = S32_MAX;
1560 for_each_cpu(tmp, cpu_mask) {
1561 int this_count = its_read_lpi_count(d, tmp);
1562 if (this_count < count) {
1572 * As suggested by Thomas Gleixner in:
1573 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1575 static int its_select_cpu(struct irq_data *d,
1576 const struct cpumask *aff_mask)
1578 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1579 static DEFINE_RAW_SPINLOCK(tmpmask_lock);
1580 static struct cpumask __tmpmask;
1581 struct cpumask *tmpmask;
1582 unsigned long flags;
1584 node = its_dev->its->numa_node;
1585 tmpmask = &__tmpmask;
1587 raw_spin_lock_irqsave(&tmpmask_lock, flags);
1589 if (!irqd_affinity_is_managed(d)) {
1590 /* First try the NUMA node */
1591 if (node != NUMA_NO_NODE) {
1593 * Try the intersection of the affinity mask and the
1594 * node mask (and the online mask, just to be safe).
1596 cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
1597 cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1600 * Ideally, we would check if the mask is empty, and
1601 * try again on the full node here.
1603 * But it turns out that the way ACPI describes the
1604 * affinity for ITSs only deals with memory, and
1605 * not target CPUs, so it cannot describe a single
1606 * ITS placed next to two NUMA nodes.
1608 * Instead, just fall back on the online mask. This
1609 * diverges from Thomas' suggestion above.
1611 cpu = cpumask_pick_least_loaded(d, tmpmask);
1612 if (cpu < nr_cpu_ids)
1615 /* If we can't cross sockets, give up */
1616 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1619 /* If the above failed, expand the search */
1622 /* Try the intersection of the affinity and online masks */
1623 cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1625 /* If that doesn't fly, the online mask is the last resort */
1626 if (cpumask_empty(tmpmask))
1627 cpumask_copy(tmpmask, cpu_online_mask);
1629 cpu = cpumask_pick_least_loaded(d, tmpmask);
1631 cpumask_copy(tmpmask, aff_mask);
1633 /* If we cannot cross sockets, limit the search to that node */
1634 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1635 node != NUMA_NO_NODE)
1636 cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1638 cpu = cpumask_pick_least_loaded(d, tmpmask);
1641 raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
1643 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1647 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1650 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1651 struct its_collection *target_col;
1652 u32 id = its_get_event_id(d);
1655 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1656 if (irqd_is_forwarded_to_vcpu(d))
1659 prev_cpu = its_dev->event_map.col_map[id];
1660 its_dec_lpi_count(d, prev_cpu);
1663 cpu = its_select_cpu(d, mask_val);
1665 cpu = cpumask_pick_least_loaded(d, mask_val);
1667 if (cpu < 0 || cpu >= nr_cpu_ids)
1670 /* don't set the affinity when the target cpu is the same as the current one */
1671 if (cpu != prev_cpu) {
1672 target_col = &its_dev->its->collections[cpu];
1673 its_send_movi(its_dev, target_col, id);
1674 its_dev->event_map.col_map[id] = cpu;
1675 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1678 its_inc_lpi_count(d, cpu);
1680 return IRQ_SET_MASK_OK_DONE;
1683 its_inc_lpi_count(d, prev_cpu);
1687 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1689 struct its_node *its = its_dev->its;
1691 return its->phys_base + GITS_TRANSLATER;
1694 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1696 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1697 struct its_node *its;
1701 addr = its->get_msi_base(its_dev);
1703 msg->address_lo = lower_32_bits(addr);
1704 msg->address_hi = upper_32_bits(addr);
1705 msg->data = its_get_event_id(d);
1707 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1710 static int its_irq_set_irqchip_state(struct irq_data *d,
1711 enum irqchip_irq_state which,
1714 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1715 u32 event = its_get_event_id(d);
1717 if (which != IRQCHIP_STATE_PENDING)
1720 if (irqd_is_forwarded_to_vcpu(d)) {
1722 its_send_vint(its_dev, event);
1724 its_send_vclear(its_dev, event);
1727 its_send_int(its_dev, event);
1729 its_send_clear(its_dev, event);
1735 static int its_irq_retrigger(struct irq_data *d)
1737 return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1741 * Two favourable cases:
1743 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1746 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1747 * and we're better off mapping all VPEs always
1749 * If neither (a) nor (b) is true, then we map vPEs on demand.
1752 static bool gic_requires_eager_mapping(void)
1754 if (!its_list_map || gic_rdists->has_rvpeid)
1760 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1762 unsigned long flags;
1764 if (gic_requires_eager_mapping())
1767 raw_spin_lock_irqsave(&vmovp_lock, flags);
1770 * If the VM wasn't mapped yet, iterate over the vpes and get
1773 vm->vlpi_count[its->list_nr]++;
1775 if (vm->vlpi_count[its->list_nr] == 1) {
1778 for (i = 0; i < vm->nr_vpes; i++) {
1779 struct its_vpe *vpe = vm->vpes[i];
1780 struct irq_data *d = irq_get_irq_data(vpe->irq);
1782 /* Map the VPE to the first possible CPU */
1783 vpe->col_idx = cpumask_first(cpu_online_mask);
1784 its_send_vmapp(its, vpe, true);
1785 its_send_vinvall(its, vpe);
1786 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1790 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1793 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1795 unsigned long flags;
1797 /* Not using the ITS list? Everything is always mapped. */
1798 if (gic_requires_eager_mapping())
1801 raw_spin_lock_irqsave(&vmovp_lock, flags);
1803 if (!--vm->vlpi_count[its->list_nr]) {
1806 for (i = 0; i < vm->nr_vpes; i++)
1807 its_send_vmapp(its, vm->vpes[i], false);
1810 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1813 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1815 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1816 u32 event = its_get_event_id(d);
1822 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1824 if (!its_dev->event_map.vm) {
1825 struct its_vlpi_map *maps;
1827 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1834 its_dev->event_map.vm = info->map->vm;
1835 its_dev->event_map.vlpi_maps = maps;
1836 } else if (its_dev->event_map.vm != info->map->vm) {
1841 /* Get our private copy of the mapping information */
1842 its_dev->event_map.vlpi_maps[event] = *info->map;
1844 if (irqd_is_forwarded_to_vcpu(d)) {
1845 /* Already mapped, move it around */
1846 its_send_vmovi(its_dev, event);
1848 /* Ensure all the VPEs are mapped on this ITS */
1849 its_map_vm(its_dev->its, info->map->vm);
1852 * Flag the interrupt as forwarded so that we can
1853 * start poking the virtual property table.
1855 irqd_set_forwarded_to_vcpu(d);
1857 /* Write out the property to the prop table */
1858 lpi_write_config(d, 0xff, info->map->properties);
1860 /* Drop the physical mapping */
1861 its_send_discard(its_dev, event);
1863 /* and install the virtual one */
1864 its_send_vmapti(its_dev, event);
1866 /* Increment the number of VLPIs */
1867 its_dev->event_map.nr_vlpis++;
1871 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1875 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1877 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1878 struct its_vlpi_map *map;
1881 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1883 map = get_vlpi_map(d);
1885 if (!its_dev->event_map.vm || !map) {
1890 /* Copy our mapping information to the incoming request */
1894 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1898 static int its_vlpi_unmap(struct irq_data *d)
1900 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1901 u32 event = its_get_event_id(d);
1904 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1906 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1911 /* Drop the virtual mapping */
1912 its_send_discard(its_dev, event);
1914 /* and restore the physical one */
1915 irqd_clr_forwarded_to_vcpu(d);
1916 its_send_mapti(its_dev, d->hwirq, event);
1917 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1921 /* Potentially unmap the VM from this ITS */
1922 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1925 * Drop the refcount and make the device available again if
1926 * this was the last VLPI.
1928 if (!--its_dev->event_map.nr_vlpis) {
1929 its_dev->event_map.vm = NULL;
1930 kfree(its_dev->event_map.vlpi_maps);
1934 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1938 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1940 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1942 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1945 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1946 lpi_update_config(d, 0xff, info->config);
1948 lpi_write_config(d, 0xff, info->config);
1949 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1954 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1956 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1957 struct its_cmd_info *info = vcpu_info;
1960 if (!is_v4(its_dev->its))
1963 /* Unmap request? */
1965 return its_vlpi_unmap(d);
1967 switch (info->cmd_type) {
1969 return its_vlpi_map(d, info);
1972 return its_vlpi_get(d, info);
1974 case PROP_UPDATE_VLPI:
1975 case PROP_UPDATE_AND_INV_VLPI:
1976 return its_vlpi_prop_update(d, info);
1983 static struct irq_chip its_irq_chip = {
1985 .irq_mask = its_mask_irq,
1986 .irq_unmask = its_unmask_irq,
1987 .irq_eoi = irq_chip_eoi_parent,
1988 .irq_set_affinity = its_set_affinity,
1989 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1990 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1991 .irq_retrigger = its_irq_retrigger,
1992 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1997 * How we allocate LPIs:
1999 * lpi_range_list contains ranges of LPIs that are available to
2000 * allocate from. To allocate LPIs, just pick the first range that
2001 * fits the required allocation, and reduce it by the required
2002 * amount. Once empty, remove the range from the list.
2004 * To free a range of LPIs, add a free range to the list, sort it and
2005 * merge the result if the new range happens to be adjacent to an
2006 * already free block.
2008 * The consequence of the above is that allocation cost is low, but
2009 * freeing is expensive. We assume that freeing rarely occurs.
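/*
 * Illustrative walk-through, assuming lpi_id_bits == 16: the allocator
 * starts with a single free range covering INTIDs [8192, 65535]. A call
 * to alloc_lpi_range(32, &base) returns base == 8192 and shrinks the
 * range to [8224, 65535]; a later free_lpi_range(8192, 32) re-inserts
 * that block and merges it back into its neighbour.
 */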
2011 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
2013 static DEFINE_MUTEX(lpi_range_lock);
2014 static LIST_HEAD(lpi_range_list);
2017 struct list_head entry;
2022 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
2024 struct lpi_range *range;
2026 range = kmalloc(sizeof(*range), GFP_KERNEL);
2028 range->base_id = base;
2035 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
2037 struct lpi_range *range, *tmp;
2040 mutex_lock(&lpi_range_lock);
2042 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
2043 if (range->span >= nr_lpis) {
2044 *base = range->base_id;
2045 range->base_id += nr_lpis;
2046 range->span -= nr_lpis;
2048 if (range->span == 0) {
2049 list_del(&range->entry);
2058 mutex_unlock(&lpi_range_lock);
2060 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2064 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
2066 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
2068 if (a->base_id + a->span != b->base_id)
2070 b->base_id = a->base_id;
2072 list_del(&a->entry);
2076 static int free_lpi_range(u32 base, u32 nr_lpis)
2078 struct lpi_range *new, *old;
2080 new = mk_lpi_range(base, nr_lpis);
2084 mutex_lock(&lpi_range_lock);
2086 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
2087 if (old->base_id < base)
2091 * old is the last element with ->base_id smaller than base,
2092 * so new goes right after it. If there are no elements with
2093 * ->base_id smaller than base, &old->entry ends up pointing
2094 * at the head of the list, and inserting new at the start of
2095 * the list is the right thing to do in that case as well.
2097 list_add(&new->entry, &old->entry);
2099 * Now check if we can merge with the preceding and/or
2102 merge_lpi_ranges(old, new);
2103 merge_lpi_ranges(new, list_next_entry(new, entry));
2105 mutex_unlock(&lpi_range_lock);
2109 static int __init its_lpi_init(u32 id_bits)
2111 u32 lpis = (1UL << id_bits) - 8192;
2115 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
2117 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
2119 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2124 * Initializing the allocator is just the same as freeing the
2125 * full range of LPIs.
2127 err = free_lpi_range(8192, lpis);
2128 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2132 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2134 unsigned long *bitmap = NULL;
2138 err = alloc_lpi_range(nr_irqs, base);
2143 } while (nr_irqs > 0);
2151 bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2159 *base = *nr_ids = 0;
2164 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2166 WARN_ON(free_lpi_range(base, nr_ids));
2167 bitmap_free(bitmap);
2170 static void gic_reset_prop_table(void *va)
2172 /* Priority 0xa0, Group-1, disabled */
2173 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2175 /* Make sure the GIC will observe the written configuration */
2176 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2179 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2181 struct page *prop_page;
2183 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2187 gic_reset_prop_table(page_address(prop_page));
2192 static void its_free_prop_table(struct page *prop_page)
2194 free_pages((unsigned long)page_address(prop_page),
2195 get_order(LPI_PROPBASE_SZ));
2198 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2200 phys_addr_t start, end, addr_end;
2204 * We don't bother checking for a kdump kernel as by
2205 * construction, the LPI tables are out of this kernel's
2208 if (is_kdump_kernel())
2211 addr_end = addr + size - 1;
2213 for_each_reserved_mem_range(i, &start, &end) {
2214 if (addr >= start && addr_end <= end)
2218 /* Not found, not a good sign... */
2219 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2221 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2225 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2227 if (efi_enabled(EFI_CONFIG_TABLES))
2228 return efi_mem_reserve_persistent(addr, size);
2233 static int __init its_setup_lpi_prop_table(void)
2235 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2238 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2239 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2241 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2242 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2245 gic_reset_prop_table(gic_rdists->prop_table_va);
2249 lpi_id_bits = min_t(u32,
2250 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2251 ITS_MAX_LPI_NRBITS);
2252 page = its_allocate_prop_table(GFP_NOWAIT);
2254 pr_err("Failed to allocate PROPBASE\n");
2258 gic_rdists->prop_table_pa = page_to_phys(page);
2259 gic_rdists->prop_table_va = page_address(page);
2260 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2264 pr_info("GICv3: using LPI property table @%pa\n",
2265 &gic_rdists->prop_table_pa);
2267 return its_lpi_init(lpi_id_bits);
2270 static const char *its_base_type_string[] = {
2271 [GITS_BASER_TYPE_DEVICE] = "Devices",
2272 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2273 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2274 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2275 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2276 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2277 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2280 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2282 u32 idx = baser - its->tables;
2284 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2287 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2290 u32 idx = baser - its->tables;
2292 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2293 baser->val = its_read_baser(its, baser);
2296 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2297 u64 cache, u64 shr, u32 order, bool indirect)
2299 u64 val = its_read_baser(its, baser);
2300 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2301 u64 type = GITS_BASER_TYPE(val);
2302 u64 baser_phys, tmp;
2303 u32 alloc_pages, psz;
2308 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2309 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2310 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2311 &its->phys_base, its_base_type_string[type],
2312 alloc_pages, GITS_BASER_PAGES_MAX);
2313 alloc_pages = GITS_BASER_PAGES_MAX;
2314 order = get_order(GITS_BASER_PAGES_MAX * psz);
2317 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2321 base = (void *)page_address(page);
2322 baser_phys = virt_to_phys(base);
2324 /* Check if the physical address of the memory is above 48bits */
2325 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2327 /* 52bit PA is supported only when PageSize=64K */
2328 if (psz != SZ_64K) {
2329 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2330 free_pages((unsigned long)base, order);
2334 /* Convert 52bit PA to 48bit field */
2335 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
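/*
 * With 64kB ITS pages, the architecture encodes PA[51:48] in bits
 * [15:12] of the BASER field; GITS_BASER_PHYS_52_to_48() performs that
 * repacking so the full 52-bit address can be programmed.
 */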
2340 (type << GITS_BASER_TYPE_SHIFT) |
2341 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2342 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2347 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2351 val |= GITS_BASER_PAGE_SIZE_4K;
2354 val |= GITS_BASER_PAGE_SIZE_16K;
2357 val |= GITS_BASER_PAGE_SIZE_64K;
2361 its_write_baser(its, baser, val);
2364 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
2365 tmp &= ~GITS_BASER_SHAREABILITY_MASK;
2367 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2369 * Shareability didn't stick. Just use
2370 * whatever the read reported, which is likely
2371 * to be the only thing this redistributor
2372 * supports. If that's zero, make it
2373 * non-cacheable as well.
2375 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2377 cache = GITS_BASER_nC;
2378 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2384 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2385 &its->phys_base, its_base_type_string[type],
2387 free_pages((unsigned long)base, order);
2391 baser->order = order;
2394 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2396 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2397 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2398 its_base_type_string[type],
2399 (unsigned long)virt_to_phys(base),
2400 indirect ? "indirect" : "flat", (int)esz,
2401 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2406 static bool its_parse_indirect_baser(struct its_node *its,
2407 struct its_baser *baser,
2408 u32 *order, u32 ids)
2410 u64 tmp = its_read_baser(its, baser);
2411 u64 type = GITS_BASER_TYPE(tmp);
2412 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2413 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2414 u32 new_order = *order;
2415 u32 psz = baser->psz;
2416 bool indirect = false;
2418 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2419 if ((esz << ids) > (psz * 2)) {
2421 * Find out whether hw supports a single or two-level table by
2422 * reading bit at offset '62' after writing '1' to it.
2424 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2425 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2429 * The size of a lvl2 table is equal to the ITS page size ('psz').
2430 * To compute the lvl1 table size, subtract the ID bits covered by
2431 * one lvl2 table from 'ids' (as reported by the ITS hardware), and
2432 * shift the lvl1 entry size left by the remaining ID bits.
2435 ids -= ilog2(psz / (int)esz);
2436 esz = GITS_LVL1_ENTRY_SIZE;
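/*
 * Rough example, assuming a 64kB ITS page and an 8-byte DTE: one lvl2
 * page covers 64k / 8 == 8192 entries, i.e. 13 ID bits. For ids == 20,
 * the lvl1 table then needs 2^(20 - 13) == 128 entries of
 * GITS_LVL1_ENTRY_SIZE (8 bytes) == 1kB, instead of the 8MB a flat
 * table would require.
 */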
2441 * Allocate as many entries as required to fit the
2442 * range of device IDs that the ITS can grok... The ID
2443 * space being incredibly sparse, this results in a
2444 * massive waste of memory if the two-level device table
2445 * feature is not supported by hardware.
2447 new_order = max_t(u32, get_order(esz << ids), new_order);
2448 if (new_order > MAX_ORDER) {
2449 new_order = MAX_ORDER;
2450 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2451 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2452 &its->phys_base, its_base_type_string[type],
2453 device_ids(its), ids);
2461 static u32 compute_common_aff(u64 val)
2465 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2466 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
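	/*
	 * CommonLPIAff describes how much of the affinity is shared:
	 * 0 - all RDs share the tables, 1 - grouped by Aff3,
	 * 2 - by Aff3.Aff2, 3 - by Aff3.Aff2.Aff1. Each step keeps
	 * eight more affinity bits in the value returned below.
	 */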
2468 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2471 static u32 compute_its_aff(struct its_node *its)
2477 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2478	 * the resulting affinity. We then use that to see if this matches our own affinity.
2481 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2482 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2483 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2484 return compute_common_aff(val);
2487 static struct its_node *find_sibling_its(struct its_node *cur_its)
2489 struct its_node *its;
2492 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2495 aff = compute_its_aff(cur_its);
2497 list_for_each_entry(its, &its_nodes, entry) {
2500 if (!is_v4_1(its) || its == cur_its)
2503 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2506 if (aff != compute_its_aff(its))
2509 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2510 baser = its->tables[2].val;
2511 if (!(baser & GITS_BASER_VALID))
2520 static void its_free_tables(struct its_node *its)
2524 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2525 if (its->tables[i].base) {
2526 free_pages((unsigned long)its->tables[i].base,
2527 its->tables[i].order);
2528 its->tables[i].base = NULL;
2533 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2540 val = its_read_baser(its, baser);
2541 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2545 gpsz = GITS_BASER_PAGE_SIZE_64K;
2548 gpsz = GITS_BASER_PAGE_SIZE_16K;
2552 gpsz = GITS_BASER_PAGE_SIZE_4K;
2556 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2558 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2559 its_write_baser(its, baser, val);
2561 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2581 static int its_alloc_tables(struct its_node *its)
2583 u64 shr = GITS_BASER_InnerShareable;
2584 u64 cache = GITS_BASER_RaWaWb;
2587 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2588 /* erratum 24313: ignore memory access type */
2589 cache = GITS_BASER_nCnB;
2591 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2592 struct its_baser *baser = its->tables + i;
2593 u64 val = its_read_baser(its, baser);
2594 u64 type = GITS_BASER_TYPE(val);
2595 bool indirect = false;
2598 if (type == GITS_BASER_TYPE_NONE)
2601 if (its_probe_baser_psz(its, baser)) {
2602 its_free_tables(its);
2606 order = get_order(baser->psz);
2609 case GITS_BASER_TYPE_DEVICE:
2610 indirect = its_parse_indirect_baser(its, baser, &order,
2614 case GITS_BASER_TYPE_VCPU:
2616 struct its_node *sibling;
2619 if ((sibling = find_sibling_its(its))) {
2620 *baser = sibling->tables[2];
2621 its_write_baser(its, baser, baser->val);
2626 indirect = its_parse_indirect_baser(its, baser, &order,
2627 ITS_MAX_VPEID_BITS);
2631 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2633 its_free_tables(its);
2637 /* Update settings which will be used for next BASERn */
2638 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2639 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2645 static u64 inherit_vpe_l1_table_from_its(void)
2647 struct its_node *its;
2651 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2652 aff = compute_common_aff(val);
2654 list_for_each_entry(its, &its_nodes, entry) {
2660 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2663 if (aff != compute_its_aff(its))
2666 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2667 baser = its->tables[2].val;
2668 if (!(baser & GITS_BASER_VALID))
2671 /* We have a winner! */
2672 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2674 val = GICR_VPROPBASER_4_1_VALID;
2675 if (baser & GITS_BASER_INDIRECT)
2676 val |= GICR_VPROPBASER_4_1_INDIRECT;
2677 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2678 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2679 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2680 case GIC_PAGE_SIZE_64K:
2681 addr = GITS_BASER_ADDR_48_to_52(baser);
2684 addr = baser & GENMASK_ULL(47, 12);
2687 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2688 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2689 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2690 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2691 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2692 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2700 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2706 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2707 aff = compute_common_aff(val);
2709 for_each_possible_cpu(cpu) {
2710 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2712 if (!base || cpu == smp_processor_id())
2715 val = gic_read_typer(base + GICR_TYPER);
2716 if (aff != compute_common_aff(val))
2720 * At this point, we have a victim. This particular CPU
2721 * has already booted, and has an affinity that matches
2722 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2723 * Make sure we don't write the Z bit in that case.
2725 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2726 val &= ~GICR_VPROPBASER_4_1_Z;
2728 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2729 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2737 static bool allocate_vpe_l2_table(int cpu, u32 id)
2739 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2740 unsigned int psz, esz, idx, npg, gpsz;
2745 if (!gic_rdists->has_rvpeid)
2748 /* Skip non-present CPUs */
2752 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2754 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2755 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2756 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2762 case GIC_PAGE_SIZE_4K:
2765 case GIC_PAGE_SIZE_16K:
2768 case GIC_PAGE_SIZE_64K:
2773 /* Don't allow vpe_id that exceeds single, flat table limit */
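	/*
	 * Illustrative capacity: a single (npg = 1) 64K page with 16-byte
	 * entries (esz = 2) holds 64K / 16 = 4096 vPE IDs.
	 */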
2774 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2775 return (id < (npg * psz / (esz * SZ_8)));
2777 /* Compute 1st level table index & check if that exceeds table limit */
2778 idx = id >> ilog2(psz / (esz * SZ_8));
2779 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2782 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2784 /* Allocate memory for 2nd level table */
2786 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2790 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2791 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2792 gic_flush_dcache_to_poc(page_address(page), psz);
2794 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2796 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2797 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2798 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2800 /* Ensure updated table contents are visible to RD hardware */
2807 static int allocate_vpe_l1_table(void)
2809 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2810 u64 val, gpsz, npg, pa;
2811 unsigned int psz = SZ_64K;
2812 unsigned int np, epp, esz;
2815 if (!gic_rdists->has_rvpeid)
2819	 * If VPENDBASER.Valid is set, disable any previously programmed
2820 * VPE by setting PendingLast while clearing Valid. This has the
2821 * effect of making sure no doorbell will be generated and we can
2822 * then safely clear VPROPBASER.Valid.
2824 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2825 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2826 vlpi_base + GICR_VPENDBASER);
2829 * If we can inherit the configuration from another RD, let's do
2830 * so. Otherwise, we have to go through the allocation process. We
2831 * assume that all RDs have the exact same requirements, as
2832 * nothing will work otherwise.
2834 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2835 if (val & GICR_VPROPBASER_4_1_VALID)
2838 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2839 if (!gic_data_rdist()->vpe_table_mask)
2842 val = inherit_vpe_l1_table_from_its();
2843 if (val & GICR_VPROPBASER_4_1_VALID)
2846 /* First probe the page size */
2847 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2848 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2849 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2850 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2851 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2855 gpsz = GIC_PAGE_SIZE_4K;
2857 case GIC_PAGE_SIZE_4K:
2860 case GIC_PAGE_SIZE_16K:
2863 case GIC_PAGE_SIZE_64K:
2869 * Start populating the register from scratch, including RO fields
2870 * (which we want to print in debug cases...)
2873 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2874 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2876 /* How many entries per GIC page? */
2878 epp = psz / (esz * SZ_8);
2881 * If we need more than just a single L1 page, flag the table
2882 * as indirect and compute the number of required L1 pages.
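	 *
	 * Illustrative numbers: with 64K GIC pages and 16-byte entries,
	 * epp = 4096, so a 16-bit VPEID space needs 16 L2 pages, and the
	 * 16 * 8 = 128 bytes of L1 pointers fit in a single L1 page.
	 */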
2884 if (epp < ITS_MAX_VPEID) {
2887 val |= GICR_VPROPBASER_4_1_INDIRECT;
2889 /* Number of L2 pages required to cover the VPEID space */
2890 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2892 /* Number of L1 pages to point to the L2 pages */
2893 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2898 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2900 /* Right, that's the number of CPU pages we need for L1 */
2901 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2903 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2904 np, npg, psz, epp, esz);
2905 page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
2909 gic_data_rdist()->vpe_l1_base = page_address(page);
2910 pa = virt_to_phys(page_address(page));
2911 WARN_ON(!IS_ALIGNED(pa, psz));
2913 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2914 val |= GICR_VPROPBASER_RaWb;
2915 val |= GICR_VPROPBASER_InnerShareable;
2916 val |= GICR_VPROPBASER_4_1_Z;
2917 val |= GICR_VPROPBASER_4_1_VALID;
2920 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2921 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2923 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2924 smp_processor_id(), val,
2925 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2930 static int its_alloc_collections(struct its_node *its)
2934 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2936 if (!its->collections)
2939 for (i = 0; i < nr_cpu_ids; i++)
2940 its->collections[i].target_address = ~0ULL;
2945 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2947 struct page *pend_page;
2949 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2950 get_order(LPI_PENDBASE_SZ));
2954 /* Make sure the GIC will observe the zero-ed page */
2955 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2960 static void its_free_pending_table(struct page *pt)
2962 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2966 * Booting with kdump and LPIs enabled is generally fine. Any other
2967 * case is wrong in the absence of firmware/EFI support.
2969 static bool enabled_lpis_allowed(void)
2974 /* Check whether the property table is in a reserved region */
2975 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2976 addr = val & GENMASK_ULL(51, 12);
2978 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2981 static int __init allocate_lpi_tables(void)
2987 * If LPIs are enabled while we run this from the boot CPU,
2988 * flag the RD tables as pre-allocated if the stars do align.
2990 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2991 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2992 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2993 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2994 pr_info("GICv3: Using preallocated redistributor tables\n");
2997 err = its_setup_lpi_prop_table();
3002 * We allocate all the pending tables anyway, as we may have a
3003 * mix of RDs that have had LPIs enabled, and some that
3004 * don't. We'll free the unused ones as each CPU comes online.
3006 for_each_possible_cpu(cpu) {
3007 struct page *pend_page;
3009 pend_page = its_allocate_pending_table(GFP_NOWAIT);
3011 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3015 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3021 static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
3023 u32 count = 1000000; /* 1s! */
3028 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
3029 clean = !(val & GICR_VPENDBASER_Dirty);
3035 } while (!clean && count);
3037 if (unlikely(!clean))
3038 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3043 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
3047 /* Make sure we wait until the RD is done with the initial scan */
3048 val = read_vpend_dirty_clear(vlpi_base);
3049 val &= ~GICR_VPENDBASER_Valid;
3052 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3054 val = read_vpend_dirty_clear(vlpi_base);
3055 if (unlikely(val & GICR_VPENDBASER_Dirty))
3056 val |= GICR_VPENDBASER_PendingLast;
3061 static void its_cpu_init_lpis(void)
3063 void __iomem *rbase = gic_data_rdist_rd_base();
3064 struct page *pend_page;
3068 if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
3071 val = readl_relaxed(rbase + GICR_CTLR);
3072 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
3073 (val & GICR_CTLR_ENABLE_LPIS)) {
3075 * Check that we get the same property table on all
3076 * RDs. If we don't, this is hopeless.
3078 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
3079 paddr &= GENMASK_ULL(51, 12);
3080 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
3081 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3083 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3084 paddr &= GENMASK_ULL(51, 16);
3086 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
3087 gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
3092 pend_page = gic_data_rdist()->pend_page;
3093 paddr = page_to_phys(pend_page);
3096 val = (gic_rdists->prop_table_pa |
3097 GICR_PROPBASER_InnerShareable |
3098 GICR_PROPBASER_RaWaWb |
3099 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
3101 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3102 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
3104 if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
3105 tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
3107 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
3108 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
3110			 * The HW reports non-shareable; we must
3111			 * remove the cacheability attributes as well.
3114 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
3115 GICR_PROPBASER_CACHEABILITY_MASK);
3116 val |= GICR_PROPBASER_nC;
3117 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
3119 pr_info_once("GIC: using cache flushing for LPI property table\n");
3120 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
3124 val = (page_to_phys(pend_page) |
3125 GICR_PENDBASER_InnerShareable |
3126 GICR_PENDBASER_RaWaWb);
3128 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3129 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
3131 if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
3132 tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
3134 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
3136		 * The HW reports non-shareable; we must remove the
3137 * cacheability attributes as well.
3139 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
3140 GICR_PENDBASER_CACHEABILITY_MASK);
3141 val |= GICR_PENDBASER_nC;
3142 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
3146 val = readl_relaxed(rbase + GICR_CTLR);
3147 val |= GICR_CTLR_ENABLE_LPIS;
3148 writel_relaxed(val, rbase + GICR_CTLR);
3150 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
3151 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3154		 * A CPU can receive VLPIs before any vPE is scheduled on it
3155		 * (especially the first CPU), and a VLPI with an INTID larger
3156		 * than 2^(IDbits+1) would then be considered out of range and
3157		 * dropped by the GIC.
3158		 * So initialize IDbits to a known value to avoid VLPI drops.
3160 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3161 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3162 smp_processor_id(), val);
3163 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3166 * Also clear Valid bit of GICR_VPENDBASER, in case some
3167		 * ancient programming gets left in and could end up
3168		 * corrupting memory.
3170 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3173 if (allocate_vpe_l1_table()) {
3175 * If the allocation has failed, we're in massive trouble.
3176 * Disable direct injection, and pray that no VM was
3177 * already running...
3179 gic_rdists->has_rvpeid = false;
3180 gic_rdists->has_vlpis = false;
3183 /* Make sure the GIC has seen the above */
3186 gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
3187 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3189 gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
3190 "reserved" : "allocated",
3194 static void its_cpu_init_collection(struct its_node *its)
3196 int cpu = smp_processor_id();
3199	/* avoid cross-node collections and their mappings */
3200 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3201 struct device_node *cpu_node;
3203 cpu_node = of_get_cpu_node(cpu, NULL);
3204 if (its->numa_node != NUMA_NO_NODE &&
3205 its->numa_node != of_node_to_nid(cpu_node))
3210	 * We now have to bind each collection to its target redistributor.
3213 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3215		 * This ITS wants the physical address of the redistributor.
3218 target = gic_data_rdist()->phys_base;
3220 /* This ITS wants a linear CPU number. */
3221 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3222 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3225 /* Perform collection mapping */
3226 its->collections[cpu].target_address = target;
3227 its->collections[cpu].col_id = cpu;
3229 its_send_mapc(its, &its->collections[cpu], 1);
3230 its_send_invall(its, &its->collections[cpu]);
3233 static void its_cpu_init_collections(void)
3235 struct its_node *its;
3237 raw_spin_lock(&its_lock);
3239 list_for_each_entry(its, &its_nodes, entry)
3240 its_cpu_init_collection(its);
3242 raw_spin_unlock(&its_lock);
3245 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3247 struct its_device *its_dev = NULL, *tmp;
3248 unsigned long flags;
3250 raw_spin_lock_irqsave(&its->lock, flags);
3252 list_for_each_entry(tmp, &its->its_device_list, entry) {
3253 if (tmp->device_id == dev_id) {
3259 raw_spin_unlock_irqrestore(&its->lock, flags);
3264 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3268 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3269 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3270 return &its->tables[i];
3276 static bool its_alloc_table_entry(struct its_node *its,
3277 struct its_baser *baser, u32 id)
3283 /* Don't allow device id that exceeds single, flat table limit */
3284 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3285 if (!(baser->val & GITS_BASER_INDIRECT))
3286 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3288 /* Compute 1st level table index & check if that exceeds table limit */
3289 idx = id >> ilog2(baser->psz / esz);
3290 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3293 table = baser->base;
3295 /* Allocate memory for 2nd level table */
3297 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3298 get_order(baser->psz));
3302 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3303 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3304 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3306 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3308 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3309 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3310 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3312 /* Ensure updated table contents are visible to ITS hardware */
3319 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3321 struct its_baser *baser;
3323 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3325 /* Don't allow device id that exceeds ITS hardware limit */
3327 return (ilog2(dev_id) < device_ids(its));
3329 return its_alloc_table_entry(its, baser, dev_id);
3332 static bool its_alloc_vpe_table(u32 vpe_id)
3334 struct its_node *its;
3338 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3339 * could try and only do it on ITSs corresponding to devices
3340 * that have interrupts targeted at this VPE, but the
3341	 * complexity becomes crazy (and you have tons of memory anyway).
3344 list_for_each_entry(its, &its_nodes, entry) {
3345 struct its_baser *baser;
3350 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3354 if (!its_alloc_table_entry(its, baser, vpe_id))
3358	/* Not v4.1? No need to iterate the RDs; return early. */
3359 if (!gic_rdists->has_rvpeid)
3363 * Make sure the L2 tables are allocated for all copies of
3364 * the L1 table on *all* v4.1 RDs.
3366 for_each_possible_cpu(cpu) {
3367 if (!allocate_vpe_l2_table(cpu, vpe_id))
3374 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3375 int nvecs, bool alloc_lpis)
3377 struct its_device *dev;
3378 unsigned long *lpi_map = NULL;
3379 unsigned long flags;
3380 u16 *col_map = NULL;
3387 if (!its_alloc_device_table(its, dev_id))
3390 if (WARN_ON(!is_power_of_2(nvecs)))
3391 nvecs = roundup_pow_of_two(nvecs);
3393 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3395 * Even if the device wants a single LPI, the ITT must be
3396 * sized as a power of two (and you need at least one bit...).
3398 nr_ites = max(2, nvecs);
3399 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3400 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
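	/*
	 * Example sizing (illustrative): nvecs = 1 gives nr_ites = 2; with
	 * an 8-byte ITE that is 16 bytes, rounded up to ITS_ITT_ALIGN and
	 * padded by ITS_ITT_ALIGN - 1 so the ITT address can later be
	 * aligned to a 256-byte boundary inside the allocation.
	 */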
3401 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3403 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3405 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3408 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3413 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3416 bitmap_free(lpi_map);
3421 gic_flush_dcache_to_poc(itt, sz);
3425 dev->nr_ites = nr_ites;
3426 dev->event_map.lpi_map = lpi_map;
3427 dev->event_map.col_map = col_map;
3428 dev->event_map.lpi_base = lpi_base;
3429 dev->event_map.nr_lpis = nr_lpis;
3430 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3431 dev->device_id = dev_id;
3432 INIT_LIST_HEAD(&dev->entry);
3434 raw_spin_lock_irqsave(&its->lock, flags);
3435 list_add(&dev->entry, &its->its_device_list);
3436 raw_spin_unlock_irqrestore(&its->lock, flags);
3438 /* Map device to its ITT */
3439 its_send_mapd(dev, 1);
3444 static void its_free_device(struct its_device *its_dev)
3446 unsigned long flags;
3448 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3449 list_del(&its_dev->entry);
3450 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3451 kfree(its_dev->event_map.col_map);
3452 kfree(its_dev->itt);
3456 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3460 /* Find a free LPI region in lpi_map and allocate them. */
3461 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3462 dev->event_map.nr_lpis,
3463 get_count_order(nvecs));
3467 *hwirq = dev->event_map.lpi_base + idx;
3472 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3473 int nvec, msi_alloc_info_t *info)
3475 struct its_node *its;
3476 struct its_device *its_dev;
3477 struct msi_domain_info *msi_info;
3482 * We ignore "dev" entirely, and rely on the dev_id that has
3483 * been passed via the scratchpad. This limits this domain's
3484 * usefulness to upper layers that definitely know that they
3485 * are built on top of the ITS.
3487 dev_id = info->scratchpad[0].ul;
3489 msi_info = msi_get_domain_info(domain);
3490 its = msi_info->data;
3492 if (!gic_rdists->has_direct_lpi &&
3494 vpe_proxy.dev->its == its &&
3495 dev_id == vpe_proxy.dev->device_id) {
3496 /* Bad luck. Get yourself a better implementation */
3497 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3502 mutex_lock(&its->dev_alloc_lock);
3503 its_dev = its_find_device(its, dev_id);
3506		 * We have already seen this ID, probably through
3507 * another alias (PCI bridge of some sort). No need to
3508 * create the device.
3510 its_dev->shared = true;
3511 pr_debug("Reusing ITT for devID %x\n", dev_id);
3515 its_dev = its_create_device(its, dev_id, nvec, true);
3521 if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE)
3522 its_dev->shared = true;
3524 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3526 mutex_unlock(&its->dev_alloc_lock);
3527 info->scratchpad[0].ptr = its_dev;
3531 static struct msi_domain_ops its_msi_domain_ops = {
3532 .msi_prepare = its_msi_prepare,
3535 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3537 irq_hw_number_t hwirq)
3539 struct irq_fwspec fwspec;
3541 if (irq_domain_get_of_node(domain->parent)) {
3542 fwspec.fwnode = domain->parent->fwnode;
3543 fwspec.param_count = 3;
3544 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3545 fwspec.param[1] = hwirq;
3546 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3547 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3548 fwspec.fwnode = domain->parent->fwnode;
3549 fwspec.param_count = 2;
3550 fwspec.param[0] = hwirq;
3551 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3556 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3559 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3560 unsigned int nr_irqs, void *args)
3562 msi_alloc_info_t *info = args;
3563 struct its_device *its_dev = info->scratchpad[0].ptr;
3564 struct its_node *its = its_dev->its;
3565 struct irq_data *irqd;
3566 irq_hw_number_t hwirq;
3570 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3574 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3578 for (i = 0; i < nr_irqs; i++) {
3579 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3583 irq_domain_set_hwirq_and_chip(domain, virq + i,
3584 hwirq + i, &its_irq_chip, its_dev);
3585 irqd = irq_get_irq_data(virq + i);
3586 irqd_set_single_target(irqd);
3587 irqd_set_affinity_on_activate(irqd);
3588 irqd_set_resend_when_in_progress(irqd);
3589 pr_debug("ID:%d pID:%d vID:%d\n",
3590 (int)(hwirq + i - its_dev->event_map.lpi_base),
3591 (int)(hwirq + i), virq + i);
3597 static int its_irq_domain_activate(struct irq_domain *domain,
3598 struct irq_data *d, bool reserve)
3600 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3601 u32 event = its_get_event_id(d);
3604 cpu = its_select_cpu(d, cpu_online_mask);
3605 if (cpu < 0 || cpu >= nr_cpu_ids)
3608 its_inc_lpi_count(d, cpu);
3609 its_dev->event_map.col_map[event] = cpu;
3610 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3612 /* Map the GIC IRQ and event to the device */
3613 its_send_mapti(its_dev, d->hwirq, event);
3617 static void its_irq_domain_deactivate(struct irq_domain *domain,
3620 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3621 u32 event = its_get_event_id(d);
3623 its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
3624 /* Stop the delivery of interrupts */
3625 its_send_discard(its_dev, event);
3628 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3629 unsigned int nr_irqs)
3631 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3632 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3633 struct its_node *its = its_dev->its;
3636 bitmap_release_region(its_dev->event_map.lpi_map,
3637 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3638 get_count_order(nr_irqs));
3640 for (i = 0; i < nr_irqs; i++) {
3641 struct irq_data *data = irq_domain_get_irq_data(domain,
3643 /* Nuke the entry in the domain */
3644 irq_domain_reset_irq_data(data);
3647 mutex_lock(&its->dev_alloc_lock);
3650 * If all interrupts have been freed, start mopping the
3651 * floor. This is conditioned on the device not being shared.
3653 if (!its_dev->shared &&
3654 bitmap_empty(its_dev->event_map.lpi_map,
3655 its_dev->event_map.nr_lpis)) {
3656 its_lpi_free(its_dev->event_map.lpi_map,
3657 its_dev->event_map.lpi_base,
3658 its_dev->event_map.nr_lpis);
3660 /* Unmap device/itt */
3661 its_send_mapd(its_dev, 0);
3662 its_free_device(its_dev);
3665 mutex_unlock(&its->dev_alloc_lock);
3667 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3670 static const struct irq_domain_ops its_domain_ops = {
3671 .alloc = its_irq_domain_alloc,
3672 .free = its_irq_domain_free,
3673 .activate = its_irq_domain_activate,
3674 .deactivate = its_irq_domain_deactivate,
3680 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3681 * likely), the only way to perform an invalidate is to use a fake
3682 * device to issue an INV command, implying that the LPI has first
3683 * been mapped to some event on that device. Since this is not exactly
3684 * cheap, we try to keep that mapping around as long as possible, and
3685 * only issue an UNMAP if we're short on available slots.
3687 * Broken by design(tm).
3689 * GICv4.1, on the other hand, mandates that we're able to invalidate
3690 * by writing to an MMIO register. It doesn't implement the whole of
3691 * DirectLPI, but that's good enough. And most of the time, we don't
3692 * even have to invalidate anything, as the redistributor can be told
3693 * whether to generate a doorbell or not (we thus leave it enabled,
3696 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3698 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3699 if (gic_rdists->has_rvpeid)
3702 /* Already unmapped? */
3703 if (vpe->vpe_proxy_event == -1)
3706 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3707 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3710 * We don't track empty slots at all, so let's move the
3711 * next_victim pointer if we can quickly reuse that slot
3712 * instead of nuking an existing entry. Not clear that this is
3713 * always a win though, and this might just generate a ripple
3714 * effect... Let's just hope VPEs don't migrate too often.
3716 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3717 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3719 vpe->vpe_proxy_event = -1;
3722 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3724 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3725 if (gic_rdists->has_rvpeid)
3728 if (!gic_rdists->has_direct_lpi) {
3729 unsigned long flags;
3731 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3732 its_vpe_db_proxy_unmap_locked(vpe);
3733 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3737 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3739 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3740 if (gic_rdists->has_rvpeid)
3743 /* Already mapped? */
3744 if (vpe->vpe_proxy_event != -1)
3747 /* This slot was already allocated. Kick the other VPE out. */
3748 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3749 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3751 /* Map the new VPE instead */
3752 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3753 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3754 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
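	/* Route the doorbell through the chosen proxy slot, targeting the vPE's current CPU. */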
3756 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3757 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3760 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3762 unsigned long flags;
3763 struct its_collection *target_col;
3765 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3766 if (gic_rdists->has_rvpeid)
3769 if (gic_rdists->has_direct_lpi) {
3770 void __iomem *rdbase;
3772 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3773 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3774 wait_for_syncr(rdbase);
3779 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3781 its_vpe_db_proxy_map_locked(vpe);
3783 target_col = &vpe_proxy.dev->its->collections[to];
3784 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3785 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3787 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3790 static int its_vpe_set_affinity(struct irq_data *d,
3791 const struct cpumask *mask_val,
3794 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3795 int from, cpu = cpumask_first(mask_val);
3796 unsigned long flags;
3799 * Changing affinity is mega expensive, so let's be as lazy as
3800 * we can and only do it if we really have to. Also, if mapped
3801 * into the proxy device, we need to move the doorbell
3802 * interrupt to its new location.
3804 * Another thing is that changing the affinity of a vPE affects
3805 * *other interrupts* such as all the vLPIs that are routed to
3806 * this vPE. This means that the irq_desc lock is not enough to
3807 * protect us, and that we must ensure nobody samples vpe->col_idx
3808 * during the update, hence the lock below which must also be
3809 * taken on any vLPI handling path that evaluates vpe->col_idx.
3811 from = vpe_to_cpuid_lock(vpe, &flags);
3818 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3819 * is sharing its VPE table with the current one.
3821 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3822 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3825 its_send_vmovp(vpe);
3826 its_vpe_db_proxy_move(vpe, from, cpu);
3829 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3830 vpe_to_cpuid_unlock(vpe, flags);
3832 return IRQ_SET_MASK_OK_DONE;
3835 static void its_wait_vpt_parse_complete(void)
3837 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3840 if (!gic_rdists->has_vpend_valid_dirty)
3843 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
3845 !(val & GICR_VPENDBASER_Dirty),
3849 static void its_vpe_schedule(struct its_vpe *vpe)
3851 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3854 /* Schedule the VPE */
3855 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3856 GENMASK_ULL(51, 12);
3857 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3858 val |= GICR_VPROPBASER_RaWb;
3859 val |= GICR_VPROPBASER_InnerShareable;
3860 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3862 val = virt_to_phys(page_address(vpe->vpt_page)) &
3863 GENMASK_ULL(51, 16);
3864 val |= GICR_VPENDBASER_RaWaWb;
3865 val |= GICR_VPENDBASER_InnerShareable;
3867 * There is no good way of finding out if the pending table is
3868 * empty as we can race against the doorbell interrupt very
3869 * easily. So in the end, vpe->pending_last is only an
3870 * indication that the vcpu has something pending, not one
3871 * that the pending table is empty. A good implementation
3872 * would be able to read its coarse map pretty quickly anyway,
3873 * making this a tolerable issue.
3875 val |= GICR_VPENDBASER_PendingLast;
3876 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3877 val |= GICR_VPENDBASER_Valid;
3878 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3881 static void its_vpe_deschedule(struct its_vpe *vpe)
3883 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3886 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3888 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3889 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3892 static void its_vpe_invall(struct its_vpe *vpe)
3894 struct its_node *its;
3896 list_for_each_entry(its, &its_nodes, entry) {
3900 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3904 * Sending a VINVALL to a single ITS is enough, as all
3905 * we need is to reach the redistributors.
3907 its_send_vinvall(its, vpe);
3912 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3914 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3915 struct its_cmd_info *info = vcpu_info;
3917 switch (info->cmd_type) {
3919 its_vpe_schedule(vpe);
3922 case DESCHEDULE_VPE:
3923 its_vpe_deschedule(vpe);
3927 its_wait_vpt_parse_complete();
3931 its_vpe_invall(vpe);
3939 static void its_vpe_send_cmd(struct its_vpe *vpe,
3940 void (*cmd)(struct its_device *, u32))
3942 unsigned long flags;
3944 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3946 its_vpe_db_proxy_map_locked(vpe);
3947 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3949 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3952 static void its_vpe_send_inv(struct irq_data *d)
3954 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3956 if (gic_rdists->has_direct_lpi) {
3957 void __iomem *rdbase;
3959 /* Target the redistributor this VPE is currently known on */
3960 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3961 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3962 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3963 wait_for_syncr(rdbase);
3964 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3966 its_vpe_send_cmd(vpe, its_send_inv);
3970 static void its_vpe_mask_irq(struct irq_data *d)
3973	 * We need to mask the LPI, which is described by the parent
3974	 * irq_data. Instead of calling into the parent (which won't
3975	 * exactly do the right thing), let's simply use the
3976	 * parent_data pointer. Yes, I'm naughty.
3978 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3979 its_vpe_send_inv(d);
3982 static void its_vpe_unmask_irq(struct irq_data *d)
3984 /* Same hack as above... */
3985 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3986 its_vpe_send_inv(d);
3989 static int its_vpe_set_irqchip_state(struct irq_data *d,
3990 enum irqchip_irq_state which,
3993 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3995 if (which != IRQCHIP_STATE_PENDING)
3998 if (gic_rdists->has_direct_lpi) {
3999 void __iomem *rdbase;
4001 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4003 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4005 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4006 wait_for_syncr(rdbase);
4010 its_vpe_send_cmd(vpe, its_send_int);
4012 its_vpe_send_cmd(vpe, its_send_clear);
4018 static int its_vpe_retrigger(struct irq_data *d)
4020 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
4023 static struct irq_chip its_vpe_irq_chip = {
4024 .name = "GICv4-vpe",
4025 .irq_mask = its_vpe_mask_irq,
4026 .irq_unmask = its_vpe_unmask_irq,
4027 .irq_eoi = irq_chip_eoi_parent,
4028 .irq_set_affinity = its_vpe_set_affinity,
4029 .irq_retrigger = its_vpe_retrigger,
4030 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
4031 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
4034 static struct its_node *find_4_1_its(void)
4036 static struct its_node *its = NULL;
4039 list_for_each_entry(its, &its_nodes, entry) {
4051 static void its_vpe_4_1_send_inv(struct irq_data *d)
4053 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4054 struct its_node *its;
4057 * GICv4.1 wants doorbells to be invalidated using the
4058 * INVDB command in order to be broadcast to all RDs. Send
4059 * it to the first valid ITS, and let the HW do its magic.
4061 its = find_4_1_its();
4063 its_send_invdb(its, vpe);
4066 static void its_vpe_4_1_mask_irq(struct irq_data *d)
4068 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
4069 its_vpe_4_1_send_inv(d);
4072 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
4074 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
4075 its_vpe_4_1_send_inv(d);
4078 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4079 struct its_cmd_info *info)
4081 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4084 /* Schedule the VPE */
4085 val |= GICR_VPENDBASER_Valid;
4086 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
4087 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
4088 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4090 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
4093 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4094 struct its_cmd_info *info)
4096 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
4100 unsigned long flags;
4103 * vPE is going to block: make the vPE non-resident with
4104 * PendingLast clear and DB set. The GIC guarantees that if
4105 * we read-back PendingLast clear, then a doorbell will be
4106 * delivered when an interrupt comes.
4108 * Note the locking to deal with the concurrent update of
4109		 * pending_last from the doorbell interrupt handler that can run concurrently.
4112 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4113 val = its_clear_vpend_valid(vlpi_base,
4114 GICR_VPENDBASER_PendingLast,
4115 GICR_VPENDBASER_4_1_DB);
4116 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4117 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4120 * We're not blocking, so just make the vPE non-resident
4121 * with PendingLast set, indicating that we'll be back.
4123 val = its_clear_vpend_valid(vlpi_base,
4125 GICR_VPENDBASER_PendingLast);
4126 vpe->pending_last = true;
4130 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4132 void __iomem *rdbase;
4133 unsigned long flags;
4137 val = GICR_INVALLR_V;
4138 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4140 /* Target the redistributor this vPE is currently known on */
4141 cpu = vpe_to_cpuid_lock(vpe, &flags);
4142 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4143 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4144 gic_write_lpir(val, rdbase + GICR_INVALLR);
4146 wait_for_syncr(rdbase);
4147 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4148 vpe_to_cpuid_unlock(vpe, flags);
4151 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4153 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4154 struct its_cmd_info *info = vcpu_info;
4156 switch (info->cmd_type) {
4158 its_vpe_4_1_schedule(vpe, info);
4161 case DESCHEDULE_VPE:
4162 its_vpe_4_1_deschedule(vpe, info);
4166 its_wait_vpt_parse_complete();
4170 its_vpe_4_1_invall(vpe);
4178 static struct irq_chip its_vpe_4_1_irq_chip = {
4179 .name = "GICv4.1-vpe",
4180 .irq_mask = its_vpe_4_1_mask_irq,
4181 .irq_unmask = its_vpe_4_1_unmask_irq,
4182 .irq_eoi = irq_chip_eoi_parent,
4183 .irq_set_affinity = its_vpe_set_affinity,
4184 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4187 static void its_configure_sgi(struct irq_data *d, bool clear)
4189 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4190 struct its_cmd_desc desc;
4192 desc.its_vsgi_cmd.vpe = vpe;
4193 desc.its_vsgi_cmd.sgi = d->hwirq;
4194 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4195 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4196 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4197 desc.its_vsgi_cmd.clear = clear;
4200 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4201 * destination VPE is mapped there. Since we map them eagerly at
4202 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4204 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4207 static void its_sgi_mask_irq(struct irq_data *d)
4209 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4211 vpe->sgi_config[d->hwirq].enabled = false;
4212 its_configure_sgi(d, false);
4215 static void its_sgi_unmask_irq(struct irq_data *d)
4217 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4219 vpe->sgi_config[d->hwirq].enabled = true;
4220 its_configure_sgi(d, false);
4223 static int its_sgi_set_affinity(struct irq_data *d,
4224 const struct cpumask *mask_val,
4228 * There is no notion of affinity for virtual SGIs, at least
4229 * not on the host (since they can only be targeting a vPE).
4230 * Tell the kernel we've done whatever it asked for.
4232 irq_data_update_effective_affinity(d, mask_val);
4233 return IRQ_SET_MASK_OK;
4236 static int its_sgi_set_irqchip_state(struct irq_data *d,
4237 enum irqchip_irq_state which,
4240 if (which != IRQCHIP_STATE_PENDING)
4244 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4245 struct its_node *its = find_4_1_its();
4248 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4249 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
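		/*
		 * sgir_base maps the SGI register frame (at ITS base + 128K),
		 * while the GITS_SGIR offset is defined relative to the ITS
		 * base, hence the SZ_128K adjustment on the write below.
		 */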
4250 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4252 its_configure_sgi(d, true);
4258 static int its_sgi_get_irqchip_state(struct irq_data *d,
4259 enum irqchip_irq_state which, bool *val)
4261 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4263 unsigned long flags;
4264 u32 count = 1000000; /* 1s! */
4268 if (which != IRQCHIP_STATE_PENDING)
4272 * Locking galore! We can race against two different events:
4274 * - Concurrent vPE affinity change: we must make sure it cannot
4275 * happen, or we'll talk to the wrong redistributor. This is
4276 * identical to what happens with vLPIs.
4278 * - Concurrent VSGIPENDR access: As it involves accessing two
4279 * MMIO registers, this must be made atomic one way or another.
4281 cpu = vpe_to_cpuid_lock(vpe, &flags);
4282 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4283 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4284 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4286 status = readl_relaxed(base + GICR_VSGIPENDR);
4287 if (!(status & GICR_VSGIPENDR_BUSY))
4292 pr_err_ratelimited("Unable to get SGI status\n");
4300 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4301 vpe_to_cpuid_unlock(vpe, flags);
4306 *val = !!(status & (1 << d->hwirq));
4311 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4313 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4314 struct its_cmd_info *info = vcpu_info;
4316 switch (info->cmd_type) {
4317 case PROP_UPDATE_VSGI:
4318 vpe->sgi_config[d->hwirq].priority = info->priority;
4319 vpe->sgi_config[d->hwirq].group = info->group;
4320 its_configure_sgi(d, false);
4328 static struct irq_chip its_sgi_irq_chip = {
4329 .name = "GICv4.1-sgi",
4330 .irq_mask = its_sgi_mask_irq,
4331 .irq_unmask = its_sgi_unmask_irq,
4332 .irq_set_affinity = its_sgi_set_affinity,
4333 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4334 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4335 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4338 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4339 unsigned int virq, unsigned int nr_irqs,
4342 struct its_vpe *vpe = args;
4345 /* Yes, we do want 16 SGIs */
4346 WARN_ON(nr_irqs != 16);
4348 for (i = 0; i < 16; i++) {
4349 vpe->sgi_config[i].priority = 0;
4350 vpe->sgi_config[i].enabled = false;
4351 vpe->sgi_config[i].group = false;
4353 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4354 &its_sgi_irq_chip, vpe);
4355 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4361 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4363 unsigned int nr_irqs)
4368 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4369 struct irq_data *d, bool reserve)
4371 /* Write out the initial SGI configuration */
4372 its_configure_sgi(d, false);
4376 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4379 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4382 * The VSGI command is awkward:
4384 * - To change the configuration, CLEAR must be set to false,
4385 * leaving the pending bit unchanged.
4386 * - To clear the pending bit, CLEAR must be set to true, leaving
4387 * the configuration unchanged.
4389 * You just can't do both at once, hence the two commands below.
4391 vpe->sgi_config[d->hwirq].enabled = false;
4392 its_configure_sgi(d, false);
4393 its_configure_sgi(d, true);
4396 static const struct irq_domain_ops its_sgi_domain_ops = {
4397 .alloc = its_sgi_irq_domain_alloc,
4398 .free = its_sgi_irq_domain_free,
4399 .activate = its_sgi_irq_domain_activate,
4400 .deactivate = its_sgi_irq_domain_deactivate,
4403 static int its_vpe_id_alloc(void)
4405 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4408 static void its_vpe_id_free(u16 id)
4410 ida_simple_remove(&its_vpeid_ida, id);
4413 static int its_vpe_init(struct its_vpe *vpe)
4415 struct page *vpt_page;
4418 /* Allocate vpe_id */
4419 vpe_id = its_vpe_id_alloc();
4424 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4426 its_vpe_id_free(vpe_id);
4430 if (!its_alloc_vpe_table(vpe_id)) {
4431 its_vpe_id_free(vpe_id);
4432 its_free_pending_table(vpt_page);
4436 raw_spin_lock_init(&vpe->vpe_lock);
4437 vpe->vpe_id = vpe_id;
4438 vpe->vpt_page = vpt_page;
4439 if (gic_rdists->has_rvpeid)
4440 atomic_set(&vpe->vmapp_count, 0);
4442 vpe->vpe_proxy_event = -1;
4447 static void its_vpe_teardown(struct its_vpe *vpe)
4449 its_vpe_db_proxy_unmap(vpe);
4450 its_vpe_id_free(vpe->vpe_id);
4451 its_free_pending_table(vpe->vpt_page);
4454 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4456 unsigned int nr_irqs)
4458 struct its_vm *vm = domain->host_data;
4461 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4463 for (i = 0; i < nr_irqs; i++) {
4464 struct irq_data *data = irq_domain_get_irq_data(domain,
4466 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4468 BUG_ON(vm != vpe->its_vm);
4470 clear_bit(data->hwirq, vm->db_bitmap);
4471 its_vpe_teardown(vpe);
4472 irq_domain_reset_irq_data(data);
4475 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4476 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4477 its_free_prop_table(vm->vprop_page);
4481 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4482 unsigned int nr_irqs, void *args)
4484 struct irq_chip *irqchip = &its_vpe_irq_chip;
4485 struct its_vm *vm = args;
4486 unsigned long *bitmap;
4487 struct page *vprop_page;
4488 int base, nr_ids, i, err = 0;
4492 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4496 if (nr_ids < nr_irqs) {
4497 its_lpi_free(bitmap, base, nr_ids);
4501 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4503 its_lpi_free(bitmap, base, nr_ids);
4507 vm->db_bitmap = bitmap;
4508 vm->db_lpi_base = base;
4509 vm->nr_db_lpis = nr_ids;
4510 vm->vprop_page = vprop_page;
4512 if (gic_rdists->has_rvpeid)
4513 irqchip = &its_vpe_4_1_irq_chip;
4515 for (i = 0; i < nr_irqs; i++) {
4516 vm->vpes[i]->vpe_db_lpi = base + i;
4517 err = its_vpe_init(vm->vpes[i]);
4520 err = its_irq_gic_domain_alloc(domain, virq + i,
4521 vm->vpes[i]->vpe_db_lpi);
4524 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4525 irqchip, vm->vpes[i]);
4527 irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
4532 its_vpe_irq_domain_free(domain, virq, i);
4534 its_lpi_free(bitmap, base, nr_ids);
4535 its_free_prop_table(vprop_page);
4541 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4542 struct irq_data *d, bool reserve)
4544 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4545 struct its_node *its;
4548 * If we use the list map, we issue VMAPP on demand... Unless
4549 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4550 * so that VSGIs can work.
4552 if (!gic_requires_eager_mapping())
4555 /* Map the VPE to the first possible CPU */
4556 vpe->col_idx = cpumask_first(cpu_online_mask);
4558 list_for_each_entry(its, &its_nodes, entry) {
4562 its_send_vmapp(its, vpe, true);
4563 its_send_vinvall(its, vpe);
4566 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4571 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4574 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4575 struct its_node *its;
4578 * If we use the list map on GICv4.0, we unmap the VPE once no
4579 * VLPIs are associated with the VM.
4581 if (!gic_requires_eager_mapping())
4584 list_for_each_entry(its, &its_nodes, entry) {
4588 its_send_vmapp(its, vpe, false);
4592	 * There may be a direct read to the VPT after unmapping the
4593	 * vPE; to guarantee the validity of this, we make the VPT
4594 * memory coherent with the CPU caches here.
4596 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
4597 gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
4601 static const struct irq_domain_ops its_vpe_domain_ops = {
4602 .alloc = its_vpe_irq_domain_alloc,
4603 .free = its_vpe_irq_domain_free,
4604 .activate = its_vpe_irq_domain_activate,
4605 .deactivate = its_vpe_irq_domain_deactivate,
4608 static int its_force_quiescent(void __iomem *base)
4610 u32 count = 1000000; /* 1s */
4613 val = readl_relaxed(base + GITS_CTLR);
4615	 * The GIC architecture specification requires the ITS to be both
4616 * disabled and quiescent for writes to GITS_BASER<n> or
4617 * GITS_CBASER to not have UNPREDICTABLE results.
4619 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4622 /* Disable the generation of all interrupts to this ITS */
4623 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4624 writel_relaxed(val, base + GITS_CTLR);
4626 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4628 val = readl_relaxed(base + GITS_CTLR);
4629 if (val & GITS_CTLR_QUIESCENT)
4641 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4643 struct its_node *its = data;
4645 /* erratum 22375: only alloc 8MB table size (20 bits) */
4646 its->typer &= ~GITS_TYPER_DEVBITS;
4647 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4648 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4653 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4655 struct its_node *its = data;
4657 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4662 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4664 struct its_node *its = data;
4666	/* On QDF2400, the size of the ITE is 16 bytes */
4667 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4668 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4673 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4675 struct its_node *its = its_dev->its;
4678 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4679 * which maps 32-bit writes targeted at a separate window of
4680 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4681 * with device ID taken from bits [device_id_bits + 1:2] of
4682 * the window offset.
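	 *
	 * For example, device ID 3 gets its MSI doorbell at
	 * pre_its_base + 0xc (3 << 2).
	 */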
4684 return its->pre_its_base + (its_dev->device_id << 2);
4687 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4689 struct its_node *its = data;
4690 u32 pre_its_window[2];
4693 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4694 "socionext,synquacer-pre-its",
4696 ARRAY_SIZE(pre_its_window))) {
4698 its->pre_its_base = pre_its_window[0];
4699 its->get_msi_base = its_irq_get_msi_base_pre_its;
4701 ids = ilog2(pre_its_window[1]) - 2;
4702 if (device_ids(its) > ids) {
4703 its->typer &= ~GITS_TYPER_DEVBITS;
4704 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4707 /* the pre-ITS breaks isolation, so disable MSI remapping */
4708 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4714 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4716 struct its_node *its = data;
4719 * Hip07 insists on using the wrong address for the VLPI
4720 * page. Trick it into doing the right thing...
4722 its->vlpi_redist_offset = SZ_128K;
4726 static bool __maybe_unused its_enable_rk3588001(void *data)
4728 struct its_node *its = data;
4730 if (!of_machine_is_compatible("rockchip,rk3588"))
4733 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4734 gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
4739 static const struct gic_quirk its_quirks[] = {
4740 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4742 .desc = "ITS: Cavium errata 22375, 24313",
4743 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4745 .init = its_enable_quirk_cavium_22375,
4748 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4750 .desc = "ITS: Cavium erratum 23144",
4751 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4753 .init = its_enable_quirk_cavium_23144,
4756 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4758 .desc = "ITS: QDF2400 erratum 0065",
4759 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4761 .init = its_enable_quirk_qdf2400_e0065,
4764 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4767 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4768 * implementation, but with a 'pre-ITS' added that requires
4769 * special handling in software.
4771 .desc = "ITS: Socionext Synquacer pre-ITS",
4774 .init = its_enable_quirk_socionext_synquacer,
4777 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4779 .desc = "ITS: Hip07 erratum 161600802",
4782 .init = its_enable_quirk_hip07_161600802,
4785 #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
4787 .desc = "ITS: Rockchip erratum RK3588001",
4790 .init = its_enable_rk3588001,
4797 static void its_enable_quirks(struct its_node *its)
4799 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4801 gic_enable_quirks(iidr, its_quirks, its);
4804 static int its_save_disable(void)
4806 struct its_node *its;
4809 raw_spin_lock(&its_lock);
4810 list_for_each_entry(its, &its_nodes, entry) {
4814 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4815 err = its_force_quiescent(base);
4817 pr_err("ITS@%pa: failed to quiesce: %d\n",
4818 &its->phys_base, err);
4819 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4823 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4828 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4832 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4835 raw_spin_unlock(&its_lock);
4840 static void its_restore_enable(void)
4842 struct its_node *its;
4845 raw_spin_lock(&its_lock);
4846 list_for_each_entry(its, &its_nodes, entry) {
4853 * Make sure that the ITS is disabled. If it fails to quiesce,
4854 * don't restore it since writing to CBASER or BASER<n>
4855 * registers is undefined according to the GIC v3 ITS specification.
4858 * Firmware resuming with the ITS enabled is terminally broken.
4860 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
4861 ret = its_force_quiescent(base);
4863 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4864 &its->phys_base, ret);
4868 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4871 * Writing CBASER resets CREADR to 0, so make CWRITER and
4872 * cmd_write line up with it.
4874 its->cmd_write = its->cmd_base;
4875 gits_write_cwriter(0, base + GITS_CWRITER);
4877 /* Restore GITS_BASER from the value cache. */
4878 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4879 struct its_baser *baser = &its->tables[i];
4881 if (!(baser->val & GITS_BASER_VALID))
4884 its_write_baser(its, baser, baser->val);
4886 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4889 * Reinit the collection if it's stored in the ITS. This is
4890 * indicated by the col_id being less than the HCC field
4891 * (CID < HCC), as specified in the GICv3 documentation.
4893 if (its->collections[smp_processor_id()].col_id <
4894 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4895 its_cpu_init_collection(its);
4897 raw_spin_unlock(&its_lock);
4900 static struct syscore_ops its_syscore_ops = {
4901 .suspend = its_save_disable,
4902 .resume = its_restore_enable,
4905 static void __init __iomem *its_map_one(struct resource *res, int *err)
4907 void __iomem *its_base;
4910 its_base = ioremap(res->start, SZ_64K);
4912 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4917 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4918 if (val != 0x30 && val != 0x40) {
4919 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4924 *err = its_force_quiescent(its_base);
4926 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4937 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4939 struct irq_domain *inner_domain;
4940 struct msi_domain_info *info;
4942 info = kzalloc(sizeof(*info), GFP_KERNEL);
4946 info->ops = &its_msi_domain_ops;
4949 inner_domain = irq_domain_create_hierarchy(its_parent,
4950 its->msi_domain_flags, 0,
4951 handle, &its_domain_ops,
4953 if (!inner_domain) {
4958 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4963 static int its_init_vpe_domain(void)
4965 struct its_node *its;
4969 if (gic_rdists->has_direct_lpi) {
4970 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4974 /* Any ITS will do, even if not v4 */
4975 its = list_first_entry(&its_nodes, struct its_node, entry);
4977 entries = roundup_pow_of_two(nr_cpu_ids);
4978 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4980 if (!vpe_proxy.vpes)
4983 /* Use the last possible DevID */
4984 devid = GENMASK(device_ids(its) - 1, 0);
4985 vpe_proxy.dev = its_create_device(its, devid, entries, false);
4986 if (!vpe_proxy.dev) {
4987 kfree(vpe_proxy.vpes);
4988 pr_err("ITS: Can't allocate GICv4 proxy device\n");
4992 BUG_ON(entries > vpe_proxy.dev->nr_ites);
4994 raw_spin_lock_init(&vpe_proxy.lock);
4995 vpe_proxy.next_victim = 0;
4996 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4997 devid, vpe_proxy.dev->nr_ites);
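/*
 * Worked example (hypothetical sizes): on an ITS reporting 20 device ID
 * bits, the proxy DevID is GENMASK(19, 0) == 0xfffff, i.e. the last
 * possible device ID; with nr_cpu_ids == 96 the proxy device is created
 * with roundup_pow_of_two(96) == 128 event slots, enough for one slot
 * per possible CPU.
 */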
5002 static int __init its_compute_its_list_map(struct resource *res,
5003 void __iomem *its_base)
5009 * This is assumed to be done early enough that we're
5010 * guaranteed to be single-threaded, hence no
5011 * locking. Should this change, we should address this.
5014 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
5015 if (its_number >= GICv4_ITS_LIST_MAX) {
5016 pr_err("ITS@%pa: No ITSList entry available!\n",
5021 ctlr = readl_relaxed(its_base + GITS_CTLR);
5022 ctlr &= ~GITS_CTLR_ITS_NUMBER;
5023 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
5024 writel_relaxed(ctlr, its_base + GITS_CTLR);
5025 ctlr = readl_relaxed(its_base + GITS_CTLR);
5026 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
5027 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
5028 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
5031 if (test_and_set_bit(its_number, &its_list_map)) {
5032 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
5033 &res->start, its_number);
5040 static int __init its_probe_one(struct resource *res,
5041 struct fwnode_handle *handle, int numa_node)
5043 struct its_node *its;
5044 void __iomem *its_base;
5045 u64 baser, tmp, typer;
5050 its_base = its_map_one(res, &err);
5054 pr_info("ITS %pR\n", res);
5056 its = kzalloc(sizeof(*its), GFP_KERNEL);
5062 raw_spin_lock_init(&its->lock);
5063 mutex_init(&its->dev_alloc_lock);
5064 INIT_LIST_HEAD(&its->entry);
5065 INIT_LIST_HEAD(&its->its_device_list);
5066 typer = gic_read_typer(its_base + GITS_TYPER);
5068 its->base = its_base;
5069 its->phys_base = res->start;
5071 if (!(typer & GITS_TYPER_VMOVP)) {
5072 err = its_compute_its_list_map(res, its_base);
5078 pr_info("ITS@%pa: Using ITS number %d\n",
5081 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
5085 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
5087 its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
5088 if (!its->sgir_base) {
5093 its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
5095 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
5096 &res->start, its->mpidr, svpet);
5100 its->numa_node = numa_node;
5102 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5103 get_order(ITS_CMD_QUEUE_SZ));
5106 goto out_unmap_sgir;
5108 its->cmd_base = (void *)page_address(page);
5109 its->cmd_write = its->cmd_base;
5110 its->fwnode_handle = handle;
5111 its->get_msi_base = its_irq_get_msi_base;
5112 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
5114 its_enable_quirks(its);
5116 err = its_alloc_tables(its);
5120 err = its_alloc_collections(its);
5122 goto out_free_tables;
5124 baser = (virt_to_phys(its->cmd_base) |
5125 GITS_CBASER_RaWaWb |
5126 GITS_CBASER_InnerShareable |
5127 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
5130 gits_write_cbaser(baser, its->base + GITS_CBASER);
5131 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5133 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5134 tmp &= ~GITS_CBASER_SHAREABILITY_MASK;
5136 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
5137 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
5139 * The HW reports non-shareable, we must
5140 * remove the cacheability attributes as well.
5143 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
5144 GITS_CBASER_CACHEABILITY_MASK);
5145 baser |= GITS_CBASER_nC;
5146 gits_write_cbaser(baser, its->base + GITS_CBASER);
5148 pr_info("ITS: using cache flushing for cmd queue\n");
5149 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5152 gits_write_cwriter(0, its->base + GITS_CWRITER);
5153 ctlr = readl_relaxed(its->base + GITS_CTLR);
5154 ctlr |= GITS_CTLR_ENABLE;
5156 ctlr |= GITS_CTLR_ImDe;
5157 writel_relaxed(ctlr, its->base + GITS_CTLR);
5159 err = its_init_domain(handle, its);
5161 goto out_free_tables;
5163 raw_spin_lock(&its_lock);
5164 list_add(&its->entry, &its_nodes);
5165 raw_spin_unlock(&its_lock);
5170 its_free_tables(its);
5172 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5175 iounmap(its->sgir_base);
5180 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
5184 static bool gic_rdists_supports_plpis(void)
5186 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
5189 static int redist_disable_lpis(void)
5191 void __iomem *rbase = gic_data_rdist_rd_base();
5192 u64 timeout = USEC_PER_SEC;
5195 if (!gic_rdists_supports_plpis()) {
5196 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5200 val = readl_relaxed(rbase + GICR_CTLR);
5201 if (!(val & GICR_CTLR_ENABLE_LPIS))
5205 * If coming via a CPU hotplug event, we don't need to disable
5206 * LPIs before trying to re-enable them. They are already
5207 * configured and all is well in the world.
5209 * If running with preallocated tables, there is nothing to do.
5211 if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
5212 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
5216 * From that point on, we only try to do some damage control.
5218 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5219 smp_processor_id());
5220 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
5223 val &= ~GICR_CTLR_ENABLE_LPIS;
5224 writel_relaxed(val, rbase + GICR_CTLR);
5226 /* Make sure any change to GICR_CTLR is observable by the GIC */
5230 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
5231 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
5232 * Error out if we time out waiting for RWP to clear.
5234 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
5236 pr_err("CPU%d: Timeout while disabling LPIs\n",
5237 smp_processor_id());
5245 * After it has been written to 1, it is IMPLEMENTATION
5246 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
5247 * cleared to 0. Error out if clearing the bit failed.
5249 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
5250 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5257 int its_cpu_init(void)
5259 if (!list_empty(&its_nodes)) {
5262 ret = redist_disable_lpis();
5266 its_cpu_init_lpis();
5267 its_cpu_init_collections();
5273 static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
5275 cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5276 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5279 static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
5280 rdist_memreserve_cpuhp_cleanup_workfn);
5282 static int its_cpu_memreserve_lpi(unsigned int cpu)
5284 struct page *pend_page;
5287 /* This gets to run exactly once per CPU */
5288 if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
5291 pend_page = gic_data_rdist()->pend_page;
5292 if (WARN_ON(!pend_page)) {
5297 * If the pending table was pre-programmed, free the memory we
5298 * preemptively allocated. Otherwise, reserve that memory for later use.
5301 if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
5302 its_free_pending_table(pend_page);
5303 gic_data_rdist()->pend_page = NULL;
5305 phys_addr_t paddr = page_to_phys(pend_page);
5306 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
5310 /* Last CPU being brought up gets to issue the cleanup */
5311 if (!IS_ENABLED(CONFIG_SMP) ||
5312 cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
5313 schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5315 gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
5319 /* Mark all the BASER registers as invalid before they get reprogrammed */
5320 static int __init its_reset_one(struct resource *res)
5322 void __iomem *its_base;
5325 its_base = its_map_one(res, &err);
5329 for (i = 0; i < GITS_BASER_NR_REGS; i++)
5330 gits_write_baser(0, its_base + GITS_BASER + (i << 3));
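/*
 * Note: the GITS_BASER<n> registers are 64 bits apart, so register i
 * lives at offset GITS_BASER + i * 8, hence the (i << 3) above. Zeroing
 * them clears the Valid bit so the tables are treated as unprogrammed
 * until the ITS is probed again.
 */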
5336 static const struct of_device_id its_device_id[] = {
5337 { .compatible = "arm,gic-v3-its", },
5341 static int __init its_of_probe(struct device_node *node)
5343 struct device_node *np;
5344 struct resource res;
5347 * Make sure *all* the ITS are reset before we probe any, as
5348 * they may be sharing memory. If any of the ITS fails to
5349 * reset, don't even try to go any further, as this could
5350 * result in something even worse.
5352 for (np = of_find_matching_node(node, its_device_id); np;
5353 np = of_find_matching_node(np, its_device_id)) {
5356 if (!of_device_is_available(np) ||
5357 !of_property_read_bool(np, "msi-controller") ||
5358 of_address_to_resource(np, 0, &res))
5361 err = its_reset_one(&res);
5366 for (np = of_find_matching_node(node, its_device_id); np;
5367 np = of_find_matching_node(np, its_device_id)) {
5368 if (!of_device_is_available(np))
5370 if (!of_property_read_bool(np, "msi-controller")) {
5371 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5376 if (of_address_to_resource(np, 0, &res)) {
5377 pr_warn("%pOF: no regs?\n", np);
5381 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
5388 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
5390 #ifdef CONFIG_ACPI_NUMA
5391 struct its_srat_map {
5398 static struct its_srat_map *its_srat_maps __initdata;
5399 static int its_in_srat __initdata;
5401 static int __init acpi_get_its_numa_node(u32 its_id)
5405 for (i = 0; i < its_in_srat; i++) {
5406 if (its_id == its_srat_maps[i].its_id)
5407 return its_srat_maps[i].numa_node;
5409 return NUMA_NO_NODE;
5412 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
5413 const unsigned long end)
5418 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
5419 const unsigned long end)
5422 struct acpi_srat_gic_its_affinity *its_affinity;
5424 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
5428 if (its_affinity->header.length < sizeof(*its_affinity)) {
5429 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5430 its_affinity->header.length);
5435 * Note that in theory a new proximity node could be created by this
5436 * entry as it is an SRAT resource allocation structure.
5437 * We do not currently support doing so.
5439 node = pxm_to_node(its_affinity->proximity_domain);
5441 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
5442 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5446 its_srat_maps[its_in_srat].numa_node = node;
5447 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
5449 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5450 its_affinity->proximity_domain, its_affinity->its_id, node);
5455 static void __init acpi_table_parse_srat_its(void)
5459 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
5460 sizeof(struct acpi_table_srat),
5461 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5462 gic_acpi_match_srat_its, 0);
5466 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5471 acpi_table_parse_entries(ACPI_SIG_SRAT,
5472 sizeof(struct acpi_table_srat),
5473 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
5474 gic_acpi_parse_srat_its, 0);
5477 /* Free the its_srat_maps after ITS probing */
5478 static void __init acpi_its_srat_maps_free(void)
5480 kfree(its_srat_maps);
5483 static void __init acpi_table_parse_srat_its(void) { }
5484 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
5485 static void __init acpi_its_srat_maps_free(void) { }
5488 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
5489 const unsigned long end)
5491 struct acpi_madt_generic_translator *its_entry;
5492 struct fwnode_handle *dom_handle;
5493 struct resource res;
5496 its_entry = (struct acpi_madt_generic_translator *)header;
5497 memset(&res, 0, sizeof(res));
5498 res.start = its_entry->base_address;
5499 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
5500 res.flags = IORESOURCE_MEM;
5502 dom_handle = irq_domain_alloc_fwnode(&res.start);
5504 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5509 err = iort_register_domain_token(its_entry->translation_id, res.start,
5512 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5513 &res.start, its_entry->translation_id);
5517 err = its_probe_one(&res, dom_handle,
5518 acpi_get_its_numa_node(its_entry->translation_id));
5522 iort_deregister_domain_token(its_entry->translation_id);
5524 irq_domain_free_fwnode(dom_handle);
5528 static int __init its_acpi_reset(union acpi_subtable_headers *header,
5529 const unsigned long end)
5531 struct acpi_madt_generic_translator *its_entry;
5532 struct resource res;
5534 its_entry = (struct acpi_madt_generic_translator *)header;
5535 res = (struct resource) {
5536 .start = its_entry->base_address,
5537 .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
5538 .flags = IORESOURCE_MEM,
5541 return its_reset_one(&res);
5544 static void __init its_acpi_probe(void)
5546 acpi_table_parse_srat_its();
5548 * Make sure *all* the ITS are reset before we probe any, as
5549 * they may be sharing memory. If any of the ITS fails to
5550 * reset, don't even try to go any further, as this could
5551 * result in something even worse.
5553 if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5554 its_acpi_reset, 0) > 0)
5555 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
5556 gic_acpi_parse_madt_its, 0);
5557 acpi_its_srat_maps_free();
5560 static void __init its_acpi_probe(void) { }
5563 int __init its_lpi_memreserve_init(void)
5567 if (!efi_enabled(EFI_CONFIG_TABLES))
5570 if (list_empty(&its_nodes))
5573 gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
5574 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
5575 "irqchip/arm/gicv3/memreserve:online",
5576 its_cpu_memreserve_lpi,
5581 gic_rdists->cpuhp_memreserve_state = state;
5586 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
5587 struct irq_domain *parent_domain)
5589 struct device_node *of_node;
5590 struct its_node *its;
5591 bool has_v4 = false;
5592 bool has_v4_1 = false;
5595 gic_rdists = rdists;
5597 its_parent = parent_domain;
5598 of_node = to_of_node(handle);
5600 its_of_probe(of_node);
5604 if (list_empty(&its_nodes)) {
5605 pr_warn("ITS: No ITS available, not enabling LPIs\n");
5609 err = allocate_lpi_tables();
5613 list_for_each_entry(its, &its_nodes, entry) {
5614 has_v4 |= is_v4(its);
5615 has_v4_1 |= is_v4_1(its);
5618 /* Don't bother with inconsistent systems */
5619 if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
5620 rdists->has_rvpeid = false;
5622 if (has_v4 && rdists->has_vlpis) {
5623 const struct irq_domain_ops *sgi_ops;
5626 sgi_ops = &its_sgi_domain_ops;
5630 if (its_init_vpe_domain() ||
5631 its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5632 rdists->has_vlpis = false;
5633 pr_err("ITS: Disabling GICv4 support\n");
5637 register_syscore_ops(&its_syscore_ops);