/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 #include <linux/acpi.h>
19 #include <linux/acpi_iort.h>
20 #include <linux/bitmap.h>
21 #include <linux/cpu.h>
22 #include <linux/delay.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/log2.h>
28 #include <linux/msi.h>
30 #include <linux/of_address.h>
31 #include <linux/of_irq.h>
32 #include <linux/of_pci.h>
33 #include <linux/of_platform.h>
34 #include <linux/percpu.h>
35 #include <linux/slab.h>
37 #include <linux/irqchip.h>
38 #include <linux/irqchip/arm-gic-v3.h>
39 #include <linux/irqchip/arm-gic-v4.h>
41 #include <asm/cputype.h>
42 #include <asm/exception.h>
44 #include "irq-gic-common.h"
46 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
47 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
48 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
50 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
52 static u32 lpi_id_bits;
/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
59 #define LPI_NRBITS lpi_id_bits
60 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
61 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
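/*
 * Worked example (illustrative): with lpi_id_bits = 16, BIT(LPI_NRBITS)
 * is 65536, so LPI_PROPBASE_SZ = ALIGN(65536, SZ_64K) = 64kB (one
 * configuration byte per LPI) and LPI_PENDBASE_SZ = ALIGN(65536 / 8,
 * SZ_64K) = 64kB (one pending bit per LPI, rounded up to the required
 * 64kB alignment).
 */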
63 #define LPI_PROP_DEFAULT_PRIO 0xa0
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
70 struct its_collection {
76 * The ITS_BASER structure - contains memory information, cached
77 * value of BASER register configuration and ITS page size.
87 * The ITS structure - contains most of the infrastructure, with the
88 * top-level MSI domain, the command queue, the collections, and the
89 * list of devices writing to it.
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
97 struct mutex dev_alloc_lock;
98 struct list_head entry;
100 phys_addr_t phys_base;
101 struct its_cmd_block *cmd_base;
102 struct its_cmd_block *cmd_write;
103 struct its_baser tables[GITS_BASER_NR_REGS];
104 struct its_collection *collections;
105 struct list_head its_device_list;
113 #define ITS_ITT_ALIGN SZ_256
115 /* The maximum number of VPEID bits supported by VLPI commands */
116 #define ITS_MAX_VPEID_BITS (16)
117 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
119 /* Convert page order to size in bytes */
120 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
122 struct event_lpi_map {
123 unsigned long *lpi_map;
125 irq_hw_number_t lpi_base;
127 struct mutex vlpi_lock;
129 struct its_vlpi_map *vlpi_maps;
134 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
136 * LPIs are injected into a guest (GICv4), the event_map.vm field
137 * indicates which one.
140 struct list_head entry;
141 struct its_node *its;
142 struct event_lpi_map event_map;
151 struct its_device *dev;
152 struct its_vpe **vpes;
156 static LIST_HEAD(its_nodes);
157 static DEFINE_SPINLOCK(its_lock);
158 static struct rdists *gic_rdists;
159 static struct irq_domain *its_parent;
162 * We have a maximum number of 16 ITSs in the whole system if we're
163 * using the ITSList mechanism
165 #define ITS_LIST_MAX 16
167 static unsigned long its_list_map;
168 static u16 vmovp_seq_num;
169 static DEFINE_RAW_SPINLOCK(vmovp_lock);
171 static DEFINE_IDA(its_vpeid_ida);
173 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
174 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
175 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
177 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
180 struct its_node *its = its_dev->its;
182 return its->collections + its_dev->event_map.col_map[event];
 * ITS command descriptors - parameters to be encoded in a command
 * block.
189 struct its_cmd_desc {
192 struct its_device *dev;
197 struct its_device *dev;
202 struct its_device *dev;
207 struct its_device *dev;
212 struct its_collection *col;
217 struct its_device *dev;
223 struct its_device *dev;
224 struct its_collection *col;
229 struct its_device *dev;
234 struct its_collection *col;
243 struct its_collection *col;
249 struct its_device *dev;
257 struct its_device *dev;
264 struct its_collection *col;
272 * The ITS command block, which is what the ITS actually parses.
274 struct its_cmd_block {
278 #define ITS_CMD_QUEUE_SZ SZ_64K
279 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
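/*
 * Sizing note (illustrative): each ITS command is four 64-bit
 * doublewords (32 bytes, see the raw_cmd[] accesses below), so the
 * 64kB queue gives ITS_CMD_QUEUE_NR_ENTRIES = 65536 / 32 = 2048 slots.
 */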
281 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
282 struct its_cmd_desc *);
284 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
285 struct its_cmd_desc *);
287 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
289 u64 mask = GENMASK_ULL(h, l);
291 *raw_cmd |= (val << l) & mask;
294 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
296 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
299 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
301 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
304 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
306 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
309 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
311 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
314 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
316 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
319 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
321 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
324 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
326 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
329 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
331 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
334 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
336 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
339 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
341 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
344 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
346 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
349 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
351 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
354 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
356 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
359 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
361 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
364 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
366 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
369 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
371 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
374 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
376 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
379 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
381 /* Let's fixup BE commands */
382 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
383 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
384 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
385 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
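/*
 * Field placement summary (illustrative, derived from the helpers
 * above): a MAPC command, for instance, ends up with the command
 * number in raw_cmd[0][7:0], the collection ID in raw_cmd[2][15:0],
 * the target address (shifted right by 16) in raw_cmd[2][51:16] and
 * the valid bit in raw_cmd[2][63]; its_fixup_cmd() then converts all
 * four doublewords to little-endian, which only matters on BE kernels.
 */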
388 static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
389 struct its_cmd_desc *desc)
391 unsigned long itt_addr;
392 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
394 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
395 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
397 its_encode_cmd(cmd, GITS_CMD_MAPD);
398 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
399 its_encode_size(cmd, size - 1);
400 its_encode_itt(cmd, itt_addr);
401 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
408 static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
409 struct its_cmd_desc *desc)
411 its_encode_cmd(cmd, GITS_CMD_MAPC);
412 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
413 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
414 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
418 return desc->its_mapc_cmd.col;
421 static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
422 struct its_cmd_desc *desc)
424 struct its_collection *col;
426 col = dev_event_to_col(desc->its_mapti_cmd.dev,
427 desc->its_mapti_cmd.event_id);
429 its_encode_cmd(cmd, GITS_CMD_MAPTI);
430 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
431 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
432 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
433 its_encode_collection(cmd, col->col_id);
440 static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
441 struct its_cmd_desc *desc)
443 struct its_collection *col;
445 col = dev_event_to_col(desc->its_movi_cmd.dev,
446 desc->its_movi_cmd.event_id);
448 its_encode_cmd(cmd, GITS_CMD_MOVI);
449 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
450 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
451 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
458 static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
459 struct its_cmd_desc *desc)
461 struct its_collection *col;
463 col = dev_event_to_col(desc->its_discard_cmd.dev,
464 desc->its_discard_cmd.event_id);
466 its_encode_cmd(cmd, GITS_CMD_DISCARD);
467 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
468 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
475 static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
476 struct its_cmd_desc *desc)
478 struct its_collection *col;
480 col = dev_event_to_col(desc->its_inv_cmd.dev,
481 desc->its_inv_cmd.event_id);
483 its_encode_cmd(cmd, GITS_CMD_INV);
484 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
485 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
492 static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
493 struct its_cmd_desc *desc)
495 struct its_collection *col;
497 col = dev_event_to_col(desc->its_int_cmd.dev,
498 desc->its_int_cmd.event_id);
500 its_encode_cmd(cmd, GITS_CMD_INT);
501 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
502 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
509 static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
510 struct its_cmd_desc *desc)
512 struct its_collection *col;
514 col = dev_event_to_col(desc->its_clear_cmd.dev,
515 desc->its_clear_cmd.event_id);
517 its_encode_cmd(cmd, GITS_CMD_CLEAR);
518 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
519 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
526 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
527 struct its_cmd_desc *desc)
529 its_encode_cmd(cmd, GITS_CMD_INVALL);
its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
537 static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
538 struct its_cmd_desc *desc)
540 its_encode_cmd(cmd, GITS_CMD_VINVALL);
541 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
545 return desc->its_vinvall_cmd.vpe;
548 static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
549 struct its_cmd_desc *desc)
551 unsigned long vpt_addr;
553 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
555 its_encode_cmd(cmd, GITS_CMD_VMAPP);
556 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
557 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
558 its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
559 its_encode_vpt_addr(cmd, vpt_addr);
560 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
564 return desc->its_vmapp_cmd.vpe;
567 static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
568 struct its_cmd_desc *desc)
572 if (desc->its_vmapti_cmd.db_enabled)
573 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
577 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
578 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
579 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
580 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
581 its_encode_db_phys_id(cmd, db);
582 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
586 return desc->its_vmapti_cmd.vpe;
589 static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
590 struct its_cmd_desc *desc)
594 if (desc->its_vmovi_cmd.db_enabled)
595 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
599 its_encode_cmd(cmd, GITS_CMD_VMOVI);
600 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
601 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
602 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
603 its_encode_db_phys_id(cmd, db);
604 its_encode_db_valid(cmd, true);
608 return desc->its_vmovi_cmd.vpe;
611 static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
612 struct its_cmd_desc *desc)
614 its_encode_cmd(cmd, GITS_CMD_VMOVP);
615 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
616 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
617 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
618 its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
622 return desc->its_vmovp_cmd.vpe;
625 static u64 its_cmd_ptr_to_offset(struct its_node *its,
626 struct its_cmd_block *ptr)
628 return (ptr - its->cmd_base) * sizeof(*ptr);
631 static int its_queue_full(struct its_node *its)
636 widx = its->cmd_write - its->cmd_base;
637 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
639 /* This is incredibly unlikely to happen, unless the ITS locks up. */
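/*
 * Illustrative example: with 2048 slots, a writer at slot 2047 and a
 * reader still at slot 0 gives (2047 + 1) % 2048 == 0 == ridx, so the
 * queue is reported full until GITS_CREADR advances.
 */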
640 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
646 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
648 struct its_cmd_block *cmd;
649 u32 count = 1000000; /* 1s! */
651 while (its_queue_full(its)) {
654 pr_err_ratelimited("ITS queue not draining\n");
661 cmd = its->cmd_write++;
663 /* Handle queue wrapping */
664 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
665 its->cmd_write = its->cmd_base;
676 static struct its_cmd_block *its_post_commands(struct its_node *its)
678 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
680 writel_relaxed(wr, its->base + GITS_CWRITER);
682 return its->cmd_write;
685 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
 * Make sure the commands written to memory are observable by
 * the ITS.
691 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
692 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
697 static void its_wait_for_range_completion(struct its_node *its,
698 struct its_cmd_block *from,
699 struct its_cmd_block *to)
701 u64 rd_idx, from_idx, to_idx;
702 u32 count = 1000000; /* 1s! */
704 from_idx = its_cmd_ptr_to_offset(its, from);
705 to_idx = its_cmd_ptr_to_offset(its, to);
708 rd_idx = readl_relaxed(its->base + GITS_CREADR);
711 if (from_idx < to_idx && rd_idx >= to_idx)
715 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
720 pr_err_ratelimited("ITS queue timeout\n");
728 /* Warning, macro hell follows */
729 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
730 void name(struct its_node *its, \
732 struct its_cmd_desc *desc) \
734 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
735 synctype *sync_obj; \
736 unsigned long flags; \
738 raw_spin_lock_irqsave(&its->lock, flags); \
740 cmd = its_allocate_entry(its); \
if (!cmd) { /* We're soooooo screwed... */ \
742 raw_spin_unlock_irqrestore(&its->lock, flags); \
745 sync_obj = builder(cmd, desc); \
746 its_flush_cmd(its, cmd); \
749 sync_cmd = its_allocate_entry(its); \
753 buildfn(sync_cmd, sync_obj); \
754 its_flush_cmd(its, sync_cmd); \
758 next_cmd = its_post_commands(its); \
759 raw_spin_unlock_irqrestore(&its->lock, flags); \
761 its_wait_for_range_completion(its, cmd, next_cmd); \
764 static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
765 struct its_collection *sync_col)
767 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
768 its_encode_target(sync_cmd, sync_col->target_address);
770 its_fixup_cmd(sync_cmd);
773 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
774 struct its_collection, its_build_sync_cmd)
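/*
 * For reference (illustrative): the BUILD_SINGLE_CMD_FUNC()
 * instantiation above expands into its_send_single_command(its,
 * builder, desc), which takes the command queue lock, allocates a
 * slot, lets the builder encode the command, appends a SYNC aimed at
 * the collection the builder returned, updates GITS_CWRITER and then
 * waits for GITS_CREADR to move past both entries.
 */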
776 static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
777 struct its_vpe *sync_vpe)
779 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
780 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
782 its_fixup_cmd(sync_cmd);
785 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
786 struct its_vpe, its_build_vsync_cmd)
788 static void its_send_int(struct its_device *dev, u32 event_id)
790 struct its_cmd_desc desc;
792 desc.its_int_cmd.dev = dev;
793 desc.its_int_cmd.event_id = event_id;
795 its_send_single_command(dev->its, its_build_int_cmd, &desc);
798 static void its_send_clear(struct its_device *dev, u32 event_id)
800 struct its_cmd_desc desc;
802 desc.its_clear_cmd.dev = dev;
803 desc.its_clear_cmd.event_id = event_id;
805 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
808 static void its_send_inv(struct its_device *dev, u32 event_id)
810 struct its_cmd_desc desc;
812 desc.its_inv_cmd.dev = dev;
813 desc.its_inv_cmd.event_id = event_id;
815 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
818 static void its_send_mapd(struct its_device *dev, int valid)
820 struct its_cmd_desc desc;
822 desc.its_mapd_cmd.dev = dev;
823 desc.its_mapd_cmd.valid = !!valid;
825 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
828 static void its_send_mapc(struct its_node *its, struct its_collection *col,
831 struct its_cmd_desc desc;
833 desc.its_mapc_cmd.col = col;
834 desc.its_mapc_cmd.valid = !!valid;
836 its_send_single_command(its, its_build_mapc_cmd, &desc);
839 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
841 struct its_cmd_desc desc;
843 desc.its_mapti_cmd.dev = dev;
844 desc.its_mapti_cmd.phys_id = irq_id;
845 desc.its_mapti_cmd.event_id = id;
847 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
850 static void its_send_movi(struct its_device *dev,
851 struct its_collection *col, u32 id)
853 struct its_cmd_desc desc;
855 desc.its_movi_cmd.dev = dev;
856 desc.its_movi_cmd.col = col;
857 desc.its_movi_cmd.event_id = id;
859 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
862 static void its_send_discard(struct its_device *dev, u32 id)
864 struct its_cmd_desc desc;
866 desc.its_discard_cmd.dev = dev;
867 desc.its_discard_cmd.event_id = id;
869 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
872 static void its_send_invall(struct its_node *its, struct its_collection *col)
874 struct its_cmd_desc desc;
876 desc.its_invall_cmd.col = col;
878 its_send_single_command(its, its_build_invall_cmd, &desc);
881 static void its_send_vmapti(struct its_device *dev, u32 id)
883 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
884 struct its_cmd_desc desc;
886 desc.its_vmapti_cmd.vpe = map->vpe;
887 desc.its_vmapti_cmd.dev = dev;
888 desc.its_vmapti_cmd.virt_id = map->vintid;
889 desc.its_vmapti_cmd.event_id = id;
890 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
892 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
895 static void its_send_vmovi(struct its_device *dev, u32 id)
897 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
898 struct its_cmd_desc desc;
900 desc.its_vmovi_cmd.vpe = map->vpe;
901 desc.its_vmovi_cmd.dev = dev;
902 desc.its_vmovi_cmd.event_id = id;
903 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
905 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
908 static void its_send_vmapp(struct its_vpe *vpe, bool valid)
910 struct its_cmd_desc desc;
911 struct its_node *its;
913 desc.its_vmapp_cmd.vpe = vpe;
914 desc.its_vmapp_cmd.valid = valid;
916 list_for_each_entry(its, &its_nodes, entry) {
920 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
921 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
925 static void its_send_vmovp(struct its_vpe *vpe)
927 struct its_cmd_desc desc;
928 struct its_node *its;
930 int col_id = vpe->col_idx;
932 desc.its_vmovp_cmd.vpe = vpe;
933 desc.its_vmovp_cmd.its_list = (u16)its_list_map;
936 its = list_first_entry(&its_nodes, struct its_node, entry);
937 desc.its_vmovp_cmd.seq_num = 0;
938 desc.its_vmovp_cmd.col = &its->collections[col_id];
939 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
944 * Yet another marvel of the architecture. If using the
945 * its_list "feature", we need to make sure that all ITSs
946 * receive all VMOVP commands in the same order. The only way
947 * to guarantee this is to make vmovp a serialization point.
951 raw_spin_lock_irqsave(&vmovp_lock, flags);
953 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
956 list_for_each_entry(its, &its_nodes, entry) {
960 desc.its_vmovp_cmd.col = &its->collections[col_id];
961 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
964 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
967 static void its_send_vinvall(struct its_vpe *vpe)
969 struct its_cmd_desc desc;
970 struct its_node *its;
972 desc.its_vinvall_cmd.vpe = vpe;
974 list_for_each_entry(its, &its_nodes, entry) {
977 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
982 * irqchip functions - assumes MSI, mostly.
985 static inline u32 its_get_event_id(struct irq_data *d)
987 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
988 return d->hwirq - its_dev->event_map.lpi_base;
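/*
 * Example (illustrative): a device whose event_map.lpi_base is 8192
 * and which owns 32 LPIs sees d->hwirq values 8192..8223, i.e. event
 * IDs 0..31 on the DevID/EventID side of the translation.
 */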
991 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
993 irq_hw_number_t hwirq;
994 struct page *prop_page;
997 if (irqd_is_forwarded_to_vcpu(d)) {
998 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
999 u32 event = its_get_event_id(d);
1001 prop_page = its_dev->event_map.vm->vprop_page;
1002 hwirq = its_dev->event_map.vlpi_maps[event].vintid;
1004 prop_page = gic_rdists->prop_page;
1008 cfg = page_address(prop_page) + hwirq - 8192;
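/*
 * Illustrative note: the property table is indexed by (INTID - 8192),
 * so LPI 8192 owns byte 0, LPI 8193 byte 1, and so on; each byte holds
 * the priority, group and enable bits updated just below.
 */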
1010 *cfg |= set | LPI_PROP_GROUP1;
1013 * Make the above write visible to the redistributors.
1014 * And yes, we're flushing exactly: One. Single. Byte.
1017 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1018 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1023 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1025 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1027 lpi_write_config(d, clr, set);
1028 its_send_inv(its_dev, its_get_event_id(d));
1031 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1033 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1034 u32 event = its_get_event_id(d);
1036 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1039 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1042 * More fun with the architecture:
1044 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1045 * value or to 1023, depending on the enable bit. But that
 * would be issuing a mapping for an /existing/ DevID+EventID
1047 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1048 * to the /same/ vPE, using this opportunity to adjust the
1049 * doorbell. Mouahahahaha. We loves it, Precious.
1051 its_send_vmovi(its_dev, event);
1054 static void its_mask_irq(struct irq_data *d)
1056 if (irqd_is_forwarded_to_vcpu(d))
1057 its_vlpi_set_doorbell(d, false);
1059 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1062 static void its_unmask_irq(struct irq_data *d)
1064 if (irqd_is_forwarded_to_vcpu(d))
1065 its_vlpi_set_doorbell(d, true);
1067 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1070 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1074 const struct cpumask *cpu_mask = cpu_online_mask;
1075 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1076 struct its_collection *target_col;
1077 u32 id = its_get_event_id(d);
1079 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1080 if (irqd_is_forwarded_to_vcpu(d))
1083 /* lpi cannot be routed to a redistributor that is on a foreign node */
1084 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1085 if (its_dev->its->numa_node >= 0) {
1086 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1087 if (!cpumask_intersects(mask_val, cpu_mask))
1092 cpu = cpumask_any_and(mask_val, cpu_mask);
1094 if (cpu >= nr_cpu_ids)
/* don't set the affinity when the target CPU is the same as the current one */
1098 if (cpu != its_dev->event_map.col_map[id]) {
1099 target_col = &its_dev->its->collections[cpu];
1100 its_send_movi(its_dev, target_col, id);
1101 its_dev->event_map.col_map[id] = cpu;
1102 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1105 return IRQ_SET_MASK_OK_DONE;
1108 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1110 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1111 struct its_node *its;
1115 addr = its->phys_base + GITS_TRANSLATER;
1117 msg->address_lo = lower_32_bits(addr);
1118 msg->address_hi = upper_32_bits(addr);
1119 msg->data = its_get_event_id(d);
1121 iommu_dma_map_msi_msg(d->irq, msg);
1124 static int its_irq_set_irqchip_state(struct irq_data *d,
1125 enum irqchip_irq_state which,
1128 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1129 u32 event = its_get_event_id(d);
1131 if (which != IRQCHIP_STATE_PENDING)
1135 its_send_int(its_dev, event);
1137 its_send_clear(its_dev, event);
1142 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1144 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1145 u32 event = its_get_event_id(d);
1151 mutex_lock(&its_dev->event_map.vlpi_lock);
1153 if (!its_dev->event_map.vm) {
1154 struct its_vlpi_map *maps;
1156 maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
1163 its_dev->event_map.vm = info->map->vm;
1164 its_dev->event_map.vlpi_maps = maps;
1165 } else if (its_dev->event_map.vm != info->map->vm) {
1170 /* Get our private copy of the mapping information */
1171 its_dev->event_map.vlpi_maps[event] = *info->map;
1173 if (irqd_is_forwarded_to_vcpu(d)) {
1174 /* Already mapped, move it around */
1175 its_send_vmovi(its_dev, event);
1177 /* Drop the physical mapping */
1178 its_send_discard(its_dev, event);
1180 /* and install the virtual one */
1181 its_send_vmapti(its_dev, event);
1182 irqd_set_forwarded_to_vcpu(d);
1184 /* Increment the number of VLPIs */
1185 its_dev->event_map.nr_vlpis++;
1189 mutex_unlock(&its_dev->event_map.vlpi_lock);
1193 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1195 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1196 u32 event = its_get_event_id(d);
1199 mutex_lock(&its_dev->event_map.vlpi_lock);
1201 if (!its_dev->event_map.vm ||
1202 !its_dev->event_map.vlpi_maps[event].vm) {
1207 /* Copy our mapping information to the incoming request */
1208 *info->map = its_dev->event_map.vlpi_maps[event];
1211 mutex_unlock(&its_dev->event_map.vlpi_lock);
1215 static int its_vlpi_unmap(struct irq_data *d)
1217 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1218 u32 event = its_get_event_id(d);
1221 mutex_lock(&its_dev->event_map.vlpi_lock);
1223 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1228 /* Drop the virtual mapping */
1229 its_send_discard(its_dev, event);
1231 /* and restore the physical one */
1232 irqd_clr_forwarded_to_vcpu(d);
1233 its_send_mapti(its_dev, d->hwirq, event);
1234 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1239 * Drop the refcount and make the device available again if
1240 * this was the last VLPI.
1242 if (!--its_dev->event_map.nr_vlpis) {
1243 its_dev->event_map.vm = NULL;
1244 kfree(its_dev->event_map.vlpi_maps);
1248 mutex_unlock(&its_dev->event_map.vlpi_lock);
1252 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1254 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1256 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1259 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1260 lpi_update_config(d, 0xff, info->config);
1262 lpi_write_config(d, 0xff, info->config);
1263 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1268 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1270 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1271 struct its_cmd_info *info = vcpu_info;
1274 if (!its_dev->its->is_v4)
1277 /* Unmap request? */
1279 return its_vlpi_unmap(d);
1281 switch (info->cmd_type) {
1283 return its_vlpi_map(d, info);
1286 return its_vlpi_get(d, info);
1288 case PROP_UPDATE_VLPI:
1289 case PROP_UPDATE_AND_INV_VLPI:
1290 return its_vlpi_prop_update(d, info);
1297 static struct irq_chip its_irq_chip = {
1299 .irq_mask = its_mask_irq,
1300 .irq_unmask = its_unmask_irq,
1301 .irq_eoi = irq_chip_eoi_parent,
1302 .irq_set_affinity = its_set_affinity,
1303 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1304 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1305 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1309 * How we allocate LPIs:
1311 * The GIC has id_bits bits for interrupt identifiers. From there, we
1312 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
1313 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
1314 * bits to the right.
1316 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1318 #define IRQS_PER_CHUNK_SHIFT 5
1319 #define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
1320 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
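/*
 * Worked example (illustrative): with id_bits = 16 the GIC offers
 * 65536 interrupt IDs; removing the 8192 IDs reserved for
 * SGIs/PPIs/SPIs leaves 57344 LPIs, i.e. (65536 - 8192) >> 5 = 1792
 * chunks of 32 LPIs for the allocator below to hand out.
 */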
1322 static unsigned long *lpi_bitmap;
1323 static u32 lpi_chunks;
1324 static DEFINE_SPINLOCK(lpi_lock);
1326 static int its_lpi_to_chunk(int lpi)
1328 return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
1331 static int its_chunk_to_lpi(int chunk)
1333 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
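/*
 * Example (illustrative): chunk 0 covers LPIs 8192..8223 and chunk 1
 * covers 8224..8255; the two helpers above are exact inverses for
 * chunk-aligned LPIs.
 */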
1336 static int __init its_lpi_init(u32 id_bits)
1338 lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
1340 lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
1347 pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
1351 static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
1353 unsigned long *bitmap = NULL;
1358 nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
1360 spin_lock(&lpi_lock);
1363 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
1365 if (chunk_id < lpi_chunks)
1369 } while (nr_chunks > 0);
1374 bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
1379 for (i = 0; i < nr_chunks; i++)
1380 set_bit(chunk_id + i, lpi_bitmap);
1382 *base = its_chunk_to_lpi(chunk_id);
1383 *nr_ids = nr_chunks * IRQS_PER_CHUNK;
1386 spin_unlock(&lpi_lock);
1389 *base = *nr_ids = 0;
1394 static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
1398 spin_lock(&lpi_lock);
1400 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
1401 int chunk = its_lpi_to_chunk(lpi);
1403 BUG_ON(chunk > lpi_chunks);
1404 if (test_bit(chunk, lpi_bitmap)) {
1405 clear_bit(chunk, lpi_bitmap);
1407 pr_err("Bad LPI chunk %d\n", chunk);
1411 spin_unlock(&lpi_lock);
1416 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1418 struct page *prop_page;
1420 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1424 /* Priority 0xa0, Group-1, disabled */
1425 memset(page_address(prop_page),
1426 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1429 /* Make sure the GIC will observe the written configuration */
1430 gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1435 static void its_free_prop_table(struct page *prop_page)
1437 free_pages((unsigned long)page_address(prop_page),
1438 get_order(LPI_PROPBASE_SZ));
1441 static int __init its_alloc_lpi_tables(void)
1445 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
1446 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1447 if (!gic_rdists->prop_page) {
1448 pr_err("Failed to allocate PROPBASE\n");
1452 paddr = page_to_phys(gic_rdists->prop_page);
1453 pr_info("GIC: using LPI property table @%pa\n", &paddr);
1455 return its_lpi_init(lpi_id_bits);
1458 static const char *its_base_type_string[] = {
1459 [GITS_BASER_TYPE_DEVICE] = "Devices",
1460 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
1461 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
1462 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1463 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1464 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1465 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1468 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1470 u32 idx = baser - its->tables;
1472 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1475 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1478 u32 idx = baser - its->tables;
1480 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1481 baser->val = its_read_baser(its, baser);
1484 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1485 u64 cache, u64 shr, u32 psz, u32 order,
1488 u64 val = its_read_baser(its, baser);
1489 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1490 u64 type = GITS_BASER_TYPE(val);
1491 u64 baser_phys, tmp;
1496 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1497 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1498 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1499 &its->phys_base, its_base_type_string[type],
1500 alloc_pages, GITS_BASER_PAGES_MAX);
1501 alloc_pages = GITS_BASER_PAGES_MAX;
1502 order = get_order(GITS_BASER_PAGES_MAX * psz);
1505 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1509 baser_phys = virt_to_phys(base);
1511 /* Check if the physical address of the memory is above 48bits */
1512 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1514 /* 52bit PA is supported only when PageSize=64K */
1515 if (psz != SZ_64K) {
1516 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1517 free_pages((unsigned long)base, order);
1521 /* Convert 52bit PA to 48bit field */
1522 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1527 (type << GITS_BASER_TYPE_SHIFT) |
1528 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1529 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1534 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1538 val |= GITS_BASER_PAGE_SIZE_4K;
1541 val |= GITS_BASER_PAGE_SIZE_16K;
1544 val |= GITS_BASER_PAGE_SIZE_64K;
1548 its_write_baser(its, baser, val);
1551 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1553 * Shareability didn't stick. Just use
1554 * whatever the read reported, which is likely
 * to be the only thing this ITS
1556 * supports. If that's zero, make it
1557 * non-cacheable as well.
1559 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1561 cache = GITS_BASER_nC;
1562 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1567 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1569 * Page size didn't stick. Let's try a smaller
1570 * size and retry. If we reach 4K, then
1571 * something is horribly wrong...
1573 free_pages((unsigned long)base, order);
1579 goto retry_alloc_baser;
1582 goto retry_alloc_baser;
1587 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1588 &its->phys_base, its_base_type_string[type],
1590 free_pages((unsigned long)base, order);
1594 baser->order = order;
1597 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1599 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1600 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1601 its_base_type_string[type],
1602 (unsigned long)virt_to_phys(base),
1603 indirect ? "indirect" : "flat", (int)esz,
1604 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1609 static bool its_parse_indirect_baser(struct its_node *its,
1610 struct its_baser *baser,
1611 u32 psz, u32 *order, u32 ids)
1613 u64 tmp = its_read_baser(its, baser);
1614 u64 type = GITS_BASER_TYPE(tmp);
1615 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1616 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1617 u32 new_order = *order;
1618 bool indirect = false;
1620 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1621 if ((esz << ids) > (psz * 2)) {
 * Find out whether hw supports a single or two-level table by
 * reading bit at offset '62' after writing '1' to it.
1626 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1627 indirect = !!(baser->val & GITS_BASER_INDIRECT);
 * The size of the lvl2 table is equal to the ITS page size,
 * which is 'psz'. For computing the lvl1 table size, subtract
 * from 'ids' (as reported by the ITS hardware) the ID bits that
 * a single lvl2 page can resolve; the lvl1 table then needs one
 * GITS_LVL1_ENTRY_SIZE entry per remaining ID combination.
1637 ids -= ilog2(psz / (int)esz);
1638 esz = GITS_LVL1_ENTRY_SIZE;
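/*
 * Sizing example (illustrative, assuming a 64kB ITS page, 8-byte
 * device table entries and DevBits = 20): one lvl2 page resolves
 * 65536 / 8 = 8192 IDs (13 bits), so the lvl1 table only needs
 * 2^(20 - 13) = 128 entries of GITS_LVL1_ENTRY_SIZE bytes instead of
 * a flat 2^20 * 8 = 8MB table.
 */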
1643 * Allocate as many entries as required to fit the
1644 * range of device IDs that the ITS can grok... The ID
1645 * space being incredibly sparse, this results in a
1646 * massive waste of memory if two-level device table
1647 * feature is not supported by hardware.
1649 new_order = max_t(u32, get_order(esz << ids), new_order);
1650 if (new_order >= MAX_ORDER) {
1651 new_order = MAX_ORDER - 1;
1652 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1653 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1654 &its->phys_base, its_base_type_string[type],
1655 its->device_ids, ids);
1663 static void its_free_tables(struct its_node *its)
1667 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1668 if (its->tables[i].base) {
1669 free_pages((unsigned long)its->tables[i].base,
1670 its->tables[i].order);
1671 its->tables[i].base = NULL;
1676 static int its_alloc_tables(struct its_node *its)
1678 u64 typer = gic_read_typer(its->base + GITS_TYPER);
1679 u32 ids = GITS_TYPER_DEVBITS(typer);
1680 u64 shr = GITS_BASER_InnerShareable;
1681 u64 cache = GITS_BASER_RaWaWb;
1685 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
1687 * erratum 22375: only alloc 8MB table size
1688 * erratum 24313: ignore memory access type
1690 cache = GITS_BASER_nCnB;
1691 ids = 0x14; /* 20 bits, 8MB */
1694 its->device_ids = ids;
1696 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1697 struct its_baser *baser = its->tables + i;
1698 u64 val = its_read_baser(its, baser);
1699 u64 type = GITS_BASER_TYPE(val);
1700 u32 order = get_order(psz);
1701 bool indirect = false;
1704 case GITS_BASER_TYPE_NONE:
1707 case GITS_BASER_TYPE_DEVICE:
1708 indirect = its_parse_indirect_baser(its, baser,
1711 case GITS_BASER_TYPE_VCPU:
1712 indirect = its_parse_indirect_baser(its, baser,
1714 ITS_MAX_VPEID_BITS);
1718 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1720 its_free_tables(its);
1724 /* Update settings which will be used for next BASERn */
1726 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1727 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1733 static int its_alloc_collections(struct its_node *its)
1735 its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
1737 if (!its->collections)
1743 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1745 struct page *pend_page;
1747 * The pending pages have to be at least 64kB aligned,
1748 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1750 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1751 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1755 /* Make sure the GIC will observe the zero-ed page */
1756 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1761 static void its_free_pending_table(struct page *pt)
1763 free_pages((unsigned long)page_address(pt),
1764 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1767 static void its_cpu_init_lpis(void)
1769 void __iomem *rbase = gic_data_rdist_rd_base();
1770 struct page *pend_page;
1773 /* If we didn't allocate the pending table yet, do it now */
1774 pend_page = gic_data_rdist()->pend_page;
1778 pend_page = its_allocate_pending_table(GFP_NOWAIT);
1780 pr_err("Failed to allocate PENDBASE for CPU%d\n",
1781 smp_processor_id());
1785 paddr = page_to_phys(pend_page);
1786 pr_info("CPU%d: using LPI pending table @%pa\n",
1787 smp_processor_id(), &paddr);
1788 gic_data_rdist()->pend_page = pend_page;
1792 val = readl_relaxed(rbase + GICR_CTLR);
1793 val &= ~GICR_CTLR_ENABLE_LPIS;
1794 writel_relaxed(val, rbase + GICR_CTLR);
1797 * Make sure any change to the table is observable by the GIC.
1802 val = (page_to_phys(gic_rdists->prop_page) |
1803 GICR_PROPBASER_InnerShareable |
1804 GICR_PROPBASER_RaWaWb |
1805 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1807 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1808 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
1810 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
1811 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
 * The HW reports non-shareable, we must
 * remove the cacheability attributes as well.
1817 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1818 GICR_PROPBASER_CACHEABILITY_MASK);
1819 val |= GICR_PROPBASER_nC;
1820 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1822 pr_info_once("GIC: using cache flushing for LPI property table\n");
1823 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1827 val = (page_to_phys(pend_page) |
1828 GICR_PENDBASER_InnerShareable |
1829 GICR_PENDBASER_RaWaWb);
1831 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1832 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
1834 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1836 * The HW reports non-shareable, we must remove the
1837 * cacheability attributes as well.
1839 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1840 GICR_PENDBASER_CACHEABILITY_MASK);
1841 val |= GICR_PENDBASER_nC;
1842 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1846 val = readl_relaxed(rbase + GICR_CTLR);
1847 val |= GICR_CTLR_ENABLE_LPIS;
1848 writel_relaxed(val, rbase + GICR_CTLR);
1850 /* Make sure the GIC has seen the above */
1854 static void its_cpu_init_collection(void)
1856 struct its_node *its;
1859 spin_lock(&its_lock);
1860 cpu = smp_processor_id();
1862 list_for_each_entry(its, &its_nodes, entry) {
/* avoid cross-node collections and their mapping */
1866 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1867 struct device_node *cpu_node;
1869 cpu_node = of_get_cpu_node(cpu, NULL);
1870 if (its->numa_node != NUMA_NO_NODE &&
1871 its->numa_node != of_node_to_nid(cpu_node))
 * We now have to bind each collection to its target
 * redistributor.
1879 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
 * This ITS wants the physical address of the
 * redistributor.
1884 target = gic_data_rdist()->phys_base;
1887 * This ITS wants a linear CPU number.
1889 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
1890 target = GICR_TYPER_CPU_NUMBER(target) << 16;
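/*
 * Illustrative: with GITS_TYPER.PTA clear the collection target is
 * the linear CPU number shifted left by 16, so processor number 3
 * becomes 0x30000 in the MAPC target field; with PTA set it is the
 * redistributor's physical base address instead.
 */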
1893 /* Perform collection mapping */
1894 its->collections[cpu].target_address = target;
1895 its->collections[cpu].col_id = cpu;
1897 its_send_mapc(its, &its->collections[cpu], 1);
1898 its_send_invall(its, &its->collections[cpu]);
1901 spin_unlock(&its_lock);
1904 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1906 struct its_device *its_dev = NULL, *tmp;
1907 unsigned long flags;
1909 raw_spin_lock_irqsave(&its->lock, flags);
1911 list_for_each_entry(tmp, &its->its_device_list, entry) {
1912 if (tmp->device_id == dev_id) {
1918 raw_spin_unlock_irqrestore(&its->lock, flags);
1923 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1927 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1928 if (GITS_BASER_TYPE(its->tables[i].val) == type)
1929 return &its->tables[i];
1935 static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
1941 /* Don't allow device id that exceeds single, flat table limit */
1942 esz = GITS_BASER_ENTRY_SIZE(baser->val);
1943 if (!(baser->val & GITS_BASER_INDIRECT))
1944 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
1946 /* Compute 1st level table index & check if that exceeds table limit */
1947 idx = id >> ilog2(baser->psz / esz);
1948 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
1951 table = baser->base;
1953 /* Allocate memory for 2nd level table */
1955 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
1959 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
1960 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
1961 gic_flush_dcache_to_poc(page_address(page), baser->psz);
1963 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
1965 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
1966 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
1967 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
1969 /* Ensure updated table contents are visible to ITS hardware */
1976 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
1978 struct its_baser *baser;
1980 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1982 /* Don't allow device id that exceeds ITS hardware limit */
1984 return (ilog2(dev_id) < its->device_ids);
1986 return its_alloc_table_entry(baser, dev_id);
1989 static bool its_alloc_vpe_table(u32 vpe_id)
1991 struct its_node *its;
1994 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
1995 * could try and only do it on ITSs corresponding to devices
1996 * that have interrupts targeted at this VPE, but the
 * complexity becomes crazy (and you have tons of memory
 * anyway).
2000 list_for_each_entry(its, &its_nodes, entry) {
2001 struct its_baser *baser;
2006 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2010 if (!its_alloc_table_entry(baser, vpe_id))
2017 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2018 int nvecs, bool alloc_lpis)
2020 struct its_device *dev;
2021 unsigned long *lpi_map = NULL;
2022 unsigned long flags;
2023 u16 *col_map = NULL;
2030 if (!its_alloc_device_table(its, dev_id))
2033 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 * We allocate at least one chunk worth of LPIs per device,
2036 * and thus that many ITEs. The device may require less though.
2038 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
2039 sz = nr_ites * its->ite_size;
2040 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
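/*
 * Example (illustrative, assuming an 8-byte ITE as reported by
 * GITS_TYPER): a request for 5 MSIs gives nr_ites =
 * max(IRQS_PER_CHUNK, roundup_pow_of_two(5)) = 32, so the ITT needs
 * 32 * 8 = 256 bytes; the extra ITS_ITT_ALIGN - 1 bytes allocated here
 * allow the table to be placed on a 256-byte boundary.
 */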
2041 itt = kzalloc(sz, GFP_KERNEL);
2043 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2045 col_map = kzalloc(sizeof(*col_map) * nr_lpis,
2048 col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
2053 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2061 gic_flush_dcache_to_poc(itt, sz);
2065 dev->nr_ites = nr_ites;
2066 dev->event_map.lpi_map = lpi_map;
2067 dev->event_map.col_map = col_map;
2068 dev->event_map.lpi_base = lpi_base;
2069 dev->event_map.nr_lpis = nr_lpis;
2070 mutex_init(&dev->event_map.vlpi_lock);
2071 dev->device_id = dev_id;
2072 INIT_LIST_HEAD(&dev->entry);
2074 raw_spin_lock_irqsave(&its->lock, flags);
2075 list_add(&dev->entry, &its->its_device_list);
2076 raw_spin_unlock_irqrestore(&its->lock, flags);
2078 /* Map device to its ITT */
2079 its_send_mapd(dev, 1);
2084 static void its_free_device(struct its_device *its_dev)
2086 unsigned long flags;
2088 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2089 list_del(&its_dev->entry);
2090 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2091 kfree(its_dev->itt);
2095 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2099 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2100 dev->event_map.nr_lpis,
2101 get_count_order(nvecs));
2105 *hwirq = dev->event_map.lpi_base + idx;
2106 set_bit(idx, dev->event_map.lpi_map);
2111 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2112 int nvec, msi_alloc_info_t *info)
2114 struct its_node *its;
2115 struct its_device *its_dev;
2116 struct msi_domain_info *msi_info;
 * We ignore "dev" entirely, and rely on the dev_id that has
2122 * been passed via the scratchpad. This limits this domain's
2123 * usefulness to upper layers that definitely know that they
2124 * are built on top of the ITS.
2126 dev_id = info->scratchpad[0].ul;
2128 msi_info = msi_get_domain_info(domain);
2129 its = msi_info->data;
2131 if (!gic_rdists->has_direct_lpi &&
2133 vpe_proxy.dev->its == its &&
2134 dev_id == vpe_proxy.dev->device_id) {
2135 /* Bad luck. Get yourself a better implementation */
2136 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2141 mutex_lock(&its->dev_alloc_lock);
2142 its_dev = its_find_device(its, dev_id);
2145 * We already have seen this ID, probably through
2146 * another alias (PCI bridge of some sort). No need to
2147 * create the device.
2149 its_dev->shared = true;
2150 pr_debug("Reusing ITT for devID %x\n", dev_id);
2154 its_dev = its_create_device(its, dev_id, nvec, true);
2160 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2162 mutex_unlock(&its->dev_alloc_lock);
2163 info->scratchpad[0].ptr = its_dev;
2167 static struct msi_domain_ops its_msi_domain_ops = {
2168 .msi_prepare = its_msi_prepare,
2171 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2173 irq_hw_number_t hwirq)
2175 struct irq_fwspec fwspec;
2177 if (irq_domain_get_of_node(domain->parent)) {
2178 fwspec.fwnode = domain->parent->fwnode;
2179 fwspec.param_count = 3;
2180 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2181 fwspec.param[1] = hwirq;
2182 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2183 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2184 fwspec.fwnode = domain->parent->fwnode;
2185 fwspec.param_count = 2;
2186 fwspec.param[0] = hwirq;
2187 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2192 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2195 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2196 unsigned int nr_irqs, void *args)
2198 msi_alloc_info_t *info = args;
2199 struct its_device *its_dev = info->scratchpad[0].ptr;
2200 irq_hw_number_t hwirq;
2204 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2208 for (i = 0; i < nr_irqs; i++) {
2209 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2213 irq_domain_set_hwirq_and_chip(domain, virq + i,
2214 hwirq + i, &its_irq_chip, its_dev);
2215 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2216 pr_debug("ID:%d pID:%d vID:%d\n",
2217 (int)(hwirq + i - its_dev->event_map.lpi_base),
2218 (int)(hwirq + i), virq + i);
2224 static void its_irq_domain_activate(struct irq_domain *domain,
2227 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2228 u32 event = its_get_event_id(d);
2229 const struct cpumask *cpu_mask = cpu_online_mask;
2232 /* get the cpu_mask of local node */
2233 if (its_dev->its->numa_node >= 0)
2234 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2236 /* Bind the LPI to the first possible CPU */
2237 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2238 if (cpu >= nr_cpu_ids) {
2239 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2242 cpu = cpumask_first(cpu_online_mask);
2245 its_dev->event_map.col_map[event] = cpu;
2246 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2248 /* Map the GIC IRQ and event to the device */
2249 its_send_mapti(its_dev, d->hwirq, event);
2252 static void its_irq_domain_deactivate(struct irq_domain *domain,
2255 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2256 u32 event = its_get_event_id(d);
2258 /* Stop the delivery of interrupts */
2259 its_send_discard(its_dev, event);
2262 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2263 unsigned int nr_irqs)
2265 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2266 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2267 struct its_node *its = its_dev->its;
2270 for (i = 0; i < nr_irqs; i++) {
2271 struct irq_data *data = irq_domain_get_irq_data(domain,
2273 u32 event = its_get_event_id(data);
2275 /* Mark interrupt index as unused */
2276 clear_bit(event, its_dev->event_map.lpi_map);
2278 /* Nuke the entry in the domain */
2279 irq_domain_reset_irq_data(data);
2282 mutex_lock(&its->dev_alloc_lock);
2285 * If all interrupts have been freed, start mopping the
 * floor. This is conditioned on the device not being shared.
2288 if (!its_dev->shared &&
2289 bitmap_empty(its_dev->event_map.lpi_map,
2290 its_dev->event_map.nr_lpis)) {
2291 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2292 its_dev->event_map.lpi_base,
2293 its_dev->event_map.nr_lpis);
2294 kfree(its_dev->event_map.col_map);
2296 /* Unmap device/itt */
2297 its_send_mapd(its_dev, 0);
2298 its_free_device(its_dev);
2301 mutex_unlock(&its->dev_alloc_lock);
2303 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2306 static const struct irq_domain_ops its_domain_ops = {
2307 .alloc = its_irq_domain_alloc,
2308 .free = its_irq_domain_free,
2309 .activate = its_irq_domain_activate,
2310 .deactivate = its_irq_domain_deactivate,
2316 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2317 * likely), the only way to perform an invalidate is to use a fake
2318 * device to issue an INV command, implying that the LPI has first
2319 * been mapped to some event on that device. Since this is not exactly
2320 * cheap, we try to keep that mapping around as long as possible, and
2321 * only issue an UNMAP if we're short on available slots.
2323 * Broken by design(tm).
2325 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2327 /* Already unmapped? */
2328 if (vpe->vpe_proxy_event == -1)
2331 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2332 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2335 * We don't track empty slots at all, so let's move the
2336 * next_victim pointer if we can quickly reuse that slot
2337 * instead of nuking an existing entry. Not clear that this is
2338 * always a win though, and this might just generate a ripple
2339 * effect... Let's just hope VPEs don't migrate too often.
2341 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2342 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2344 vpe->vpe_proxy_event = -1;
2347 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2349 if (!gic_rdists->has_direct_lpi) {
2350 unsigned long flags;
2352 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2353 its_vpe_db_proxy_unmap_locked(vpe);
2354 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2358 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2360 /* Already mapped? */
2361 if (vpe->vpe_proxy_event != -1)
2364 /* This slot was already allocated. Kick the other VPE out. */
2365 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2366 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2368 /* Map the new VPE instead */
2369 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2370 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2371 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2373 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2374 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2377 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2379 unsigned long flags;
2380 struct its_collection *target_col;
2382 if (gic_rdists->has_direct_lpi) {
2383 void __iomem *rdbase;
2385 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2386 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2387 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2393 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2395 its_vpe_db_proxy_map_locked(vpe);
2397 target_col = &vpe_proxy.dev->its->collections[to];
2398 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2399 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2401 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2404 static int its_vpe_set_affinity(struct irq_data *d,
2405 const struct cpumask *mask_val,
2408 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2409 int cpu = cpumask_first(mask_val);
2412 * Changing affinity is mega expensive, so let's be as lazy as
2413 * we can and only do it if we really have to. Also, if mapped
2414 * into the proxy device, we need to move the doorbell
2415 * interrupt to its new location.
2417 if (vpe->col_idx != cpu) {
2418 int from = vpe->col_idx;
2421 its_send_vmovp(vpe);
2422 its_vpe_db_proxy_move(vpe, from, cpu);
2425 return IRQ_SET_MASK_OK_DONE;
2428 static void its_vpe_schedule(struct its_vpe *vpe)
2430 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2433 /* Schedule the VPE */
2434 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2435 GENMASK_ULL(51, 12);
2436 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2437 val |= GICR_VPROPBASER_RaWb;
2438 val |= GICR_VPROPBASER_InnerShareable;
2439 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2441 val = virt_to_phys(page_address(vpe->vpt_page)) &
2442 GENMASK_ULL(51, 16);
2443 val |= GICR_VPENDBASER_RaWaWb;
2444 val |= GICR_VPENDBASER_NonShareable;
2446 * There is no good way of finding out if the pending table is
2447 * empty as we can race against the doorbell interrupt very
2448 * easily. So in the end, vpe->pending_last is only an
2449 * indication that the vcpu has something pending, not one
2450 * that the pending table is empty. A good implementation
2451 * would be able to read its coarse map pretty quickly anyway,
2452 * making this a tolerable issue.
2454 val |= GICR_VPENDBASER_PendingLast;
2455 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2456 val |= GICR_VPENDBASER_Valid;
2457 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
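/*
 * Making a VPE resident thus boils down to two 64-bit writes on the
 * local redistributor: GICR_VPROPBASER points at the VM-wide virtual
 * property table (plus ID bits and memory attributes), while
 * GICR_VPENDBASER points at this VPE's private pending table, with
 * Valid set, PendingLast forced (see the comment above) and IDAI
 * carried over from the last deschedule.
 */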
2460 static void its_vpe_deschedule(struct its_vpe *vpe)
2462 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2463 u32 count = 1000000; /* 1s! */
2467 /* We're being scheduled out */
2468 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2469 val &= ~GICR_VPENDBASER_Valid;
2470 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2473 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2474 clean = !(val & GICR_VPENDBASER_Dirty);
2480 } while (!clean && count);
2482 if (unlikely(!clean && !count)) {
2483 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2485 vpe->pending_last = true;
2487 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2488 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
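/*
 * The loop above gives the redistributor roughly one second (hence the
 * "1s!" note on count) to parse the pending table and drop
 * GICR_VPENDBASER.Dirty. If it never does, all we can do is warn and
 * conservatively assume that something is still pending.
 */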
2492 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2494 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2495 struct its_cmd_info *info = vcpu_info;
2497 switch (info->cmd_type) {
2499 its_vpe_schedule(vpe);
2502 case DESCHEDULE_VPE:
2503 its_vpe_deschedule(vpe);
2507 its_send_vinvall(vpe);
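/*
 * These cmd_types are driven by the GICv4 glue layer through
 * irq_set_vcpu_affinity() on the VPE's doorbell interrupt. A rough
 * sketch of a caller (field and variable names from memory, not
 * quoted from irq-gic-v4.c):
 *
 *	struct its_cmd_info info = {
 *		.cmd_type = SCHEDULE_VPE,
 *	};
 *
 *	return irq_set_vcpu_affinity(vpe->irq, &info);
 */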
2515 static void its_vpe_send_cmd(struct its_vpe *vpe,
2516 void (*cmd)(struct its_device *, u32))
2518 unsigned long flags;
2520 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2522 its_vpe_db_proxy_map_locked(vpe);
2523 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2525 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2528 static void its_vpe_send_inv(struct irq_data *d)
2530 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2532 if (gic_rdists->has_direct_lpi) {
2533 void __iomem *rdbase;
2535 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2536 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2537 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2540 its_vpe_send_cmd(vpe, its_send_inv);
2544 static void its_vpe_mask_irq(struct irq_data *d)
2547 * We need to mask the LPI, which is described by the parent
2548 * irq_data. Instead of calling into the parent (which won't
2549 * exactly do the right thing), let's simply use the
2550 * parent_data pointer. Yes, I'm naughty.
2552 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2553 its_vpe_send_inv(d);
2556 static void its_vpe_unmask_irq(struct irq_data *d)
2558 /* Same hack as above... */
2559 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2560 its_vpe_send_inv(d);
2563 static int its_vpe_set_irqchip_state(struct irq_data *d,
2564 enum irqchip_irq_state which,
2567 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2569 if (which != IRQCHIP_STATE_PENDING)
2572 if (gic_rdists->has_direct_lpi) {
2573 void __iomem *rdbase;
2575 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2577 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2579 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2580 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2585 its_vpe_send_cmd(vpe, its_send_int);
2587 its_vpe_send_cmd(vpe, its_send_clear);
2593 static struct irq_chip its_vpe_irq_chip = {
2594 .name = "GICv4-vpe",
2595 .irq_mask = its_vpe_mask_irq,
2596 .irq_unmask = its_vpe_unmask_irq,
2597 .irq_eoi = irq_chip_eoi_parent,
2598 .irq_set_affinity = its_vpe_set_affinity,
2599 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2600 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2603 static int its_vpe_id_alloc(void)
2605 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2608 static void its_vpe_id_free(u16 id)
2610 ida_simple_remove(&its_vpeid_ida, id);
2613 static int its_vpe_init(struct its_vpe *vpe)
2615 struct page *vpt_page;
2618 /* Allocate vpe_id */
2619 vpe_id = its_vpe_id_alloc();
2624 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2626 its_vpe_id_free(vpe_id);
2630 if (!its_alloc_vpe_table(vpe_id)) {
2631 its_vpe_id_free(vpe_id);
2632 its_free_pending_table(vpt_page); /* vpe->vpt_page isn't set yet at this point */
2636 vpe->vpe_id = vpe_id;
2637 vpe->vpt_page = vpt_page;
2638 vpe->vpe_proxy_event = -1;
2643 static void its_vpe_teardown(struct its_vpe *vpe)
2645 its_vpe_db_proxy_unmap(vpe);
2646 its_vpe_id_free(vpe->vpe_id);
2647 its_free_pending_table(vpe->vpt_page);
2650 static void its_vpe_irq_domain_free(struct irq_domain *domain,
2652 unsigned int nr_irqs)
2654 struct its_vm *vm = domain->host_data;
2657 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2659 for (i = 0; i < nr_irqs; i++) {
2660 struct irq_data *data = irq_domain_get_irq_data(domain,
2662 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2664 BUG_ON(vm != vpe->its_vm);
2666 clear_bit(data->hwirq, vm->db_bitmap);
2667 its_vpe_teardown(vpe);
2668 irq_domain_reset_irq_data(data);
2671 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2672 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2673 its_free_prop_table(vm->vprop_page);
2677 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2678 unsigned int nr_irqs, void *args)
2680 struct its_vm *vm = args;
2681 unsigned long *bitmap;
2682 struct page *vprop_page;
2683 int base, nr_ids, i, err = 0;
2687 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2691 if (nr_ids < nr_irqs) {
2692 its_lpi_free_chunks(bitmap, base, nr_ids);
2696 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2698 its_lpi_free_chunks(bitmap, base, nr_ids);
2702 vm->db_bitmap = bitmap;
2703 vm->db_lpi_base = base;
2704 vm->nr_db_lpis = nr_ids;
2705 vm->vprop_page = vprop_page;
2707 for (i = 0; i < nr_irqs; i++) {
2708 vm->vpes[i]->vpe_db_lpi = base + i;
2709 err = its_vpe_init(vm->vpes[i]);
2712 err = its_irq_gic_domain_alloc(domain, virq + i,
2713 vm->vpes[i]->vpe_db_lpi);
2716 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2717 &its_vpe_irq_chip, vm->vpes[i]);
2723 its_vpe_irq_domain_free(domain, virq, i - 1);
2725 its_lpi_free_chunks(bitmap, base, nr_ids);
2726 its_free_prop_table(vprop_page);
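/*
 * To recap the per-VM allocation scheme: one chunk of LPIs provides a
 * doorbell per VPE (vpe_db_lpi = db_lpi_base + i), a single vprop page
 * is shared by all of them, and each VPE additionally gets its own
 * vpe_id and pending table via its_vpe_init(). A 4-vCPU guest would
 * therefore ask for nr_irqs = 4 and consume four doorbell LPIs out of
 * the chunk. Any failure unwinds everything allocated so far.
 */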
2732 static void its_vpe_irq_domain_activate(struct irq_domain *domain,
2735 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2737 /* Map the VPE to the first possible CPU */
2738 vpe->col_idx = cpumask_first(cpu_online_mask);
2739 its_send_vmapp(vpe, true);
2740 its_send_vinvall(vpe);
2743 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2746 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2748 its_send_vmapp(vpe, false);
2751 static const struct irq_domain_ops its_vpe_domain_ops = {
2752 .alloc = its_vpe_irq_domain_alloc,
2753 .free = its_vpe_irq_domain_free,
2754 .activate = its_vpe_irq_domain_activate,
2755 .deactivate = its_vpe_irq_domain_deactivate,
2758 static int its_force_quiescent(void __iomem *base)
2760 u32 count = 1000000; /* 1s */
2763 val = readl_relaxed(base + GITS_CTLR);
2765 * GIC architecture specification requires the ITS to be both
2766 * disabled and quiescent for writes to GITS_BASER<n> or
2767 * GITS_CBASER to not have UNPREDICTABLE results.
2769 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
2772 /* Disable the generation of all interrupts to this ITS */
2773 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
2774 writel_relaxed(val, base + GITS_CTLR);
2776 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2778 val = readl_relaxed(base + GITS_CTLR);
2779 if (val & GITS_CTLR_QUIESCENT)
2791 static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
2793 struct its_node *its = data;
2795 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
2798 static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
2800 struct its_node *its = data;
2802 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
2805 static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
2807 struct its_node *its = data;
2809 /* On QDF2400, the size of the ITE is 16 bytes */
2813 static const struct gic_quirk its_quirks[] = {
2814 #ifdef CONFIG_CAVIUM_ERRATUM_22375
2816 .desc = "ITS: Cavium errata 22375, 24313",
2817 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2819 .init = its_enable_quirk_cavium_22375,
2822 #ifdef CONFIG_CAVIUM_ERRATUM_23144
2824 .desc = "ITS: Cavium erratum 23144",
2825 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2827 .init = its_enable_quirk_cavium_23144,
2830 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
2832 .desc = "ITS: QDF2400 erratum 0065",
2833 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
2835 .init = its_enable_quirk_qdf2400_e0065,
2842 static void its_enable_quirks(struct its_node *its)
2844 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
2846 gic_enable_quirks(iidr, its_quirks, its);
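/*
 * gic_enable_quirks() (irq-gic-common.c) walks the table and fires
 * .init(its) for every entry whose .iidr matches the masked GITS_IIDR
 * value, roughly (sketch, not quoted verbatim):
 *
 *	for (; quirks->desc; quirks++) {
 *		if (quirks->iidr != (quirks->mask & iidr))
 *			continue;
 *		quirks->init(data);
 *		pr_info("GIC: enabling workaround for %s\n", quirks->desc);
 *	}
 */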
2849 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
2851 struct irq_domain *inner_domain;
2852 struct msi_domain_info *info;
2854 info = kzalloc(sizeof(*info), GFP_KERNEL);
2858 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
2859 if (!inner_domain) {
2864 inner_domain->parent = its_parent;
2865 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
2866 inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
2867 info->ops = &its_msi_domain_ops;
2869 inner_domain->host_data = info;
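/*
 * The tree domain created here is the DOMAIN_BUS_NEXUS layer that the
 * ITS MSI bridge drivers (PCI and platform MSI) stack their own
 * domains on top of; its_msi_domain_ops carries the .msi_prepare hook
 * that, roughly speaking, turns the DevID handed over by the bridge
 * into an its_device with its ITT and event map.
 */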
2874 static int its_init_vpe_domain(void)
2876 struct its_node *its;
2880 if (gic_rdists->has_direct_lpi) {
2881 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
2885 /* Any ITS will do, even if not v4 */
2886 its = list_first_entry(&its_nodes, struct its_node, entry);
2888 entries = roundup_pow_of_two(nr_cpu_ids);
2889 vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
2891 if (!vpe_proxy.vpes) {
2892 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
2896 /* Use the last possible DevID */
2897 devid = GENMASK(its->device_ids - 1, 0);
2898 vpe_proxy.dev = its_create_device(its, devid, entries, false);
2899 if (!vpe_proxy.dev) {
2900 kfree(vpe_proxy.vpes);
2901 pr_err("ITS: Can't allocate GICv4 proxy device\n");
2905 BUG_ON(entries > vpe_proxy.dev->nr_ites);
2907 raw_spin_lock_init(&vpe_proxy.lock);
2908 vpe_proxy.next_victim = 0;
2909 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
2910 devid, vpe_proxy.dev->nr_ites);
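/*
 * Sizing example: with nr_cpu_ids = 6, entries = roundup_pow_of_two(6)
 * = 8 proxy slots, and with a (hypothetical) 20-bit DevID space the
 * proxy device gets devid = GENMASK(19, 0) = 0xfffff, i.e. the last
 * possible ID, presumably to keep out of the way of real endpoints.
 */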
2915 static int __init its_compute_its_list_map(struct resource *res,
2916 void __iomem *its_base)
2922 * This is assumed to be done early enough that we're
2923 * guaranteed to be single-threaded, hence no
2924 * locking. Should this change, we should address that.
2927 its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
2928 if (its_number >= ITS_LIST_MAX) {
2929 pr_err("ITS@%pa: No ITSList entry available!\n",
2934 ctlr = readl_relaxed(its_base + GITS_CTLR);
2935 ctlr &= ~GITS_CTLR_ITS_NUMBER;
2936 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
2937 writel_relaxed(ctlr, its_base + GITS_CTLR);
2938 ctlr = readl_relaxed(its_base + GITS_CTLR);
2939 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
2940 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
2941 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
2944 if (test_and_set_bit(its_number, &its_list_map)) {
2945 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
2946 &res->start, its_number);
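/*
 * Background: an ITS without GITS_TYPER.VMOVP needs a unique ITSList
 * number (0..15) programmed into GITS_CTLR so that VMOVP commands can
 * name the set of ITSs to synchronise. The write/read-back dance above
 * copes with implementations where the field turns out to be
 * read-only: we then adopt whatever number the ITS reports, provided
 * nobody else has claimed it already.
 */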
2953 static int __init its_probe_one(struct resource *res,
2954 struct fwnode_handle *handle, int numa_node)
2956 struct its_node *its;
2957 void __iomem *its_base;
2959 u64 baser, tmp, typer;
2962 its_base = ioremap(res->start, resource_size(res));
2964 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
2968 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
2969 if (val != 0x30 && val != 0x40) {
2970 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
2975 err = its_force_quiescent(its_base);
2977 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
2981 pr_info("ITS %pR\n", res);
2983 its = kzalloc(sizeof(*its), GFP_KERNEL);
2989 raw_spin_lock_init(&its->lock);
2990 mutex_init(&its->dev_alloc_lock);
2991 INIT_LIST_HEAD(&its->entry);
2992 INIT_LIST_HEAD(&its->its_device_list);
2993 typer = gic_read_typer(its_base + GITS_TYPER);
2994 its->base = its_base;
2995 its->phys_base = res->start;
2996 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
2997 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2999 if (!(typer & GITS_TYPER_VMOVP)) {
3000 err = its_compute_its_list_map(res, its_base);
3004 pr_info("ITS@%pa: Using ITS number %d\n",
3007 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3011 its->numa_node = numa_node;
3013 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3014 get_order(ITS_CMD_QUEUE_SZ));
3015 if (!its->cmd_base) {
3019 its->cmd_write = its->cmd_base;
3021 its_enable_quirks(its);
3023 err = its_alloc_tables(its);
3027 err = its_alloc_collections(its);
3029 goto out_free_tables;
3031 baser = (virt_to_phys(its->cmd_base) |
3032 GITS_CBASER_RaWaWb |
3033 GITS_CBASER_InnerShareable |
3034 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3037 gits_write_cbaser(baser, its->base + GITS_CBASER);
3038 tmp = gits_read_cbaser(its->base + GITS_CBASER);
3040 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3041 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3043 * The HW reports non-shareable, we must
3044 * remove the cacheability attributes as
3047 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3048 GITS_CBASER_CACHEABILITY_MASK);
3049 baser |= GITS_CBASER_nC;
3050 gits_write_cbaser(baser, its->base + GITS_CBASER);
3052 pr_info("ITS: using cache flushing for cmd queue\n");
3053 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
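/*
 * Worked example, assuming the usual 64kB command queue
 * (ITS_CMD_QUEUE_SZ == SZ_64K): the size field folded into GITS_CBASER
 * above is 64K / 4K - 1 = 15, i.e. sixteen 4kB pages, combined with
 * the queue's physical address and the RaWaWb/InnerShareable
 * attributes (downgraded to nC just above if the ITS doesn't honour
 * shareability).
 */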
3056 gits_write_cwriter(0, its->base + GITS_CWRITER);
3057 ctlr = readl_relaxed(its->base + GITS_CTLR);
3058 ctlr |= GITS_CTLR_ENABLE;
3060 ctlr |= GITS_CTLR_ImDe;
3061 writel_relaxed(ctlr, its->base + GITS_CTLR);
3063 err = its_init_domain(handle, its);
3065 goto out_free_tables;
3067 spin_lock(&its_lock);
3068 list_add(&its->entry, &its_nodes);
3069 spin_unlock(&its_lock);
3074 its_free_tables(its);
3076 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3081 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3085 static bool gic_rdists_supports_plpis(void)
3087 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3090 int its_cpu_init(void)
3092 if (!list_empty(&its_nodes)) {
3093 if (!gic_rdists_supports_plpis()) {
3094 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3097 its_cpu_init_lpis();
3098 its_cpu_init_collection();
3104 static const struct of_device_id its_device_id[] = {
3105 { .compatible = "arm,gic-v3-its", },
3109 static int __init its_of_probe(struct device_node *node)
3111 struct device_node *np;
3112 struct resource res;
3114 for (np = of_find_matching_node(node, its_device_id); np;
3115 np = of_find_matching_node(np, its_device_id)) {
3116 if (!of_device_is_available(np))
3118 if (!of_property_read_bool(np, "msi-controller")) {
3119 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3124 if (of_address_to_resource(np, 0, &res)) {
3125 pr_warn("%pOF: no regs?\n", np);
3129 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3136 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3138 #ifdef CONFIG_ACPI_NUMA
3139 struct its_srat_map {
3146 static struct its_srat_map *its_srat_maps __initdata;
3147 static int its_in_srat __initdata;
3149 static int __init acpi_get_its_numa_node(u32 its_id)
3153 for (i = 0; i < its_in_srat; i++) {
3154 if (its_id == its_srat_maps[i].its_id)
3155 return its_srat_maps[i].numa_node;
3157 return NUMA_NO_NODE;
3160 static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
3161 const unsigned long end)
3166 static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
3167 const unsigned long end)
3170 struct acpi_srat_gic_its_affinity *its_affinity;
3172 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3176 if (its_affinity->header.length < sizeof(*its_affinity)) {
3177 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3178 its_affinity->header.length);
3182 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3184 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3185 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3189 its_srat_maps[its_in_srat].numa_node = node;
3190 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3192 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3193 its_affinity->proximity_domain, its_affinity->its_id, node);
3198 static void __init acpi_table_parse_srat_its(void)
3202 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3203 sizeof(struct acpi_table_srat),
3204 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3205 gic_acpi_match_srat_its, 0);
3209 its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
3211 if (!its_srat_maps) {
3212 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3216 acpi_table_parse_entries(ACPI_SIG_SRAT,
3217 sizeof(struct acpi_table_srat),
3218 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3219 gic_acpi_parse_srat_its, 0);
3222 /* free the its_srat_maps after ITS probing */
3223 static void __init acpi_its_srat_maps_free(void)
3225 kfree(its_srat_maps);
3228 static void __init acpi_table_parse_srat_its(void) { }
3229 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
3230 static void __init acpi_its_srat_maps_free(void) { }
3233 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
3234 const unsigned long end)
3236 struct acpi_madt_generic_translator *its_entry;
3237 struct fwnode_handle *dom_handle;
3238 struct resource res;
3241 its_entry = (struct acpi_madt_generic_translator *)header;
3242 memset(&res, 0, sizeof(res));
3243 res.start = its_entry->base_address;
3244 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3245 res.flags = IORESOURCE_MEM;
3247 dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
3249 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3254 err = iort_register_domain_token(its_entry->translation_id, dom_handle);
3256 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3257 &res.start, its_entry->translation_id);
3261 err = its_probe_one(&res, dom_handle,
3262 acpi_get_its_numa_node(its_entry->translation_id));
3266 iort_deregister_domain_token(its_entry->translation_id);
3268 irq_domain_free_fwnode(dom_handle);
3272 static void __init its_acpi_probe(void)
3274 acpi_table_parse_srat_its();
3275 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
3276 gic_acpi_parse_madt_its, 0);
3277 acpi_its_srat_maps_free();
3280 static void __init its_acpi_probe(void) { }
3283 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
3284 struct irq_domain *parent_domain)
3286 struct device_node *of_node;
3287 struct its_node *its;
3288 bool has_v4 = false;
3291 its_parent = parent_domain;
3292 of_node = to_of_node(handle);
3294 its_of_probe(of_node);
3298 if (list_empty(&its_nodes)) {
3299 pr_warn("ITS: No ITS available, not enabling LPIs\n");
3303 gic_rdists = rdists;
3304 err = its_alloc_lpi_tables();
3308 list_for_each_entry(its, &its_nodes, entry)
3309 has_v4 |= its->is_v4;
3311 if (has_v4 && rdists->has_vlpis) {
3312 if (its_init_vpe_domain() ||
3313 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
3314 rdists->has_vlpis = false;
3315 pr_err("ITS: Disabling GICv4 support\n");