// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}
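/*
 * An LR signals an EOI maintenance interrupt when it no longer holds any
 * state (neither pending nor active), has the EOI bit set, and is not a
 * hardware-mapped interrupt: that combination means the guest just
 * EOI'ed a purely virtual interrupt that asked to be notified.
 */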
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}
/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;
		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;	/* GICv2 supports at most eight CPUs */
		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);
		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;
		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;
		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
	vgic_cpu->used_lrs = 0;
}
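/*
 * vgic_v2_fold_lr_state() above and vgic_v2_populate_lr() below are the
 * two halves of the LR lifecycle: LRs are populated from the ap_list
 * before entering the guest, and their state is folded back into it
 * after exiting.
 */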
/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;
	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}
	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				/*
				 * More sources are pending: leave the SGI
				 * pending and request a maintenance
				 * interrupt, so the next source can be
				 * injected once this one is deactivated.
				 */
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}
	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;
	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
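/*
 * Illustrative example (not in the original source): a pending, group-0,
 * level-triggered, non-HW SPI 40 with priority 0xa0 and no active state
 * is encoded above as
 *	val = 40 | GICH_LR_PENDING_BIT | GICH_LR_EOI
 *		 | (0x14 << GICH_LR_PRIORITY_SHIFT);
 * since level IRQs always request an EOI maintenance interrupt and only
 * the top five priority bits fit in the LR (0xa0 >> 3 == 0x14).
 */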
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
			GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
			GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
			GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
			GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
			GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
			GICH_VMCR_EOI_MODE_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
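/*
 * Note that the priority mask makes a lossy round trip through GICH_VMCR:
 * only the top five bits of the 8-bit PMR are stored, so e.g. a PMR of
 * 0xff written via vgic_v2_set_vmcr() reads back as 0xf8 here.
 */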
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}
/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	/* Reject regions that wrap around the end of the address space */
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	/* The two regions are disjoint iff one ends before the other begins */
	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}
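/*
 * This key is enabled by vgic_v2_probe() below when the GICV region
 * cannot safely be mapped into the guest; guest accesses to the GICv2
 * CPU interface are then trapped and emulated instead of going straight
 * to hardware.
 */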
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv2 has been found, returns an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;
	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}
	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	/* GICH_VTR.ListRegs holds the number of implemented LRs minus one */
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		/* A set ELRSR bit means the LR is empty: just drop its state */
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}
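/*
 * Note that writing 0 to GICH_HCR above also clears GICH_HCR_EN, so the
 * virtual interface is left disabled and cannot raise maintenance
 * interrupts while the host runs.
 */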
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}
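/*
 * Only the first used_lrs list registers need to be rewritten on entry:
 * save_lrs() zeroes every LR it saves on exit, so all other LRs already
 * hold zero.
 */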
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}
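/*
 * VMCR and APR live in hardware while the vcpu is loaded; reading them
 * back on vcpu_put keeps the in-memory copy current for the next
 * vgic_v2_load() and for userspace accesses to the GIC state.
 */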