/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

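/*
 * Illustration (not part of the driver): the lockless ICP update
 * pattern described above. Every ICP operation in this file follows
 * the same shape -- snapshot the state, compute a new value, and
 * retry the whole computation if the compare & swap loses a race.
 * A minimal sketch:
 *
 *	union kvmppc_icp_state old_state, new_state;
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		// ... modify new_state (cppr, mfrr, xisr, ...) ...
 *	} while (!icp_try_update(icp, old_state, new_state, true));
 *
 * icp_try_update() recomputes out_ee and performs a single cmpxchg64()
 * on the raw state word, so the whole union must keep fitting in
 * 64 bits.
 */
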
/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, so concurrent writes should not happen.
	 */
	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}

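/*
 * Note on the level encoding accepted above: the 0/1 values come from
 * the KVM_IRQ_LINE path (kvm_set_irq() below), while
 * KVM_INTERRUPT_SET_LEVEL / KVM_INTERRUPT_UNSET force assert/deassert
 * regardless of the source's LSI bit. Message-style (edge) sources
 * never latch "asserted"; they just trigger one delivery attempt.
 * For example, asserting then deasserting an LSI from userspace boils
 * down to:
 *
 *	kvm_set_irq(kvm, 0, irq, 1, false);	// assert
 *	kvm_set_irq(kvm, 0, irq, 0, false);	// deassert
 */
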
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_CPPR (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

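/*
 * Worked example for the out_ee computation above (lower value means
 * more favored): with cppr = 0x05 and a pending interrupt at
 * pending_pri = 0x03, out_ee = (xisr && (0x03 < 0x05)) = 1 and the
 * vcpu gets an external interrupt queued. Once the guest raises its
 * priority to cppr = 0x03 or below, 0x03 < cppr is false and out_ee
 * stays clear.
 */
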
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

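/*
 * In the comparisons above, remember that XICS priorities are
 * inverted: 0 is most favored and 0xff (MASKED) least favored. So a
 * delivery at priority 0x04 succeeds against cppr = 0x05, mfrr = 0xff
 * and pending_pri = 0xff, but loses to an already-pending interrupt
 * at pending_pri = 0x02, in which case need_resend gets set instead.
 */
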
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or nonexistent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;
	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;
	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

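/*
 * Layout of the XIRR value returned above, matching the hardware
 * register: bits 0-23 hold the XISR (interrupt source number) and
 * bits 24-31 the CPPR at the time of acceptance. E.g. accepting
 * source 0x001003 while CPPR was 0xff yields xirr = 0xff001003; the
 * guest later passes this same value to H_EOI so the top byte can
 * restore the CPPR.
 */
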
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

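/*
 * Usage note for H_IPI: mfrr is the priority of the requested IPI,
 * and 0xff (MASKED) is the idle value, i.e. no IPI is being
 * requested. A pseries guest issues the hcall as, for instance:
 *
 *	plpar_hcall_norets(H_IPI, target_server, 0x40);
 *
 * (0x40 is just an example priority.) The IPI is presented
 * immediately only if that priority is more favored (numerically
 * lower) than the target's CPPR; otherwise it sits in the MFRR until
 * the target lowers its priority.
 */
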
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}
	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

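/*
 * H_EOI thus performs two distinct steps, mirroring the PAPR split
 * between ICP and ICS: the CPPR is restored from the top byte of the
 * xirr the guest originally got from H_XIRR (the Down_CPPR above),
 * and then the source itself is EOIed, which for a still-asserted
 * level-sensitive source immediately re-triggers delivery.
 */
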
static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_REJECT) {
		icp->n_rm_reject++;
		icp_deliver_irq(xics, icp, icp->rm_reject);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);

/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_reject, t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_rm_reject = 0;
	t_check_resend = 0;
	t_reject = 0;

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_rm_reject += icp->n_rm_reject;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu reject=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_reject, t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);
		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}
	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);
	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;
	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

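/*
 * The 64-bit ONE_REG encoding produced above (and consumed by
 * kvmppc_xics_set_icp() below), per the KVM_REG_PPC_ICP_* definitions:
 *
 *	bits 56-63: CPPR
 *	bits 32-55: XISR (24 significant bits)
 *	bits 24-31: MFRR
 *	bits 16-23: pending priority
 *
 * For example, an idle ICP (no pending interrupt, IPIs masked) is
 * 0xff000000ffff0000: CPPR 0xff, XISR 0, MFRR 0xff, pending_pri 0xff,
 * which also satisfies the consistency checks in set_icp below.
 */
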
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->asserted)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->asserted = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		irqp->lsi = 1;
		if (val & KVM_XICS_PENDING)
			irqp->asserted = 1;
	}
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}

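/*
 * Shape of the u64 consumed above (and produced by xics_get_source()),
 * per the KVM_XICS_* definitions: the low 32 bits select the
 * destination server, bits 32-39 the priority, and
 * KVM_XICS_LEVEL_SENSITIVE / KVM_XICS_MASKED / KVM_XICS_PENDING are
 * individual flag bits above that. A sketch of a userspace restore,
 * assuming an already-open XICS device fd:
 *
 *	u64 val = server | ((u64)prio << KVM_XICS_PRIORITY_SHIFT)
 *		  | KVM_XICS_LEVEL_SENSITIVE;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq_number,
 *		.addr  = (__u64)(uintptr_t)&val,
 *	};
 *	ioctl(xics_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
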
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	return ics_deliver_irq(xics, irq, level);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	struct kvm *kvm = xics->kvm;
	int i;

	debugfs_remove(xics->dentry);
	if (kvm)
		kvm->arch.xics = NULL;
	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

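/*
 * These ops back the "kvm-xics" device type. A sketch of how userspace
 * (e.g. QEMU) instantiates the emulated XICS and wires a vcpu to it,
 * assuming an open VM fd and vcpu fd:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// returns device fd in cd.fd
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_IRQ_XICS,
 *		.args = { cd.fd, server_num },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * The KVM_ENABLE_CAP path ends up in kvmppc_xics_connect_vcpu() below.
 */
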
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = xics_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}