Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
- Architectures: x86
- Type: vm ioctl
+ Architectures: x86, arm, arm64
+ Type: vcpu ioctl
Parameters: struct kvm_vcpu_events (out)
Returns: 0 on success, -1 on error
+ X86:
+
Gets currently pending exceptions, interrupts, and NMIs as well as related
states of the vcpu.
KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
smi contains a valid state.
+ ARM/ARM64:
+
+ If the guest accesses a device that is being emulated by the host kernel in
+ such a way that a real device would generate a physical SError, KVM may make
+ a virtual SError pending for that VCPU. This system error interrupt remains
+ pending until the guest takes the exception by unmasking PSTATE.A.
+
+ Running the VCPU may cause it to take a pending SError, or make an access that
+ causes an SError to become pending. The event's description is only valid while
+ the VCPU is not running.
+
+ This API provides a way to read and write the pending 'event' state that is not
+ visible to the guest. To save, restore or migrate a VCPU, the struct representing
+ the state can be read then written using this GET/SET API, along with the other
+ guest-visible registers. It is not possible to 'cancel' an SError that has been
+ made pending.
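
A minimal user-space sketch of this save/restore flow might look as follows
(src_vcpu_fd and dst_vcpu_fd are assumed, already-open vCPU file descriptors,
and error handling is elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Copy the pending-event state from one vCPU to another, e.g. across a
 * migration. Neither vCPU may be running while this is done. */
static int migrate_vcpu_events(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	return ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}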
+
+ A device being emulated in user-space may also wish to generate an SError. To do
+ this the events structure can be populated by user-space. The current state
+ should be read first, to ensure no existing SError is pending. If an existing
+ SError is pending, the architecture's 'Multiple SError interrupts' rules should
+ be followed. (2.5.3 of DDI0587.a "ARM Reliability, Availability, and
+ Serviceability (RAS) Specification").
+
+ SError exceptions always have an ESR value. Some CPUs have the ability to
+ specify what the virtual SError's ESR value should be. These systems will
+ advertise KVM_CAP_ARM_INJECT_SERROR_ESR. In this case exception.serror_has_esr
+ will always have a non-zero value when read, and the agent making an SError
+ pending should specify the ISS field in the lower 24 bits of
+ exception.serror_esr. If the system supports KVM_CAP_ARM_INJECT_SERROR_ESR,
+ but user-space sets the events with exception.serror_has_esr as zero, KVM will
+ choose an ESR.
+
+ Specifying exception.serror_has_esr on a system that does not support it will
+ return -EINVAL. Setting anything other than the lower 24 bits of
+ exception.serror_esr will return -EINVAL.
+
+ struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+ };
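
As a rough illustration, a user-space device model could make an SError pending
along these lines (a sketch only: kvm_fd is an assumed, already-open /dev/kvm
descriptor, vcpu_fd an open vCPU descriptor, and error handling is simplified):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int inject_serror(int kvm_fd, int vcpu_fd, __u64 iss)
{
	struct kvm_vcpu_events events;

	/* Read the current state first: an SError may already be pending. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;
	if (events.exception.serror_pending)
		return -1;	/* apply the 'Multiple SError interrupts' rules instead */

	/* reserved and pad fields must be zero */
	memset(&events, 0, sizeof(events));
	events.exception.serror_pending = 1;

	/* Only specify an ESR if the host advertises the capability. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_SERROR_ESR) > 0) {
		events.exception.serror_has_esr = 1;
		/* Only the lower 24 bits (the ISS field) may be set. */
		events.exception.serror_esr = iss & 0xffffff;
	}

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}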
+
4.32 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
Extended by: KVM_CAP_INTR_SHADOW
- Architectures: x86
- Type: vm ioctl
+ Architectures: x86, arm, arm64
+ Type: vcpu ioctl
Parameters: struct kvm_vcpu_events (in)
Returns: 0 on success, -1 on error
+ X86:
+
Set pending exceptions, interrupts, and NMIs as well as related states of the
vcpu.
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+ ARM/ARM64:
+
+ Set the pending SError exception state for this VCPU. It is not possible to
+ 'cancel' an SError that has been made pending.
+
+ See KVM_GET_VCPU_EVENTS for the data structure.
+
4.33 KVM_GET_DEBUGREGS
-ENOENT on deassign if the conn_id isn't registered
-EEXIST on assign if the conn_id is already registered
+4.114 KVM_GET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in/out)
+Returns: 0 on success, -1 on error
+Errors:
+ E2BIG: the total state size (including the fixed-size part of struct
+ kvm_nested_state) exceeds the value of 'size' specified by
+ the user; the size required will be written into size.
+
+struct kvm_nested_state {
+ __u16 flags;
+ __u16 format;
+ __u32 size;
+ union {
+ struct kvm_vmx_nested_state vmx;
+ struct kvm_svm_nested_state svm;
+ __u8 pad[120];
+ };
+ __u8 data[0];
+};
+
+#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
+
+struct kvm_vmx_nested_state {
+ __u64 vmxon_pa;
+ __u64 vmcs_pa;
+
+ struct {
+ __u16 flags;
+ } smm;
+};
+
+This ioctl copies the vcpu's nested virtualization state from the kernel to
+userspace.
+
+The maximum size of the state, including the fixed-size part of struct
+kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
+the KVM_CHECK_EXTENSION ioctl().
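
For instance, user-space might size the buffer and fetch the state like this
(a sketch only: kvm_fd and vcpu_fd are assumed, already-open file descriptors):

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_nested_state *save_nested_state(int kvm_fd, int vcpu_fd)
{
	int max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	struct kvm_nested_state *state;

	if (max_size <= 0)
		return NULL;		/* nested state not supported */

	state = calloc(1, max_size);
	if (!state)
		return NULL;

	state->size = max_size;		/* room available for KVM to fill */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		/* On E2BIG, state->size now holds the size actually needed. */
		free(state);
		return NULL;
	}

	/* The same buffer can later be restored with KVM_SET_NESTED_STATE. */
	return state;
}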
+
+4.115 KVM_SET_NESTED_STATE
+
+Capability: KVM_CAP_NESTED_STATE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_nested_state (in)
+Returns: 0 on success, -1 on error
+
+This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For
+the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
5. The kvm_run structure
------------------------
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled
+
+With this capability, KVM support for memory backing with 1M pages
+through hugetlbfs can be enabled for a VM. After the capability is
+enabled, cmma can't be enabled anymore, and pfmfi and the storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
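
The capability could be enabled from user-space roughly as follows (a sketch
only: vm_fd is an assumed, already-open VM file descriptor, and the hpage=1
module parameter must already be set):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_hpage_1m(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_HPAGE_1M,
	};

	/* Fails with -EINVAL if cmma is already enabled or hpage is not set. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}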
+
8. Other capabilities.
----------------------
hypercalls:
HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
+
+ 8.19 KVM_CAP_ARM_INJECT_SERROR_ESR
+
+ Architectures: arm, arm64
+
+ This capability indicates that userspace can specify (via the
+ KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it
+ takes a virtual SError interrupt exception.
+ If KVM advertises this capability, userspace can only specify the ISS field for
+ the ESR syndrome. Other parts of the ESR, such as the EC, are generated by the
+ CPU when the exception is taken. If this virtual SError is taken to EL1 using
+ AArch64, this value will be reported in the ISS field of ESR_ELx.
+
+ See KVM_CAP_VCPU_EVENTS for more details.
#include <asm/cputype.h>
/* arm64 compatibility macros */
-#define COMPAT_PSR_MODE_ABT ABT_MODE
-#define COMPAT_PSR_MODE_UND UND_MODE
-#define COMPAT_PSR_T_BIT PSR_T_BIT
-#define COMPAT_PSR_I_BIT PSR_I_BIT
-#define COMPAT_PSR_A_BIT PSR_A_BIT
-#define COMPAT_PSR_E_BIT PSR_E_BIT
-#define COMPAT_PSR_IT_MASK PSR_IT_MASK
+#define PSR_AA32_MODE_ABT ABT_MODE
+#define PSR_AA32_MODE_UND UND_MODE
+#define PSR_AA32_T_BIT PSR_T_BIT
+#define PSR_AA32_I_BIT PSR_I_BIT
+#define PSR_AA32_A_BIT PSR_A_BIT
+#define PSR_AA32_E_BIT PSR_E_BIT
+#define PSR_AA32_IT_MASK PSR_IT_MASK
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
return (unsigned long *)&vcpu->arch.hcr;
}
+ static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hcr &= ~HCR_TWE;
+ }
+
+ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hcr |= HCR_TWE;
+ }
+
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
- return 1;
+ return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
#define ARM64_HAS_CACHE_DIC 28
#define ARM64_HW_DBM 29
#define ARM64_SSBD 30
-#define ARM64_HAS_STAGE2_FWB 31
+#define ARM64_MISMATCHED_CACHE_TYPE 31
+#define ARM64_HAS_STAGE2_FWB 32
-#define ARM64_NCAPS 32
+#define ARM64_NCAPS 33
#endif /* __ASM_CPUCAPS_H */
/* trap error record accesses */
vcpu->arch.hcr_el2 |= HCR_TERR;
}
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ vcpu->arch.hcr_el2 |= HCR_FWB;
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
return (unsigned long *)&vcpu->arch.hcr_el2;
}
+ static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hcr_el2 &= ~HCR_TWE;
+ }
+
+ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hcr_el2 |= HCR_TWE;
+ }
+
+ static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.vsesr_el2;
+ }
+
static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
vcpu->arch.vsesr_el2 = vsesr;
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}
/*
u32 mode;
if (vcpu_mode_is_32bit(vcpu)) {
- mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
- return mode > COMPAT_PSR_MODE_USR;
+ mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+ return mode > PSR_AA32_MODE_USR;
}
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
- *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+ *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+ return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+ #define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
+ #define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL2 set/clear bits"
+#endif
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (1 << 26)
#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
(1 << 29))
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
- (1 << 27) | (1 << 30) | (1 << 31))
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+ (0xffffffffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
SCTLR_EL1_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL1 set/clear bits"
+#endif
/* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */
+ #define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
write_sysreg(__scs_new, sysreg); \
} while (0)
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
- u32 val;
-
- SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
- SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
-
- val = read_sysreg(sctlr_el1);
- val &= ~clear;
- val |= set;
- write_sysreg(val, sctlr_el1);
-}
-
#endif
#endif /* __ASM_SYSREG_H */
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};
}
#endif
+ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
+ {
+ u64 val = read_sysreg_s(SYS_CLIDR_EL1);
+
+ /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
+ WARN_ON(val & (7 << 27 | 7 << 21));
+ }
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_dic,
},
+ {
+ .desc = "Stage-2 Force Write-Back",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_STAGE2_FWB,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_FWB_SHIFT,
+ .min_field_value = 1,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_has_fwb,
+ },
#ifdef CONFIG_ARM64_HW_AFDBM
{
/*
static void update_cpu_capabilities(u16 scope_mask)
{
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
+ __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}
static int __enable_cpu_capability(void *arg)
static void __init enable_cpu_capabilities(u16 scope_mask)
{
- __enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
+ __enable_cpu_capabilities(arm64_features, scope_mask);
}
/*
static struct undef_hook mrs_hook = {
.instr_mask = 0xfff00000,
.instr_val = 0xd5300000,
- .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
};
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
- case COMPAT_PSR_MODE_USR:
- case COMPAT_PSR_MODE_FIQ:
- case COMPAT_PSR_MODE_IRQ:
- case COMPAT_PSR_MODE_SVC:
- case COMPAT_PSR_MODE_ABT:
- case COMPAT_PSR_MODE_UND:
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_FIQ:
+ case PSR_AA32_MODE_IRQ:
+ case PSR_AA32_MODE_SVC:
+ case PSR_AA32_MODE_ABT:
+ case PSR_AA32_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
return -EINVAL;
}
+ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+ {
+ events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
+ events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+
+ if (events->exception.serror_pending && events->exception.serror_has_esr)
+ events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+
+ return 0;
+ }
+
+ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+ {
+ bool serror_pending = events->exception.serror_pending;
+ bool has_esr = events->exception.serror_has_esr;
+
+ if (serror_pending && has_esr) {
+ if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ return -EINVAL;
+
+ if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
+ kvm_set_sei_esr(vcpu, events->exception.serror_esr);
+ else
+ return -EINVAL;
+ } else if (serror_pending) {
+ kvm_inject_vabt(vcpu);
+ }
+
+ return 0;
+ }
+
int __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
};
static const struct kvm_regs default_regs_reset32 = {
- .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
- COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+ .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
+ PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};
static bool cpu_has_32bit_el1(void)
case KVM_CAP_ARM_PMU_V3:
r = kvm_arm_support_pmu_v3();
break;
+ case KVM_CAP_ARM_INJECT_SERROR_ESR:
+ r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+ break;
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
+ case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
default:
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+ #define GICD_IIDR_IMPLEMENTER_SHIFT 0
+ #define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+ #define GICD_IIDR_REVISION_SHIFT 12
+ #define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+ #define GICD_IIDR_VARIANT_SHIFT 16
+ #define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+ #define GICD_IIDR_PRODUCT_ID_SHIFT 24
+ #define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
/*
* In systems with a single security state (what we emulate in KVM)
* the meaning of the interrupt group enable bits is slightly different
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
phys_addr_t phys_base;
} __percpu *rdist;
struct page *prop_page;
- int id_bits;
u64 flags;
+ u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
};
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
-#define KVM_CAP_ARM_INJECT_SERROR_ESR 156
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#ifdef KVM_CAP_IRQ_ROUTING
/* Available with KVM_CAP_HYPERV_EVENTFD */
#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
+ #include <linux/sched/stat.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>
kvm_timer_vcpu_load(vcpu);
kvm_vcpu_load_sysregs(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
+
+ if (single_task_running())
+ vcpu_clear_wfe_traps(vcpu);
+ else
+ vcpu_set_wfe_traps(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- swake_up(kvm_arch_vcpu_wq(vcpu));
+ swake_up_one(kvm_arch_vcpu_wq(vcpu));
}
}
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
if (vcpu->arch.power_off || vcpu->arch.pause) {
return ret;
}
+ static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+ {
+ memset(events, 0, sizeof(*events));
+
+ return __kvm_arm_vcpu_get_events(vcpu, events);
+ }
+
+ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+ {
+ int i;
+
+ /* check whether the reserved field is zero */
+ for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+ if (events->reserved[i])
+ return -EINVAL;
+
+ /* check whether the pad field is zero */
+ for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+ if (events->exception.pad[i])
+ return -EINVAL;
+
+ return __kvm_arm_vcpu_set_events(vcpu, events);
+ }
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
r = kvm_arm_vcpu_has_attr(vcpu, &attr);
break;
}
+ case KVM_GET_VCPU_EVENTS: {
+ struct kvm_vcpu_events events;
+
+ if (kvm_arm_vcpu_get_events(vcpu, &events))
+ return -EINVAL;
+
+ if (copy_to_user(argp, &events, sizeof(events)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case KVM_SET_VCPU_EVENTS: {
+ struct kvm_vcpu_events events;
+
+ if (copy_from_user(&events, argp, sizeof(events)))
+ return -EFAULT;
+
+ return kvm_arm_vcpu_set_events(vcpu, &events);
+ }
default:
r = -EINVAL;
}