Merge tag 'kvm-arm64/for-3.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 11 Nov 2013 11:05:20 +0000 (12:05 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 11 Nov 2013 11:05:20 +0000 (12:05 +0100)
A handful of fixes for KVM/arm64:

- A couple of basic fixes for running BE guests on a LE host
- A performance improvement for overcommitted VMs (same as the equivalent
  patch for ARM)

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Conflicts:
arch/arm/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_emulate.h

1  2 
arch/arm/include/asm/kvm_emulate.h
arch/arm/kvm/psci.c
arch/arm64/include/asm/kvm_emulate.h

@@@ -157,9 -157,50 +157,55 @@@ static inline u32 kvm_vcpu_hvc_get_imm(
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
  }
  
 +static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
 +{
 +      return vcpu->arch.cp15[c0_MPIDR];
 +}
 +
+ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+ {
+       *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+ }
+ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+ {
+       return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
+ }
+ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+ {
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return be16_to_cpu(data & 0xffff);
+               default:
+                       return be32_to_cpu(data);
+               }
+       }
+       return data;            /* Leave LE untouched */
+ }
+ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+ {
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_be16(data & 0xffff);
+               default:
+                       return cpu_to_be32(data);
+               }
+       }
+       return data;            /* Leave LE untouched */
+ }
  #endif /* __ARM_KVM_EMULATE_H__ */
diff --combined arch/arm/kvm/psci.c
@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/kvm_host.h>
  #include <linux/wait.h>
  
 +#include <asm/cputype.h>
  #include <asm/kvm_emulate.h>
  #include <asm/kvm_psci.h>
  
@@@ -35,30 -34,22 +35,30 @@@ static void kvm_psci_vcpu_off(struct kv
  static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
  {
        struct kvm *kvm = source_vcpu->kvm;
 -      struct kvm_vcpu *vcpu;
 +      struct kvm_vcpu *vcpu = NULL, *tmp;
        wait_queue_head_t *wq;
        unsigned long cpu_id;
 +      unsigned long mpidr;
        phys_addr_t target_pc;
 +      int i;
  
        cpu_id = *vcpu_reg(source_vcpu, 1);
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);
  
 -      if (cpu_id >= atomic_read(&kvm->online_vcpus))
 +      kvm_for_each_vcpu(i, tmp, kvm) {
 +              mpidr = kvm_vcpu_get_mpidr(tmp);
 +              if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
 +                      vcpu = tmp;
 +                      break;
 +              }
 +      }
 +
 +      if (!vcpu)
                return KVM_PSCI_RET_INVAL;
  
        target_pc = *vcpu_reg(source_vcpu, 2);
  
 -      vcpu = kvm_get_vcpu(kvm, cpu_id);
 -
        wq = kvm_arch_vcpu_wq(vcpu);
        if (!waitqueue_active(wq))
                return KVM_PSCI_RET_INVAL;
                vcpu_set_thumb(vcpu);
        }
  
+       /* Propagate caller endianness */
+       if (kvm_vcpu_is_be(source_vcpu))
+               kvm_vcpu_set_be(vcpu);
        *vcpu_pc(vcpu) = target_pc;
        vcpu->arch.pause = false;
        smp_mb();               /* Make sure the above is visible */
@@@ -177,9 -177,60 +177,65 @@@ static inline u8 kvm_vcpu_trap_get_faul
        return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
  }
  
 +static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
 +{
 +      return vcpu_sys_reg(vcpu, MPIDR_EL1);
 +}
 +
+ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+ {
+       if (vcpu_mode_is_32bit(vcpu))
+               *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+       else
+               vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
+ }
+ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+ {
+       if (vcpu_mode_is_32bit(vcpu))
+               return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+       return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+ }
+ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+ {
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return be16_to_cpu(data & 0xffff);
+               case 4:
+                       return be32_to_cpu(data & 0xffffffff);
+               default:
+                       return be64_to_cpu(data);
+               }
+       }
+       return data;            /* Leave LE untouched */
+ }
+ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
+                                                   unsigned long data,
+                                                   unsigned int len)
+ {
+       if (kvm_vcpu_is_be(vcpu)) {
+               switch (len) {
+               case 1:
+                       return data & 0xff;
+               case 2:
+                       return cpu_to_be16(data & 0xffff);
+               case 4:
+                       return cpu_to_be32(data & 0xffffffff);
+               default:
+                       return cpu_to_be64(data);
+               }
+       }
+       return data;            /* Leave LE untouched */
+ }
  #endif /* __ARM64_KVM_EMULATE_H__ */