spinlock_t mmu_lock;
};
- #define CONTEXT_HOST 0
- #define CONTEXT_GUEST 1
- #define CONTEXT_GUEST_END 2
-
#define VSID_REAL 0x07ffffffffc00000ULL
#define VSID_BAT 0x07ffffffffb00000ULL
#define VSID_64K 0x0800000000000000ULL
return !is_kvmppc_hv_enabled(vcpu->kvm);
}
+ extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
+ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
return old == 0;
}
+ static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+ {
+ hpte_v &= ~HPTE_V_HVLOCK;
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+ hpte[0] = cpu_to_be64(hpte_v);
+ }
+
+ /* As above, but without the release barrier; callers must provide any ordering needed */
+ static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+ {
+ hpte_v &= ~HPTE_V_HVLOCK;
+ hpte[0] = cpu_to_be64(hpte_v);
+ }
+
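A minimal usage sketch of how these helpers pair with the existing try_lock_hpte() (whose tail, "return old == 0;", appears in the hunk above); the calling function here is hypothetical:

static void example_modify_hpte(__be64 *hptep)
{
	unsigned long v;

	/* spin until we own HPTE_V_HVLOCK in the first doubleword */
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hptep[0]);
	/* ... inspect or modify the entry under the lock ... */
	unlock_hpte(hptep, v);	/* drops HPTE_V_HVLOCK with a release barrier */
}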
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
int i, shift;
pte_t old_pte, new_pte = __pte(0);
while (1) {
- old_pte = pte_val(*ptep);
+ old_pte = *ptep;
/*
* wait until _PAGE_BUSY is clear, then set it atomically
*/
- if (unlikely(old_pte & _PAGE_BUSY)) {
+ if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
cpu_relax();
continue;
}
return __pte(0);
#endif
/* If the pte is not present, return an empty pte */
- if (unlikely(!(old_pte & _PAGE_PRESENT)))
+ if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
if (writing && pte_write(old_pte))
new_pte = pte_mkdirty(new_pte);
- if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
- new_pte))
+ if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
+ pte_val(old_pte),
+ pte_val(new_pte))) {
break;
+ }
}
return new_pte;
}
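The pte_val()/__pte() churn in this hunk is needed because pte_t may be an opaque single-member struct rather than a bare integer. A sketch of the two conventional definitions (the STRICT_MM_TYPECHECKS pattern) shows why comparison and cmpxchg have to go through the accessors:

#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* no implicit conversion to or from integers */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#else
typedef unsigned long pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)
#endif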
{
if (key)
return PP_RWRX <= pp && pp <= PP_RXRX;
- return 1;
+ return true;
}
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
if (pagesize <= PAGE_SIZE)
- return 1;
+ return true;
return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
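A worked example of the alignment check, with hypothetical numbers: with 64KB base pages (PAGE_SHIFT = 16) and a 16MB backing page, mask = ((1UL << 24) >> 16) - 1 = 255, so a memslot with base_gfn = 1024 and npages = 512 is aligned (both are multiples of 256), while one with base_gfn = 1040 is not, since 1040 & 255 == 16.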
return rcu_dereference_raw_notrace(kvm->memslots);
}
+ extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+
+ extern void kvmhv_rm_send_ipi(int cpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
unsigned long host_sdr1;
int tlbie_lock;
unsigned long lpcr;
- unsigned long rmor;
- struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
- int rma_setup_done;
+ int hpte_setup_done;
u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
int hpt_cma_alloc;
+ struct dentry *debugfs_dir;
+ struct dentry *htab_dentry;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
struct mutex hpt_mutex;
/*
* Struct for a virtual core.
- * Note: entry_exit_count combines an entry count in the bottom 8 bits
- * and an exit count in the next 8 bits. This is so that we can
- * atomically increment the entry count iff the exit count is 0
- * without taking the lock.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
*/
struct kvmppc_vcore {
int n_runnable;
- int n_busy;
int num_threads;
- int entry_exit_count;
- int n_woken;
- int nap_count;
+ int entry_exit_map;
int napping_threads;
int first_vcpuid;
u16 pcpu;
ulong conferring_threads;
};
- #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
- #define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+ #define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)
+ #define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
+ #define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
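A sketch of the lock-free entry protocol these macros support (hypothetical helper; the real check is done in real-mode assembly with lwarx/stwcx.): atomically set this thread's entry bit iff no thread has set an exit bit yet.

static bool example_try_enter_vcore(struct kvmppc_vcore *vc, int thread)
{
	int oldmap, newmap;

	do {
		oldmap = vc->entry_exit_map;
		if (oldmap >> 8)	/* a thread is already exiting */
			return false;
		newmap = oldmap | (1 << thread);
	} while (cmpxchg(&vc->entry_exit_map, oldmap, newmap) != oldmap);
	return true;
}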
/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
- #define VCORE_STARTING 2
+ #define VCORE_PREEMPT 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4
u8 base_page_size; /* MMU_PAGE_xxx */
};
+ /* Struct used to accumulate timing information in HV real mode code */
+ struct kvmhv_tb_accumulator {
+ u64 seqcount; /* used to synchronize access; also 2 * sample count (odd while an update is in flight) */
+ u64 tb_total; /* total time in timebase ticks */
+ u64 tb_min; /* min time */
+ u64 tb_max; /* max time */
+ };
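Since the updater bumps seqcount before and after each sample (hence "2 * sample count", with an odd value meaning an update is in flight), taking a consistent snapshot needs a seqlock-style retry loop; a hypothetical reader might look like:

static u64 example_read_tb_total(struct kvmhv_tb_accumulator *acc)
{
	u64 seq, total;

	do {
		seq = READ_ONCE(acc->seqcount);
		smp_rmb();	/* order the seqcount read before the payload */
		total = acc->tb_total;
		smp_rmb();	/* order the payload before the recheck */
	} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);

	return total;
}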
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
pgd_t *pgdir;
u8 io_gpr; /* GPR used as IO source/target */
- u8 mmio_is_bigendian;
+ u8 mmio_host_swabbed;
u8 mmio_sign_extend;
u8 osi_needed;
u8 osi_enabled;
u32 emul_inst;
#endif
+
+ #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
+ u64 cur_tb_start; /* when it started */
+ struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
+ struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
+ struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
+ struct kvmhv_tb_accumulator guest_time; /* guest execution */
+ struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_timings;
+ #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
case KVM_CAP_PPC_RMA:
r = 0;
break;
+ case KVM_CAP_PPC_HWRNG:
+ r = kvmppc_hwrng_present();
+ break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
return;
}
- if (vcpu->arch.mmio_is_bigendian) {
+ if (!vcpu->arch.mmio_host_swabbed) {
switch (run->mmio.len) {
case 8: gpr = *(u64 *)run->mmio.data; break;
case 4: gpr = *(u32 *)run->mmio.data; break;
case 2: gpr = *(u16 *)run->mmio.data; break;
case 1: gpr = *(u8 *)run->mmio.data; break;
}
} else {
- /* Convert BE data from userland back to LE. */
switch (run->mmio.len) {
- case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
- case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
+ case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
+ case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
case 1: gpr = *(u8 *)run->mmio.data; break;
}
}
int is_default_endian)
{
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
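/*
 * Equivalent single assignment (a logical XNOR on normalized booleans),
 * shown here as a sketch; the explicit branches above are what the
 * patch actually uses:
 *
 *	host_swabbed = (kvmppc_need_byteswap(vcpu) == !!is_default_endian);
 */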
if (bytes > sizeof(run->mmio.data)) {
run->mmio.is_write = 0;
vcpu->arch.io_gpr = rt;
- vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->arch.mmio_host_swabbed = host_swabbed;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
vcpu->arch.mmio_sign_extend = 0;
{
void *data = run->mmio.data;
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) {
vcpu->mmio_is_write = 1;
/* Store the value in the lowest bytes of 'data'. */
- if (is_bigendian) {
+ if (!host_swabbed) {
switch (bytes) {
case 8: *(u64 *)data = val; break;
case 4: *(u32 *)data = val; break;
case 2: *(u16 *)data = val; break;
case 1: *(u8 *)data = val; break;
}
} else {
- /* Store LE value into 'data'. */
switch (bytes) {
- case 4: st_le32(data, val); break;
- case 2: st_le16(data, val); break;
- case 1: *(u8 *)data = val; break;
+ case 8: *(u64 *)data = swab64(val); break;
+ case 4: *(u32 *)data = swab32(val); break;
+ case 2: *(u16 *)data = swab16(val); break;
+ case 1: *(u8 *)data = val; break;
}
}
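A stand-alone illustration of what the swab helpers used above do (userspace sketch; in the kernel, swab16/32/64 come from <linux/swab.h>):

#include <stdio.h>
#include <stdint.h>

static uint32_t my_swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* prints 44332211: byte order reversed, which is exactly the
	 * conversion applied on the store path above */
	printf("%08x\n", my_swab32(0x11223344));
	return 0;
}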