/* virt/kvm/kvm_mm.h — KVM common-code MM helpers (extraction artifacts removed) */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #ifndef __KVM_MM_H__
4 #define __KVM_MM_H__ 1
5
6 /*
7  * Architectures can choose whether to use an rwlock or spinlock
8  * for the mmu_lock.  These macros, for use in common code
9  * only, avoids using #ifdefs in places that must deal with
10  * multiple architectures.
11  */
12
#ifdef KVM_HAVE_MMU_RWLOCK
/* rwlock flavor: common code always takes mmu_lock for write. */
#define KVM_MMU_LOCK_INIT(kvm)          rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)               write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)             write_unlock(&(kvm)->mmu_lock)
#else
/* spinlock flavor: the default when the arch doesn't select an rwlock. */
#define KVM_MMU_LOCK_INIT(kvm)          spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)               spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)             spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
22
/*
 * Translate a host virtual address to a host pfn.
 *
 * NOTE(review): the implementation lives elsewhere (kvm_main.c); parameter
 * semantics below are inferred from the names — confirm against the
 * definition.  @atomic presumably forbids sleeping, @interruptible allows
 * the fault to be interrupted by signals, @async/@writable are optional
 * out-parameters, and @write_fault requests a writable mapping.
 */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
                     bool *async, bool write_fault, bool *writable);
25
#ifdef CONFIG_HAVE_KVM_PFNCACHE
/*
 * Notify the gfn=>pfn cache that the host range [start, end) is being
 * invalidated (mmu_notifier start).  @may_block indicates whether the
 * caller's context permits sleeping.
 * NOTE(review): range/blocking semantics inferred from names — the
 * implementation is in pfncache.c; confirm there.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
                                       unsigned long start,
                                       unsigned long end,
                                       bool may_block);
#else
/*
 * No-op stub when the pfn cache isn't built in, so common code can call
 * this unconditionally without #ifdefs.
 */
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
                                                     unsigned long start,
                                                     unsigned long end,
                                                     bool may_block)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */
39
40 #endif /* __KVM_MM_H__ */