From: Will Deacon
Date: Tue, 23 May 2023 10:18:20 +0000 (+0100)
Subject: KVM: arm64: Allocate pages for hypervisor FF-A mailboxes
X-Git-Tag: v6.6.7~2444^2~7^2~6^2~7
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bc3888a0f4e979ecf9dd8c33a84b8da8cc130790;p=platform%2Fkernel%2Flinux-starfive.git

KVM: arm64: Allocate pages for hypervisor FF-A mailboxes

The FF-A proxy code needs to allocate its own buffer pair for
communication with EL3 and for forwarding calls from the host at EL1.
Reserve a couple of pages for this purpose and use them to initialise
the hypervisor's FF-A buffer structure.

Co-developed-by: Andrew Walbran
Signed-off-by: Andrew Walbran
Signed-off-by: Will Deacon
Link: https://lore.kernel.org/r/20230523101828.7328-4-will@kernel.org
Signed-off-by: Oliver Upton
---
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index 01129b0..2b495ec 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -106,4 +106,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
 	return res;
 }
 
+#define KVM_FFA_MBOX_NR_PAGES	1
+
+static inline unsigned long hyp_ffa_proxy_pages(void)
+{
+	/* A page each for the hypervisor's RX and TX mailboxes. */
+	return 2 * KVM_FFA_MBOX_NR_PAGES;
+}
+
 #endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
index 5c9b924..1becb10 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
@@ -11,7 +11,7 @@
 #define FFA_MIN_FUNC_NUM 0x60
 #define FFA_MAX_FUNC_NUM 0x7F
 
-int hyp_ffa_init(void);
+int hyp_ffa_init(void *pages);
 bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
 
 #endif /* __KVM_HYP_FFA_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index abdcaf9..c85e5d4 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -28,8 +28,11 @@
 #include <linux/arm-smccc.h>
 #include <linux/arm_ffa.h>
 
+#include <asm/kvm_pkvm.h>
+
 #include <nvhe/ffa.h>
 #include <nvhe/trap_handler.h>
+#include <nvhe/spinlock.h>
 
 /*
  * "ID value 0 must be returned at the Non-secure physical FF-A instance"
@@ -37,6 +40,19 @@
  */
 #define HOST_FFA_ID 0
 
+struct kvm_ffa_buffers {
+	hyp_spinlock_t lock;
+	void *tx;
+	void *rx;
+};
+
+/*
+ * Note that we don't currently lock these buffers explicitly, instead
+ * relying on the locking of the host FFA buffers as we only have one
+ * client.
+ */
+static struct kvm_ffa_buffers hyp_buffers;
+
 static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
 {
 	*res = (struct arm_smccc_res) {
@@ -124,7 +140,7 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
 	return true;
 }
 
-int hyp_ffa_init(void)
+int hyp_ffa_init(void *pages)
 {
 	struct arm_smccc_res res;
 
@@ -145,5 +161,11 @@ int hyp_ffa_init(void)
 	if (res.a2 != HOST_FFA_ID)
 		return -EINVAL;
 
+	hyp_buffers = (struct kvm_ffa_buffers) {
+		.lock = __HYP_SPIN_LOCK_UNLOCKED,
+		.tx = pages,
+		.rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
+	};
+
 	return 0;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index c4ca174..bb98630 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -29,6 +29,7 @@ static void *vmemmap_base;
 static void *vm_table_base;
 static void *hyp_pgt_base;
 static void *host_s2_pgt_base;
+static void *ffa_proxy_pages;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 static struct hyp_pool hpool;
 
@@ -58,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
 	if (!host_s2_pgt_base)
 		return -ENOMEM;
 
+	nr_pages = hyp_ffa_proxy_pages();
+	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
+	if (!ffa_proxy_pages)
+		return -ENOMEM;
+
 	return 0;
 }
 
@@ -315,7 +321,7 @@ void __noreturn __pkvm_init_finalise(void)
 	if (ret)
 		goto out;
 
-	ret = hyp_ffa_init();
+	ret = hyp_ffa_init(ffa_proxy_pages);
 	if (ret)
 		goto out;
 
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 6e9ece1..994a494 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -78,6 +78,7 @@ void __init kvm_hyp_reserve(void)
 	hyp_mem_pages += host_s2_pgtable_pages();
 	hyp_mem_pages += hyp_vm_table_pages();
 	hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
+	hyp_mem_pages += hyp_ffa_proxy_pages();
 
 	/*
 	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
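
For illustration only (not part of the patch): a minimal, standalone C sketch of
the mailbox carve-up that hyp_ffa_init() performs above. A single contiguous
allocation of hyp_ffa_proxy_pages() pages is split into a TX mailbox followed by
an RX mailbox, each KVM_FFA_MBOX_NR_PAGES pages long. The 4K PAGE_SIZE, the
plain-int stand-in for hyp_spinlock_t and the use of aligned_alloc() in place of
hyp_early_alloc_contig() are assumptions made so the sketch builds in userspace.

/* Userspace sketch of the hypervisor FF-A mailbox carve-up (assumptions noted above). */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096UL	/* assumed 4K pages */
#define KVM_FFA_MBOX_NR_PAGES	1

struct kvm_ffa_buffers {
	int lock;	/* stands in for hyp_spinlock_t */
	void *tx;
	void *rx;
};

static struct kvm_ffa_buffers hyp_buffers;

/* Mirrors hyp_ffa_proxy_pages(): one page each for the RX and TX mailboxes. */
static unsigned long hyp_ffa_proxy_pages(void)
{
	return 2 * KVM_FFA_MBOX_NR_PAGES;
}

/* Mirrors the tail of hyp_ffa_init(): TX first, RX immediately after it. */
static int hyp_ffa_init_buffers(void *pages)
{
	if (!pages)
		return -1;

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock = 0,
		.tx = pages,
		.rx = (char *)pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
	};

	return 0;
}

int main(void)
{
	/* Stands in for the hyp_early_alloc_contig() call in divide_memory_pool(). */
	void *pages = aligned_alloc(PAGE_SIZE, hyp_ffa_proxy_pages() * PAGE_SIZE);

	if (hyp_ffa_init_buffers(pages))
		return 1;

	printf("tx=%p rx=%p (%lu pages total)\n",
	       hyp_buffers.tx, hyp_buffers.rx, hyp_ffa_proxy_pages());
	free(pages);
	return 0;
}

Built with "cc -std=c11 sketch.c", this prints two addresses exactly one page
apart, matching the .tx/.rx initialisation added to hyp_ffa_init() by the patch.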