// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                                       unsigned long end, bool may_block)
{
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
        struct gfn_to_pfn_cache *gpc;
        bool evict_vcpus = false;

        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
                write_lock_irq(&gpc->lock);

                /* Only a single page so no need to care about length */
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
                        gpc->valid = false;

                        /*
                         * If a guest vCPU could be using the physical address,
                         * it needs to be forced out of guest mode.
                         */
                        if (gpc->usage & KVM_GUEST_USES_PFN) {
                                if (!evict_vcpus) {
                                        evict_vcpus = true;
                                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                                }
                                __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
                        }
                }
                write_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);

        if (evict_vcpus) {
                /*
                 * KVM needs to ensure the vCPU is fully out of guest context
                 * before allowing the invalidation to continue.
                 */
                unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
                bool called;

                /*
                 * If the OOM reaper is active, then all vCPUs should have
                 * been stopped already, so perform the request without
                 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
                 */
                if (!may_block)
                        req &= ~KVM_REQUEST_WAIT;

                called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

                WARN_ON_ONCE(called && !may_block);
        }
}

bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                gpa_t gpa, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);

        if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
                return false;

        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva))
                return false;

        if (!gpc->valid)
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
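
/*
 * Illustrative only: a minimal sketch of the assumed caller-side pattern that
 * pairs check() with refresh().  This is not code from this file; the example
 * function, its parameters and the error handling are placeholders.
 */
#if 0
/* Hypothetical caller; kvm, gpc, gpa and len are set up elsewhere. */
static void example_read_guest_page(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                    gpa_t gpa, unsigned long len)
{
        read_lock(&gpc->lock);
        while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
                read_unlock(&gpc->lock);

                if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len))
                        return;         /* placeholder error handling */

                read_lock(&gpc->lock);
        }

        /* Access guest memory through gpc->khva while the lock is held. */

        read_unlock(&gpc->lock);
}
#endif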

static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
        /* Unmap the old pfn/page if it was mapped before. */
        if (!is_error_noslot_pfn(pfn) && khva) {
                if (pfn_valid(pfn))
                        kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
                else
                        memunmap(khva);
#endif
        }
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
        /*
         * mn_active_invalidate_count acts for all intents and purposes
         * like mmu_invalidate_in_progress here; but the latter cannot
         * be used here because the invalidation of caches in the
         * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
         * is elevated.
         *
         * Note, it does not matter that mn_active_invalidate_count
         * is not protected by gpc->lock.  It is guaranteed to
         * be elevated before the mmu_notifier acquires gpc->lock, and
         * isn't dropped until after mmu_invalidate_seq is updated.
         */
        if (kvm->mn_active_invalidate_count)
                return true;

        /*
         * Ensure mn_active_invalidate_count is read before
         * mmu_invalidate_seq.  This pairs with the smp_wmb() in
         * mmu_notifier_invalidate_range_end() to guarantee either the
         * old (non-zero) value of mn_active_invalidate_count or the
         * new (incremented) value of mmu_invalidate_seq is observed.
         */
        smp_rmb();
        return kvm->mmu_invalidate_seq != mmu_seq;
}

static int hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
        /* Note, the new page offset may be different than the old! */
        void *old_khva = gpc->khva - offset_in_page(gpc->khva);
        kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
        void *new_khva = NULL;
        unsigned long mmu_seq;

        lockdep_assert_held(&gpc->refresh_lock);

        lockdep_assert_held_write(&gpc->lock);

        /*
         * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
         * assets have already been updated, so a concurrent check() from a
         * different task may not fail the gpa/uhva/generation checks.
         */
        gpc->valid = false;

        do {
                mmu_seq = kvm->mmu_invalidate_seq;
                smp_rmb();

                write_unlock_irq(&gpc->lock);

                /*
                 * If the previous iteration "failed" due to an mmu_notifier
                 * event, release the pfn and unmap the kernel virtual address
                 * from the previous attempt.  Unmapping might sleep, so this
                 * needs to be done after dropping the lock.  Opportunistically
                 * check for resched while the lock isn't held.
                 */
                if (new_pfn != KVM_PFN_ERR_FAULT) {
                        /*
                         * Keep the mapping if the previous iteration reused
                         * the existing mapping and didn't create a new one.
                         */
                        if (new_khva != old_khva)
                                gpc_unmap_khva(kvm, new_pfn, new_khva);

                        kvm_release_pfn_clean(new_pfn);

                        cond_resched();
                }

                /* We always request a writeable mapping */
                new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
                        goto out_error;

                /*
                 * Obtain a new kernel mapping if KVM itself will access the
                 * pfn.  Note, kmap() and memremap() can both sleep, so this
                 * too must be done outside of gpc->lock!
                 */
                if (gpc->usage & KVM_HOST_USES_PFN) {
                        if (new_pfn == gpc->pfn) {
                                new_khva = old_khva;
                        } else if (pfn_valid(new_pfn)) {
                                new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
                        } else {
                                new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
                        }
                        if (!new_khva) {
                                kvm_release_pfn_clean(new_pfn);
                                goto out_error;
                        }
                }

                write_lock_irq(&gpc->lock);

                /*
                 * Other tasks must wait for _this_ refresh to complete before
                 * attempting to refresh.
                 */
                WARN_ON_ONCE(gpc->valid);
        } while (mmu_notifier_retry_cache(kvm, mmu_seq));

        gpc->valid = true;
        gpc->pfn = new_pfn;
        gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

        /*
         * Put the reference to the _new_ pfn.  The pfn is now tracked by the
         * cache and can be safely migrated, swapped, etc... as the cache will
         * invalidate any mappings in response to relevant mmu_notifier events.
         */
        kvm_release_pfn_clean(new_pfn);

        return 0;

out_error:
        write_lock_irq(&gpc->lock);

        return -EFAULT;
}

int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                 gpa_t gpa, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        unsigned long page_offset = gpa & ~PAGE_MASK;
        kvm_pfn_t old_pfn, new_pfn;
        unsigned long old_uhva;
        void *old_khva;
        int ret = 0;

        /*
         * It must fit within a single page. The 'len' argument is
         * only to enforce that.
         */
        if (page_offset + len > PAGE_SIZE)
                return -EINVAL;

        /*
         * If another task is refreshing the cache, wait for it to complete.
         * There is no guarantee that concurrent refreshes will see the same
         * gpa, memslots generation, etc..., so they must be fully serialized.
         */
        mutex_lock(&gpc->refresh_lock);

        write_lock_irq(&gpc->lock);

        old_pfn = gpc->pfn;
        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_uhva = gpc->uhva;

        /* If the userspace HVA is invalid, refresh that first */
        if (gpc->gpa != gpa || gpc->generation != slots->generation ||
            kvm_is_error_hva(gpc->uhva)) {
                gfn_t gfn = gpa_to_gfn(gpa);

                gpc->gpa = gpa;
                gpc->generation = slots->generation;
                gpc->memslot = __gfn_to_memslot(slots, gfn);
                gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

                if (kvm_is_error_hva(gpc->uhva)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        /*
         * If the userspace HVA changed or the PFN was already invalid,
         * drop the lock and do the HVA to PFN lookup again.
         */
        if (!gpc->valid || old_uhva != gpc->uhva) {
                ret = hva_to_pfn_retry(kvm, gpc);
        } else {
                /* If the HVA→PFN mapping was already valid, don't unmap it. */
                old_pfn = KVM_PFN_ERR_FAULT;
                old_khva = NULL;
        }

 out:
        /*
         * Invalidate the cache and purge the pfn/khva if the refresh failed.
         * Some/all of the uhva, gpa, and memslot generation info may still be
         * valid; leave it as is.
         */
        if (ret) {
                gpc->valid = false;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->khva = NULL;
        }

        /* Snapshot the new pfn before dropping the lock! */
        new_pfn = gpc->pfn;

        write_unlock_irq(&gpc->lock);

        mutex_unlock(&gpc->refresh_lock);

        if (old_pfn != new_pfn)
                gpc_unmap_khva(kvm, old_pfn, old_khva);

        return ret;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);

void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
        void *old_khva;
        kvm_pfn_t old_pfn;

        mutex_lock(&gpc->refresh_lock);
        write_lock_irq(&gpc->lock);

        gpc->valid = false;

        old_khva = gpc->khva - offset_in_page(gpc->khva);
        old_pfn = gpc->pfn;

        /*
         * We can leave the GPA → uHVA map cache intact but the PFN
         * lookup will need to be redone even for the same page.
         */
        gpc->khva = NULL;
        gpc->pfn = KVM_PFN_ERR_FAULT;

        write_unlock_irq(&gpc->lock);
        mutex_unlock(&gpc->refresh_lock);

        gpc_unmap_khva(kvm, old_pfn, old_khva);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);

int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                              struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
                              gpa_t gpa, unsigned long len)
{
        WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);

        if (!gpc->active) {
                rwlock_init(&gpc->lock);
                mutex_init(&gpc->refresh_lock);

                gpc->khva = NULL;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->uhva = KVM_HVA_ERR_BAD;
                gpc->vcpu = vcpu;
                gpc->usage = usage;
                gpc->valid = false;
                gpc->active = true;

                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);
        }
        return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init);

void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
        if (gpc->active) {
                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);

                kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
                gpc->active = false;
        }
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy);
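
/*
 * Illustrative only: a minimal sketch of the assumed cache lifecycle, from
 * init through use to destroy.  Not code from this file; the example function,
 * the on-stack cache (real callers embed it in a longer-lived structure) and
 * the error handling are placeholders.
 */
#if 0
static int example_cache_lifecycle(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                   gpa_t gpa, unsigned long len)
{
        struct gfn_to_pfn_cache cache = {};
        int ret;

        /* Register the cache on kvm->gpc_list and map the initial gpa. */
        ret = kvm_gfn_to_pfn_cache_init(kvm, &cache, vcpu, KVM_HOST_USES_PFN,
                                        gpa, len);
        if (ret)
                return ret;

        /* ... use the cache via check()/refresh() as sketched earlier ... */

        /* Unmap and unlink the cache when its owner goes away. */
        kvm_gfn_to_pfn_cache_destroy(kvm, &cache);

        return 0;
}
#endif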