[platform/kernel/linux-starfive.git] arch/arm64/kvm/hyp/pgtable.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
4  * No bombay mix was harmed in the writing of this file.
5  *
6  * Copyright (C) 2020 Google LLC
7  * Author: Will Deacon <will@kernel.org>
8  */
9
10 #include <linux/bitfield.h>
11 #include <asm/kvm_pgtable.h>
12 #include <asm/stage2_pgtable.h>
13
14
15 #define KVM_PTE_TYPE                    BIT(1)
16 #define KVM_PTE_TYPE_BLOCK              0
17 #define KVM_PTE_TYPE_PAGE               1
18 #define KVM_PTE_TYPE_TABLE              1
19
20 #define KVM_PTE_LEAF_ATTR_LO            GENMASK(11, 2)
21
22 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
23 #define KVM_PTE_LEAF_ATTR_LO_S1_AP      GENMASK(7, 6)
24 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO   3
25 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW   1
26 #define KVM_PTE_LEAF_ATTR_LO_S1_SH      GENMASK(9, 8)
27 #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3
28 #define KVM_PTE_LEAF_ATTR_LO_S1_AF      BIT(10)
29
30 #define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
31 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R  BIT(6)
32 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W  BIT(7)
33 #define KVM_PTE_LEAF_ATTR_LO_S2_SH      GENMASK(9, 8)
34 #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3
35 #define KVM_PTE_LEAF_ATTR_LO_S2_AF      BIT(10)
36
37 #define KVM_PTE_LEAF_ATTR_HI            GENMASK(63, 51)
38
39 #define KVM_PTE_LEAF_ATTR_HI_SW         GENMASK(58, 55)
40
41 #define KVM_PTE_LEAF_ATTR_HI_S1_XN      BIT(54)
42
43 #define KVM_PTE_LEAF_ATTR_HI_S2_XN      BIT(54)
44
45 #define KVM_PTE_LEAF_ATTR_S2_PERMS      (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
46                                          KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
47                                          KVM_PTE_LEAF_ATTR_HI_S2_XN)
48
49 #define KVM_INVALID_PTE_OWNER_MASK      GENMASK(9, 2)
50 #define KVM_MAX_OWNER_ID                1
51
52 /*
53  * Used to indicate a pte for which a 'break-before-make' sequence is in
54  * progress.
55  */
56 #define KVM_INVALID_PTE_LOCKED          BIT(10)
57
58 struct kvm_pgtable_walk_data {
59         struct kvm_pgtable_walker       *walker;
60
61         const u64                       start;
62         u64                             addr;
63         const u64                       end;
64 };
65
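/*
 * Reject physical addresses that fall outside the largest PA range the
 * architecture can report (ID_AA64MMFR0_EL1.PARange).
 */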
66 static bool kvm_phys_is_valid(u64 phys)
67 {
68         return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
69 }
70
71 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
72 {
73         u64 granule = kvm_granule_size(ctx->level);
74
75         if (!kvm_level_supports_block_mapping(ctx->level))
76                 return false;
77
78         if (granule > (ctx->end - ctx->addr))
79                 return false;
80
81         if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
82                 return false;
83
84         return IS_ALIGNED(ctx->addr, granule);
85 }
86
87 static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
88 {
89         u64 shift = kvm_granule_shift(level);
90         u64 mask = BIT(PAGE_SHIFT - 3) - 1;
91
92         return (data->addr >> shift) & mask;
93 }
94
95 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
96 {
97         u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
98         u64 mask = BIT(pgt->ia_bits) - 1;
99
100         return (addr & mask) >> shift;
101 }
102
103 static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
104 {
105         struct kvm_pgtable pgt = {
106                 .ia_bits        = ia_bits,
107                 .start_level    = start_level,
108         };
109
110         return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
111 }
112
113 static bool kvm_pte_table(kvm_pte_t pte, u32 level)
114 {
115         if (level == KVM_PGTABLE_MAX_LEVELS - 1)
116                 return false;
117
118         if (!kvm_pte_valid(pte))
119                 return false;
120
121         return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
122 }
123
124 static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
125 {
126         return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
127 }
128
129 static void kvm_clear_pte(kvm_pte_t *ptep)
130 {
131         WRITE_ONCE(*ptep, 0);
132 }
133
134 static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
135 {
136         kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
137
138         pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
139         pte |= KVM_PTE_VALID;
140         return pte;
141 }
142
143 static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
144 {
145         kvm_pte_t pte = kvm_phys_to_pte(pa);
146         u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
147                                                            KVM_PTE_TYPE_BLOCK;
148
149         pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
150         pte |= FIELD_PREP(KVM_PTE_TYPE, type);
151         pte |= KVM_PTE_VALID;
152
153         return pte;
154 }
155
156 static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
157 {
158         return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
159 }
160
161 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
162                                   const struct kvm_pgtable_visit_ctx *ctx,
163                                   enum kvm_pgtable_walk_flags visit)
164 {
165         struct kvm_pgtable_walker *walker = data->walker;
166
167         /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
168         WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
169         return walker->cb(ctx, visit);
170 }
171
172 static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
173                                       int r)
174 {
175         /*
176          * Visitor callbacks return EAGAIN when the conditions that led to a
177          * fault are no longer reflected in the page tables due to a race to
178          * update a PTE. In the context of a fault handler this is interpreted
179          * as a signal to retry guest execution.
180          *
181          * Ignore the return code altogether for walkers outside a fault handler
182          * (e.g. write protecting a range of memory) and chug along with the
183          * page table walk.
184          */
185         if (r == -EAGAIN)
186                 return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
187
188         return !r;
189 }
190
191 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
192                               struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
193
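/*
 * Visit a single entry: invoke the TABLE_PRE and/or LEAF callbacks requested
 * by the walker, re-read the PTE in case a LEAF callback installed a table,
 * then descend into any child table and finish with the TABLE_POST callback.
 */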
194 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
195                                       struct kvm_pgtable_mm_ops *mm_ops,
196                                       kvm_pteref_t pteref, u32 level)
197 {
198         enum kvm_pgtable_walk_flags flags = data->walker->flags;
199         kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
200         struct kvm_pgtable_visit_ctx ctx = {
201                 .ptep   = ptep,
202                 .old    = READ_ONCE(*ptep),
203                 .arg    = data->walker->arg,
204                 .mm_ops = mm_ops,
205                 .start  = data->start,
206                 .addr   = data->addr,
207                 .end    = data->end,
208                 .level  = level,
209                 .flags  = flags,
210         };
211         int ret = 0;
212         kvm_pteref_t childp;
213         bool table = kvm_pte_table(ctx.old, level);
214
215         if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
216                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
217
218         if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
219                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
220                 ctx.old = READ_ONCE(*ptep);
221                 table = kvm_pte_table(ctx.old, level);
222         }
223
224         if (!kvm_pgtable_walk_continue(data->walker, ret))
225                 goto out;
226
227         if (!table) {
228                 data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
229                 data->addr += kvm_granule_size(level);
230                 goto out;
231         }
232
233         childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
234         ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
235         if (!kvm_pgtable_walk_continue(data->walker, ret))
236                 goto out;
237
238         if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
239                 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
240
241 out:
242         if (kvm_pgtable_walk_continue(data->walker, ret))
243                 return 0;
244
245         return ret;
246 }
247
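/*
 * Walk the entries of a single table page at @level, starting from the index
 * that covers data->addr and stopping once data->end is reached.
 */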
248 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
249                               struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
250 {
251         u32 idx;
252         int ret = 0;
253
254         if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
255                 return -EINVAL;
256
257         for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
258                 kvm_pteref_t pteref = &pgtable[idx];
259
260                 if (data->addr >= data->end)
261                         break;
262
263                 ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
264                 if (ret)
265                         break;
266         }
267
268         return ret;
269 }
270
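/*
 * Walk the (possibly concatenated) PGD pages spanning [data->addr, data->end),
 * after range-checking against the configured input address size.
 */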
271 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
272 {
273         u32 idx;
274         int ret = 0;
275         u64 limit = BIT(pgt->ia_bits);
276
277         if (data->addr > limit || data->end > limit)
278                 return -ERANGE;
279
280         if (!pgt->pgd)
281                 return -EINVAL;
282
283         for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
284                 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
285
286                 ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
287                 if (ret)
288                         break;
289         }
290
291         return ret;
292 }
293
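/*
 * Walk the range [addr, addr + size), rounded to page granularity, invoking
 * the walker callback for the entry types selected in walker->flags.
 *
 * As a purely illustrative (hypothetical) example, a caller counting valid
 * leaf entries might look like this:
 *
 *      static int count_leaves(const struct kvm_pgtable_visit_ctx *ctx,
 *                              enum kvm_pgtable_walk_flags visit)
 *      {
 *              u64 *nr = ctx->arg;
 *
 *              if (kvm_pte_valid(ctx->old))
 *                      (*nr)++;
 *              return 0;
 *      }
 *
 *      struct kvm_pgtable_walker walker = {
 *              .cb     = count_leaves,
 *              .flags  = KVM_PGTABLE_WALK_LEAF,
 *              .arg    = &nr,
 *      };
 *
 *      kvm_pgtable_walk(pgt, addr, size, &walker);
 */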
294 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
295                      struct kvm_pgtable_walker *walker)
296 {
297         struct kvm_pgtable_walk_data walk_data = {
298                 .start  = ALIGN_DOWN(addr, PAGE_SIZE),
299                 .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
300                 .end    = PAGE_ALIGN(walk_data.addr + size),
301                 .walker = walker,
302         };
303         int r;
304
305         r = kvm_pgtable_walk_begin(walker);
306         if (r)
307                 return r;
308
309         r = _kvm_pgtable_walk(pgt, &walk_data);
310         kvm_pgtable_walk_end(walker);
311
312         return r;
313 }
314
315 struct leaf_walk_data {
316         kvm_pte_t       pte;
317         u32             level;
318 };
319
320 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
321                        enum kvm_pgtable_walk_flags visit)
322 {
323         struct leaf_walk_data *data = ctx->arg;
324
325         data->pte   = ctx->old;
326         data->level = ctx->level;
327
328         return 0;
329 }
330
331 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
332                          kvm_pte_t *ptep, u32 *level)
333 {
334         struct leaf_walk_data data;
335         struct kvm_pgtable_walker walker = {
336                 .cb     = leaf_walker,
337                 .flags  = KVM_PGTABLE_WALK_LEAF,
338                 .arg    = &data,
339         };
340         int ret;
341
342         ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
343                                PAGE_SIZE, &walker);
344         if (!ret) {
345                 if (ptep)
346                         *ptep  = data.pte;
347                 if (level)
348                         *level = data.level;
349         }
350
351         return ret;
352 }
353
354 struct hyp_map_data {
355         const u64                       phys;
356         kvm_pte_t                       attr;
357 };
358
359 static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
360 {
361         bool device = prot & KVM_PGTABLE_PROT_DEVICE;
362         u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
363         kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
364         u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
365         u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
366                                                KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
367
368         if (!(prot & KVM_PGTABLE_PROT_R))
369                 return -EINVAL;
370
371         if (prot & KVM_PGTABLE_PROT_X) {
372                 if (prot & KVM_PGTABLE_PROT_W)
373                         return -EINVAL;
374
375                 if (device)
376                         return -EINVAL;
377         } else {
378                 attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
379         }
380
381         attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
382         attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
383         attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
384         attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
385         *ptep = attr;
386
387         return 0;
388 }
389
390 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
391 {
392         enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
393         u32 ap;
394
395         if (!kvm_pte_valid(pte))
396                 return prot;
397
398         if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
399                 prot |= KVM_PGTABLE_PROT_X;
400
401         ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
402         if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
403                 prot |= KVM_PGTABLE_PROT_R;
404         else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
405                 prot |= KVM_PGTABLE_PROT_RW;
406
407         return prot;
408 }
409
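/*
 * Try to install a block or page mapping at the current level. Returns false
 * if the mapping cannot be installed here, in which case the caller must
 * create a next-level table instead.
 */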
410 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
411                                     struct hyp_map_data *data)
412 {
413         u64 phys = data->phys + (ctx->addr - ctx->start);
414         kvm_pte_t new;
415
416         if (!kvm_block_mapping_supported(ctx, phys))
417                 return false;
418
419         new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
420         if (ctx->old == new)
421                 return true;
422         if (!kvm_pte_valid(ctx->old))
423                 ctx->mm_ops->get_page(ctx->ptep);
424         else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
425                 return false;
426
427         smp_store_release(ctx->ptep, new);
428         return true;
429 }
430
431 static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
432                           enum kvm_pgtable_walk_flags visit)
433 {
434         kvm_pte_t *childp, new;
435         struct hyp_map_data *data = ctx->arg;
436         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
437
438         if (hyp_map_walker_try_leaf(ctx, data))
439                 return 0;
440
441         if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
442                 return -EINVAL;
443
444         childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
445         if (!childp)
446                 return -ENOMEM;
447
448         new = kvm_init_table_pte(childp, mm_ops);
449         mm_ops->get_page(ctx->ptep);
450         smp_store_release(ctx->ptep, new);
451
452         return 0;
453 }
454
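/*
 * Install hyp stage-1 mappings for [addr, addr + size) -> phys with the given
 * protection, then synchronise so the updates are visible to the hardware
 * walker.
 */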
455 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
456                         enum kvm_pgtable_prot prot)
457 {
458         int ret;
459         struct hyp_map_data map_data = {
460                 .phys   = ALIGN_DOWN(phys, PAGE_SIZE),
461         };
462         struct kvm_pgtable_walker walker = {
463                 .cb     = hyp_map_walker,
464                 .flags  = KVM_PGTABLE_WALK_LEAF,
465                 .arg    = &map_data,
466         };
467
468         ret = hyp_set_prot_attr(prot, &map_data.attr);
469         if (ret)
470                 return ret;
471
472         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
473         dsb(ishst);
474         isb();
475         return ret;
476 }
477
478 static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
479                             enum kvm_pgtable_walk_flags visit)
480 {
481         kvm_pte_t *childp = NULL;
482         u64 granule = kvm_granule_size(ctx->level);
483         u64 *unmapped = ctx->arg;
484         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
485
486         if (!kvm_pte_valid(ctx->old))
487                 return -EINVAL;
488
489         if (kvm_pte_table(ctx->old, ctx->level)) {
490                 childp = kvm_pte_follow(ctx->old, mm_ops);
491
492                 if (mm_ops->page_count(childp) != 1)
493                         return 0;
494
495                 kvm_clear_pte(ctx->ptep);
496                 dsb(ishst);
497                 __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
498         } else {
499                 if (ctx->end - ctx->addr < granule)
500                         return -EINVAL;
501
502                 kvm_clear_pte(ctx->ptep);
503                 dsb(ishst);
504                 __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
505                 *unmapped += granule;
506         }
507
508         dsb(ish);
509         isb();
510         mm_ops->put_page(ctx->ptep);
511
512         if (childp)
513                 mm_ops->put_page(childp);
514
515         return 0;
516 }
517
518 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
519 {
520         u64 unmapped = 0;
521         struct kvm_pgtable_walker walker = {
522                 .cb     = hyp_unmap_walker,
523                 .arg    = &unmapped,
524                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
525         };
526
527         if (!pgt->mm_ops->page_count)
528                 return 0;
529
530         kvm_pgtable_walk(pgt, addr, size, &walker);
531         return unmapped;
532 }
533
534 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
535                          struct kvm_pgtable_mm_ops *mm_ops)
536 {
537         u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
538
539         pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
540         if (!pgt->pgd)
541                 return -ENOMEM;
542
543         pgt->ia_bits            = va_bits;
544         pgt->start_level        = KVM_PGTABLE_MAX_LEVELS - levels;
545         pgt->mm_ops             = mm_ops;
546         pgt->mmu                = NULL;
547         pgt->force_pte_cb       = NULL;
548
549         return 0;
550 }
551
552 static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
553                            enum kvm_pgtable_walk_flags visit)
554 {
555         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
556
557         if (!kvm_pte_valid(ctx->old))
558                 return 0;
559
560         mm_ops->put_page(ctx->ptep);
561
562         if (kvm_pte_table(ctx->old, ctx->level))
563                 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
564
565         return 0;
566 }
567
568 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
569 {
570         struct kvm_pgtable_walker walker = {
571                 .cb     = hyp_free_walker,
572                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
573         };
574
575         WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
576         pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
577         pgt->pgd = NULL;
578 }
579
580 struct stage2_map_data {
581         const u64                       phys;
582         kvm_pte_t                       attr;
583         u8                              owner_id;
584
585         kvm_pte_t                       *anchor;
586         kvm_pte_t                       *childp;
587
588         struct kvm_s2_mmu               *mmu;
589         void                            *memcache;
590
591         /* Force mappings to page granularity */
592         bool                            force_pte;
593 };
594
595 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
596 {
597         u64 vtcr = VTCR_EL2_FLAGS;
598         u8 lvls;
599
600         vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
601         vtcr |= VTCR_EL2_T0SZ(phys_shift);
602         /*
603          * Use a minimum of two page-table levels to prevent splitting
604          * host PMD huge pages at stage-2.
605          */
606         lvls = stage2_pgtable_levels(phys_shift);
607         if (lvls < 2)
608                 lvls = 2;
609         vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
610
611 #ifdef CONFIG_ARM64_HW_AFDBM
612         /*
613          * Enable the Hardware Access Flag management, unconditionally
614          * on all CPUs. The feature is RES0 on CPUs without the support
615          * and must be ignored by those CPUs.
616          */
617         vtcr |= VTCR_EL2_HA;
618 #endif /* CONFIG_ARM64_HW_AFDBM */
619
620         /* Set the vmid bits */
621         vtcr |= (get_vmid_bits(mmfr1) == 16) ?
622                 VTCR_EL2_VS_16BIT :
623                 VTCR_EL2_VS_8BIT;
624
625         return vtcr;
626 }
627
628 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
629 {
630         if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
631                 return false;
632
633         return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
634 }
635
636 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
637
638 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
639                                 kvm_pte_t *ptep)
640 {
641         bool device = prot & KVM_PGTABLE_PROT_DEVICE;
642         kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
643                             KVM_S2_MEMATTR(pgt, NORMAL);
644         u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
645
646         if (!(prot & KVM_PGTABLE_PROT_X))
647                 attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
648         else if (device)
649                 return -EINVAL;
650
651         if (prot & KVM_PGTABLE_PROT_R)
652                 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
653
654         if (prot & KVM_PGTABLE_PROT_W)
655                 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
656
657         attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
658         attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
659         attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
660         *ptep = attr;
661
662         return 0;
663 }
664
665 enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
666 {
667         enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
668
669         if (!kvm_pte_valid(pte))
670                 return prot;
671
672         if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
673                 prot |= KVM_PGTABLE_PROT_R;
674         if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
675                 prot |= KVM_PGTABLE_PROT_W;
676         if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
677                 prot |= KVM_PGTABLE_PROT_X;
678
679         return prot;
680 }
681
682 static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
683 {
684         if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
685                 return true;
686
687         return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
688 }
689
690 static bool stage2_pte_is_counted(kvm_pte_t pte)
691 {
692         /*
693          * The refcount tracks valid entries as well as invalid entries that
694          * encode ownership of a page by an entity other than the page-table
695          * owner, whose id is 0.
696          */
697         return !!pte;
698 }
699
700 static bool stage2_pte_is_locked(kvm_pte_t pte)
701 {
702         return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
703 }
704
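/*
 * Update a PTE: walkers with exclusive access can write directly, whereas
 * shared walkers must cmpxchg() against the value observed in ctx->old.
 */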
705 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
706 {
707         if (!kvm_pgtable_walk_shared(ctx)) {
708                 WRITE_ONCE(*ctx->ptep, new);
709                 return true;
710         }
711
712         return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
713 }
714
715 /**
716  * stage2_try_break_pte() - Invalidates a pte according to the
717  *                          'break-before-make' requirements of the
718  *                          architecture.
719  *
720  * @ctx: context of the visited pte.
721  * @mmu: stage-2 mmu
722  *
723  * Returns: true if the pte was successfully broken.
724  *
725  * If the removed pte was valid, performs the necessary serialization and TLB
726  * invalidation for the old value. For counted ptes, drops the reference count
727  * on the containing table page.
728  */
729 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
730                                  struct kvm_s2_mmu *mmu)
731 {
732         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
733
734         if (stage2_pte_is_locked(ctx->old)) {
735                 /*
736                  * Should never occur if this walker has exclusive access to the
737                  * page tables.
738                  */
739                 WARN_ON(!kvm_pgtable_walk_shared(ctx));
740                 return false;
741         }
742
743         if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
744                 return false;
745
746         /*
747          * Perform the appropriate TLB invalidation based on the evicted pte
748          * value (if any).
749          */
750         if (kvm_pte_table(ctx->old, ctx->level))
751                 kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
752         else if (kvm_pte_valid(ctx->old))
753                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
754
755         if (stage2_pte_is_counted(ctx->old))
756                 mm_ops->put_page(ctx->ptep);
757
758         return true;
759 }
760
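/*
 * Complete a break-before-make sequence by installing the new PTE over the
 * KVM_INVALID_PTE_LOCKED marker, taking a reference for counted entries.
 */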
761 static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
762 {
763         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
764
765         WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
766
767         if (stage2_pte_is_counted(new))
768                 mm_ops->get_page(ctx->ptep);
769
770         smp_store_release(ctx->ptep, new);
771 }
772
773 static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
774                            struct kvm_pgtable_mm_ops *mm_ops)
775 {
776         /*
777          * Clear the existing PTE, and perform break-before-make with
778          * TLB maintenance if it was valid.
779          */
780         if (kvm_pte_valid(ctx->old)) {
781                 kvm_clear_pte(ctx->ptep);
782                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
783         }
784
785         mm_ops->put_page(ctx->ptep);
786 }
787
788 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
789 {
790         u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
791         return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
792 }
793
794 static bool stage2_pte_executable(kvm_pte_t pte)
795 {
796         return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
797 }
798
799 static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
800                                        const struct stage2_map_data *data)
801 {
802         u64 phys = data->phys;
803
804         /*
805          * Stage-2 walks to update ownership data are communicated to the map
806          * walker using an invalid PA. Avoid offsetting an already invalid PA,
807          * which could overflow and make the address valid again.
808          */
809         if (!kvm_phys_is_valid(phys))
810                 return phys;
811
812         /*
813          * Otherwise, work out the correct PA based on how far the walk has
814          * gotten.
815          */
816         return phys + (ctx->addr - ctx->start);
817 }
818
819 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
820                                         struct stage2_map_data *data)
821 {
822         u64 phys = stage2_map_walker_phys_addr(ctx, data);
823
824         if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
825                 return false;
826
827         return kvm_block_mapping_supported(ctx, phys);
828 }
829
830 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
831                                       struct stage2_map_data *data)
832 {
833         kvm_pte_t new;
834         u64 phys = stage2_map_walker_phys_addr(ctx, data);
835         u64 granule = kvm_granule_size(ctx->level);
836         struct kvm_pgtable *pgt = data->mmu->pgt;
837         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
838
839         if (!stage2_leaf_mapping_allowed(ctx, data))
840                 return -E2BIG;
841
842         if (kvm_phys_is_valid(phys))
843                 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
844         else
845                 new = kvm_init_invalid_leaf_owner(data->owner_id);
846
847         /*
848          * Skip updating the PTE if we are trying to recreate the exact
849          * same mapping or only change the access permissions. Instead,
850          * the vCPU will exit from the guest one more time if still needed
851          * and then go through the path of relaxing permissions.
852          */
853         if (!stage2_pte_needs_update(ctx->old, new))
854                 return -EAGAIN;
855
856         if (!stage2_try_break_pte(ctx, data->mmu))
857                 return -EAGAIN;
858
859         /* Perform CMOs before installation of the guest stage-2 PTE */
860         if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
861                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
862                                                 granule);
863
864         if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
865                 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
866
867         stage2_make_pte(ctx, new);
868
869         return 0;
870 }
871
872 static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
873                                      struct stage2_map_data *data)
874 {
875         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
876         kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
877         int ret;
878
879         if (!stage2_leaf_mapping_allowed(ctx, data))
880                 return 0;
881
882         ret = stage2_map_walker_try_leaf(ctx, data);
883         if (ret)
884                 return ret;
885
886         mm_ops->free_removed_table(childp, ctx->level);
887         return 0;
888 }
889
890 static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
891                                 struct stage2_map_data *data)
892 {
893         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
894         kvm_pte_t *childp, new;
895         int ret;
896
897         ret = stage2_map_walker_try_leaf(ctx, data);
898         if (ret != -E2BIG)
899                 return ret;
900
901         if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
902                 return -EINVAL;
903
904         if (!data->memcache)
905                 return -ENOMEM;
906
907         childp = mm_ops->zalloc_page(data->memcache);
908         if (!childp)
909                 return -ENOMEM;
910
911         if (!stage2_try_break_pte(ctx, data->mmu)) {
912                 mm_ops->put_page(childp);
913                 return -EAGAIN;
914         }
915
916         /*
917          * If we've run into an existing block mapping then replace it with
918          * a table. Accesses beyond 'end' that fall within the new table
919          * will be mapped lazily.
920          */
921         new = kvm_init_table_pte(childp, mm_ops);
922         stage2_make_pte(ctx, new);
923
924         return 0;
925 }
926
927 /*
928  * The TABLE_PRE callback runs for table entries on the way down, looking
929  * for table entries which we could conceivably replace with a block entry
930  * for this mapping. If it finds one it replaces the entry and calls
931  * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
932  *
933  * Otherwise, the LEAF callback performs the mapping at the existing leaves
934  * instead.
935  */
936 static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
937                              enum kvm_pgtable_walk_flags visit)
938 {
939         struct stage2_map_data *data = ctx->arg;
940
941         switch (visit) {
942         case KVM_PGTABLE_WALK_TABLE_PRE:
943                 return stage2_map_walk_table_pre(ctx, data);
944         case KVM_PGTABLE_WALK_LEAF:
945                 return stage2_map_walk_leaf(ctx, data);
946         default:
947                 return -EINVAL;
948         }
949 }
950
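/*
 * Create stage-2 mappings for [addr, addr + size) -> phys with the requested
 * protection, allocating intermediate tables from the @mc memcache as needed.
 */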
951 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
952                            u64 phys, enum kvm_pgtable_prot prot,
953                            void *mc, enum kvm_pgtable_walk_flags flags)
954 {
955         int ret;
956         struct stage2_map_data map_data = {
957                 .phys           = ALIGN_DOWN(phys, PAGE_SIZE),
958                 .mmu            = pgt->mmu,
959                 .memcache       = mc,
960                 .force_pte      = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
961         };
962         struct kvm_pgtable_walker walker = {
963                 .cb             = stage2_map_walker,
964                 .flags          = flags |
965                                   KVM_PGTABLE_WALK_TABLE_PRE |
966                                   KVM_PGTABLE_WALK_LEAF,
967                 .arg            = &map_data,
968         };
969
970         if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
971                 return -EINVAL;
972
973         ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
974         if (ret)
975                 return ret;
976
977         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
978         dsb(ishst);
979         return ret;
980 }
981
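/*
 * Annotate [addr, addr + size) as owned by @owner_id, using invalid but
 * refcounted PTEs so that the ownership can be tracked without a mapping.
 */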
982 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
983                                  void *mc, u8 owner_id)
984 {
985         int ret;
986         struct stage2_map_data map_data = {
987                 .phys           = KVM_PHYS_INVALID,
988                 .mmu            = pgt->mmu,
989                 .memcache       = mc,
990                 .owner_id       = owner_id,
991                 .force_pte      = true,
992         };
993         struct kvm_pgtable_walker walker = {
994                 .cb             = stage2_map_walker,
995                 .flags          = KVM_PGTABLE_WALK_TABLE_PRE |
996                                   KVM_PGTABLE_WALK_LEAF,
997                 .arg            = &map_data,
998         };
999
1000         if (owner_id > KVM_MAX_OWNER_ID)
1001                 return -EINVAL;
1002
1003         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1004         return ret;
1005 }
1006
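/*
 * Unmap a leaf entry, or an entire table once it is no longer referenced,
 * cleaning and invalidating the data cache for cacheable pages when FWB is
 * not available.
 */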
1007 static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
1008                                enum kvm_pgtable_walk_flags visit)
1009 {
1010         struct kvm_pgtable *pgt = ctx->arg;
1011         struct kvm_s2_mmu *mmu = pgt->mmu;
1012         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1013         kvm_pte_t *childp = NULL;
1014         bool need_flush = false;
1015
1016         if (!kvm_pte_valid(ctx->old)) {
1017                 if (stage2_pte_is_counted(ctx->old)) {
1018                         kvm_clear_pte(ctx->ptep);
1019                         mm_ops->put_page(ctx->ptep);
1020                 }
1021                 return 0;
1022         }
1023
1024         if (kvm_pte_table(ctx->old, ctx->level)) {
1025                 childp = kvm_pte_follow(ctx->old, mm_ops);
1026
1027                 if (mm_ops->page_count(childp) != 1)
1028                         return 0;
1029         } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1030                 need_flush = !stage2_has_fwb(pgt);
1031         }
1032
1033         /*
1034          * This is similar to the map() path in that we unmap the entire
1035          * block entry and rely on the remaining portions being faulted
1036          * back lazily.
1037          */
1038         stage2_put_pte(ctx, mmu, mm_ops);
1039
1040         if (need_flush && mm_ops->dcache_clean_inval_poc)
1041                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1042                                                kvm_granule_size(ctx->level));
1043
1044         if (childp)
1045                 mm_ops->put_page(childp);
1046
1047         return 0;
1048 }
1049
1050 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
1051 {
1052         struct kvm_pgtable_walker walker = {
1053                 .cb     = stage2_unmap_walker,
1054                 .arg    = pgt,
1055                 .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
1056         };
1057
1058         return kvm_pgtable_walk(pgt, addr, size, &walker);
1059 }
1060
1061 struct stage2_attr_data {
1062         kvm_pte_t                       attr_set;
1063         kvm_pte_t                       attr_clr;
1064         kvm_pte_t                       pte;
1065         u32                             level;
1066 };
1067
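/*
 * Update the attribute bits of a valid leaf entry. Returns -EAGAIN for
 * invalid entries or when a racing update is detected, so that faulting
 * walkers can retry.
 */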
1068 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
1069                               enum kvm_pgtable_walk_flags visit)
1070 {
1071         kvm_pte_t pte = ctx->old;
1072         struct stage2_attr_data *data = ctx->arg;
1073         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1074
1075         if (!kvm_pte_valid(ctx->old))
1076                 return -EAGAIN;
1077
1078         data->level = ctx->level;
1079         data->pte = pte;
1080         pte &= ~data->attr_clr;
1081         pte |= data->attr_set;
1082
1083         /*
1084          * We may race with the CPU trying to set the access flag here,
1085          * but worst-case the access flag update gets lost and will be
1086          * set on the next access instead.
1087          */
1088         if (data->pte != pte) {
1089                 /*
1090                  * Invalidate instruction cache before updating the guest
1091                  * stage-2 PTE if we are going to add executable permission.
1092                  */
1093                 if (mm_ops->icache_inval_pou &&
1094                     stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1095                         mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
1096                                                   kvm_granule_size(ctx->level));
1097
1098                 if (!stage2_try_set_pte(ctx, pte))
1099                         return -EAGAIN;
1100         }
1101
1102         return 0;
1103 }
1104
1105 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
1106                                     u64 size, kvm_pte_t attr_set,
1107                                     kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
1108                                     u32 *level, enum kvm_pgtable_walk_flags flags)
1109 {
1110         int ret;
1111         kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
1112         struct stage2_attr_data data = {
1113                 .attr_set       = attr_set & attr_mask,
1114                 .attr_clr       = attr_clr & attr_mask,
1115         };
1116         struct kvm_pgtable_walker walker = {
1117                 .cb             = stage2_attr_walker,
1118                 .arg            = &data,
1119                 .flags          = flags | KVM_PGTABLE_WALK_LEAF,
1120         };
1121
1122         ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1123         if (ret)
1124                 return ret;
1125
1126         if (orig_pte)
1127                 *orig_pte = data.pte;
1128
1129         if (level)
1130                 *level = data.level;
1131         return 0;
1132 }
1133
1134 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
1135 {
1136         return stage2_update_leaf_attrs(pgt, addr, size, 0,
1137                                         KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
1138                                         NULL, NULL, 0);
1139 }
1140
1141 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
1142 {
1143         kvm_pte_t pte = 0;
1144         int ret;
1145
1146         ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1147                                        &pte, NULL,
1148                                        KVM_PGTABLE_WALK_HANDLE_FAULT |
1149                                        KVM_PGTABLE_WALK_SHARED);
1150         if (!ret)
1151                 dsb(ishst);
1152
1153         return pte;
1154 }
1155
1156 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
1157 {
1158         kvm_pte_t pte = 0;
1159         stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
1160                                  &pte, NULL, 0);
1161         /*
1162          * "But where's the TLBI?!", you scream.
1163          * "Over in the core code", I sigh.
1164          *
1165          * See the '->clear_flush_young()' callback on the KVM mmu notifier.
1166          */
1167         return pte;
1168 }
1169
1170 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
1171 {
1172         kvm_pte_t pte = 0;
1173         stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
1174         return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
1175 }
1176
1177 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
1178                                    enum kvm_pgtable_prot prot)
1179 {
1180         int ret;
1181         u32 level;
1182         kvm_pte_t set = 0, clr = 0;
1183
1184         if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
1185                 return -EINVAL;
1186
1187         if (prot & KVM_PGTABLE_PROT_R)
1188                 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
1189
1190         if (prot & KVM_PGTABLE_PROT_W)
1191                 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
1192
1193         if (prot & KVM_PGTABLE_PROT_X)
1194                 clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
1195
1196         ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1197                                        KVM_PGTABLE_WALK_HANDLE_FAULT |
1198                                        KVM_PGTABLE_WALK_SHARED);
1199         if (!ret)
1200                 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
1201         return ret;
1202 }
1203
1204 static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
1205                                enum kvm_pgtable_walk_flags visit)
1206 {
1207         struct kvm_pgtable *pgt = ctx->arg;
1208         struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1209
1210         if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1211                 return 0;
1212
1213         if (mm_ops->dcache_clean_inval_poc)
1214                 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1215                                                kvm_granule_size(ctx->level));
1216         return 0;
1217 }
1218
1219 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
1220 {
1221         struct kvm_pgtable_walker walker = {
1222                 .cb     = stage2_flush_walker,
1223                 .flags  = KVM_PGTABLE_WALK_LEAF,
1224                 .arg    = pgt,
1225         };
1226
1227         if (stage2_has_fwb(pgt))
1228                 return 0;
1229
1230         return kvm_pgtable_walk(pgt, addr, size, &walker);
1231 }
1232
1233
1234 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
1235                               struct kvm_pgtable_mm_ops *mm_ops,
1236                               enum kvm_pgtable_stage2_flags flags,
1237                               kvm_pgtable_force_pte_cb_t force_pte_cb)
1238 {
1239         size_t pgd_sz;
1240         u64 vtcr = mmu->arch->vtcr;
1241         u32 ia_bits = VTCR_EL2_IPA(vtcr);
1242         u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1243         u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1244
1245         pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1246         pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1247         if (!pgt->pgd)
1248                 return -ENOMEM;
1249
1250         pgt->ia_bits            = ia_bits;
1251         pgt->start_level        = start_level;
1252         pgt->mm_ops             = mm_ops;
1253         pgt->mmu                = mmu;
1254         pgt->flags              = flags;
1255         pgt->force_pte_cb       = force_pte_cb;
1256
1257         /* Ensure zeroed PGD pages are visible to the hardware walker */
1258         dsb(ishst);
1259         return 0;
1260 }
1261
1262 size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
1263 {
1264         u32 ia_bits = VTCR_EL2_IPA(vtcr);
1265         u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1266         u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1267
1268         return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1269 }
1270
1271 static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
1272                               enum kvm_pgtable_walk_flags visit)
1273 {
1274         struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1275
1276         if (!stage2_pte_is_counted(ctx->old))
1277                 return 0;
1278
1279         mm_ops->put_page(ctx->ptep);
1280
1281         if (kvm_pte_table(ctx->old, ctx->level))
1282                 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
1283
1284         return 0;
1285 }
1286
1287 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1288 {
1289         size_t pgd_sz;
1290         struct kvm_pgtable_walker walker = {
1291                 .cb     = stage2_free_walker,
1292                 .flags  = KVM_PGTABLE_WALK_LEAF |
1293                           KVM_PGTABLE_WALK_TABLE_POST,
1294         };
1295
1296         WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1297         pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1298         pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1299         pgt->pgd = NULL;
1300 }
1301
1302 void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
1303 {
1304         kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
1305         struct kvm_pgtable_walker walker = {
1306                 .cb     = stage2_free_walker,
1307                 .flags  = KVM_PGTABLE_WALK_LEAF |
1308                           KVM_PGTABLE_WALK_TABLE_POST,
1309         };
1310         struct kvm_pgtable_walk_data data = {
1311                 .walker = &walker,
1312
1313                 /*
1314                  * At this point the IPA really doesn't matter, as the page
1315                  * table being traversed has already been removed from stage-2.
1316                  * Set an appropriate range to cover the entire page table.
1317                  */
1318                 .addr   = 0,
1319                 .end    = kvm_granule_size(level),
1320         };
1321
1322         WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1323 }