2 * The pagetable code, on the other hand, still shows the scars of
3 * previous encounters. It's functional, and as neat as it can be in the
4 * circumstances, but be wary, for these things are subtle and break easily.
5 * The Guest provides a virtual to physical mapping, but we can neither trust
6 * it nor use it: we verify and convert it here then point the CPU to the
7 * converted Guest pages when running the Guest.
10 /* Copyright (C) Rusty Russell IBM Corporation 2013.
11 * GPL v2 and any later version */
13 #include <linux/gfp.h>
14 #include <linux/types.h>
15 #include <linux/spinlock.h>
16 #include <linux/random.h>
17 #include <linux/percpu.h>
18 #include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"
 * We hold a reference to pages, which prevents them from being swapped.
24 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
25 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
26 * could probably consider launching Guests as non-root.
32 * We use two-level page tables for the Guest, or three-level with PAE. If
33 * you're not entirely comfortable with virtual addresses, physical addresses
34 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
35 * Table Handling" (with diagrams!).
37 * The Guest keeps page tables, but we maintain the actual ones here: these are
38 * called "shadow" page tables. Which is a very Guest-centric name: these are
39 * the real page tables the CPU uses, although we keep them up to date to
40 * reflect the Guest's. (See what I mean about weird naming? Since when do
41 * shadows reflect anything?)
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
45 * (i) Looking up a page table entry when the Guest faults,
46 * (ii) Making sure the Guest stack is mapped,
47 * (iii) Setting up a page table entry when the Guest tells us one has changed,
48 * (iv) Switching page tables,
49 * (v) Flushing (throwing away) page tables,
50 * (vi) Mapping the Switcher when the Guest is about to run,
51 * (vii) Setting up the page tables initially.
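/*
 * To make the two-level case concrete: here's how a 32-bit virtual address
 * splits up without PAE. (An illustrative sketch only: the real code uses
 * the kernel's pgd_index()/pte_index() helpers rather than this.)
 */
static inline void example_split_vaddr(unsigned long vaddr,
				       unsigned int *pgd_idx,
				       unsigned int *pte_idx,
				       unsigned long *offset)
{
	*pgd_idx = vaddr >> 22;			/* top 10 bits: PGD entry */
	*pte_idx = (vaddr >> 12) & 0x3FF;	/* next 10 bits: PTE entry */
	*offset = vaddr & 0xFFF;		/* low 12 bits: byte in page */
}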
55 * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
56 * or 512 PTE entries with PAE (2MB).
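/*
 * The arithmetic: each PTE maps one 4KB page, and a page of 4-byte entries
 * holds 1024 of them: 1024 * 4KB = 4MB. PAE entries are 8 bytes, so only
 * 512 fit in a page: 512 * 4KB = 2MB.
 */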
58 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
#ifdef CONFIG_X86_PAE
/*
 * For PAE we need the PMD index as well. We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#define SWITCHER_PMD_INDEX	(PTRS_PER_PMD - 1)

#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
71 * The page table code is curly enough to need helper functions to keep it
 * clear and clean. The kernel itself provides many of them; that's one
 * advantage of insisting that the Guest and Host use the same CONFIG_X86_PAE
 * setting.
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
78 * spgd_addr() takes the virtual address and returns a pointer to the top-level
79 * page directory entry (PGD) for that address. Since we keep track of several
80 * page tables, the "i" argument tells us which one we're interested in (it's
81 * usually the current one).
83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
85 unsigned int index = pgd_index(vaddr);
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
88 return &cpu->lg->pgdirs[i].pgdir[index];
93 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page. It then returns a pointer to the PMD entry for the
 * given address.
 */
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
99 unsigned int index = pmd_index(vaddr);
102 /* You should never call this if the PGD entry wasn't valid */
103 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
104 page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
111 * This routine then takes the page directory entry returned above, which
112 * contains the address of the page table entry (PTE) page. It then returns a
113 * pointer to the PTE entry for the given address.
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
117 #ifdef CONFIG_X86_PAE
118 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
119 pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
121 /* You should never call this if the PMD entry wasn't valid */
122 BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
124 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
125 /* You should never call this if the PGD entry wasn't valid */
126 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
129 return &page[pte_index(vaddr)];
133 * These functions are just like the above, except they access the Guest
134 * page tables. Hence they return a Guest address.
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
138 unsigned int index = vaddr >> (PGDIR_SHIFT);
139 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
142 #ifdef CONFIG_X86_PAE
143 /* Follow the PGD to the PMD. */
144 static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
146 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
147 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
148 return gpage + pmd_index(vaddr) * sizeof(pmd_t);
151 /* Follow the PMD to the PTE. */
152 static unsigned long gpte_addr(struct lg_cpu *cpu,
153 pmd_t gpmd, unsigned long vaddr)
155 unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
157 BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
158 return gpage + pte_index(vaddr) * sizeof(pte_t);
161 /* Follow the PGD to the PTE (no mid-level for !PAE). */
162 static unsigned long gpte_addr(struct lg_cpu *cpu,
163 pgd_t gpgd, unsigned long vaddr)
165 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
167 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
168 return gpage + pte_index(vaddr) * sizeof(pte_t);
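/*
 * Note that unlike spgd_addr() and friends, these return Guest-physical
 * addresses: we can't dereference them directly, but must read and write
 * them with lgread()/lgwrite(), which go via the Launcher's mapping of
 * Guest memory.
 */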
174 * get_pfn is slow: we could probably try to grab batches of pages here as
175 * an optimization (ie. pre-faulting).
179 * This routine takes a page number given by the Guest and converts it to
180 * an actual, physical page number. It can fail for several reasons: the
181 * virtual address might not be mapped by the Launcher, the write flag is set
182 * and the page is read-only, or the write flag was set and the page was
183 * shared so had to be copied, but we ran out of memory.
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
188 static unsigned long get_pfn(unsigned long virtpfn, int write)
192 /* gup me one page at this address please! */
193 if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
194 return page_to_pfn(page);
	/* This value indicates failure. */
	return -1UL;
}
201 * Converting a Guest page table entry to a shadow (ie. real) page table
202 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
208 unsigned long pfn, base, flags;
211 * The Guest sets the global flag, because it thinks that it is using
212 * PGE. We only told it to use PGE so it would tell us whether it was
213 * flushing a kernel mapping or a userspace mapping. We don't actually
214 * use the global bit, so throw it away.
216 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
218 /* The Guest's pages are offset inside the Launcher. */
219 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
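	/*
	 * For example, a mem_base of 0x10000000 gives a base of 0x10000
	 * pages, so Guest pfn 5 lives at Launcher-virtual pfn 0x10005.
	 */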
222 * We need a temporary "unsigned long" variable to hold the answer from
223 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
224 * fit in spte.pfn. get_pfn() finds the real physical number of the
225 * page, given the virtual number.
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
231 * When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them. Make sure we don't think
		 * this one is valid!
		 */
		flags = 0;
	}
237 /* Now we assemble our shadow PTE from the page number and flags. */
238 return pfn_pte(pfn, __pgprot(flags));
241 /*H:460 And to complete the chain, release_pte() looks like this: */
242 static void release_pte(pte_t pte)
245 * Remember that get_user_pages_fast() took a reference to the page, in
246 * get_pfn()? We have to put it back now.
248 if (pte_flags(pte) & _PAGE_PRESENT)
249 put_page(pte_page(pte));
253 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
255 if ((pte_flags(gpte) & _PAGE_PSE) ||
256 pte_pfn(gpte) >= cpu->lg->pfn_limit) {
		kill_guest(cpu, "bad page table entry");
		return false;
	}
	return true;
}
263 static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
265 if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
266 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
		kill_guest(cpu, "bad page directory entry");
		return false;
	}
	return true;
}
273 #ifdef CONFIG_X86_PAE
274 static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
276 if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
277 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
		kill_guest(cpu, "bad page middle directory entry");
		return false;
	}
	return true;
}
#endif
286 * This is the core routine to walk the shadow page tables and find the page
287 * table entry for a specific address.
289 * If allocate is set, then we allocate any missing levels, setting the flags
290 * on the new page directory and mid-level directories using the arguments
291 * (which are copied from the Guest's page table entries).
293 static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
294 int pgd_flags, int pmd_flags)
297 /* Mid level for PAE. */
298 #ifdef CONFIG_X86_PAE
302 /* Get top level entry. */
303 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
304 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
305 /* No shadow entry: allocate a new shadow PTE page. */
306 unsigned long ptepage;
		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;
312 ptepage = get_zeroed_page(GFP_KERNEL);
314 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return NULL;
		}
322 * And we copy the flags to the shadow PGD entry. The page
323 * number in the shadow PGD is the page we just allocated.
325 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
329 * Intel's Physical Address Extension actually uses three levels of
330 * page tables, so we need to look in the mid-level.
332 #ifdef CONFIG_X86_PAE
333 /* Now look at the mid-level shadow entry. */
334 spmd = spmd_addr(cpu, *spgd, vaddr);
336 if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
337 /* No shadow entry: allocate a new shadow PTE page. */
338 unsigned long ptepage;
		/* If they didn't want us to allocate anything, stop. */
		if (!allocate)
			return NULL;
344 ptepage = get_zeroed_page(GFP_KERNEL);
347 * This is not really the Guest's fault, but killing it is
		 * simple for this corner case.
		 */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pmd page");
			return NULL;
		}
356 * And we copy the flags to the shadow PMD entry. The page
357 * number in the shadow PMD is the page we just allocated.
359 set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
363 /* Get the pointer to the shadow PTE entry we're going to set. */
364 return spte_addr(cpu, *spgd, vaddr);
368 * (i) Looking up a page table entry when the Guest faults.
370 * We saw this call in run_guest(): when we see a page fault in the Guest, we
371 * come here. That's because we only set up the shadow page tables lazily as
372 * they're needed, so we get page faults all the time and quietly fix them up
373 * and return to the Guest without it knowing.
375 * If we fixed up the fault (ie. we mapped the address), this routine returns
376 * true. Otherwise, it was a real fault and we need to tell the Guest.
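/*
 * For reference, the caller's side of this (lguest_arch_handle_trap() in
 * x86/core.c) goes roughly like this sketch when it sees trap 14:
 *
 *	if (demand_page(cpu, cpu->arch.last_pagefault, cpu->regs->errcode))
 *		return;
 *	... otherwise, reflect the fault into the Guest's own handler ...
 */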
378 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
380 unsigned long gpte_ptr;
386 /* We never demand page the Switcher, so trying is a mistake. */
	if (vaddr >= switcher_addr)
		return false;
390 /* First step: get the top-level Guest page table entry. */
391 if (unlikely(cpu->linear_pages)) {
392 /* Faking up a linear mapping. */
393 gpgd = __pgd(CHECK_GPGD_MASK);
	} else {
		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
396 /* Toplevel not present? We can't map it in. */
		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
			return false;
401 * This kills the Guest if it has weird flags or tries to
402 * refer to a "physical" address outside the bounds.
		if (!check_gpgd(cpu, gpgd))
			return false;
	}
408 /* This "mid-level" entry is only used for non-linear, PAE mode. */
409 gpmd = __pmd(_PAGE_TABLE);
411 #ifdef CONFIG_X86_PAE
412 if (likely(!cpu->linear_pages)) {
413 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
414 /* Middle level not present? We can't map it in. */
		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
			return false;
419 * This kills the Guest if it has weird flags or tries to
420 * refer to a "physical" address outside the bounds.
		if (!check_gpmd(cpu, gpmd))
			return false;
	}
427 * OK, now we look at the lower level in the Guest page table: keep its
428 * address, because we might update it later.
430 gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
433 * OK, now we look at the lower level in the Guest page table: keep its
434 * address, because we might update it later.
436 gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
439 if (unlikely(cpu->linear_pages)) {
440 /* Linear? Make up a PTE which points to same page. */
441 gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	} else {
		/* Read the actual PTE value. */
		gpte = lgread(cpu, gpte_ptr, pte_t);
	}
447 /* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;
	 * Check they're not trying to write to a page the Guest wants
	 * read-only (errcode & 2 means it was a write).
	 */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;
	/* User access to a kernel-only page? (errcode & 4 means user access) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;
463 * Check that the Guest PTE flags are OK, and the page number is below
464 * the pfn_limit (ie. not mapping the Launcher binary).
	if (!check_gpte(cpu, gpte))
		return false;
469 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
470 gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);
474 /* Get the pointer to the shadow PTE entry we're going to set. */
	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
	if (!spte)
		return false;
480 * If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry.
	 */
	release_pte(*spte);
486 * If this is a write, we insist that the Guest page is writable (the
487 * final arg to gpte_to_spte()).
	 */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
493 * If this is a read, don't set the "writable" bit in the page
494 * table entry, even if the Guest says it's writable. That way
495 * we will come back here when a write does actually occur, so
496 * we can update the Guest's _PAGE_DIRTY flag.
498 set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
501 * Finally, we write the Guest PTE entry back: we've set the
502 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
504 if (likely(!cpu->linear_pages))
505 lgwrite(cpu, gpte_ptr, pte_t, gpte);
508 * The fault is fixed, the page table is populated, the mapping
509 * manipulated, the result returned and the code complete. A small
510 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all.
	 */
	return true;
}
517 * (ii) Making sure the Guest stack is mapped.
519 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
520 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
521 * we've seen that logic is quite long, and usually the stack pages are already
522 * mapped, so it's overkill.
524 * This is a quick version which answers the question: is this virtual address
525 * mapped by the shadow page tables, and is it writable?
527 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
532 /* You can't put your stack in the Switcher! */
	if (vaddr >= switcher_addr)
		return false;
536 /* If there's no shadow PTE, it's not writable. */
	spte = find_spte(cpu, vaddr, false, 0, 0);
	if (!spte)
		return false;
	 * Check the flags on the pte entry itself: it must be present and
	 * writable.
	 */
545 flags = pte_flags(*spte);
546 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
550 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
554 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
556 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
557 kill_guest(cpu, "bad stack page %#lx", vaddr);
561 #ifdef CONFIG_X86_PAE
562 static void release_pmd(pmd_t *spmd)
564 /* If the entry's not present, there's nothing to release. */
565 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
567 pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
568 /* For each entry in the page, we might need to release it. */
569 for (i = 0; i < PTRS_PER_PTE; i++)
570 release_pte(ptepage[i]);
571 /* Now we can free the page of PTEs */
572 free_page((long)ptepage);
573 /* And zero out the PMD entry so we never release it twice. */
574 set_pmd(spmd, __pmd(0));
578 static void release_pgd(pgd_t *spgd)
580 /* If the entry's not present, there's nothing to release. */
581 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
583 pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
585 for (i = 0; i < PTRS_PER_PMD; i++)
586 release_pmd(&pmdpage[i]);
588 /* Now we can free the page of PMDs */
589 free_page((long)pmdpage);
590 /* And zero out the PGD entry so we never release it twice. */
591 set_pgd(spgd, __pgd(0));
595 #else /* !CONFIG_X86_PAE */
597 * If we chase down the release_pgd() code, the non-PAE version looks like
598 * this. The PAE version is almost identical, but instead of calling
 * release_pte() it calls release_pmd(), which looks much like this.
601 static void release_pgd(pgd_t *spgd)
603 /* If the entry's not present, there's nothing to release. */
604 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
607 * Converting the pfn to find the actual PTE page is easy: turn
608 * the page number into a physical address, then convert to a
609 * virtual address (easy for kernel pages like this one).
611 pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
612 /* For each entry in the page, we might need to release it. */
613 for (i = 0; i < PTRS_PER_PTE; i++)
614 release_pte(ptepage[i]);
615 /* Now we can free the page of PTEs */
616 free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
624 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
625 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
626 * It simply releases every PTE page from 0 up to the Guest's kernel address.
628 static void flush_user_mappings(struct lguest *lg, int idx)
631 /* Release every pgd entry up to the kernel's address. */
632 for (i = 0; i < pgd_index(lg->kernel_address); i++)
633 release_pgd(lg->pgdirs[idx].pgdir + i);
637 * (v) Flushing (throwing away) page tables,
639 * The Guest has a hypercall to throw away the page tables: it's used when a
640 * large number of mappings have been changed.
642 void guest_pagetable_flush_user(struct lg_cpu *cpu)
644 /* Drop the userspace part of the current page table. */
645 flush_user_mappings(cpu->lg, cpu->cpu_pgd);
649 /* We walk down the guest page tables to get a guest-physical address */
650 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
654 #ifdef CONFIG_X86_PAE
658 /* Still not set up? Just map 1:1. */
	if (unlikely(cpu->linear_pages))
		return vaddr;
662 /* First step: get the top-level Guest page table entry. */
663 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
664 /* Toplevel not present? We can't map it in. */
665 if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}
670 #ifdef CONFIG_X86_PAE
671 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
672 if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}
676 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
678 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
680 if (!(pte_flags(gpte) & _PAGE_PRESENT))
681 kill_guest(cpu, "Bad address %#lx", vaddr);
683 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
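/*
 * For example, a gpte with page number 0x1234 and a vaddr offset of 0x56
 * gives Guest-physical address 0x1234000 | 0x56 = 0x1234056.
 */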
687 * We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
691 static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
694 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
695 if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
701 * And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
705 static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;
712 * We pick one entry at random to throw out. Choosing the Least
713 * Recently Used might be better, but this is easy.
715 next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
716 /* If it's never been allocated at all before, try now. */
717 if (!cpu->lg->pgdirs[next].pgdir) {
718 cpu->lg->pgdirs[next].pgdir =
719 (pgd_t *)get_zeroed_page(GFP_KERNEL);
720 /* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
			/*
			 * This is a blank page, so there are no kernel
			 * mappings: caller must map the stack!
			 */
			*blank_pgdir = 1;
		}
	}
731 /* Record which Guest toplevel this shadows. */
732 cpu->lg->pgdirs[next].gpgdir = gpgdir;
733 /* Release all the non-kernel mappings. */
734 flush_user_mappings(cpu->lg, next);
736 /* This hasn't run on any CPU at all. */
737 cpu->lg->pgdirs[next].last_host_cpu = -1;
743 * We do need the Switcher code mapped at all times, so we allocate that
744 * part of the Guest page table here. We map the Switcher code immediately,
745 * but defer mapping of the guest register page and IDT/LDT etc page until
746 * just before we run the guest in map_switcher_in_guest().
748 * We *could* do this setup in map_switcher_in_guest(), but at that point
749 * we've interrupts disabled, and allocating pages like that is fraught: we
750 * can't sleep if we need to free up some memory.
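/*
 * (In kernel terms: GFP_KERNEL allocations may sleep to reclaim memory,
 * which we can't do with interrupts off; we'd be reduced to GFP_ATOMIC,
 * which simply fails when memory is tight.)
 */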
752 static bool allocate_switcher_mapping(struct lg_cpu *cpu)
756 for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
757 pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
				       CHECK_GPGD_MASK, _PAGE_TABLE);
		if (!pte)
			return false;
763 * Map the switcher page if not already there. It might
764 * already be there because we call allocate_switcher_mapping()
765 * in guest_set_pgd() just in case it did discard our Switcher
766 * mapping, but it probably didn't.
768 if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
769 /* Get a reference to the Switcher page. */
770 get_page(lg_switcher_pages[0]);
			/* Create a read-only, executable, kernel-style PTE */
			set_pte(pte,
				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
776 cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
781 * Finally, a routine which throws away everything: all PGD entries in all
782 * the shadow page tables, including the Guest's kernel mappings. This is used
783 * when we destroy the Guest.
785 static void release_all_pagetables(struct lguest *lg)
789 /* Every shadow pagetable this Guest has */
790 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
791 if (!lg->pgdirs[i].pgdir)
794 /* Every PGD entry. */
795 for (j = 0; j < PTRS_PER_PGD; j++)
796 release_pgd(lg->pgdirs[i].pgdir + j);
797 lg->pgdirs[i].switcher_mapped = false;
798 lg->pgdirs[i].last_host_cpu = -1;
803 * We also throw away everything when a Guest tells us it's changed a kernel
804 * mapping. Since kernel mappings are in every page table, it's easiest to
805 * throw them all away. This traps the Guest in amber for a while as
806 * everything faults back in, but it's rare.
808 void guest_pagetable_clear_all(struct lg_cpu *cpu)
810 release_all_pagetables(cpu->lg);
811 /* We need the Guest kernel stack mapped again. */
812 pin_stack_pages(cpu);
813 /* And we need Switcher allocated. */
814 if (!allocate_switcher_mapping(cpu))
815 kill_guest(cpu, "Cannot populate switcher mapping");
819 * (iv) Switching page tables
821 * Now we've seen all the page table setting and manipulation, let's see
822 * what happens when the Guest changes page tables (ie. changes the top-level
823 * pgdir). This occurs on almost every context switch.
825 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
827 int newpgdir, repin = 0;
830 * The very first time they call this, we're actually running without
831 * any page tables; we've been making it up. Throw them away now.
833 if (unlikely(cpu->linear_pages)) {
834 release_all_pagetables(cpu->lg);
835 cpu->linear_pages = false;
836 /* Force allocation of a new pgdir. */
		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
	} else {
		/* Look to see if we have this one already. */
		newpgdir = find_pgdir(cpu->lg, pgtable);
	}
844 * If not, we allocate or mug an existing one: if it's a fresh one,
845 * repin gets set to 1.
847 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
848 newpgdir = new_pgdir(cpu, pgtable, &repin);
849 /* Change the current pgd index to the new one. */
850 cpu->cpu_pgd = newpgdir;
	 * If it was completely blank, we map in the Guest kernel stack and
	 * the Switcher.
	 */
	if (repin)
		pin_stack_pages(cpu);
858 if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
859 if (!allocate_switcher_mapping(cpu))
860 kill_guest(cpu, "Cannot populate switcher mapping");
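/*
 * For the curious: the Guest's side of this is just a hypercall. In
 * arch/x86/lguest/boot.c, a write to cr3 becomes something like the
 * following (a sketch from memory, not the exact code):
 *
 *	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
 *
 * and that hypercall is what lands us in guest_new_pagetable() above.
 */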
866 * Since we throw away all mappings when a kernel mapping changes, our
867 * performance sucks for guests using highmem. In fact, a guest with
868 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
869 * usually slower than a Guest with less memory.
871 * This, of course, cannot be fixed. It would take some kind of... well, I
872 * don't know, but the term "puissant code-fu" comes to mind.
 * This is the routine which actually sets the page table entry for the
877 * "idx"'th shadow page table.
879 * Normally, we can just throw out the old entry and replace it with 0: if they
880 * use it demand_page() will put the new entry in. We need to do this anyway:
881 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
882 * is read from, and _PAGE_DIRTY when it's written to.
884 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
885 * these bits on PTEs immediately anyway. This is done to save the CPU from
886 * having to update them, but it helps us the same way: if they set
887 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
888 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
890 static void do_set_pte(struct lg_cpu *cpu, int idx,
891 unsigned long vaddr, pte_t gpte)
893 /* Look up the matching shadow page directory entry. */
894 pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
895 #ifdef CONFIG_X86_PAE
899 /* If the top level isn't present, there's no entry to update. */
900 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
901 #ifdef CONFIG_X86_PAE
902 spmd = spmd_addr(cpu, *spgd, vaddr);
903 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
905 /* Otherwise, start by releasing the existing entry. */
906 pte_t *spte = spte_addr(cpu, *spgd, vaddr);
910 * If they're setting this entry as dirty or accessed,
911 * we might as well put that entry they've given us in
			 * now. This shaves 10% off a copy-on-write
			 * micro-benchmark.
			 */
915 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
				if (!check_gpte(cpu, gpte))
					return;
				set_pte(spte,
					gpte_to_spte(cpu, gpte,
						pte_flags(gpte) & _PAGE_DIRTY));
			} else {
				/*
				 * Otherwise kill it and we can demand_page()
				 * it back in later.
				 */
926 set_pte(spte, __pte(0));
928 #ifdef CONFIG_X86_PAE
935 * Updating a PTE entry is a little trickier.
937 * We keep track of several different page tables (the Guest uses one for each
938 * process, so it makes sense to cache at least a few). Each of these have
939 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
940 * all processes. So when the page table above that address changes, we update
941 * all the page tables, not just the current one. This is rare.
943 * The benefit is that when we have to track a new page table, we can keep all
944 * the kernel mappings. This speeds up context switch immensely.
946 void guest_set_pte(struct lg_cpu *cpu,
947 unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
949 /* We don't let you remap the Switcher; we need it to get back! */
950 if (vaddr >= switcher_addr) {
		kill_guest(cpu, "attempt to set pte into Switcher pages");
		return;
	}
	 * Kernel mappings must be changed on all top levels. Slow, but doesn't
	 * happen often.
	 */
959 if (vaddr >= cpu->lg->kernel_address) {
961 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
962 if (cpu->lg->pgdirs[i].pgdir)
963 do_set_pte(cpu, i, vaddr, gpte);
965 /* Is this page table one we have a shadow for? */
966 int pgdir = find_pgdir(cpu->lg, gpgdir);
967 if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
968 /* If so, do the update. */
969 do_set_pte(cpu, pgdir, vaddr, gpte);
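/*
 * Again, the Guest's side of this (arch/x86/lguest/boot.c) is a hypercall;
 * in the non-PAE case it looks roughly like this (a sketch, not the exact
 * code):
 *
 *	lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, pte_val(pte));
 *
 * The __pa(mm->pgd) there is the gpgdir argument we used above to find the
 * matching shadow.
 */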
974 * (iii) Setting up a page table entry when the Guest tells us one has changed.
976 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
977 * with the other side of page tables while we're here: what happens when the
978 * Guest asks for a page table to be updated?
980 * We already saw that demand_page() will fill in the shadow page tables when
981 * needed, so we can simply remove shadow page table entries whenever the Guest
982 * tells us they've changed. When the Guest tries to use the new entry it will
983 * fault and demand_page() will fix it up.
985 * So with that in mind here's our code to update a (top-level) PGD entry:
987 void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
	if (idx >= PTRS_PER_PGD) {
		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
			   idx, PTRS_PER_PGD);
		return;
	}
997 /* If they're talking about a page table we have a shadow for... */
998 pgdir = find_pgdir(lg, gpgdir);
999 if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
1000 /* ... throw it away. */
1001 release_pgd(lg->pgdirs[pgdir].pgdir + idx);
1002 /* That might have been the Switcher mapping, remap it. */
1003 if (!allocate_switcher_mapping(&lg->cpus[0])) {
1004 kill_guest(&lg->cpus[0],
1005 "Cannot populate switcher mapping");
1007 lg->pgdirs[pgdir].last_host_cpu = -1;
1011 #ifdef CONFIG_X86_PAE
1012 /* For setting a mid-level, we just throw everything away. It's easy. */
1013 void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
1015 guest_pagetable_clear_all(&lg->cpus[0]);
1020 * (vii) Setting up the page tables initially.
 * When a Guest is first created, we initialize a shadow page table which
1023 * we will populate on future faults. The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
1027 * We do need the Switcher to be mapped at all times, so we allocate that
1028 * part of the Guest page table here.
1030 int init_guest_pagetable(struct lguest *lg)
1032 struct lg_cpu *cpu = &lg->cpus[0];
1035 /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	if (!allocated)
		return -ENOMEM;
	/* We start with a linear mapping until the Guest sets up its own. */
1041 cpu->linear_pages = true;
1043 /* Allocate the page tables for the Switcher. */
1044 if (!allocate_switcher_mapping(cpu)) {
		release_all_pagetables(lg);
		return -ENOMEM;
	}

	return 0;
}
1052 /*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1053 void page_table_guest_data_init(struct lg_cpu *cpu)
1056 * We tell the Guest that it can't use the virtual addresses
	 * used by the Switcher. This trick is equivalent to 4GB -
	 * switcher_addr.
	 */
1060 u32 top = ~switcher_addr + 1;
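	/*
	 * For example, if switcher_addr were 0xFFC00000, top would be
	 * 0x00400000: in 32-bit arithmetic, ~x + 1 == -x == 4GB - x.
	 */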
1062 /* We get the kernel address: above this is all kernel memory. */
1063 if (get_user(cpu->lg->kernel_address,
1064 &cpu->lg->lguest_data->kernel_address)
1066 * We tell the Guest that it can't use the top virtual
1067 * addresses (used by the Switcher).
1069 || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
		return;
	}
1075 * In flush_user_mappings() we loop from 0 to
1076 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
1077 * Switcher mappings, so check that now.
1079 if (cpu->lg->kernel_address >= switcher_addr)
1080 kill_guest(cpu, "bad kernel address %#lx",
1081 cpu->lg->kernel_address);
1084 /* When a Guest dies, our cleanup is fairly simple. */
1085 void free_guest_pagetable(struct lguest *lg)
1089 /* Throw away all page table pages. */
1090 release_all_pagetables(lg);
1091 /* Now free the top levels: free_page() can handle 0 just fine. */
1092 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
1093 free_page((long)lg->pgdirs[i].pgdir);
1097 * This clears the Switcher mappings for cpu #i.
1099 static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
1101 unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
1104 /* Clear the mappings for both pages. */
1105 pte = find_spte(cpu, base, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
1109 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
	release_pte(*pte);
	set_pte(pte, __pte(0));
1115 * (vi) Mapping the Switcher when the Guest is about to run.
1117 * The Switcher and the two pages for this CPU need to be visible in the Guest
1118 * (and not the pages for other CPUs).
1120 * The pages for the pagetables have all been allocated before: we just need
1121 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
1124 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1127 struct page *percpu_switcher_page, *regs_page;
1129 struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
1131 /* Switcher page should always be mapped by now! */
1132 BUG_ON(!pgdir->switcher_mapped);
1135 * Remember that we have two pages for each Host CPU, so we can run a
1136 * Guest on each CPU without them interfering. We need to make sure
1137 * those pages are mapped correctly in the Guest, but since we usually
	 * run on the same CPU, we cache that, and only update the mappings
	 * when we move to a different CPU.
	 */
	if (pgdir->last_host_cpu == raw_smp_processor_id())
		return;
1144 /* -1 means unknown so we remove everything. */
1145 if (pgdir->last_host_cpu == -1) {
1147 for_each_possible_cpu(i)
1148 remove_switcher_percpu_map(cpu, i);
	} else {
		/* We know exactly what CPU mapping to remove. */
		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
	}
1155 * When we're running the Guest, we want the Guest's "regs" page to
1156 * appear where the first Switcher page for this CPU is. This is an
1157 * optimization: when the Switcher saves the Guest registers, it saves
1158 * them into the first page of this CPU's "struct lguest_pages": if we
1159 * make sure the Guest's register page is already mapped there, we
1160 * don't have to copy them out again.
1162 /* Find the shadow PTE for this regs page. */
1163 base = switcher_addr + PAGE_SIZE
1164 + raw_smp_processor_id() * sizeof(struct lguest_pages);
1165 pte = find_spte(cpu, base, false, 0, 0);
1166 regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
1167 get_page(regs_page);
1168 set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
1171 * We map the second page of the struct lguest_pages read-only in
	 * the Guest: the IDT, GDT and other things it's not supposed to
	 * change.
	 */
1175 pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1176 percpu_switcher_page
1177 = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
1178 get_page(percpu_switcher_page);
1179 set_pte(pte, mk_pte(percpu_switcher_page,
1180 __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
1182 pgdir->last_host_cpu = raw_smp_processor_id();
1186 * We've made it through the page table code. Perhaps our tired brains are
1187 * still processing the details, or perhaps we're simply glad it's over.
1189 * If nothing else, note that all this complexity in juggling shadow page tables
1190 * in sync with the Guest's page tables is for one reason: for most Guests this
1191 * page table dance determines how bad performance will be. This is why Xen
1192 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented nested page table support (Intel's EPT, AMD's NPT)
 * directly in hardware, doing away with shadow paging altogether.
1195 * There is just one file remaining in the Host.