/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/quicklist.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));

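/* Note (added): swapper_pg_dir is the kernel's reference page directory;
 * pgd_ctor() below copies its kernel-space entries into each newly
 * allocated pgd, so every address space shares the kernel mappings.
 */
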
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

        /* zero the new page table before it can be walked */
        if (pte)
                clear_page(pte);
        return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *page;

#ifdef CONFIG_HIGHPTE
        page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
        page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
        if (page) {
                clear_highpage(page);
                pgtable_page_ctor(page);
                flush_dcache_page(page);
        }
        return page;
}

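/* Usage sketch (added, not part of the original file): the page returned
 * here is installed by the generic MM layer, e.g. __pte_alloc(), roughly:
 *
 *      pgtable_t new = pte_alloc_one(mm, address);
 *      if (new)
 *              pmd_populate(mm, pmd, new);
 */
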
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
        unsigned long *__ste_p = pmdptr->ste;
        int loop;

        if (!pmd) {
                /* clear the whole block of entries */
                memset(__ste_p, 0, PME_SIZE);
        }
        else {
                BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

                /* replicate the pmd across the block, advancing the
                 * address by one page table's coverage each step */
                for (loop = PME_SIZE; loop > 0; loop -= 4) {
                        *__ste_p++ = pmd;
                        pmd += __frv_PT_SIZE;
                }
        }

        frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}

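/* Note (added): the trailing frv_dcache_writeback() is presumably needed
 * because the TLB-miss path reads these entries from memory rather than
 * through the data cache, so the update must be pushed out before it can
 * be observed.
 */
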
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        /* link the new page in at the head of the list */
        page->index = (unsigned long) pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long) &page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long) &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        next = (struct page *) page->index;
        pprev = (struct page **) page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long) pprev);
}

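/* Illustrative sketch (added): pgd_list is singly linked through
 * page->index and must only be walked under pgd_lock, e.g.:
 *
 *      spin_lock_irqsave(&pgd_lock, flags);
 *      for (page = pgd_list; page; page = (struct page *) page->index)
 *              ...;
 *      spin_unlock_irqrestore(&pgd_lock, flags);
 */
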
void pgd_ctor(void *pgd)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        /* copy the kernel-space entries from the reference page directory */
        memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
               swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
               (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        /* in the non-PAE case, clear_page_tables() clears user pgd entries */
        quicklist_free(0, pgd_dtor, pgd);
}

void __init pgtable_cache_init(void)
{
}

void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}
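
/* Note (added): in the quicklist API the final two arguments are
 * (min_pages, max_free), so this trims the per-CPU cache of free pgd
 * pages down toward 25, releasing at most 16 pages per call and running
 * pgd_dtor() on each page freed.
 */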