// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>
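/*
 * cmma_flag encodes the level of CMMA (Collaborative Memory Management
 * Assist) support: 0 = disabled, 1 = the ESSA instruction is available,
 * 2 = ESSA also supports the no-DAT page states.  The "cmma=" kernel
 * parameter accepts any value understood by kstrtobool(), so e.g.
 * booting with "cmma=off" disables the feature.
 */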
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);
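/*
 * Probe for ESSA (opcode 0xb9ab) by executing it with the GET_STATE
 * command; on machines without the instruction the resulting operation
 * exception is caught by the exception table entry and rc stays
 * -EOPNOTSUPP.
 */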
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	la	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}
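/*
 * Early boot setup: leave CMMA disabled if requested on the command
 * line or if ESSA is not installed; switch to the no-DAT page states
 * when facility 147 indicates they are available.
 */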
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}
static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}
static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}
static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}
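/*
 * Transfer all memory that is neither a translation table nor on the
 * buddy free lists into the stable no-dat state; free pages are
 * handled later by arch_alloc_page() as they get allocated.
 */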
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
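/*
 * Hooks called by the page allocator: freed pages are reported as
 * unused (their content may be discarded by the hypervisor), and
 * pages are made stable again when they are handed out.
 */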
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}
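/*
 * Called for pages that are about to be used as DAT translation
 * tables; these must not carry the no-dat guarantee.
 */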
void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}