1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */
6 #ifndef _ASM_RISCV_PGTABLE_64_H
7 #define _ASM_RISCV_PGTABLE_64_H
9 #include <linux/bits.h>
10 #include <linux/const.h>
11 #include <asm/errata_list.h>
13 extern bool pgtable_l4_enabled;
14 extern bool pgtable_l5_enabled;
16 #define PGDIR_SHIFT_L3 30
17 #define PGDIR_SHIFT_L4 39
18 #define PGDIR_SHIFT_L5 48
19 #define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
21 #define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
22 (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
23 /* Size of region mapped by a page global directory */
24 #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
25 #define PGDIR_MASK (~(PGDIR_SIZE - 1))
27 /* p4d is folded into pgd in case of 4-level page table */
29 #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
30 #define P4D_MASK (~(P4D_SIZE - 1))
32 /* pud is folded into pgd in case of 3-level page table */
34 #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
35 #define PUD_MASK (~(PUD_SIZE - 1))
38 /* Size of region mapped by a page middle directory */
39 #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
40 #define PMD_MASK (~(PMD_SIZE - 1))
42 /* Page 4th Directory entry */
47 #define p4d_val(x) ((x).p4d)
48 #define __p4d(x) ((p4d_t) { (x) })
49 #define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))
51 /* Page Upper Directory entry */
56 #define pud_val(x) ((x).pud)
57 #define __pud(x) ((pud_t) { (x) })
58 #define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
60 /* Page Middle Directory entry */
65 #define pmd_val(x) ((x).pmd)
66 #define __pmd(x) ((pmd_t) { (x) })
68 #define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
/*
 * rv64 PTE format:
 * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
 *   N    MT      RSV     PFN    reserved for SW  D  A  G  U  X  W  R  V
 */
75 #define _PAGE_PFN_MASK GENMASK(53, 10)
/*
 * [62:61] Svpbmt Memory Type definitions:
 *
 *  00 - PMA    Normal Cacheable, No change to implied PMA memory type
 *  01 - NC     Non-cacheable, idempotent, weakly-ordered Main Memory
 *  10 - IO     Non-cacheable, non-idempotent, strongly-ordered I/O memory
 *  11 - Rsvd   Reserved for future standard use
 */
85 #define _PAGE_NOCACHE_SVPBMT (1UL << 61)
86 #define _PAGE_IO_SVPBMT (1UL << 62)
87 #define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
/*
 * [63:59] T-Head Memory Type definitions:
 *
 * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
 * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 */
96 #define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
97 #define _PAGE_NOCACHE_THEAD 0UL
98 #define _PAGE_IO_THEAD (1UL << 63)
99 #define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
101 static inline u64 riscv_page_mtmask(void)
105 ALT_SVPBMT(val, _PAGE_MTMASK);
109 static inline u64 riscv_page_nocache(void)
113 ALT_SVPBMT(val, _PAGE_NOCACHE);
117 static inline u64 riscv_page_io(void)
121 ALT_SVPBMT(val, _PAGE_IO);
125 #define _PAGE_NOCACHE riscv_page_nocache()
126 #define _PAGE_IO riscv_page_io()
127 #define _PAGE_MTMASK riscv_page_mtmask()
129 /* Set of bits to preserve across pte_modify() */
130 #define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
131 _PAGE_WRITE | _PAGE_EXEC | \
132 _PAGE_USER | _PAGE_GLOBAL | \
135 static inline int pud_present(pud_t pud)
137 return (pud_val(pud) & _PAGE_PRESENT);
140 static inline int pud_none(pud_t pud)
142 return (pud_val(pud) == 0);
145 static inline int pud_bad(pud_t pud)
147 return !pud_present(pud);
150 #define pud_leaf pud_leaf
151 static inline int pud_leaf(pud_t pud)
153 return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
156 static inline int pud_user(pud_t pud)
158 return pud_val(pud) & _PAGE_USER;
161 static inline void set_pud(pud_t *pudp, pud_t pud)
166 static inline void pud_clear(pud_t *pudp)
168 set_pud(pudp, __pud(0));
171 static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
173 return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
176 static inline unsigned long _pud_pfn(pud_t pud)
178 return pud_val(pud) >> _PAGE_PFN_SHIFT;
181 static inline pmd_t *pud_pgtable(pud_t pud)
183 return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
186 static inline struct page *pud_page(pud_t pud)
188 return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
191 #define mm_p4d_folded mm_p4d_folded
192 static inline bool mm_p4d_folded(struct mm_struct *mm)
194 if (pgtable_l5_enabled)
200 #define mm_pud_folded mm_pud_folded
201 static inline bool mm_pud_folded(struct mm_struct *mm)
203 if (pgtable_l4_enabled)
209 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
211 static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
213 unsigned long prot_val = pgprot_val(prot);
215 ALT_THEAD_PMA(prot_val);
217 return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
220 static inline unsigned long _pmd_pfn(pmd_t pmd)
222 return __page_val_to_pfn(pmd_val(pmd));
225 #define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
227 #define pmd_ERROR(e) \
228 pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
230 #define pud_ERROR(e) \
231 pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
233 #define p4d_ERROR(e) \
234 pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
236 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
238 if (pgtable_l4_enabled)
241 set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
244 static inline int p4d_none(p4d_t p4d)
246 if (pgtable_l4_enabled)
247 return (p4d_val(p4d) == 0);
252 static inline int p4d_present(p4d_t p4d)
254 if (pgtable_l4_enabled)
255 return (p4d_val(p4d) & _PAGE_PRESENT);
260 static inline int p4d_bad(p4d_t p4d)
262 if (pgtable_l4_enabled)
263 return !p4d_present(p4d);
268 static inline void p4d_clear(p4d_t *p4d)
270 if (pgtable_l4_enabled)
271 set_p4d(p4d, __p4d(0));
274 static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
276 return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
279 static inline unsigned long _p4d_pfn(p4d_t p4d)
281 return p4d_val(p4d) >> _PAGE_PFN_SHIFT;
284 static inline pud_t *p4d_pgtable(p4d_t p4d)
286 if (pgtable_l4_enabled)
287 return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
289 return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
291 #define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))
293 static inline struct page *p4d_page(p4d_t p4d)
295 return pfn_to_page(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
298 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
300 #define pud_offset pud_offset
301 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
303 if (pgtable_l4_enabled)
304 return p4d_pgtable(*p4d) + pud_index(address);
309 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
311 if (pgtable_l5_enabled)
314 set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
317 static inline int pgd_none(pgd_t pgd)
319 if (pgtable_l5_enabled)
320 return (pgd_val(pgd) == 0);
325 static inline int pgd_present(pgd_t pgd)
327 if (pgtable_l5_enabled)
328 return (pgd_val(pgd) & _PAGE_PRESENT);
333 static inline int pgd_bad(pgd_t pgd)
335 if (pgtable_l5_enabled)
336 return !pgd_present(pgd);
341 static inline void pgd_clear(pgd_t *pgd)
343 if (pgtable_l5_enabled)
344 set_pgd(pgd, __pgd(0));
347 static inline p4d_t *pgd_pgtable(pgd_t pgd)
349 if (pgtable_l5_enabled)
350 return (p4d_t *)pfn_to_virt(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
352 return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
354 #define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))
356 static inline struct page *pgd_page(pgd_t pgd)
358 return pfn_to_page(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
360 #define pgd_page(pgd) pgd_page(pgd)
362 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
364 #define p4d_offset p4d_offset
365 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
367 if (pgtable_l5_enabled)
368 return pgd_pgtable(*pgd) + p4d_index(address);
373 #endif /* _ASM_RISCV_PGTABLE_64_H */