/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;
enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
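
/*
 * Illustrative sketch, not part of the original header: how a caller
 * might account for splitting one 1M direct mapping into 4K pages via
 * update_page_count() and the counters above. The helper name is
 * hypothetical.
 */
static inline void __direct_map_split_example(void)
{
	update_page_count(PG_DIRECT_MAP_1M, -1);	/* one 1M mapping gone */
	update_page_count(PG_DIRECT_MAP_4K, 256);	/* 1M / 4K = 256 pages */
}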
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
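
/*
 * Illustrative sketch, not part of the original header: ZERO_PAGE()
 * spelled out as a function. zero_page_mask selects one of several
 * zero pages by the cache color of the faulting virtual address.
 */
static inline struct page *__zero_page_example(unsigned long vaddr)
{
	unsigned long color_offset = vaddr & zero_page_mask;

	return virt_to_page((void *)(empty_zero_page + color_offset));
}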
/* TODO: s390 cannot support io_remap_pfn_range... */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter-module branches always
 * happen without trampolines and, in addition, that the placement
 * within a 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
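
/*
 * Illustrative layout sketch, not part of the original header, under
 * the assumption that the boot code places the module area at the very
 * top of the kernel address space as described above:
 *
 *	MODULES_END   = top of the kernel address space
 *	MODULES_VADDR = MODULES_END - MODULES_LEN
 *	VMALLOC_END   = MODULES_VADDR
 *	VMALLOC_START = VMALLOC_END - vmalloc size
 */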
static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit page table entry of S390 has following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : change bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
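
/*
 * Illustrative sketch, not part of the original header: classifying a
 * raw pte value against the table above. The helper name is
 * hypothetical.
 */
static inline const char *__pte_class_example(unsigned long ptev)
{
	if (ptev == _PAGE_INVALID)		/* .10.00000000 */
		return "none";
	if ((ptev & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT)
		return "swap";			/* .11..ttttt.0 */
	if (ptev & _PAGE_PRESENT)		/* .xx.xxxxxx.1 */
		return "present";
	return "unknown";
}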
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	     */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	     */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control  */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		     */
#define _ASCE_REAL_SPACE	0x20	/* real space control		     */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		     */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	     */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	     */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	     */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		     */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		     */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	     */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	     */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	     */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		     */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	     */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	     */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	     */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	     */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	     */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		     */
#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN		~0x7ffUL /* page table origin	 */
#define _SEGMENT_ENTRY_PROTECT		0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC		0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID		0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK	0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY			(0)
#define _SEGMENT_ENTRY_EMPTY		(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)
/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
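
/*
 * Illustrative sketch, not part of the original header: deriving the
 * number of page table levels in use from the folding rules above.
 * Hypothetical helper.
 */
static inline int __pgtable_levels_example(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return 2;	/* segment table is the topmost level */
	if (mm_pud_folded(mm))
		return 3;	/* region third table on top */
	if (mm_p4d_folded(mm))
		return 4;	/* region second table on top */
	return 5;		/* full five level layout */
}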
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}
/*
 * If a guest uses storage keys, faults should no longer be backed
 * by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}
#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}
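
/*
 * Illustrative sketch, not part of the original header: exchanging a
 * segment table entry with CRDTE, similar to what the pmdp_xchg
 * helpers in arch/s390/mm/pgtable.c do. Hypothetical wrapper.
 */
static inline void __crdte_xchg_pmd_example(unsigned long *entry,
					    unsigned long new,
					    unsigned long addr,
					    unsigned long asce)
{
	crdte(*entry, new, entry, CRDTE_DTT_SEGMENT, addr, asce);
}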
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}
static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}
static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
extern unsigned long mio_wb_bit_mask;

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		set_pte(ptep, entry);
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage | pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}
/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
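
/*
 * Illustrative worked example, not part of the original header, of the
 * shift computation above: a region-first table entry has type 0x0c,
 * so shift = (0x0c >> 2) * 11 + 20 = 3 * 11 + 20 = 53 = _REGION1_SHIFT;
 * a segment table entry has type 0x00, giving
 * shift = 0 * 11 + 20 = 20 = _SEGMENT_SHIFT.
 */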
static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}
#define p4d_offset p4d_offset

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset
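
/*
 * Illustrative sketch, not part of the original header: the walk
 * pattern described above, resolving an address down to a pte slot.
 * Hypothetical helper; no locking or validity checks, and large
 * (leaf) entries are not handled. The offset helpers fold levels
 * on their own.
 */
static inline pte_t *__walk_to_pte_example(struct mm_struct *mm,
					   unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp = pud_offset(p4dp, addr);
	pmd_t *pmdp = pmd_offset(pudp, addr);

	return (pte_t *)pmd_deref(*pmdp) +
	       ((addr & _PAGE_INDEX) >> _PAGE_SHIFT);
}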
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted
#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)		pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)		pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)		pfn_to_page(pgd_pfn(pgd))
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000
static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;

		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
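
/*
 * Illustrative sketch, not part of the original header: a swap entry
 * round trip through the helpers above, e.g. type 3, offset 0x1234.
 * The helper name is hypothetical.
 */
static inline void __swp_roundtrip_example(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
}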
#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */