1 /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
6 #include <asm/loongarch.h>
8 #include <asm/pgtable.h>
9 #include <asm/regdef.h>
10 #include <asm/stackframe.h>
12 #define INVTLB_ADDR_GFALSE_AND_ASID 5
14 #define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
15 #define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
16 #define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
17 #define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
/*
 * Macro generating tlb_do_page_fault_0 (\write = 0, read fault) and
 * tlb_do_page_fault_1 (\write = 1, write fault): the common tail that
 * records the faulting address in pt_regs and enters the C fault path.
 * NOTE(review): this excerpt is decimated — the register save
 * (SAVE_ALL), the argument setup, the call into do_page_fault and the
 * closing .endm are elided; only the BADV capture is visible here.
 */
19 .macro tlb_do_page_fault, write
20 SYM_CODE_START(tlb_do_page_fault_\write)
/* a2 = faulting virtual address (CSR.BADV), stored to pt_regs->bvaddr. */
22 csrrd a2, LOONGARCH_CSR_BADV
24 REG_S a2, sp, PT_BVADDR
28 SYM_CODE_END(tlb_do_page_fault_\write)
/*
 * Handler for TLB protection-violation exceptions: save the faulting
 * address into pt_regs and dispatch to the C do_page_fault handler.
 * NOTE(review): the SAVE_ALL prologue, argument setup and the jirl to
 * t0 are elided from this excerpt.
 */
34 SYM_CODE_START(handle_tlb_protect)
/* a2 = faulting virtual address, saved to pt_regs->bvaddr. */
39 csrrd a2, LOONGARCH_CSR_BADV
40 REG_S a2, sp, PT_BVADDR
/* la_abs yields the absolute address of do_page_fault (works before
 * relocation); the indirect jump through t0 is elided here. */
41 la_abs t0, do_page_fault
44 SYM_CODE_END(handle_tlb_protect)
/*
 * handle_tlb_load: fast-path handler for TLB load (read) page-invalid
 * exceptions.  Software-walks the page table for the faulting address,
 * marks the PTE valid, and refills the TLB pair; falls back to
 * tlb_do_page_fault_0 (the C do_page_fault path) when no page is
 * present.
 * NOTE(review): this excerpt is decimated — ld.d page-table loads, the
 * ll.d/sc.d atomic PTE update, the bltz to vmalloc_load, tlbwr/tlbfill,
 * ertn and the final jirl are elided; comments describe only the
 * instructions that are visible.
 */
46 SYM_CODE_START(handle_tlb_load)
/* No stack here: stash t0/t1/ra in kernel-scratch CSRs. */
47 csrwr t0, EXCEPTION_KS0
48 csrwr t1, EXCEPTION_KS1
49 csrwr ra, EXCEPTION_KS2
/*
 * The vmalloc handling is not in the hotpath.
 * (t0 = faulting VA; the branch diverting kernel addresses to
 * vmalloc_load is elided.  t1 = user PGD base from CSR.PGDL.)
 */
54 csrrd t0, LOONGARCH_CSR_BADV
56 csrrd t1, LOONGARCH_CSR_PGDL
59 /* Get PGD offset in bytes */
60 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
/* Descend through PUD/PMD levels when configured (table loads elided). */
62 #if CONFIG_PGTABLE_LEVELS > 3
64 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
67 #if CONFIG_PGTABLE_LEVELS > 2
69 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
/*
 * For huge tlb entries, pmde doesn't contain an address but
 * instead contains the tlb pte. Check the PAGE_HUGE bit and
 * see if we need to jump to huge tlb processing.
 */
/* Rotate so _PAGE_HUGE lands in the sign bit, then branch if set. */
79 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
80 bltz ra, tlb_huge_update_load
/* Base page: undo the rotate, then index into the PTE page. */
82 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
83 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
/* t1 = pte table base + index * sizeof(pte_t). */
84 alsl.d t1, t0, ra, _PTE_T_LOG2
/* Retry label for the SMP ll.d/sc.d PTE update (ll.d elided). */
87 smp_pgtable_change_load:
/* Page not present -> take the slow C fault path. */
92 andi ra, t0, _PAGE_PRESENT
93 beqz ra, nopage_tlb_load
/* Mark the PTE valid; beqz retries when the (elided) sc.d fails. */
95 ori t0, t0, _PAGE_VALID
98 beqz t0, smp_pgtable_change_load
/* Clear bit 3 of the PTE pointer to reach the even PTE of the
 * even/odd pair that fills one TLB entry. */
103 bstrins.d t1, zero, 3, 3
/* Load both halves of the TLB entry (tlbwr elided). */
106 csrwr t0, LOONGARCH_CSR_TLBELO0
107 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore scratch registers (ertn elided). */
110 csrrd t0, EXCEPTION_KS0
111 csrrd t1, EXCEPTION_KS1
112 csrrd ra, EXCEPTION_KS2
/* vmalloc_load path: kernel addresses walk swapper_pg_dir instead. */
117 la_abs t1, swapper_pg_dir
121 /* This is the entry point of a huge page. */
122 tlb_huge_update_load:
/* (Reload of the huge PTE elided.)  Absent -> C fault path. */
126 andi t0, ra, _PAGE_PRESENT
127 beqz t0, nopage_tlb_load
/* Mark valid.  The ori is repeated because the full source interleaves
 * the SMP ll/sc retry loop and the non-SMP store under #ifdef
 * CONFIG_SMP — presumably those #ifdef lines were elided here. */
130 ori t0, ra, _PAGE_VALID
132 beqz t0, tlb_huge_update_load
133 ori t0, ra, _PAGE_VALID
/* Undo the huge-bit rotate before building the entry. */
135 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
136 ori t0, ra, _PAGE_VALID
/* Drop any stale entry for this VA in the current ASID so the new
 * huge mapping cannot conflict. */
139 csrrd ra, LOONGARCH_CSR_ASID
140 csrrd t1, LOONGARCH_CSR_BADV
141 andi ra, ra, CSR_ASID_ASID
142 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
/*
 * A huge PTE describes an area the size of the
 * configured huge page size. This is twice the
 * size of the large TLB entry we intend to use.
 * A TLB entry half the size of the configured
 * huge page size is configured into entrylo0
 * and entrylo1 to cover the contiguous huge PTE
 * area.
 */
153 /* Huge page: Move Global bit */
/* _PAGE_HUGE is cleared; the HGLOBAL bit is isolated via the t1 mask
 * and shifted down into the normal GLOBAL position (and/or elided). */
154 xori t0, t0, _PAGE_HUGE
155 lu12i.w t1, _PAGE_HGLOBAL >> 12
157 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge mapping. */
161 csrwr ra, LOONGARCH_CSR_TLBELO0
163 /* Convert to entrylo1 */
/* t1 becomes half the huge-page size (the addi.d t1, zero, 1 that
 * seeds it is elided); added to t0 for the second half. */
165 slli.d t1, t1, (HPAGE_SHIFT - 1)
167 csrwr t0, LOONGARCH_CSR_TLBELO1
169 /* Set huge page tlb entry size */
170 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
171 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
172 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* (tlbfill elided.)  Restore the default page size in CSR.TLBIDX. */
176 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
177 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
178 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* Restore scratch registers (ertn elided). */
180 csrrd t0, EXCEPTION_KS0
181 csrrd t1, EXCEPTION_KS1
182 csrrd ra, EXCEPTION_KS2
/* nopage_tlb_load: hand off to the C page-fault path for a read
 * fault (jirl through t0 elided). */
187 csrrd ra, EXCEPTION_KS2
188 la_abs t0, tlb_do_page_fault_0
190 SYM_CODE_END(handle_tlb_load)
/*
 * TLB load-fault entry used when hardware page-table walking (PTW) is
 * enabled: the hardware walker already failed, so skip the software
 * walk and go straight to the C fault path for a read fault.
 * NOTE(review): the jirl through t0 is elided from this excerpt.
 */
192 SYM_CODE_START(handle_tlb_load_ptw)
/* Stash t0/t1 in scratch CSRs, then jump to tlb_do_page_fault_0. */
193 csrwr t0, LOONGARCH_CSR_KS0
194 csrwr t1, LOONGARCH_CSR_KS1
195 la_abs t0, tlb_do_page_fault_0
197 SYM_CODE_END(handle_tlb_load_ptw)
/*
 * handle_tlb_store: fast-path handler for TLB store page-invalid
 * exceptions.  Same software walk as the load handler, but the PTE must
 * be both PRESENT and WRITE-able, and the fast path sets
 * VALID|DIRTY|MODIFIED in one go; otherwise fall back to
 * tlb_do_page_fault_1 (write fault) in C.
 * NOTE(review): decimated excerpt — ld.d walks, ll.d/sc.d, tlbwr/
 * tlbfill, ertn and jirl are elided; comments cover visible lines only.
 */
199 SYM_CODE_START(handle_tlb_store)
/* No stack here: stash t0/t1/ra in kernel-scratch CSRs. */
200 csrwr t0, EXCEPTION_KS0
201 csrwr t1, EXCEPTION_KS1
202 csrwr ra, EXCEPTION_KS2
/*
 * The vmalloc handling is not in the hotpath.
 * Kernel (negative) addresses divert to vmalloc_store.
 */
207 csrrd t0, LOONGARCH_CSR_BADV
208 bltz t0, vmalloc_store
209 csrrd t1, LOONGARCH_CSR_PGDL
212 /* Get PGD offset in bytes */
213 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
/* Descend through PUD/PMD levels when configured (table loads elided). */
215 #if CONFIG_PGTABLE_LEVELS > 3
217 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
220 #if CONFIG_PGTABLE_LEVELS > 2
222 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
/*
 * For huge tlb entries, pmde doesn't contain an address but
 * instead contains the tlb pte. Check the PAGE_HUGE bit and
 * see if we need to jump to huge tlb processing.
 */
232 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
233 bltz ra, tlb_huge_update_store
/* Base page: undo the rotate, then index into the PTE page. */
235 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
236 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
/* t1 = pte table base + index * sizeof(pte_t). */
237 alsl.d t1, t0, ra, _PTE_T_LOG2
/* Retry label for the SMP ll.d/sc.d PTE update (ll.d elided). */
240 smp_pgtable_change_store:
/* Fault to C unless the PTE is both present and writable:
 * and+xor leaves ra zero only when both bits were set. */
245 andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
246 xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
247 bnez ra, nopage_tlb_store
/* Store fast path: mark valid, dirty and software-modified at once;
 * beqz retries when the (elided) sc.d fails. */
249 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
252 beqz t0, smp_pgtable_change_store
/* Clear bit 3 of the PTE pointer to reach the even PTE of the pair. */
257 bstrins.d t1, zero, 3, 3
/* Load both halves of the TLB entry (tlbwr elided). */
260 csrwr t0, LOONGARCH_CSR_TLBELO0
261 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore scratch registers (ertn elided). */
264 csrrd t0, EXCEPTION_KS0
265 csrrd t1, EXCEPTION_KS1
266 csrrd ra, EXCEPTION_KS2
/* vmalloc_store path: kernel addresses walk swapper_pg_dir instead. */
271 la_abs t1, swapper_pg_dir
275 /* This is the entry point of a huge page. */
276 tlb_huge_update_store:
/* (Reload of the huge PTE elided.)  Not present+writable -> C path. */
280 andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
281 xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
282 bnez t0, nopage_tlb_store
/* Mark VALID|DIRTY|MODIFIED; the ori is repeated because the full
 * source interleaves SMP ll/sc and non-SMP variants under #ifdef
 * CONFIG_SMP — presumably those #ifdef lines were elided here. */
285 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
287 beqz t0, tlb_huge_update_store
288 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Undo the huge-bit rotate before building the entry. */
290 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
291 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Drop any stale entry for this VA in the current ASID. */
294 csrrd ra, LOONGARCH_CSR_ASID
295 csrrd t1, LOONGARCH_CSR_BADV
296 andi ra, ra, CSR_ASID_ASID
297 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
/*
 * A huge PTE describes an area the size of the
 * configured huge page size. This is twice the
 * size of the large TLB entry we intend to use.
 * A TLB entry half the size of the configured
 * huge page size is configured into entrylo0
 * and entrylo1 to cover the contiguous huge PTE
 * area.
 */
308 /* Huge page: Move Global bit */
/* Clear _PAGE_HUGE and shift the HGLOBAL bit down to GLOBAL. */
309 xori t0, t0, _PAGE_HUGE
310 lu12i.w t1, _PAGE_HGLOBAL >> 12
312 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge mapping. */
316 csrwr ra, LOONGARCH_CSR_TLBELO0
318 /* Convert to entrylo1 */
/* t1 = half the huge-page size (seed addi.d elided); second half. */
320 slli.d t1, t1, (HPAGE_SHIFT - 1)
322 csrwr t0, LOONGARCH_CSR_TLBELO1
324 /* Set huge page tlb entry size */
325 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
326 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
327 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
331 /* Reset default page size */
332 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
333 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
334 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* Restore scratch registers (ertn elided). */
336 csrrd t0, EXCEPTION_KS0
337 csrrd t1, EXCEPTION_KS1
338 csrrd ra, EXCEPTION_KS2
/* nopage_tlb_store: hand off to the C page-fault path for a write
 * fault (jirl through t0 elided). */
343 csrrd ra, EXCEPTION_KS2
344 la_abs t0, tlb_do_page_fault_1
346 SYM_CODE_END(handle_tlb_store)
/*
 * TLB store-fault entry used when hardware page-table walking (PTW) is
 * enabled: skip the software walk and go straight to the C fault path
 * for a write fault.
 * NOTE(review): the jirl through t0 is elided from this excerpt.
 */
348 SYM_CODE_START(handle_tlb_store_ptw)
/* Stash t0/t1 in scratch CSRs, then jump to tlb_do_page_fault_1. */
349 csrwr t0, LOONGARCH_CSR_KS0
350 csrwr t1, LOONGARCH_CSR_KS1
351 la_abs t0, tlb_do_page_fault_1
353 SYM_CODE_END(handle_tlb_store_ptw)
/*
 * handle_tlb_modify: fast-path handler for TLB modify exceptions (a
 * write hit a present-but-not-dirty page).  Same software walk as the
 * other handlers; the gate here is only _PAGE_WRITE (presence is
 * implied by the exception type), and the fast path sets
 * VALID|DIRTY|MODIFIED.  Write-protected pages fall back to
 * tlb_do_page_fault_1 in C.
 * NOTE(review): decimated excerpt — ld.d walks, ll.d/sc.d, tlbwr/
 * tlbfill, ertn and jirl are elided; comments cover visible lines only.
 */
355 SYM_CODE_START(handle_tlb_modify)
/* No stack here: stash t0/t1/ra in kernel-scratch CSRs. */
356 csrwr t0, EXCEPTION_KS0
357 csrwr t1, EXCEPTION_KS1
358 csrwr ra, EXCEPTION_KS2
/*
 * The vmalloc handling is not in the hotpath.
 * Kernel (negative) addresses divert to vmalloc_modify.
 */
363 csrrd t0, LOONGARCH_CSR_BADV
364 bltz t0, vmalloc_modify
365 csrrd t1, LOONGARCH_CSR_PGDL
368 /* Get PGD offset in bytes */
369 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
/* Descend through PUD/PMD levels when configured (table loads elided). */
371 #if CONFIG_PGTABLE_LEVELS > 3
373 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
376 #if CONFIG_PGTABLE_LEVELS > 2
378 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
/*
 * For huge tlb entries, pmde doesn't contain an address but
 * instead contains the tlb pte. Check the PAGE_HUGE bit and
 * see if we need to jump to huge tlb processing.
 */
388 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
389 bltz ra, tlb_huge_update_modify
/* Base page: undo the rotate, then index into the PTE page. */
391 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
392 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
/* t1 = pte table base + index * sizeof(pte_t). */
393 alsl.d t1, t0, ra, _PTE_T_LOG2
/* Retry label for the SMP ll.d/sc.d PTE update (ll.d elided). */
396 smp_pgtable_change_modify:
/* Write-protected -> take the slow C fault path (e.g. COW). */
401 andi ra, t0, _PAGE_WRITE
402 beqz ra, nopage_tlb_modify
/* Mark valid, dirty and software-modified; beqz retries when the
 * (elided) sc.d fails. */
404 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
407 beqz t0, smp_pgtable_change_modify
/* Clear bit 3 of the PTE pointer to reach the even PTE of the pair. */
412 bstrins.d t1, zero, 3, 3
/* Load both halves of the TLB entry (tlbwr elided). */
415 csrwr t0, LOONGARCH_CSR_TLBELO0
416 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore scratch registers (ertn elided). */
419 csrrd t0, EXCEPTION_KS0
420 csrrd t1, EXCEPTION_KS1
421 csrrd ra, EXCEPTION_KS2
/* vmalloc_modify path: walk swapper_pg_dir, then rejoin the walk. */
426 la_abs t1, swapper_pg_dir
427 b vmalloc_done_modify
430 /* This is the entry point of a huge page. */
431 tlb_huge_update_modify:
/* (Reload of the huge PTE elided.)  Write-protected -> C path. */
435 andi t0, ra, _PAGE_WRITE
436 beqz t0, nopage_tlb_modify
/* Mark VALID|DIRTY|MODIFIED; the ori is repeated because the full
 * source interleaves SMP ll/sc and non-SMP variants under #ifdef
 * CONFIG_SMP — presumably those #ifdef lines were elided here. */
439 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
441 beqz t0, tlb_huge_update_modify
442 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Undo the huge-bit rotate before building the entry. */
444 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
445 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Drop any stale entry for this VA in the current ASID. */
448 csrrd ra, LOONGARCH_CSR_ASID
449 csrrd t1, LOONGARCH_CSR_BADV
450 andi ra, ra, CSR_ASID_ASID
451 invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
/*
 * A huge PTE describes an area the size of the
 * configured huge page size. This is twice the
 * size of the large TLB entry we intend to use.
 * A TLB entry half the size of the configured
 * huge page size is configured into entrylo0
 * and entrylo1 to cover the contiguous huge PTE
 * area.
 */
462 /* Huge page: Move Global bit */
/* Clear _PAGE_HUGE and shift the HGLOBAL bit down to GLOBAL. */
463 xori t0, t0, _PAGE_HUGE
464 lu12i.w t1, _PAGE_HGLOBAL >> 12
466 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge mapping. */
470 csrwr ra, LOONGARCH_CSR_TLBELO0
472 /* Convert to entrylo1 */
/* t1 = half the huge-page size (seed addi.d elided); second half. */
474 slli.d t1, t1, (HPAGE_SHIFT - 1)
476 csrwr t0, LOONGARCH_CSR_TLBELO1
478 /* Set huge page tlb entry size */
479 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
480 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
481 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
485 /* Reset default page size */
486 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
487 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
488 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* Restore scratch registers (ertn elided). */
490 csrrd t0, EXCEPTION_KS0
491 csrrd t1, EXCEPTION_KS1
492 csrrd ra, EXCEPTION_KS2
/* nopage_tlb_modify: hand off to the C page-fault path for a write
 * fault (jirl through t0 elided). */
497 csrrd ra, EXCEPTION_KS2
498 la_abs t0, tlb_do_page_fault_1
500 SYM_CODE_END(handle_tlb_modify)
/*
 * TLB modify-fault entry used when hardware page-table walking (PTW)
 * is enabled: skip the software walk and go straight to the C fault
 * path for a write fault.
 * NOTE(review): the jirl through t0 is elided from this excerpt.
 */
502 SYM_CODE_START(handle_tlb_modify_ptw)
/* Stash t0/t1 in scratch CSRs, then jump to tlb_do_page_fault_1. */
503 csrwr t0, LOONGARCH_CSR_KS0
504 csrwr t1, LOONGARCH_CSR_KS1
505 la_abs t0, tlb_do_page_fault_1
507 SYM_CODE_END(handle_tlb_modify_ptw)
/*
 * handle_tlb_refill: TLB-refill exception handler.  Runs in its own
 * exception context with a dedicated save register (CSR.TLBRSAVE).
 * Walks the page table rooted at CSR.PGD (hardware selects PGDL or
 * PGDH from the faulting address) and refills the TLB.
 * NOTE(review): decimated excerpt — the lddir/ldpte walk instructions,
 * the matching #endif lines, tlbfill and ertn are all elided; only the
 * scratch save/restore and walk root load are visible.
 */
509 SYM_CODE_START(handle_tlb_refill)
/* Save t0 in the refill-scratch CSR; t0 = page-table root. */
510 csrwr t0, LOONGARCH_CSR_TLBRSAVE
511 csrrd t0, LOONGARCH_CSR_PGD
/* Extra walk levels when PUD/PMD are configured (lddir lines elided). */
513 #if CONFIG_PGTABLE_LEVELS > 3
516 #if CONFIG_PGTABLE_LEVELS > 2
/* Restore t0 before returning (tlbfill/ertn elided). */
522 csrrd t0, LOONGARCH_CSR_TLBRSAVE
524 SYM_CODE_END(handle_tlb_refill)