1 /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
6 #include <asm/export.h>
7 #include <asm/loongarch.h>
9 #include <asm/pgtable.h>
10 #include <asm/regdef.h>
11 #include <asm/stackframe.h>
/*
 * Macro that stamps out tlb_do_page_fault_0 (read fault) and
 * tlb_do_page_fault_1 (write fault): record the faulting virtual address
 * from CSR.BADV into the saved pt_regs, then dispatch to the C-level
 * do_page_fault() handler.
 * NOTE(review): the .endm directive, the macro invocations and the actual
 * jump through t0 are elided in this excerpt -- confirm against full file.
 */
13 .macro tlb_do_page_fault, write
14 SYM_FUNC_START(tlb_do_page_fault_\write)
/* a2 = faulting virtual address (bad-virtual-address CSR) */
16 csrrd a2, LOONGARCH_CSR_BADV
/* Store it in pt_regs at offset PT_BVADDR for the C handler */
18 REG_S a2, sp, PT_BVADDR
/* t0 = &do_page_fault (absolute address; indirect jump not visible here) */
20 la.abs t0, do_page_fault
23 SYM_FUNC_END(tlb_do_page_fault_\write)
/*
 * handle_tlb_protect -- entry for TLB protection (privilege/permission)
 * exceptions. No fast path: record the faulting address and hand the
 * whole job to the C do_page_fault().
 * NOTE(review): the exception prologue and the jump through t0 are
 * elided in this excerpt.
 */
29 SYM_FUNC_START(handle_tlb_protect)
/* a2 = faulting virtual address */
34 csrrd a2, LOONGARCH_CSR_BADV
/* Save it into pt_regs->csr_badvaddr for the C handler */
35 REG_S a2, sp, PT_BVADDR
/* t0 = &do_page_fault */
36 la.abs t0, do_page_fault
39 SYM_FUNC_END(handle_tlb_protect)
/*
 * handle_tlb_load -- fast-path handler for TLB load (read) faults.
 * Walks the software page tables for the faulting address, marks the
 * PTE valid, and writes the refilled entry pair into TLBELO0/TLBELO1.
 * Falls back to tlb_do_page_fault_0 when no page is present.
 * NOTE(review): this excerpt elides many original lines (ll.d/sc.d
 * pairs, pointer loads, tlbsrch/tlbwr/ertn, #else/#endif arms) --
 * the comments below describe only what is visible.
 */
41 SYM_FUNC_START(handle_tlb_load)
/* Stash t0/t1/ra in kernel-save scratch CSRs; no stack is usable here */
42 csrwr t0, EXCEPTION_KS0
43 csrwr t1, EXCEPTION_KS1
44 csrwr ra, EXCEPTION_KS2
47 * The vmalloc handling is not in the hotpath.
/* t0 = faulting virtual address */
49 csrrd t0, LOONGARCH_CSR_BADV
/* Sign bit set => kernel-half (vmalloc) address: leave the hot path */
50 blt t0, $r0, vmalloc_load
/* t1 = lower-half (user) page-global-directory base */
51 csrrd t1, LOONGARCH_CSR_PGDL
54 /* Get PGD offset in bytes */
55 srli.d t0, t0, PGDIR_SHIFT
56 andi t0, t0, (PTRS_PER_PGD - 1)
/* 4-level tables: derive the PUD index from BADV */
59 #if CONFIG_PGTABLE_LEVELS > 3
60 csrrd t0, LOONGARCH_CSR_BADV
62 srli.d t0, t0, PUD_SHIFT
63 andi t0, t0, (PTRS_PER_PUD - 1)
/* 3-level tables: derive the PMD index from BADV */
67 #if CONFIG_PGTABLE_LEVELS > 2
68 csrrd t0, LOONGARCH_CSR_BADV
70 srli.d t0, t0, PMD_SHIFT
71 andi t0, t0, (PTRS_PER_PMD - 1)
78 * For huge tlb entries, pmde doesn't contain an address but
79 * instead contains the tlb pte. Check the PAGE_HUGE bit and
80 * see if we need to jump to huge tlb processing.
82 andi t0, ra, _PAGE_HUGE
83 bne t0, $r0, tlb_huge_update_load
/* Normal page: PTE offset = (index into PTE table) * sizeof(pte_t) */
85 csrrd t0, LOONGARCH_CSR_BADV
86 srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
87 andi t0, t0, (PTRS_PER_PTE - 1)
88 slli.d t0, t0, _PTE_T_LOG2
/* SMP retry point for the atomic PTE update (ll.d/sc.d elided here) */
92 smp_pgtable_change_load:
/* Take the slow path if the PTE lacks _PAGE_PRESENT */
101 srli.d ra, t0, _PAGE_PRESENT_SHIFT
103 beq ra, $r0, nopage_tlb_load
/* Mark the PTE valid so the hardware walk will accept it */
105 ori t0, t0, _PAGE_VALID
/* Presumably tests the sc.d result and retries on failure -- TODO confirm */
108 beq t0, $r0, smp_pgtable_change_load
/* Program the even/odd entry pair for the TLB write */
116 csrwr t0, LOONGARCH_CSR_TLBELO0
117 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore the scratched registers before leaving the exception */
120 csrrd t0, EXCEPTION_KS0
121 csrrd t1, EXCEPTION_KS1
122 csrrd ra, EXCEPTION_KS2
/* vmalloc path: kernel addresses walk swapper_pg_dir instead of PGDL */
126 la.abs t1, swapper_pg_dir
131 * This is the entry point when build_tlbchange_handler_head
134 tlb_huge_update_load:
/* Huge PTE: same present check as the normal path */
140 srli.d ra, t0, _PAGE_PRESENT_SHIFT
142 beq ra, $r0, nopage_tlb_load
145 ori t0, t0, _PAGE_VALID
/* Presumably the sc.d result check for the huge-PTE update -- TODO confirm */
148 beq t0, $r0, tlb_huge_update_load
/* Invalidate any stale matching entry via TLBIDX.EHINV, then clear it */
153 addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
155 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
158 csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
161 * A huge PTE describes an area the size of the
162 * configured huge page size. This is twice the
163 * of the large TLB entry size we intend to use.
164 * A TLB entry half the size of the configured
165 * huge page size is configured into entrylo0
166 * and entrylo1 to cover the contiguous huge PTE
169 /* Huge page: Move Global bit */
/* Clear _PAGE_HUGE (known set here) and relocate it to _PAGE_HGLOBAL */
170 xori t0, t0, _PAGE_HUGE
171 lu12i.w t1, _PAGE_HGLOBAL >> 12
173 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge region -> entrylo0 */
177 csrwr t0, LOONGARCH_CSR_TLBELO0
180 /* Convert to entrylo1 */
/* Second half: advance by half the huge-page size */
182 slli.d t1, t1, (HPAGE_SHIFT - 1)
184 csrwr t0, LOONGARCH_CSR_TLBELO1
186 /* Set huge page tlb entry size */
187 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
188 addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
189 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* Restore the default page size in TLBIDX.PS after the huge fill */
193 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
194 addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
195 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* nopage path: restore ra and fall back to the C page-fault handler */
199 csrrd ra, EXCEPTION_KS2
200 la.abs t0, tlb_do_page_fault_0
202 SYM_FUNC_END(handle_tlb_load)
/*
 * handle_tlb_store -- fast-path handler for TLB store (write) faults.
 * Same structure as handle_tlb_load, but requires both _PAGE_PRESENT and
 * _PAGE_WRITE, and sets VALID|DIRTY|MODIFIED in the PTE. Falls back to
 * tlb_do_page_fault_1 (write fault) when the check fails.
 * NOTE(review): this excerpt elides many original lines (ll.d/sc.d
 * pairs, pointer loads, tlbsrch/tlbwr/ertn, #else/#endif arms) --
 * the comments below describe only what is visible.
 */
204 SYM_FUNC_START(handle_tlb_store)
/* Stash t0/t1/ra in kernel-save scratch CSRs; no stack is usable here */
205 csrwr t0, EXCEPTION_KS0
206 csrwr t1, EXCEPTION_KS1
207 csrwr ra, EXCEPTION_KS2
210 * The vmalloc handling is not in the hotpath.
/* t0 = faulting virtual address; negative => kernel-half (vmalloc) */
212 csrrd t0, LOONGARCH_CSR_BADV
213 blt t0, $r0, vmalloc_store
/* t1 = lower-half (user) page-global-directory base */
214 csrrd t1, LOONGARCH_CSR_PGDL
217 /* Get PGD offset in bytes */
218 srli.d t0, t0, PGDIR_SHIFT
219 andi t0, t0, (PTRS_PER_PGD - 1)
/* 4-level tables: derive the PUD index from BADV */
223 #if CONFIG_PGTABLE_LEVELS > 3
224 csrrd t0, LOONGARCH_CSR_BADV
226 srli.d t0, t0, PUD_SHIFT
227 andi t0, t0, (PTRS_PER_PUD - 1)
/* 3-level tables: derive the PMD index from BADV */
231 #if CONFIG_PGTABLE_LEVELS > 2
232 csrrd t0, LOONGARCH_CSR_BADV
234 srli.d t0, t0, PMD_SHIFT
235 andi t0, t0, (PTRS_PER_PMD - 1)
242 * For huge tlb entries, pmde doesn't contain an address but
243 * instead contains the tlb pte. Check the PAGE_HUGE bit and
244 * see if we need to jump to huge tlb processing.
246 andi t0, ra, _PAGE_HUGE
247 bne t0, $r0, tlb_huge_update_store
/* Normal page: PTE offset = (index into PTE table) * sizeof(pte_t) */
249 csrrd t0, LOONGARCH_CSR_BADV
250 srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
251 andi t0, t0, (PTRS_PER_PTE - 1)
252 slli.d t0, t0, _PTE_T_LOG2
/* SMP retry point for the atomic PTE update (ll.d/sc.d elided here) */
256 smp_pgtable_change_store:
/* Slow path unless BOTH _PAGE_PRESENT and _PAGE_WRITE are set:
 * and/xor leaves ra nonzero if either bit is missing */
265 srli.d ra, t0, _PAGE_PRESENT_SHIFT
266 andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
267 xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
268 bne ra, $r0, nopage_tlb_store
/* Writable store: mark valid and record the write */
270 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Presumably tests the sc.d result and retries on failure -- TODO confirm */
273 beq t0, $r0, smp_pgtable_change_store
/* Program the even/odd entry pair for the TLB write */
282 csrwr t0, LOONGARCH_CSR_TLBELO0
283 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore the scratched registers before leaving the exception */
286 csrrd t0, EXCEPTION_KS0
287 csrrd t1, EXCEPTION_KS1
288 csrrd ra, EXCEPTION_KS2
/* vmalloc path: kernel addresses walk swapper_pg_dir instead of PGDL */
292 la.abs t1, swapper_pg_dir
297 * This is the entry point when build_tlbchange_handler_head
300 tlb_huge_update_store:
/* Huge PTE: same present+write check as the normal path */
306 srli.d ra, t0, _PAGE_PRESENT_SHIFT
307 andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
308 xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
309 bne ra, $r0, nopage_tlb_store
312 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Presumably the sc.d result check for the huge-PTE update -- TODO confirm */
316 beq t0, $r0, tlb_huge_update_store
/* Invalidate any stale matching entry via TLBIDX.EHINV, then clear it */
321 addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
323 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
326 csrxchg $r0, t1, LOONGARCH_CSR_TLBIDX
328 * A huge PTE describes an area the size of the
329 * configured huge page size. This is twice the
330 * of the large TLB entry size we intend to use.
331 * A TLB entry half the size of the configured
332 * huge page size is configured into entrylo0
333 * and entrylo1 to cover the contiguous huge PTE
336 /* Huge page: Move Global bit */
/* Clear _PAGE_HUGE (known set here) and relocate it to _PAGE_HGLOBAL */
337 xori t0, t0, _PAGE_HUGE
338 lu12i.w t1, _PAGE_HGLOBAL >> 12
340 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge region -> entrylo0 */
344 csrwr t0, LOONGARCH_CSR_TLBELO0
347 /* Convert to entrylo1 */
/* Second half: advance by half the huge-page size */
349 slli.d t1, t1, (HPAGE_SHIFT - 1)
351 csrwr t0, LOONGARCH_CSR_TLBELO1
353 /* Set huge page tlb entry size */
354 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
355 addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
356 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
360 /* Reset default page size */
361 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
362 addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
363 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* nopage path: restore ra and fall back to the C write-fault handler */
367 csrrd ra, EXCEPTION_KS2
368 la.abs t0, tlb_do_page_fault_1
370 SYM_FUNC_END(handle_tlb_store)
/*
 * handle_tlb_modify -- fast-path handler for TLB modify faults (write to
 * a present but not-yet-dirty page). Checks only _PAGE_WRITE (presence is
 * implied for a modify exception), sets VALID|DIRTY|MODIFIED, and rewrites
 * the TLB entry pair. Falls back to tlb_do_page_fault_1 otherwise.
 * NOTE(review): this excerpt elides many original lines (ll.d/sc.d
 * pairs, pointer loads, tlbsrch/tlbwr/ertn, #else/#endif arms) --
 * the comments below describe only what is visible.
 */
372 SYM_FUNC_START(handle_tlb_modify)
/* Stash t0/t1/ra in kernel-save scratch CSRs; no stack is usable here */
373 csrwr t0, EXCEPTION_KS0
374 csrwr t1, EXCEPTION_KS1
375 csrwr ra, EXCEPTION_KS2
378 * The vmalloc handling is not in the hotpath.
/* t0 = faulting virtual address; negative => kernel-half (vmalloc) */
380 csrrd t0, LOONGARCH_CSR_BADV
381 blt t0, $r0, vmalloc_modify
/* t1 = lower-half (user) page-global-directory base */
382 csrrd t1, LOONGARCH_CSR_PGDL
385 /* Get PGD offset in bytes */
386 srli.d t0, t0, PGDIR_SHIFT
387 andi t0, t0, (PTRS_PER_PGD - 1)
/* 4-level tables: derive the PUD index from BADV */
390 #if CONFIG_PGTABLE_LEVELS > 3
391 csrrd t0, LOONGARCH_CSR_BADV
393 srli.d t0, t0, PUD_SHIFT
394 andi t0, t0, (PTRS_PER_PUD - 1)
/* 3-level tables: derive the PMD index from BADV */
398 #if CONFIG_PGTABLE_LEVELS > 2
399 csrrd t0, LOONGARCH_CSR_BADV
401 srli.d t0, t0, PMD_SHIFT
402 andi t0, t0, (PTRS_PER_PMD - 1)
409 * For huge tlb entries, pmde doesn't contain an address but
410 * instead contains the tlb pte. Check the PAGE_HUGE bit and
411 * see if we need to jump to huge tlb processing.
413 andi t0, ra, _PAGE_HUGE
414 bne t0, $r0, tlb_huge_update_modify
/* Normal page: PTE offset = (index into PTE table) * sizeof(pte_t) */
416 csrrd t0, LOONGARCH_CSR_BADV
417 srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
418 andi t0, t0, (PTRS_PER_PTE - 1)
419 slli.d t0, t0, _PTE_T_LOG2
/* SMP retry point for the atomic PTE update (ll.d/sc.d elided here) */
423 smp_pgtable_change_modify:
/* Slow path if the PTE is not writable */
432 srli.d ra, t0, _PAGE_WRITE_SHIFT
434 beq ra, $r0, nopage_tlb_modify
/* Writable: mark valid and record the write */
436 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Presumably tests the sc.d result and retries on failure -- TODO confirm */
439 beq t0, $r0, smp_pgtable_change_modify
/* Program the even/odd entry pair for the TLB write */
447 csrwr t0, LOONGARCH_CSR_TLBELO0
448 csrwr t1, LOONGARCH_CSR_TLBELO1
/* Restore the scratched registers before leaving the exception */
451 csrrd t0, EXCEPTION_KS0
452 csrrd t1, EXCEPTION_KS1
453 csrrd ra, EXCEPTION_KS2
/* vmalloc path: kernel addresses walk swapper_pg_dir, then rejoin */
457 la.abs t1, swapper_pg_dir
458 b vmalloc_done_modify
462 * This is the entry point when
463 * build_tlbchange_handler_head spots a huge page.
465 tlb_huge_update_modify:
/* Huge PTE: same writable check as the normal path */
472 srli.d ra, t0, _PAGE_WRITE_SHIFT
474 beq ra, $r0, nopage_tlb_modify
477 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
/* Presumably the sc.d result check for the huge-PTE update -- TODO confirm */
481 beq t0, $r0, tlb_huge_update_modify
487 * A huge PTE describes an area the size of the
488 * configured huge page size. This is twice the
489 * of the large TLB entry size we intend to use.
490 * A TLB entry half the size of the configured
491 * huge page size is configured into entrylo0
492 * and entrylo1 to cover the contiguous huge PTE
495 /* Huge page: Move Global bit */
/* Clear _PAGE_HUGE (known set here) and relocate it to _PAGE_HGLOBAL */
496 xori t0, t0, _PAGE_HUGE
497 lu12i.w t1, _PAGE_HGLOBAL >> 12
499 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
/* First half of the huge region -> entrylo0 */
503 csrwr t0, LOONGARCH_CSR_TLBELO0
506 /* Convert to entrylo1 */
/* Second half: advance by half the huge-page size */
508 slli.d t1, t1, (HPAGE_SHIFT - 1)
510 csrwr t0, LOONGARCH_CSR_TLBELO1
512 /* Set huge page tlb entry size */
513 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
514 addu16i.d t1, $r0, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
515 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
519 /* Reset default page size */
520 addu16i.d t0, $r0, (CSR_TLBIDX_PS >> 16)
521 addu16i.d t1, $r0, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
522 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
/* nopage path: restore ra and fall back to the C write-fault handler */
526 csrrd ra, EXCEPTION_KS2
527 la.abs t0, tlb_do_page_fault_1
529 SYM_FUNC_END(handle_tlb_modify)
/*
 * handle_tlb_refill -- hardware TLB-refill exception entry point.
 * Uses only t0, preserved via the dedicated TLB-refill scratch CSR
 * (TLBRSAVE), and walks the page tables starting from CSR.PGD.
 * NOTE(review): the level-by-level walk (presumably lddir/ldpte), the
 * tlbfill/ertn epilogue and the #endif lines are elided in this excerpt.
 */
531 SYM_FUNC_START(handle_tlb_refill)
/* Preserve t0 in the refill-exception scratch CSR (no stack here) */
532 csrwr t0, LOONGARCH_CSR_TLBRSAVE
/* t0 = page-table root selected by hardware for the faulting address */
533 csrrd t0, LOONGARCH_CSR_PGD
/* Extra walk levels for 4- and 3-level page-table configurations */
535 #if CONFIG_PGTABLE_LEVELS > 3
538 #if CONFIG_PGTABLE_LEVELS > 2
/* Restore t0 before returning from the refill exception */
544 csrrd t0, LOONGARCH_CSR_TLBRSAVE
546 SYM_FUNC_END(handle_tlb_refill)