// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
10 void flush_tlb_all(void)
12 sbi_remote_sfence_vma(NULL, 0, -1);
15 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
16 unsigned long size, unsigned long stride)
18 struct cpumask *pmask = &mm->context.tlb_stale_mask;
19 struct cpumask *cmask = mm_cpumask(mm);
24 if (cpumask_empty(cmask))
28 /* check if the tlbflush needs to be sent to other CPUs */
29 broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
30 if (static_branch_unlikely(&use_asid_allocator)) {
31 unsigned long asid = atomic_long_read(&mm->context.id);
34 * TLB will be immediately flushed on harts concurrently
35 * executing this MM context. TLB flush on other harts
36 * is deferred until this MM context migrates there.
38 cpumask_setall(pmask);
39 cpumask_clear_cpu(cpuid, pmask);
40 cpumask_andnot(pmask, pmask, cmask);
43 riscv_cpuid_to_hartid_mask(cmask, &hmask);
44 sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
46 } else if (size <= stride) {
47 local_flush_tlb_page_asid(start, asid);
49 local_flush_tlb_all_asid(asid);
53 riscv_cpuid_to_hartid_mask(cmask, &hmask);
54 sbi_remote_sfence_vma(cpumask_bits(&hmask),
56 } else if (size <= stride) {
57 local_flush_tlb_page(start);
59 local_flush_tlb_all();
66 void flush_tlb_mm(struct mm_struct *mm)
68 __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
71 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
73 __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
76 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
79 __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Flush the PMD-level (huge page) mappings covering [start, end).
 * Using PMD_SIZE as the stride lets a range spanning one huge page
 * take the single-entry flush path.
 */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif