riscv: mm: notify remote harts about mmu cache updates
[platform/kernel/linux-rpi.git] / arch / riscv / mm / tlbflush.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/mm.h>
4 #include <linux/smp.h>
5 #include <linux/sched.h>
6 #include <asm/sbi.h>
7 #include <asm/mmu_context.h>
8 #include <asm/tlbflush.h>
9
10 void flush_tlb_all(void)
11 {
12         sbi_remote_sfence_vma(NULL, 0, -1);
13 }
14
/*
 * Flush the TLB entries covering [start, start + size) for @mm, reaching
 * remote harts through the SBI when the mm is active elsewhere.
 *
 * @mm:     address space whose translations are stale
 * @start:  first virtual address of the range to flush
 * @size:   length of the range in bytes ((unsigned long)-1 means "all")
 * @stride: mapping granule being flushed (PAGE_SIZE or PMD_SIZE); a range
 *          no larger than one stride is flushed as a single page
 */
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long size, unsigned long stride)
{
        struct cpumask *pmask = &mm->context.tlb_stale_mask;
        struct cpumask *cmask = mm_cpumask(mm);
        struct cpumask hmask;
        unsigned int cpuid;
        bool broadcast;

        /* No hart has this mm in its cpumask: nothing can be stale. */
        if (cpumask_empty(cmask))
                return;

        /* get_cpu() disables preemption, so cpuid stays valid until put_cpu(). */
        cpuid = get_cpu();
        /* check if the tlbflush needs to be sent to other CPUs */
        broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
        if (static_branch_unlikely(&use_asid_allocator)) {
                unsigned long asid = atomic_long_read(&mm->context.id);

                /*
                 * TLB will be immediately flushed on harts concurrently
                 * executing this MM context. TLB flush on other harts
                 * is deferred until this MM context migrates there.
                 */
                /* Mark every CPU not currently running this mm as stale. */
                cpumask_setall(pmask);
                cpumask_clear_cpu(cpuid, pmask);
                cpumask_andnot(pmask, pmask, cmask);

                if (broadcast) {
                        /* SBI calls take hartids, not logical CPU ids. */
                        riscv_cpuid_to_hartid_mask(cmask, &hmask);
                        sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
                                                   start, size, asid);
                } else if (size <= stride) {
                        /* Single-granule range: one page-level fence suffices. */
                        local_flush_tlb_page_asid(start, asid);
                } else {
                        local_flush_tlb_all_asid(asid);
                }
        } else {
                /* No ASIDs in use: fence by address range only. */
                if (broadcast) {
                        riscv_cpuid_to_hartid_mask(cmask, &hmask);
                        sbi_remote_sfence_vma(cpumask_bits(&hmask),
                                              start, size);
                } else if (size <= stride) {
                        local_flush_tlb_page(start);
                } else {
                        local_flush_tlb_all();
                }
        }

        put_cpu();
}
65
66 void flush_tlb_mm(struct mm_struct *mm)
67 {
68         __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
69 }
70
71 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
72 {
73         __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
74 }
75
76 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
77                      unsigned long end)
78 {
79         __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
80 }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* As flush_tlb_range(), but for PMD-sized (huge page) mappings. */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long size = end - start;

	__sbi_tlb_flush_range(vma->vm_mm, start, size, PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */