riscv: Improve flush_tlb_kernel_range()
arch/riscv/mm/tlbflush.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

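/*
 * Flush all TLB entries tagged with @asid on the local hart; with
 * FLUSH_TLB_NO_ASID, fall back to a full local flush.
 */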
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

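/*
 * Flush the local TLB entry for the page at @addr, restricted to @asid
 * when one is given.
 */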
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush the entire TLB if the number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

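/*
 * Flush a range one page at a time, unless the number of entries exceeds
 * tlb_flush_all_threshold, in which case one full per-ASID flush is
 * presumably cheaper than a long run of sfence.vma instructions.
 */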
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

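/*
 * Pick the cheapest local flush for the range: a single-page flush when
 * the range fits in one stride, a full flush when the caller asked for
 * FLUSH_TLB_MAX_SIZE, otherwise the threshold-based loop above.
 */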
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

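/*
 * Flush the TLB on every CPU: via IPI when remote fences are IPI-based,
 * otherwise with a single SBI call covering all harts (NULL cpumask) and
 * the whole address space.
 */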
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

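/* Arguments marshalled for the IPI handler below. */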
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

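/*
 * Common implementation behind all flush_tlb_*() variants. A NULL @mm
 * denotes a kernel flush: no ASID is used and all online CPUs are
 * targeted. Otherwise the flush is limited to the CPUs in mm_cpumask(@mm)
 * and, when the ASID allocator is active, to the mm's ASID.
 */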
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
	bool broadcast;

	if (mm) {
		unsigned int cpuid;

		cmask = mm_cpumask(mm);
		if (cpumask_empty(cmask))
			return;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

		if (static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;
	} else {
		cmask = cpu_online_mask;
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (mm)
		put_cpu();
}

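/* The exported flush_tlb_*() helpers below all funnel into __flush_tlb_range(). */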
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

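/*
 * Kernel mappings have no ASID and may be live on any CPU, so pass a NULL
 * mm: the range is then flushed on all online CPUs with FLUSH_TLB_NO_ASID,
 * still honouring the page-count threshold above.
 */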
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif