// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

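/*
 * Flush all non-global TLB entries tagged with @asid on the local hart;
 * with FLUSH_TLB_NO_ASID, flush the entire local TLB instead.
 */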
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                __asm__ __volatile__ ("sfence.vma x0, %0"
                                :
                                : "r" (asid)
                                : "memory");
        else
                local_flush_tlb_all();
}

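/*
 * Flush the local TLB entries for a single page at @addr in the address
 * space tagged @asid; with FLUSH_TLB_NO_ASID, flush that page for all
 * address spaces.
 */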
static inline void local_flush_tlb_page_asid(unsigned long addr,
                unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                __asm__ __volatile__ ("sfence.vma %0, %1"
                                :
                                : "r" (addr), "r" (asid)
                                : "memory");
        else
                local_flush_tlb_page(addr);
}

/*
 * Flush the entire TLB if the number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

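/*
 * Flush the range one page at a time, unless it covers more than
 * tlb_flush_all_threshold entries, in which case a full flush of @asid
 * is cheaper.
 */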
static void local_flush_tlb_range_threshold_asid(unsigned long start,
                                                 unsigned long size,
                                                 unsigned long stride,
                                                 unsigned long asid)
{
        unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
        int i;

        if (nr_ptes_in_range > tlb_flush_all_threshold) {
                local_flush_tlb_all_asid(asid);
                return;
        }

        for (i = 0; i < nr_ptes_in_range; ++i) {
                local_flush_tlb_page_asid(start, asid);
                start += stride;
        }
}

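/*
 * Pick the cheapest local flush for the range: a single page for
 * single-entry ranges, a full ASID flush for FLUSH_TLB_MAX_SIZE,
 * otherwise the threshold-bounded walk above.
 */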
static inline void local_flush_tlb_range_asid(unsigned long start,
                unsigned long size, unsigned long stride, unsigned long asid)
{
        if (size <= stride)
                local_flush_tlb_page_asid(start, asid);
        else if (size == FLUSH_TLB_MAX_SIZE)
                local_flush_tlb_all_asid(asid);
        else
                local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

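/* Flush a range of kernel mappings on the local hart only. */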
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

static void __ipi_flush_tlb_all(void *info)
{
        local_flush_tlb_all();
}

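/*
 * Flush the TLB on all online harts, either by IPI when remote fences are
 * IPI-based or through the SBI remote fence extension.
 */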
void flush_tlb_all(void)
{
        if (riscv_use_ipi_for_rfence())
                on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
        else
                sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

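/* Arguments handed to remote harts by the TLB flush IPI. */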
struct flush_tlb_range_data {
        unsigned long asid;
        unsigned long start;
        unsigned long size;
        unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
        struct flush_tlb_range_data *d = info;

        local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

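/*
 * Flush a range for @mm, or for kernel mappings when @mm is NULL. The flush
 * stays local when no other hart has @mm in its cpumask; otherwise it is
 * broadcast by IPI or through the SBI, tagged with @mm's ASID when the ASID
 * allocator is in use.
 */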
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                              unsigned long size, unsigned long stride)
{
        struct flush_tlb_range_data ftd;
        const struct cpumask *cmask;
        unsigned long asid = FLUSH_TLB_NO_ASID;
        bool broadcast;

        if (mm) {
                unsigned int cpuid;

                cmask = mm_cpumask(mm);
                if (cpumask_empty(cmask))
                        return;

                cpuid = get_cpu();
                /* check if the tlbflush needs to be sent to other CPUs */
                broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

                if (static_branch_unlikely(&use_asid_allocator))
                        asid = atomic_long_read(&mm->context.id) & asid_mask;
        } else {
                cmask = cpu_online_mask;
                broadcast = true;
        }

        if (broadcast) {
                if (riscv_use_ipi_for_rfence()) {
                        ftd.asid = asid;
                        ftd.start = start;
                        ftd.size = size;
                        ftd.stride = stride;
                        on_each_cpu_mask(cmask,
                                         __ipi_flush_tlb_range_asid,
                                         &ftd, 1);
                } else {
                        sbi_remote_sfence_vma_asid(cmask,
                                                   start, size, asid);
                }
        } else {
                local_flush_tlb_range_asid(start, size, stride, asid);
        }

        if (mm)
                put_cpu();
}

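/*
 * The flush_tlb_*() entry points below map the generic MM flush interface
 * onto __flush_tlb_range(); flush_tlb_mm() covers the whole address space
 * by passing FLUSH_TLB_MAX_SIZE.
 */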
void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int page_size)
{
        __flush_tlb_range(mm, start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
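/* Flush PMD-level (huge page) mappings using a PMD-sized stride. */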
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif