// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
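
/*
 * sfence.vma rs1, rs2: rs1 == x0 selects all virtual addresses, and a
 * non-zero rs2 restricts the fence to a single ASID. Entries for global
 * mappings are not covered by the ASID-specific forms.
 */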
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;
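
/*
 * With the default threshold and a 4 KiB stride, ranges of more than
 * 64 pages (i.e. larger than 256 KiB) take the full-flush path instead
 * of issuing one sfence.vma per page.
 */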
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
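
/*
 * IPI callbacks: when riscv_use_ipi_for_rfence() is true, remote fences
 * are broadcast as IPIs and each CPU runs the flush locally instead of
 * going through an SBI ecall.
 */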
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}
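
/*
 * Argument block handed to the IPI callback. on_each_cpu_mask() is
 * invoked with wait == 1, so the caller's stack copy stays live until
 * every CPU has run the flush.
 */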
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	struct cpumask *cmask = mm_cpumask(mm);
	unsigned long asid = FLUSH_TLB_NO_ASID;
	unsigned int cpuid;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

	if (static_branch_unlikely(&use_asid_allocator))
		asid = atomic_long_read(&mm->context.id) & asid_mask;

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	put_cpu();
}
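
/*
 * The exported helpers below map the generic mm flush API onto
 * __flush_tlb_range(); a size of FLUSH_TLB_MAX_SIZE requests a full
 * flush of the ASID.
 */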
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}
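
/*
 * With CONFIG_TRANSPARENT_HUGEPAGE, PMD-level mappings are flushed with
 * a PMD_SIZE stride so that a single sfence.vma covers each huge page.
 */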
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif