/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
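/*
 * Illustrative sketch, not part of this header: an architecture that
 * selects CONFIG_HAVE_RCU_TABLE_FREE routes page-table pages through
 * tlb_remove_table() instead of freeing them immediately, so a concurrent
 * lockless walker keeps seeing valid memory until a grace period has
 * elapsed. Roughly, its __pte_free_tlb() hook could look like this
 * (pgtable_page_dtor() is the usual destructor; details vary per arch):
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *					  pgtable_t pte, unsigned long addr)
 *	{
 *		pgtable_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 */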
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
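/*
 * Worked example (not from the original source): on a 64-bit kernel with
 * 4KiB pages, sizeof(struct mmu_gather_batch) is 16 bytes (an 8-byte next
 * pointer plus two 4-byte counters), so:
 *
 *	MAX_GATHER_BATCH       = (4096 - 16) / 8 = 510 pages per batch
 *	MAX_GATHER_BATCH_COUNT = 10000 / 510     = 19 batches
 *
 * i.e. at most ~9690 pages are gathered before the batch allocator stops
 * growing the chain and the caller is forced to flush, which keeps the
 * "10K pages" bound above honest.
 */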
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int			page_size;
};

#define HAVE_GENERIC_MMU_GATHER
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
			 unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
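/*
 * A rough usage sketch, not part of this header: the core mm drives the
 * gather API through the tlb_gather_mmu()/tlb_finish_mmu() wrappers
 * defined in mm/memory.c. A zap of a user range looks approximately like
 * this (vma iteration and pte locking elided):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for (; addr < end; addr += PAGE_SIZE) {
 *		... clear the pte, look up the underlying page ...
 *		tlb_remove_tlb_entry(&tlb, pte, addr);
 *		tlb_remove_page(&tlb, page);
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * The pages are only freed once the TLB entries covering them have been
 * invalidated, either at a batch flush or at tlb_finish_mmu() time.
 */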
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}
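/*
 * Worked example (illustrative, not from the original source): after a
 * reset, start/end form an empty range (TASK_SIZE/0 for a partial zap).
 * Removing a pte at 0x1000 and another at 0x5000 then accumulates:
 *
 *	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);  start=0x1000 end=0x2000
 *	__tlb_adjust_range(tlb, 0x5000, PAGE_SIZE);  start=0x1000 end=0x6000
 *
 * so the eventual tlb_flush() can cover [0x1000, 0x6000) in one go rather
 * than flushing page by page.
 */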
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about page size change, just update
	 * mmu_gather page size here so that debug checks
	 * don't throw false warnings.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
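/*
 * Illustrative note, not from the original source: an arch tlb_flush()
 * can use the smallest cleared level to pick a flush stride. If a zap
 * only touched PMD-level entries (e.g. 2MiB huge pages on x86-64), then:
 *
 *	tlb_get_unmap_shift(tlb) == PMD_SHIFT	(21)
 *	tlb_get_unmap_size(tlb)  == 2MiB
 *
 * so the flush loop can step 2MiB at a time instead of page by page.
 */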
/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm)				\
			tlb_flush_mmu_tlbonly(tlb);		\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
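/*
 * Illustrative note, not from the original source: the "optimise away"
 * above works because only actually-present ptes widen the flush range.
 * If a zap loop skips empty entries, e.g.:
 *
 *	if (pte_none(*pte))
 *		continue;	// range untouched, nothing recorded
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *
 * then unmapping a never-faulted range leaves tlb->end at 0 and
 * tlb_flush_mmu_tlbonly() returns early without issuing any flush.
 */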
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		tlb->cleared_puds = 1;					\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
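/*
 * Summary note (illustrative, not from the original source): freeing a
 * table at one level clears an entry at the level above it, which is why
 * the helpers above mark the *parent* level as cleared:
 *
 *	pte_free_tlb()  clears a pmd entry  ->  tlb->cleared_pmds = 1
 *	pmd_free_tlb()  clears a pud entry  ->  tlb->cleared_puds = 1
 *	pud_free_tlb()  clears a p4d entry  ->  tlb->cleared_p4ds = 1
 *
 * All of them also set tlb->freed_tables, so architectures that cache
 * partial page-table walks know a stronger flush is required.
 */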
#endif /* CONFIG_MMU */

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */