asm-generic/tlb: Track freeing of page-table directories in struct mmu_gather
author    Peter Zijlstra <peterz@infradead.org>
          Thu, 23 Aug 2018 19:27:25 +0000 (20:27 +0100)
committer Will Deacon <will.deacon@arm.com>
          Tue, 4 Sep 2018 10:08:26 +0000 (11:08 +0100)
Some architectures require different TLB invalidation instructions
depending on whether only the last level of the page table is being
changed, or whether intermediate (directory) entries higher up the
tree are also changing.

Add a new bit to the flags bitfield in struct mmu_gather so that
architecture code can act accordingly when intermediate (directory)
levels are being invalidated.

Acked-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
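
As a minimal sketch of the decision this flag enables on the
architecture side, consider the following compilable user-space model.
The struct mirrors the patched mmu_gather; flush_leaf_only() and
flush_all_levels() are hypothetical stand-ins for an architecture's
last-level-only and all-level invalidation instructions, not real
kernel APIs.

#include <stdio.h>

struct mmu_gather {
	unsigned long start;
	unsigned long end;
	unsigned int fullmm : 1;
	unsigned int need_flush_all : 1;
	unsigned int freed_tables : 1;	/* set when directories were freed */
};

/* Hypothetical stand-in: invalidate last-level (leaf) entries only. */
static void flush_leaf_only(unsigned long start, unsigned long end)
{
	printf("last-level invalidate [%#lx, %#lx)\n", start, end);
}

/* Hypothetical stand-in: invalidate all levels, incl. walk caches. */
static void flush_all_levels(unsigned long start, unsigned long end)
{
	printf("all-level invalidate  [%#lx, %#lx)\n", start, end);
}

static void tlb_flush(struct mmu_gather *tlb)
{
	/* Freed directories may still be cached by page-walk caches. */
	if (tlb->freed_tables)
		flush_all_levels(tlb->start, tlb->end);
	else
		flush_leaf_only(tlb->start, tlb->end);
}

int main(void)
{
	struct mmu_gather tlb = { .start = 0x1000, .end = 0x5000 };

	tlb_flush(&tlb);	/* only leaf PTEs changed: cheap flush */

	tlb.freed_tables = 1;	/* e.g. after a pte_free_tlb() call */
	tlb_flush(&tlb);	/* directories freed: flush all levels */
	return 0;
}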
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index a25e236..2b444ad 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -99,12 +99,22 @@ struct mmu_gather {
 #endif
        unsigned long           start;
        unsigned long           end;
-       /* we are in the middle of an operation to clear
-        * a full mm and can make some optimizations */
-       unsigned int            fullmm : 1,
-       /* we have performed an operation which
-        * requires a complete flush of the tlb */
-                               need_flush_all : 1;
+       /*
+        * we are in the middle of an operation to clear
+        * a full mm and can make some optimizations
+        */
+       unsigned int            fullmm : 1;
+
+       /*
+        * we have performed an operation which
+        * requires a complete flush of the tlb
+        */
+       unsigned int            need_flush_all : 1;
+
+       /*
+        * we have removed page directories
+        */
+       unsigned int            freed_tables : 1;
 
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
@@ -139,6 +149,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
                tlb->start = TASK_SIZE;
                tlb->end = 0;
        }
+       tlb->freed_tables = 0;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -280,6 +291,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->freed_tables = 1;                          \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
 #endif
@@ -287,7 +299,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
-               __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
+               __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->freed_tables = 1;                          \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
 #endif
@@ -297,6 +310,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->freed_tables = 1;                          \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
@@ -306,7 +320,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 #ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
-               __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
+               __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->freed_tables = 1;                          \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
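
A note on the structure of the change: because the bit is set inside
the generic pte/pmd/pud/p4d_free_tlb() wrappers rather than in each
caller, every architecture using the asm-generic mmu_gather gets the
tracking for free, and __tlb_reset_range() clears freed_tables along
with the range so each flush batch starts from a clean state.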