x86/mm: Remove flush_tlb() and flush_tlb_current_task()
author	Andy Lutomirski <luto@kernel.org>
	Sat, 22 Apr 2017 07:01:20 +0000 (00:01 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Tue, 2 Jan 2018 19:35:08 +0000 (20:35 +0100)
commit 29961b59a51f8c6838a26a45e871a7ed6771809b upstream.

I was trying to figure out how flush_tlb_current_task() could
possibly work correctly if current->mm != current->active_mm, but I
realized I could spare myself the effort: it has no callers except
the unused flush_tlb() macro.
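
For anyone carrying an out-of-tree caller of the removed flush_tlb()
macro, a minimal replacement sketch is to flush the caller's own mm
through the surviving flush_tlb_mm() interface. The helper name
example_flush_current_mm below is hypothetical, and the sketch assumes
the flush_tlb_mm() wrapper as defined in this tree, which expands to
flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL):

	#include <linux/sched.h>
	#include <asm/tlbflush.h>

	/* Hypothetical stand-in for the removed flush_tlb() macro. */
	static void example_flush_current_mm(void)
	{
		/*
		 * Kernel threads have no mm of their own; the removed
		 * flush_tlb_current_task() dereferenced current->mm
		 * unconditionally via mm_cpumask(), which is part of
		 * why its behavior was dubious in the
		 * current->mm != current->active_mm case.
		 */
		if (current->mm)
			flush_tlb_mm(current->mm);
	}

The non-NULL check matters because kernel threads run with
current->mm == NULL while borrowing an active_mm, which is exactly the
current->mm != current->active_mm situation described above.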

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/e52d64c11690f85e9f1d69d7b48cc2269cd2e94b.1492844372.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 arch/x86/include/asm/tlbflush.h |  9 ---------
 arch/x86/mm/tlb.c               | 17 -----------------
 2 files changed, 26 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index fc5abff..8e7ae9e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -205,7 +205,6 @@ static inline void __flush_tlb_one(unsigned long addr)
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -237,11 +236,6 @@ static inline void flush_tlb_all(void)
        __flush_tlb_all();
 }
 
-static inline void flush_tlb(void)
-{
-       __flush_tlb_up();
-}
-
 static inline void local_flush_tlb(void)
 {
        __flush_tlb_up();
@@ -303,14 +297,11 @@ static inline void flush_tlb_kernel_range(unsigned long start,
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
 
 extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-#define flush_tlb()    flush_tlb_current_task()
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 75fb011..30ed9a5 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -287,23 +287,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 }
 
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-
-       preempt_disable();
-
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-
-       /* This is an implicit full barrier that synchronizes with switch_mm. */
-       local_flush_tlb();
-
-       trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
-       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-       preempt_enable();
-}
-
 /*
  * See Documentation/x86/tlb.txt for details.  We choose 33
  * because it is large enough to cover the vast majority (at