x86/mm: Clean up inconsistencies when flushing TLB ranges
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 05446c1..5176526 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -189,6 +189,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
        unsigned long addr;
        unsigned act_entries, tlb_entries = 0;
+       unsigned long nr_base_pages;
 
        preempt_disable();
        if (current->active_mm != mm)
@@ -210,18 +211,17 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                tlb_entries = tlb_lli_4k[ENTRIES];
        else
                tlb_entries = tlb_lld_4k[ENTRIES];
+
        /* Assume all of TLB entries was occupied by this task */
-       act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+       act_entries = tlb_entries >> tlb_flushall_shift;
+       act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+       nr_base_pages = (end - start) >> PAGE_SHIFT;
 
        /* tlb_flushall_shift is on balance point, details in commit log */
-       if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
+       if (nr_base_pages > act_entries || has_large_page(mm, start, end)) {
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
-               if (has_large_page(mm, start, end)) {
-                       local_flush_tlb();
-                       goto flush_all;
-               }
                /* flush range by one by one 'invlpg' */
                for (addr = start; addr < end;  addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
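
For readers following the heuristic this patch settles on, below is a minimal user-space C sketch of the decision flush_tlb_mm_range() now makes: do a full local TLB flush when the range spans more base pages than the (scaled) number of TLB entries this task can plausibly occupy, or when the range contains huge pages; otherwise flush the range page by page with invlpg. This is an illustration only, not part of the patch: the function name should_flush_all, the fixed PAGE_SHIFT, and the example values for tlb_entries, tlb_flushall_shift, and total_vm are assumptions chosen for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4K base pages, as on x86 */

/*
 * Sketch of the post-patch decision in flush_tlb_mm_range():
 * returns true when a full TLB flush is cheaper than per-page invlpg.
 */
static bool should_flush_all(unsigned long start, unsigned long end,
                             unsigned long total_vm,        /* pages mapped by the mm   */
                             unsigned long tlb_entries,     /* 4K TLB entries on the CPU */
                             int tlb_flushall_shift,        /* per-CPU balance point     */
                             bool range_has_large_page)     /* range covers huge pages   */
{
        unsigned long act_entries, nr_base_pages;

        /* Scale the TLB size by the balance-point shift, capped by the mm size. */
        act_entries = tlb_entries >> tlb_flushall_shift;
        if (total_vm < act_entries)
                act_entries = total_vm;

        nr_base_pages = (end - start) >> PAGE_SHIFT;

        /* Full flush if the range is too large or contains huge pages. */
        return nr_base_pages > act_entries || range_has_large_page;
}

int main(void)
{
        /* Example: 512 4K TLB entries and a shift of 6 give a threshold of 8 pages. */
        unsigned long start = 0x1000, end = 0x5000;     /* a 4-page range */

        printf("%s\n",
               should_flush_all(start, end, 1UL << 20, 512, 6, false)
                       ? "flush all"
                       : "flush range with invlpg");
        return 0;
}

With these illustrative numbers the 4-page range stays below the 8-entry threshold and contains no huge pages, so the sketch picks the per-page invlpg path, mirroring the else branch of the patched code above.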