#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
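
/*
 * Each helper below dispatches at runtime between the two MMU back ends:
 * radix_enabled() selects the radix__* flush when the kernel runs with
 * radix page tables, and falls back to the hash__* flush otherwise.
 */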

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}
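
/*
 * Radix has no separate "nohash" primitive, so the generic radix page
 * flush is reused here; hash provides its own nohash variant.
 */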

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}
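
/*
 * On SMP the flush must be made visible to all CPUs, so the radix__/hash__
 * implementations are used directly; UP builds simply alias flush_tlb_mm()
 * and flush_tlb_page() to the local_* variants via the #else branch below.
 */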

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. We already
	 * have marked the upper/higher level page table entry none by now.
	 * So it is safe to flush PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */