#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
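/*
 * Hypothetical dispatch sketch (simplified; the real caller is the
 * fault path in mm/memory.c): an anonymous fault on an empty pmd may
 * take the huge path, and a write fault on a present huge pmd goes
 * through the huge COW handler:
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *		return do_huge_pmd_anonymous_page(mm, vma, address,
 *						  pmd, flags);
 *	if (pmd_trans_huge(*pmd) && (flags & FAULT_FLAG_WRITE))
 *		return do_huge_pmd_wp_page(mm, vma, address, pmd, *pmd);
 */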
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot);

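/*
 * Sketch of the assumed mprotect-side caller (modeled on
 * mm/mprotect.c): a huge pmd covering the whole range is updated in
 * place, anything smaller is split first and handled pte by pte:
 *
 *	if (pmd_trans_huge(*pmd)) {
 *		if (next - addr != HPAGE_PMD_SIZE)
 *			split_huge_page_pmd(vma->vm_mm, pmd);
 *		else if (change_huge_pmd(vma, pmd, addr, newprot))
 *			continue;
 *	}
 */
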
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
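/*
 * The flags above are bit indices into transparent_hugepage_flags: the
 * first two back the "always"/"madvise" settings of
 * /sys/kernel/mm/transparent_hugepage/enabled, the DEFRAG pair backs
 * the equivalent "defrag" settings, and the KHUGEPAGED flag is
 * khugepaged's own defrag knob.
 */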

enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
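/*
 * Worked example: on x86-64 with 4KiB base pages and 2MiB huge pages,
 * HPAGE_PMD_SHIFT is 21 and PAGE_SHIFT is 12, so HPAGE_PMD_ORDER is 9
 * and HPAGE_PMD_NR is 1 << 9 = 512 base pages per huge page.
 */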

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
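/*
 * Read as a predicate: huge pages are considered for a vma when THP is
 * enabled system-wide, or enabled for madvise() users and the vma has
 * VM_HUGEPAGE set; a vma carrying VM_NOHUGEPAGE, or one that looks
 * like a temporary mremap() stack, never qualifies.
 */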
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__mm, ____pmd);		\
	} while (0)
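/*
 * split_huge_page_pmd() is meant to be callable unconditionally by
 * code about to walk a pte table; only a pmd that is actually huge
 * pays for the split. Assumed calling pattern:
 *
 *	split_huge_page_pmd(mm, pmd);
 *	pte = pte_offset_map(pmd, address);
 */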
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		anon_vma_lock(__anon_vma);				\
		anon_vma_unlock(__anon_vma);				\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
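/*
 * The lock/unlock pair above is the entire synchronization: whoever is
 * splitting the huge page holds the anon_vma lock for the duration of
 * the split, so taking and releasing it guarantees any split in flight
 * has completed, which the BUG_ON then asserts.
 */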
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
				 struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return 0;
}
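/*
 * Assumed caller pattern (the return convention is not spelled out
 * here): a return of 1 means the pmd is huge and stable and
 * page_table_lock is now held, so the caller must drop it when done:
 *
 *	if (pmd_trans_huge_lock(pmd, vma) == 1) {
 *		... operate on the huge pmd ...
 *		spin_unlock(&vma->vm_mm->page_table_lock);
 *	}
 */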
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__mm, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */