/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual page frame number */
	pte_t *pte;
	struct page *page;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

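/*
 * Stubs that pair with the test follow_huge_addr() above: while it is
 * compiled in, no PMD/PUD is ever reported as huge.
 */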
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

int pmd_huge_support(void)
{
	return 0;
}
#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

int pmd_huge_support(void)
{
	return 1;
}
#endif

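/*
 * The helpers below pick an unmapped address range for a hugetlb
 * mapping.  In both search directions align_mask is set to
 * PAGE_MASK & ~huge_page_mask(h), so vm_unmapped_area() only returns
 * addresses aligned to this hstate's huge page size.
 */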
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

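/* Top-down variant of the search above, used with the non-legacy mmap layout. */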
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

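/*
 * Common entry point: validate the requested length and any
 * MAP_FIXED or hint address, then fall through to the bottom-up or
 * top-down search matching the process's mmap layout.
 */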
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

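/*
 * "hugepagesz=" kernel command-line parameter: PMD_SIZE (2 MB) pages
 * are always accepted here; PUD_SIZE (1 GB) pages are accepted only
 * when the CPU advertises GB-page support.  Example usage (not from
 * this file): hugepagesz=1G hugepages=4
 */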
#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif	/* CONFIG_X86_64 */