mm: migrate: make core migration code aware of hugepage
mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/tlb.h>
28
29 #include <linux/io.h>
30 #include <linux/hugetlb.h>
31 #include <linux/hugetlb_cgroup.h>
32 #include <linux/node.h>
33 #include "internal.h"
34
35 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
36 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
37 unsigned long hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42
43 __initdata LIST_HEAD(huge_boot_pages);
44
45 /* for command line parsing */
46 static struct hstate * __initdata parsed_hstate;
47 static unsigned long __initdata default_hstate_max_huge_pages;
48 static unsigned long __initdata default_hstate_size;
49
50 /*
51  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
52  * free_huge_pages, and surplus_huge_pages.
53  */
54 DEFINE_SPINLOCK(hugetlb_lock);
55
56 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
57 {
58         bool free = (spool->count == 0) && (spool->used_hpages == 0);
59
60         spin_unlock(&spool->lock);
61
62         /* If no pages are used, and no other handles to the subpool
63          * remain, free the subpool. */
64         if (free)
65                 kfree(spool);
66 }
67
68 struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
69 {
70         struct hugepage_subpool *spool;
71
72         spool = kmalloc(sizeof(*spool), GFP_KERNEL);
73         if (!spool)
74                 return NULL;
75
76         spin_lock_init(&spool->lock);
77         spool->count = 1;
78         spool->max_hpages = nr_blocks;
79         spool->used_hpages = 0;
80
81         return spool;
82 }
83
84 void hugepage_put_subpool(struct hugepage_subpool *spool)
85 {
86         spin_lock(&spool->lock);
87         BUG_ON(!spool->count);
88         spool->count--;
89         unlock_or_release_subpool(spool);
90 }
91
92 static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
93                                       long delta)
94 {
95         int ret = 0;
96
97         if (!spool)
98                 return 0;
99
100         spin_lock(&spool->lock);
101         if ((spool->used_hpages + delta) <= spool->max_hpages) {
102                 spool->used_hpages += delta;
103         } else {
104                 ret = -ENOMEM;
105         }
106         spin_unlock(&spool->lock);
107
108         return ret;
109 }
110
111 static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
112                                        long delta)
113 {
114         if (!spool)
115                 return;
116
117         spin_lock(&spool->lock);
118         spool->used_hpages -= delta;
119         /* If hugetlbfs_put_super couldn't free spool due to
120          * an outstanding quota reference, free it now. */
121         unlock_or_release_subpool(spool);
122 }
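/*
 * Usage sketch (illustrative, not part of the original file): callers
 * such as alloc_huge_page() charge one huge page against the subpool
 * up front and undo the charge on every failure path:
 *
 *	if (hugepage_subpool_get_pages(spool, 1))
 *		return ERR_PTR(-ENOSPC);
 *	...
 *	hugepage_subpool_put_pages(spool, 1);	(on error/free paths)
 */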
123
124 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
125 {
126         return HUGETLBFS_SB(inode->i_sb)->spool;
127 }
128
129 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
130 {
131         return subpool_inode(file_inode(vma->vm_file));
132 }
133
134 /*
135  * Region tracking -- allows tracking of reservations and instantiated pages
136  *                    across the pages in a mapping.
137  *
138  * The region data structures are protected by a combination of the mmap_sem
139  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
140  * must either hold the mmap_sem for write, or the mmap_sem for read and
141  * the hugetlb_instantiation_mutex:
142  *
143  *      down_write(&mm->mmap_sem);
144  * or
145  *      down_read(&mm->mmap_sem);
146  *      mutex_lock(&hugetlb_instantiation_mutex);
147  */
148 struct file_region {
149         struct list_head link;
150         long from;
151         long to;
152 };
153
154 static long region_add(struct list_head *head, long f, long t)
155 {
156         struct file_region *rg, *nrg, *trg;
157
158         /* Locate the region we are either in or before. */
159         list_for_each_entry(rg, head, link)
160                 if (f <= rg->to)
161                         break;
162
163         /* Round our left edge to the current segment if it encloses us. */
164         if (f > rg->from)
165                 f = rg->from;
166
167         /* Check for and consume any regions we now overlap with. */
168         nrg = rg;
169         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
170                 if (&rg->link == head)
171                         break;
172                 if (rg->from > t)
173                         break;
174
175                 /* If this area reaches higher, then extend our area to
176                  * include it completely.  If this is not the first area
177                  * which we intend to reuse, free it. */
178                 if (rg->to > t)
179                         t = rg->to;
180                 if (rg != nrg) {
181                         list_del(&rg->link);
182                         kfree(rg);
183                 }
184         }
185         nrg->from = f;
186         nrg->to = t;
187         return 0;
188 }
189
190 static long region_chg(struct list_head *head, long f, long t)
191 {
192         struct file_region *rg, *nrg;
193         long chg = 0;
194
195         /* Locate the region we are before or in. */
196         list_for_each_entry(rg, head, link)
197                 if (f <= rg->to)
198                         break;
199
200         /* If we are below the current region then a new region is required.
201          * Subtle: allocate a new region at the position but make it zero
202          * size such that we can guarantee to record the reservation. */
203         if (&rg->link == head || t < rg->from) {
204                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
205                 if (!nrg)
206                         return -ENOMEM;
207                 nrg->from = f;
208                 nrg->to   = f;
209                 INIT_LIST_HEAD(&nrg->link);
210                 list_add(&nrg->link, rg->link.prev);
211
212                 return t - f;
213         }
214
215         /* Round our left edge to the current segment if it encloses us. */
216         if (f > rg->from)
217                 f = rg->from;
218         chg = t - f;
219
220         /* Check for and consume any regions we now overlap with. */
221         list_for_each_entry(rg, rg->link.prev, link) {
222                 if (&rg->link == head)
223                         break;
224                 if (rg->from > t)
225                         return chg;
226
227                 /* We overlap with this area, if it extends further than
228                  * us then we must extend ourselves.  Account for its
229                  * existing reservation. */
230                 if (rg->to > t) {
231                         chg += rg->to - t;
232                         t = rg->to;
233                 }
234                 chg -= rg->to - rg->from;
235         }
236         return chg;
237 }
238
239 static long region_truncate(struct list_head *head, long end)
240 {
241         struct file_region *rg, *trg;
242         long chg = 0;
243
244         /* Locate the region we are either in or before. */
245         list_for_each_entry(rg, head, link)
246                 if (end <= rg->to)
247                         break;
248         if (&rg->link == head)
249                 return 0;
250
251         /* If we are in the middle of a region then adjust it. */
252         if (end > rg->from) {
253                 chg = rg->to - end;
254                 rg->to = end;
255                 rg = list_entry(rg->link.next, typeof(*rg), link);
256         }
257
258         /* Drop any remaining regions. */
259         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
260                 if (&rg->link == head)
261                         break;
262                 chg += rg->to - rg->from;
263                 list_del(&rg->link);
264                 kfree(rg);
265         }
266         return chg;
267 }
268
269 static long region_count(struct list_head *head, long f, long t)
270 {
271         struct file_region *rg;
272         long chg = 0;
273
274         /* Locate each segment we overlap with, and count that overlap. */
275         list_for_each_entry(rg, head, link) {
276                 long seg_from;
277                 long seg_to;
278
279                 if (rg->to <= f)
280                         continue;
281                 if (rg->from >= t)
282                         break;
283
284                 seg_from = max(rg->from, f);
285                 seg_to = min(rg->to, t);
286
287                 chg += seg_to - seg_from;
288         }
289
290         return chg;
291 }
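/*
 * Worked example (illustrative, not part of the original file): with
 * regions [0,2) and [5,7) on the list, region_chg(head, 1, 6) rounds
 * its left edge down to 0, overlaps both existing regions, and returns
 * 3 -- the pages at offsets 2, 3 and 4 that still need a reservation.
 * A subsequent region_add(head, 1, 6) merges everything into the
 * single region [0,7).
 */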
292
293 /*
294  * Convert the address within this vma to the page offset within
295  * the mapping, in pagecache page units; huge pages here.
296  */
297 static pgoff_t vma_hugecache_offset(struct hstate *h,
298                         struct vm_area_struct *vma, unsigned long address)
299 {
300         return ((address - vma->vm_start) >> huge_page_shift(h)) +
301                         (vma->vm_pgoff >> huge_page_order(h));
302 }
303
304 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
305                                      unsigned long address)
306 {
307         return vma_hugecache_offset(hstate_vma(vma), vma, address);
308 }
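/*
 * Worked example (illustrative, not part of the original file): for a
 * 2MB hstate (huge_page_shift == 21, huge_page_order == 9 with 4KB base
 * pages) and a VMA with vm_pgoff == 1024 (file offset 4MB, i.e. two
 * huge pages), the address vma->vm_start + 6MB yields index
 * (6MB >> 21) + (1024 >> 9) = 3 + 2 = 5.
 */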
309
310 /*
311  * Return the size of the pages allocated when backing a VMA. In the majority
312  * of cases this will be the same size as used by the page table entries.
313  */
314 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
315 {
316         struct hstate *hstate;
317
318         if (!is_vm_hugetlb_page(vma))
319                 return PAGE_SIZE;
320
321         hstate = hstate_vma(vma);
322
323         return 1UL << huge_page_shift(hstate);
324 }
325 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
326
327 /*
328  * Return the page size being used by the MMU to back a VMA. In the majority
329  * of cases, the page size used by the kernel matches the MMU size. On
330  * architectures where it differs, an architecture-specific version of this
331  * function is required.
332  */
333 #ifndef vma_mmu_pagesize
334 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
335 {
336         return vma_kernel_pagesize(vma);
337 }
338 #endif
339
340 /*
341  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
342  * bits of the reservation map pointer, which are always clear due to
343  * alignment.
344  */
345 #define HPAGE_RESV_OWNER    (1UL << 0)
346 #define HPAGE_RESV_UNMAPPED (1UL << 1)
347 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
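/*
 * Illustrative sketch (not part of the original file): the flags above
 * are packed into the low bits of the resv_map pointer stored in
 * vma->vm_private_data, which is safe because kmalloc()ed pointers are
 * word aligned.  Packing and unpacking look roughly like:
 *
 *	unsigned long v = (unsigned long)map | HPAGE_RESV_OWNER;
 *	struct resv_map *m = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *	bool owner = v & HPAGE_RESV_OWNER;
 */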
348
349 /*
350  * These helpers are used to track how many pages are reserved for
351  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
352  * is guaranteed to have its future faults succeed.
353  *
354  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
355  * the reserve counters are updated with the hugetlb_lock held. It is safe
356  * to reset the VMA at fork() time as it is not in use yet and there is no
357  * chance of the global counters getting corrupted as a result of the values.
358  *
359  * The private mapping reservation is represented in a subtly different
360  * manner to a shared mapping.  A shared mapping has a region map associated
361  * with the underlying file; this region map represents the backing file
362  * pages which have ever had a reservation assigned, and it persists even
363  * after the page is instantiated.  A private mapping has a region map
364  * associated with the original mmap which is attached to all VMAs that
365  * reference it; this region map represents those offsets which have consumed
366  * a reservation, i.e. where pages have been instantiated.
367  */
368 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
369 {
370         return (unsigned long)vma->vm_private_data;
371 }
372
373 static void set_vma_private_data(struct vm_area_struct *vma,
374                                                         unsigned long value)
375 {
376         vma->vm_private_data = (void *)value;
377 }
378
379 struct resv_map {
380         struct kref refs;
381         struct list_head regions;
382 };
383
384 static struct resv_map *resv_map_alloc(void)
385 {
386         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
387         if (!resv_map)
388                 return NULL;
389
390         kref_init(&resv_map->refs);
391         INIT_LIST_HEAD(&resv_map->regions);
392
393         return resv_map;
394 }
395
396 static void resv_map_release(struct kref *ref)
397 {
398         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
399
400         /* Clear out any active regions before we release the map. */
401         region_truncate(&resv_map->regions, 0);
402         kfree(resv_map);
403 }
404
405 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
406 {
407         VM_BUG_ON(!is_vm_hugetlb_page(vma));
408         if (!(vma->vm_flags & VM_MAYSHARE))
409                 return (struct resv_map *)(get_vma_private_data(vma) &
410                                                         ~HPAGE_RESV_MASK);
411         return NULL;
412 }
413
414 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
415 {
416         VM_BUG_ON(!is_vm_hugetlb_page(vma));
417         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
418
419         set_vma_private_data(vma, (get_vma_private_data(vma) &
420                                 HPAGE_RESV_MASK) | (unsigned long)map);
421 }
422
423 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
424 {
425         VM_BUG_ON(!is_vm_hugetlb_page(vma));
426         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
427
428         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
429 }
430
431 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
432 {
433         VM_BUG_ON(!is_vm_hugetlb_page(vma));
434
435         return (get_vma_private_data(vma) & flag) != 0;
436 }
437
438 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
439 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
440 {
441         VM_BUG_ON(!is_vm_hugetlb_page(vma));
442         if (!(vma->vm_flags & VM_MAYSHARE))
443                 vma->vm_private_data = (void *)0;
444 }
445
446 /* Returns true if the VMA has associated reserve pages */
447 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
448 {
449         if (vma->vm_flags & VM_NORESERVE) {
450                 /*
451                  * This address is already reserved by another process (chg == 0),
452                  * so we should decrement the reserved count. Without decrementing,
453                  * the reserve count would remain after releasing the inode, because
454                  * this allocated page will go into the page cache and be regarded
455                  * as coming from the reserved pool in the release step.  Currently,
456                  * we don't have any other way to deal with this situation
457                  * properly, so add a work-around here.
458                  */
459                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
460                         return 1;
461                 else
462                         return 0;
463         }
464
465         /* Shared mappings always use reserves */
466         if (vma->vm_flags & VM_MAYSHARE)
467                 return 1;
468
469         /*
470          * Only the process that called mmap() has reserves for
471          * private mappings.
472          */
473         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
474                 return 1;
475
476         return 0;
477 }
478
479 static void copy_gigantic_page(struct page *dst, struct page *src)
480 {
481         int i;
482         struct hstate *h = page_hstate(src);
483         struct page *dst_base = dst;
484         struct page *src_base = src;
485
486         for (i = 0; i < pages_per_huge_page(h); ) {
487                 cond_resched();
488                 copy_highpage(dst, src);
489
490                 i++;
491                 dst = mem_map_next(dst, dst_base, i);
492                 src = mem_map_next(src, src_base, i);
493         }
494 }
495
496 void copy_huge_page(struct page *dst, struct page *src)
497 {
498         int i;
499         struct hstate *h = page_hstate(src);
500
501         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
502                 copy_gigantic_page(dst, src);
503                 return;
504         }
505
506         might_sleep();
507         for (i = 0; i < pages_per_huge_page(h); i++) {
508                 cond_resched();
509                 copy_highpage(dst + i, src + i);
510         }
511 }
512
513 static void enqueue_huge_page(struct hstate *h, struct page *page)
514 {
515         int nid = page_to_nid(page);
516         list_move(&page->lru, &h->hugepage_freelists[nid]);
517         h->free_huge_pages++;
518         h->free_huge_pages_node[nid]++;
519 }
520
521 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
522 {
523         struct page *page;
524
525         if (list_empty(&h->hugepage_freelists[nid]))
526                 return NULL;
527         page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
528         list_move(&page->lru, &h->hugepage_activelist);
529         set_page_refcounted(page);
530         h->free_huge_pages--;
531         h->free_huge_pages_node[nid]--;
532         return page;
533 }
534
535 static struct page *dequeue_huge_page_vma(struct hstate *h,
536                                 struct vm_area_struct *vma,
537                                 unsigned long address, int avoid_reserve,
538                                 long chg)
539 {
540         struct page *page = NULL;
541         struct mempolicy *mpol;
542         nodemask_t *nodemask;
543         struct zonelist *zonelist;
544         struct zone *zone;
545         struct zoneref *z;
546         unsigned int cpuset_mems_cookie;
547
548         /*
549          * A child process with MAP_PRIVATE mappings created by its parent
550          * has no page reserves. This check ensures that reservations are
551          * not "stolen". The child may still get SIGKILLed.
552          */
553         if (!vma_has_reserves(vma, chg) &&
554                         h->free_huge_pages - h->resv_huge_pages == 0)
555                 goto err;
556
557         /* If reserves cannot be used, ensure enough pages are in the pool */
558         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
559                 goto err;
560
561 retry_cpuset:
562         cpuset_mems_cookie = get_mems_allowed();
563         zonelist = huge_zonelist(vma, address,
564                                         htlb_alloc_mask, &mpol, &nodemask);
565
566         for_each_zone_zonelist_nodemask(zone, z, zonelist,
567                                                 MAX_NR_ZONES - 1, nodemask) {
568                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
569                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
570                         if (page) {
571                                 if (avoid_reserve)
572                                         break;
573                                 if (!vma_has_reserves(vma, chg))
574                                         break;
575
576                                 SetPagePrivate(page);
577                                 h->resv_huge_pages--;
578                                 break;
579                         }
580                 }
581         }
582
583         mpol_cond_put(mpol);
584         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
585                 goto retry_cpuset;
586         return page;
587
588 err:
589         return NULL;
590 }
591
592 static void update_and_free_page(struct hstate *h, struct page *page)
593 {
594         int i;
595
596         VM_BUG_ON(h->order >= MAX_ORDER);
597
598         h->nr_huge_pages--;
599         h->nr_huge_pages_node[page_to_nid(page)]--;
600         for (i = 0; i < pages_per_huge_page(h); i++) {
601                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
602                                 1 << PG_referenced | 1 << PG_dirty |
603                                 1 << PG_active | 1 << PG_reserved |
604                                 1 << PG_private | 1 << PG_writeback);
605         }
606         VM_BUG_ON(hugetlb_cgroup_from_page(page));
607         set_compound_page_dtor(page, NULL);
608         set_page_refcounted(page);
609         arch_release_hugepage(page);
610         __free_pages(page, huge_page_order(h));
611 }
612
613 struct hstate *size_to_hstate(unsigned long size)
614 {
615         struct hstate *h;
616
617         for_each_hstate(h) {
618                 if (huge_page_size(h) == size)
619                         return h;
620         }
621         return NULL;
622 }
623
624 static void free_huge_page(struct page *page)
625 {
626         /*
627          * Can't pass hstate in here because it is called from the
628          * compound page destructor.
629          */
630         struct hstate *h = page_hstate(page);
631         int nid = page_to_nid(page);
632         struct hugepage_subpool *spool =
633                 (struct hugepage_subpool *)page_private(page);
634         bool restore_reserve;
635
636         set_page_private(page, 0);
637         page->mapping = NULL;
638         BUG_ON(page_count(page));
639         BUG_ON(page_mapcount(page));
640         restore_reserve = PagePrivate(page);
641
642         spin_lock(&hugetlb_lock);
643         hugetlb_cgroup_uncharge_page(hstate_index(h),
644                                      pages_per_huge_page(h), page);
645         if (restore_reserve)
646                 h->resv_huge_pages++;
647
648         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
649                 /* remove the page from active list */
650                 list_del(&page->lru);
651                 update_and_free_page(h, page);
652                 h->surplus_huge_pages--;
653                 h->surplus_huge_pages_node[nid]--;
654         } else {
655                 arch_clear_hugepage_flags(page);
656                 enqueue_huge_page(h, page);
657         }
658         spin_unlock(&hugetlb_lock);
659         hugepage_subpool_put_pages(spool, 1);
660 }
661
662 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
663 {
664         INIT_LIST_HEAD(&page->lru);
665         set_compound_page_dtor(page, free_huge_page);
666         spin_lock(&hugetlb_lock);
667         set_hugetlb_cgroup(page, NULL);
668         h->nr_huge_pages++;
669         h->nr_huge_pages_node[nid]++;
670         spin_unlock(&hugetlb_lock);
671         put_page(page); /* free it into the hugepage allocator */
672 }
673
674 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
675 {
676         int i;
677         int nr_pages = 1 << order;
678         struct page *p = page + 1;
679
680         /* we rely on prep_new_huge_page to set the destructor */
681         set_compound_order(page, order);
682         __SetPageHead(page);
683         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
684                 __SetPageTail(p);
685                 set_page_count(p, 0);
686                 p->first_page = page;
687         }
688 }
689
690 /*
691  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
692  * transparent huge pages.  See the PageTransHuge() documentation for more
693  * details.
694  */
695 int PageHuge(struct page *page)
696 {
697         compound_page_dtor *dtor;
698
699         if (!PageCompound(page))
700                 return 0;
701
702         page = compound_head(page);
703         dtor = get_compound_page_dtor(page);
704
705         return dtor == free_huge_page;
706 }
707 EXPORT_SYMBOL_GPL(PageHuge);
708
709 pgoff_t __basepage_index(struct page *page)
710 {
711         struct page *page_head = compound_head(page);
712         pgoff_t index = page_index(page_head);
713         unsigned long compound_idx;
714
715         if (!PageHuge(page_head))
716                 return page_index(page);
717
718         if (compound_order(page_head) >= MAX_ORDER)
719                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
720         else
721                 compound_idx = page - page_head;
722
723         return (index << compound_order(page_head)) + compound_idx;
724 }
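/*
 * Worked example (illustrative, not part of the original file): a tail
 * page at offset 3 within a 2MB huge page (compound_order == 9) whose
 * head has page cache index 2 gets base-page index
 * (2 << 9) + 3 = 1027.
 */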
725
726 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
727 {
728         struct page *page;
729
730         if (h->order >= MAX_ORDER)
731                 return NULL;
732
733         page = alloc_pages_exact_node(nid,
734                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
735                                                 __GFP_REPEAT|__GFP_NOWARN,
736                 huge_page_order(h));
737         if (page) {
738                 if (arch_prepare_hugepage(page)) {
739                         __free_pages(page, huge_page_order(h));
740                         return NULL;
741                 }
742                 prep_new_huge_page(h, page, nid);
743         }
744
745         return page;
746 }
747
748 /*
749  * common helper functions for hstate_next_node_to_{alloc|free}.
750  * We may have allocated or freed a huge page based on a different
751  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
752  * be outside of *nodes_allowed.  Ensure that we use an allowed
753  * node for alloc or free.
754  */
755 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
756 {
757         nid = next_node(nid, *nodes_allowed);
758         if (nid == MAX_NUMNODES)
759                 nid = first_node(*nodes_allowed);
760         VM_BUG_ON(nid >= MAX_NUMNODES);
761
762         return nid;
763 }
764
765 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
766 {
767         if (!node_isset(nid, *nodes_allowed))
768                 nid = next_node_allowed(nid, nodes_allowed);
769         return nid;
770 }
771
772 /*
773  * returns the previously saved node ["this node"] from which to
774  * allocate a persistent huge page for the pool and advances the
775  * next node from which to allocate, handling wrap at end of node
776  * mask.
777  */
778 static int hstate_next_node_to_alloc(struct hstate *h,
779                                         nodemask_t *nodes_allowed)
780 {
781         int nid;
782
783         VM_BUG_ON(!nodes_allowed);
784
785         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
786         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
787
788         return nid;
789 }
790
791 /*
792  * helper for free_pool_huge_page() - return the previously saved
793  * node ["this node"] from which to free a huge page.  Advance the
794  * next node id whether or not we find a free huge page to free so
795  * that the next attempt to free addresses the next node.
796  */
797 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
798 {
799         int nid;
800
801         VM_BUG_ON(!nodes_allowed);
802
803         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
804         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
805
806         return nid;
807 }
808
809 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
810         for (nr_nodes = nodes_weight(*mask);                            \
811                 nr_nodes > 0 &&                                         \
812                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
813                 nr_nodes--)
814
815 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
816         for (nr_nodes = nodes_weight(*mask);                            \
817                 nr_nodes > 0 &&                                         \
818                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
819                 nr_nodes--)
820
821 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
822 {
823         struct page *page;
824         int nr_nodes, node;
825         int ret = 0;
826
827         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
828                 page = alloc_fresh_huge_page_node(h, node);
829                 if (page) {
830                         ret = 1;
831                         break;
832                 }
833         }
834
835         if (ret)
836                 count_vm_event(HTLB_BUDDY_PGALLOC);
837         else
838                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
839
840         return ret;
841 }
842
843 /*
844  * Free a huge page from the pool, from the next node to free.
845  * Attempt to keep persistent huge pages more or less
846  * balanced over allowed nodes.
847  * Called with hugetlb_lock locked.
848  */
849 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
850                                                          bool acct_surplus)
851 {
852         int nr_nodes, node;
853         int ret = 0;
854
855         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
856                 /*
857                  * If we're returning unused surplus pages, only examine
858                  * nodes with surplus pages.
859                  */
860                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
861                     !list_empty(&h->hugepage_freelists[node])) {
862                         struct page *page =
863                                 list_entry(h->hugepage_freelists[node].next,
864                                           struct page, lru);
865                         list_del(&page->lru);
866                         h->free_huge_pages--;
867                         h->free_huge_pages_node[node]--;
868                         if (acct_surplus) {
869                                 h->surplus_huge_pages--;
870                                 h->surplus_huge_pages_node[node]--;
871                         }
872                         update_and_free_page(h, page);
873                         ret = 1;
874                         break;
875                 }
876         }
877
878         return ret;
879 }
880
881 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
882 {
883         struct page *page;
884         unsigned int r_nid;
885
886         if (h->order >= MAX_ORDER)
887                 return NULL;
888
889         /*
890          * Assume we will successfully allocate the surplus page to
891          * prevent racing processes from causing the surplus to exceed
892          * overcommit
893          *
894          * This however introduces a different race, where a process B
895          * tries to grow the static hugepage pool while alloc_pages() is
896          * called by process A. B will only examine the per-node
897          * counters in determining if surplus huge pages can be
898          * converted to normal huge pages in adjust_pool_surplus(). A
899          * won't be able to increment the per-node counter, until the
900          * lock is dropped by B, but B doesn't drop hugetlb_lock until
901          * no more huge pages can be converted from surplus to normal
902          * state (and doesn't try to convert again). Thus, we have a
903          * case where a surplus huge page exists, the pool is grown, and
904          * the surplus huge page still exists after, even though it
905          * should just have been converted to a normal huge page. This
906          * does not leak memory, though, as the hugepage will be freed
907          * once it is out of use. It also does not allow the counters to
908          * go out of whack in adjust_pool_surplus() as we don't modify
909          * the node values until we've gotten the hugepage and only the
910          * per-node value is checked there.
911          */
912         spin_lock(&hugetlb_lock);
913         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
914                 spin_unlock(&hugetlb_lock);
915                 return NULL;
916         } else {
917                 h->nr_huge_pages++;
918                 h->surplus_huge_pages++;
919         }
920         spin_unlock(&hugetlb_lock);
921
922         if (nid == NUMA_NO_NODE)
923                 page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
924                                    __GFP_REPEAT|__GFP_NOWARN,
925                                    huge_page_order(h));
926         else
927                 page = alloc_pages_exact_node(nid,
928                         htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
929                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
930
931         if (page && arch_prepare_hugepage(page)) {
932                 __free_pages(page, huge_page_order(h));
933                 page = NULL;
934         }
935
936         spin_lock(&hugetlb_lock);
937         if (page) {
938                 INIT_LIST_HEAD(&page->lru);
939                 r_nid = page_to_nid(page);
940                 set_compound_page_dtor(page, free_huge_page);
941                 set_hugetlb_cgroup(page, NULL);
942                 /*
943                  * We incremented the global counters already
944                  */
945                 h->nr_huge_pages_node[r_nid]++;
946                 h->surplus_huge_pages_node[r_nid]++;
947                 __count_vm_event(HTLB_BUDDY_PGALLOC);
948         } else {
949                 h->nr_huge_pages--;
950                 h->surplus_huge_pages--;
951                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
952         }
953         spin_unlock(&hugetlb_lock);
954
955         return page;
956 }
957
958 /*
959  * This allocation function is useful in the context where vma is irrelevant.
960  * E.g. soft-offlining uses this function because it only cares about the
961  * physical address of the error page.
962  */
963 struct page *alloc_huge_page_node(struct hstate *h, int nid)
964 {
965         struct page *page = NULL;
966
967         spin_lock(&hugetlb_lock);
968         if (h->free_huge_pages - h->resv_huge_pages > 0)
969                 page = dequeue_huge_page_node(h, nid);
970         spin_unlock(&hugetlb_lock);
971
972         if (!page)
973                 page = alloc_buddy_huge_page(h, nid);
974
975         return page;
976 }
977
978 /*
979  * Increase the hugetlb pool such that it can accommodate a reservation
980  * of size 'delta'.
981  */
982 static int gather_surplus_pages(struct hstate *h, int delta)
983 {
984         struct list_head surplus_list;
985         struct page *page, *tmp;
986         int ret, i;
987         int needed, allocated;
988         bool alloc_ok = true;
989
990         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
991         if (needed <= 0) {
992                 h->resv_huge_pages += delta;
993                 return 0;
994         }
995
996         allocated = 0;
997         INIT_LIST_HEAD(&surplus_list);
998
999         ret = -ENOMEM;
1000 retry:
1001         spin_unlock(&hugetlb_lock);
1002         for (i = 0; i < needed; i++) {
1003                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1004                 if (!page) {
1005                         alloc_ok = false;
1006                         break;
1007                 }
1008                 list_add(&page->lru, &surplus_list);
1009         }
1010         allocated += i;
1011
1012         /*
1013          * After retaking hugetlb_lock, we need to recalculate 'needed'
1014          * because either resv_huge_pages or free_huge_pages may have changed.
1015          */
1016         spin_lock(&hugetlb_lock);
1017         needed = (h->resv_huge_pages + delta) -
1018                         (h->free_huge_pages + allocated);
1019         if (needed > 0) {
1020                 if (alloc_ok)
1021                         goto retry;
1022                 /*
1023                  * We were not able to allocate enough pages to
1024                  * satisfy the entire reservation so we free what
1025                  * we've allocated so far.
1026                  */
1027                 goto free;
1028         }
1029         /*
1030          * The surplus_list now contains _at_least_ the number of extra pages
1031          * needed to accommodate the reservation.  Add the appropriate number
1032          * of pages to the hugetlb pool and free the extras back to the buddy
1033          * allocator.  Commit the entire reservation here to prevent another
1034          * process from stealing the pages as they are added to the pool but
1035          * before they are reserved.
1036          */
1037         needed += allocated;
1038         h->resv_huge_pages += delta;
1039         ret = 0;
1040
1041         /* Free the needed pages to the hugetlb pool */
1042         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1043                 if ((--needed) < 0)
1044                         break;
1045                 /*
1046                  * This page is now managed by the hugetlb allocator and has
1047                  * no users -- drop the buddy allocator's reference.
1048                  */
1049                 put_page_testzero(page);
1050                 VM_BUG_ON(page_count(page));
1051                 enqueue_huge_page(h, page);
1052         }
1053 free:
1054         spin_unlock(&hugetlb_lock);
1055
1056         /* Free unnecessary surplus pages to the buddy allocator */
1057         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1058                 put_page(page);
1059         spin_lock(&hugetlb_lock);
1060
1061         return ret;
1062 }
1063
1064 /*
1065  * When releasing a hugetlb pool reservation, any surplus pages that were
1066  * allocated to satisfy the reservation must be explicitly freed if they were
1067  * never used.
1068  * Called with hugetlb_lock held.
1069  */
1070 static void return_unused_surplus_pages(struct hstate *h,
1071                                         unsigned long unused_resv_pages)
1072 {
1073         unsigned long nr_pages;
1074
1075         /* Uncommit the reservation */
1076         h->resv_huge_pages -= unused_resv_pages;
1077
1078         /* Cannot return gigantic pages currently */
1079         if (h->order >= MAX_ORDER)
1080                 return;
1081
1082         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1083
1084         /*
1085          * We want to release as many surplus pages as possible, spread
1086          * evenly across all nodes with memory. Iterate across these nodes
1087          * until we can no longer free unreserved surplus pages. This occurs
1088          * when the nodes with surplus pages have no free pages.
1089  * free_pool_huge_page() will balance the freed pages across the
1090          * on-line nodes with memory and will handle the hstate accounting.
1091          */
1092         while (nr_pages--) {
1093                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1094                         break;
1095         }
1096 }
1097
1098 /*
1099  * Determine if the huge page at addr within the vma has an associated
1100  * reservation.  Where it does not, we will need to logically increase
1101  * reservation and actually increase subpool usage before an allocation
1102  * can occur.  Where any new reservation would be required, the
1103  * reservation change is prepared, but not committed.  Once the page
1104  * has been allocated from the subpool and instantiated the change should
1105  * be committed via vma_commit_reservation.  No action is required on
1106  * failure.
1107  */
1108 static long vma_needs_reservation(struct hstate *h,
1109                         struct vm_area_struct *vma, unsigned long addr)
1110 {
1111         struct address_space *mapping = vma->vm_file->f_mapping;
1112         struct inode *inode = mapping->host;
1113
1114         if (vma->vm_flags & VM_MAYSHARE) {
1115                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1116                 return region_chg(&inode->i_mapping->private_list,
1117                                                         idx, idx + 1);
1118
1119         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1120                 return 1;
1121
1122         } else  {
1123                 long err;
1124                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1125                 struct resv_map *resv = vma_resv_map(vma);
1126
1127                 err = region_chg(&resv->regions, idx, idx + 1);
1128                 if (err < 0)
1129                         return err;
1130                 return 0;
1131         }
1132 }
1133 static void vma_commit_reservation(struct hstate *h,
1134                         struct vm_area_struct *vma, unsigned long addr)
1135 {
1136         struct address_space *mapping = vma->vm_file->f_mapping;
1137         struct inode *inode = mapping->host;
1138
1139         if (vma->vm_flags & VM_MAYSHARE) {
1140                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1141                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
1142
1143         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1144                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1145                 struct resv_map *resv = vma_resv_map(vma);
1146
1147                 /* Mark this page used in the map. */
1148                 region_add(&resv->regions, idx, idx + 1);
1149         }
1150 }
1151
1152 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1153                                     unsigned long addr, int avoid_reserve)
1154 {
1155         struct hugepage_subpool *spool = subpool_vma(vma);
1156         struct hstate *h = hstate_vma(vma);
1157         struct page *page;
1158         long chg;
1159         int ret, idx;
1160         struct hugetlb_cgroup *h_cg;
1161
1162         idx = hstate_index(h);
1163         /*
1164          * Processes that did not create the mapping will have no
1165          * reserves and will not have accounted against the subpool
1166          * limit. Check that the subpool limit can be made before
1167          * satisfying the allocation.  MAP_NORESERVE mappings may also
1168          * need pages and subpool limit allocated if no reserve
1169          * mapping overlaps.
1170          */
1171         chg = vma_needs_reservation(h, vma, addr);
1172         if (chg < 0)
1173                 return ERR_PTR(-ENOMEM);
1174         if (chg || avoid_reserve)
1175                 if (hugepage_subpool_get_pages(spool, 1))
1176                         return ERR_PTR(-ENOSPC);
1177
1178         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1179         if (ret) {
1180                 if (chg || avoid_reserve)
1181                         hugepage_subpool_put_pages(spool, 1);
1182                 return ERR_PTR(-ENOSPC);
1183         }
1184         spin_lock(&hugetlb_lock);
1185         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1186         if (!page) {
1187                 spin_unlock(&hugetlb_lock);
1188                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1189                 if (!page) {
1190                         hugetlb_cgroup_uncharge_cgroup(idx,
1191                                                        pages_per_huge_page(h),
1192                                                        h_cg);
1193                         if (chg || avoid_reserve)
1194                                 hugepage_subpool_put_pages(spool, 1);
1195                         return ERR_PTR(-ENOSPC);
1196                 }
1197                 spin_lock(&hugetlb_lock);
1198                 list_move(&page->lru, &h->hugepage_activelist);
1199                 /* Fall through */
1200         }
1201         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1202         spin_unlock(&hugetlb_lock);
1203
1204         set_page_private(page, (unsigned long)spool);
1205
1206         vma_commit_reservation(h, vma, addr);
1207         return page;
1208 }
1209
1210 int __weak alloc_bootmem_huge_page(struct hstate *h)
1211 {
1212         struct huge_bootmem_page *m;
1213         int nr_nodes, node;
1214
1215         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1216                 void *addr;
1217
1218                 addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
1219                                 huge_page_size(h), huge_page_size(h), 0);
1220
1221                 if (addr) {
1222                         /*
1223                          * Use the beginning of the huge page to store the
1224                          * huge_bootmem_page struct (until gather_bootmem
1225                          * puts them into the mem_map).
1226                          */
1227                         m = addr;
1228                         goto found;
1229                 }
1230         }
1231         return 0;
1232
1233 found:
1234         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1235         /* Put them into a private list first because mem_map is not up yet */
1236         list_add(&m->list, &huge_boot_pages);
1237         m->hstate = h;
1238         return 1;
1239 }
1240
1241 static void prep_compound_huge_page(struct page *page, int order)
1242 {
1243         if (unlikely(order > (MAX_ORDER - 1)))
1244                 prep_compound_gigantic_page(page, order);
1245         else
1246                 prep_compound_page(page, order);
1247 }
1248
1249 /* Put bootmem huge pages into the standard lists after mem_map is up */
1250 static void __init gather_bootmem_prealloc(void)
1251 {
1252         struct huge_bootmem_page *m;
1253
1254         list_for_each_entry(m, &huge_boot_pages, list) {
1255                 struct hstate *h = m->hstate;
1256                 struct page *page;
1257
1258 #ifdef CONFIG_HIGHMEM
1259                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1260                 free_bootmem_late((unsigned long)m,
1261                                   sizeof(struct huge_bootmem_page));
1262 #else
1263                 page = virt_to_page(m);
1264 #endif
1265                 __ClearPageReserved(page);
1266                 WARN_ON(page_count(page) != 1);
1267                 prep_compound_huge_page(page, h->order);
1268                 prep_new_huge_page(h, page, page_to_nid(page));
1269                 /*
1270                  * If we had gigantic hugepages allocated at boot time, we need
1271                  * to restore the 'stolen' pages to totalram_pages in order to
1272                  * fix confusing memory reports from free(1) and other
1273                  * side effects, like CommitLimit going negative.
1274                  */
1275                 if (h->order > (MAX_ORDER - 1))
1276                         adjust_managed_page_count(page, 1 << h->order);
1277         }
1278 }
1279
1280 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1281 {
1282         unsigned long i;
1283
1284         for (i = 0; i < h->max_huge_pages; ++i) {
1285                 if (h->order >= MAX_ORDER) {
1286                         if (!alloc_bootmem_huge_page(h))
1287                                 break;
1288                 } else if (!alloc_fresh_huge_page(h,
1289                                          &node_states[N_MEMORY]))
1290                         break;
1291         }
1292         h->max_huge_pages = i;
1293 }
1294
1295 static void __init hugetlb_init_hstates(void)
1296 {
1297         struct hstate *h;
1298
1299         for_each_hstate(h) {
1300                 /* oversize hugepages were init'ed in early boot */
1301                 if (h->order < MAX_ORDER)
1302                         hugetlb_hstate_alloc_pages(h);
1303         }
1304 }
1305
1306 static char * __init memfmt(char *buf, unsigned long n)
1307 {
1308         if (n >= (1UL << 30))
1309                 sprintf(buf, "%lu GB", n >> 30);
1310         else if (n >= (1UL << 20))
1311                 sprintf(buf, "%lu MB", n >> 20);
1312         else
1313                 sprintf(buf, "%lu KB", n >> 10);
1314         return buf;
1315 }
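/*
 * Usage sketch (illustrative, not part of the original file):
 * memfmt(buf, 2UL << 20) produces "2 MB"; report_hugepages() below
 * uses it to pretty-print each hstate's page size.
 */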
1316
1317 static void __init report_hugepages(void)
1318 {
1319         struct hstate *h;
1320
1321         for_each_hstate(h) {
1322                 char buf[32];
1323                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1324                         memfmt(buf, huge_page_size(h)),
1325                         h->free_huge_pages);
1326         }
1327 }
1328
1329 #ifdef CONFIG_HIGHMEM
1330 static void try_to_free_low(struct hstate *h, unsigned long count,
1331                                                 nodemask_t *nodes_allowed)
1332 {
1333         int i;
1334
1335         if (h->order >= MAX_ORDER)
1336                 return;
1337
1338         for_each_node_mask(i, *nodes_allowed) {
1339                 struct page *page, *next;
1340                 struct list_head *freel = &h->hugepage_freelists[i];
1341                 list_for_each_entry_safe(page, next, freel, lru) {
1342                         if (count >= h->nr_huge_pages)
1343                                 return;
1344                         if (PageHighMem(page))
1345                                 continue;
1346                         list_del(&page->lru);
1347                         update_and_free_page(h, page);
1348                         h->free_huge_pages--;
1349                         h->free_huge_pages_node[page_to_nid(page)]--;
1350                 }
1351         }
1352 }
1353 #else
1354 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1355                                                 nodemask_t *nodes_allowed)
1356 {
1357 }
1358 #endif
1359
1360 /*
1361  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1362  * balanced by operating on them in a round-robin fashion.
1363  * Returns 1 if an adjustment was made.
1364  */
1365 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1366                                 int delta)
1367 {
1368         int nr_nodes, node;
1369
1370         VM_BUG_ON(delta != -1 && delta != 1);
1371
1372         if (delta < 0) {
1373                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1374                         if (h->surplus_huge_pages_node[node])
1375                                 goto found;
1376                 }
1377         } else {
1378                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1379                         if (h->surplus_huge_pages_node[node] <
1380                                         h->nr_huge_pages_node[node])
1381                                 goto found;
1382                 }
1383         }
1384         return 0;
1385
1386 found:
1387         h->surplus_huge_pages += delta;
1388         h->surplus_huge_pages_node[node] += delta;
1389         return 1;
1390 }
1391
1392 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1393 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1394                                                 nodemask_t *nodes_allowed)
1395 {
1396         unsigned long min_count, ret;
1397
1398         if (h->order >= MAX_ORDER)
1399                 return h->max_huge_pages;
1400
1401         /*
1402          * Increase the pool size
1403          * First take pages out of surplus state.  Then make up the
1404          * remaining difference by allocating fresh huge pages.
1405          *
1406          * We might race with alloc_buddy_huge_page() here and be unable
1407          * to convert a surplus huge page to a normal huge page. That is
1408          * not critical, though, it just means the overall size of the
1409          * pool might be one hugepage larger than it needs to be, but
1410          * within all the constraints specified by the sysctls.
1411          */
1412         spin_lock(&hugetlb_lock);
1413         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1414                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1415                         break;
1416         }
1417
1418         while (count > persistent_huge_pages(h)) {
1419                 /*
1420                  * If this allocation races such that we no longer need the
1421                  * page, free_huge_page will handle it by freeing the page
1422                  * and reducing the surplus.
1423                  */
1424                 spin_unlock(&hugetlb_lock);
1425                 ret = alloc_fresh_huge_page(h, nodes_allowed);
1426                 spin_lock(&hugetlb_lock);
1427                 if (!ret)
1428                         goto out;
1429
1430                 /* Bail for signals. Probably ctrl-c from user */
1431                 if (signal_pending(current))
1432                         goto out;
1433         }
1434
1435         /*
1436          * Decrease the pool size
1437          * First return free pages to the buddy allocator (being careful
1438          * to keep enough around to satisfy reservations).  Then place
1439          * pages into surplus state as needed so the pool will shrink
1440          * to the desired size as pages become free.
1441          *
1442          * By placing pages into the surplus state independent of the
1443          * overcommit value, we are allowing the surplus pool size to
1444          * exceed overcommit. There are few sane options here. Since
1445          * alloc_buddy_huge_page() is checking the global counter,
1446          * though, we'll note that we're not allowed to exceed surplus
1447          * and won't grow the pool anywhere else. Not until one of the
1448          * sysctls is changed, or the surplus pages go out of use.
1449          */
1450         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1451         min_count = max(count, min_count);
1452         try_to_free_low(h, min_count, nodes_allowed);
1453         while (min_count < persistent_huge_pages(h)) {
1454                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1455                         break;
1456         }
1457         while (count < persistent_huge_pages(h)) {
1458                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1459                         break;
1460         }
1461 out:
1462         ret = persistent_huge_pages(h);
1463         spin_unlock(&hugetlb_lock);
1464         return ret;
1465 }
1466
1467 #define HSTATE_ATTR_RO(_name) \
1468         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1469
1470 #define HSTATE_ATTR(_name) \
1471         static struct kobj_attribute _name##_attr = \
1472                 __ATTR(_name, 0644, _name##_show, _name##_store)
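/*
 * Illustrative expansion (not part of the original file):
 * HSTATE_ATTR(nr_hugepages) used below defines
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show, nr_hugepages_store);
 *
 * i.e. a sysfs attribute wired to the show/store handlers.
 */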
1473
1474 static struct kobject *hugepages_kobj;
1475 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1476
1477 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1478
1479 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1480 {
1481         int i;
1482
1483         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1484                 if (hstate_kobjs[i] == kobj) {
1485                         if (nidp)
1486                                 *nidp = NUMA_NO_NODE;
1487                         return &hstates[i];
1488                 }
1489
1490         return kobj_to_node_hstate(kobj, nidp);
1491 }
1492
1493 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1494                                         struct kobj_attribute *attr, char *buf)
1495 {
1496         struct hstate *h;
1497         unsigned long nr_huge_pages;
1498         int nid;
1499
1500         h = kobj_to_hstate(kobj, &nid);
1501         if (nid == NUMA_NO_NODE)
1502                 nr_huge_pages = h->nr_huge_pages;
1503         else
1504                 nr_huge_pages = h->nr_huge_pages_node[nid];
1505
1506         return sprintf(buf, "%lu\n", nr_huge_pages);
1507 }
1508
1509 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1510                         struct kobject *kobj, struct kobj_attribute *attr,
1511                         const char *buf, size_t len)
1512 {
1513         int err;
1514         int nid;
1515         unsigned long count;
1516         struct hstate *h;
1517         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1518
1519         err = kstrtoul(buf, 10, &count);
1520         if (err)
1521                 goto out;
1522
1523         h = kobj_to_hstate(kobj, &nid);
1524         if (h->order >= MAX_ORDER) {
1525                 err = -EINVAL;
1526                 goto out;
1527         }
1528
1529         if (nid == NUMA_NO_NODE) {
1530                 /*
1531                  * global hstate attribute
1532                  */
1533                 if (!(obey_mempolicy &&
1534                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1535                         NODEMASK_FREE(nodes_allowed);
1536                         nodes_allowed = &node_states[N_MEMORY];
1537                 }
1538         } else if (nodes_allowed) {
1539                 /*
1540                  * per node hstate attribute: adjust count to global,
1541                  * but restrict alloc/free to the specified node.
1542                  */
1543                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1544                 init_nodemask_of_node(nodes_allowed, nid);
1545         } else
1546                 nodes_allowed = &node_states[N_MEMORY];
1547
1548         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1549
1550         if (nodes_allowed != &node_states[N_MEMORY])
1551                 NODEMASK_FREE(nodes_allowed);
1552
1553         return len;
1554 out:
1555         NODEMASK_FREE(nodes_allowed);
1556         return err;
1557 }
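/*
 * Worked example for the per-node branch above: with a global pool of 16
 * pages of which node 1 holds 4, writing 10 to node 1's nr_hugepages gives
 * count = 10 + (16 - 4) = 22.  set_max_huge_pages() then grows the global
 * pool to 22 with nodes_allowed restricted to node 1, so node 1 alone
 * gains the 6 new pages.
 */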
1558
1559 static ssize_t nr_hugepages_show(struct kobject *kobj,
1560                                        struct kobj_attribute *attr, char *buf)
1561 {
1562         return nr_hugepages_show_common(kobj, attr, buf);
1563 }
1564
1565 static ssize_t nr_hugepages_store(struct kobject *kobj,
1566                struct kobj_attribute *attr, const char *buf, size_t len)
1567 {
1568         return nr_hugepages_store_common(false, kobj, attr, buf, len);
1569 }
1570 HSTATE_ATTR(nr_hugepages);
1571
1572 #ifdef CONFIG_NUMA
1573
1574 /*
1575  * hstate attribute for optionally mempolicy-based constraint on persistent
1576  * huge page alloc/free.
1577  */
1578 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1579                                        struct kobj_attribute *attr, char *buf)
1580 {
1581         return nr_hugepages_show_common(kobj, attr, buf);
1582 }
1583
1584 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1585                struct kobj_attribute *attr, const char *buf, size_t len)
1586 {
1587         return nr_hugepages_store_common(true, kobj, attr, buf, len);
1588 }
1589 HSTATE_ATTR(nr_hugepages_mempolicy);
1590 #endif
1591
1592
1593 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1594                                         struct kobj_attribute *attr, char *buf)
1595 {
1596         struct hstate *h = kobj_to_hstate(kobj, NULL);
1597         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1598 }
1599
1600 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1601                 struct kobj_attribute *attr, const char *buf, size_t count)
1602 {
1603         int err;
1604         unsigned long input;
1605         struct hstate *h = kobj_to_hstate(kobj, NULL);
1606
1607         if (h->order >= MAX_ORDER)
1608                 return -EINVAL;
1609
1610         err = kstrtoul(buf, 10, &input);
1611         if (err)
1612                 return err;
1613
1614         spin_lock(&hugetlb_lock);
1615         h->nr_overcommit_huge_pages = input;
1616         spin_unlock(&hugetlb_lock);
1617
1618         return count;
1619 }
1620 HSTATE_ATTR(nr_overcommit_hugepages);
1621
1622 static ssize_t free_hugepages_show(struct kobject *kobj,
1623                                         struct kobj_attribute *attr, char *buf)
1624 {
1625         struct hstate *h;
1626         unsigned long free_huge_pages;
1627         int nid;
1628
1629         h = kobj_to_hstate(kobj, &nid);
1630         if (nid == NUMA_NO_NODE)
1631                 free_huge_pages = h->free_huge_pages;
1632         else
1633                 free_huge_pages = h->free_huge_pages_node[nid];
1634
1635         return sprintf(buf, "%lu\n", free_huge_pages);
1636 }
1637 HSTATE_ATTR_RO(free_hugepages);
1638
1639 static ssize_t resv_hugepages_show(struct kobject *kobj,
1640                                         struct kobj_attribute *attr, char *buf)
1641 {
1642         struct hstate *h = kobj_to_hstate(kobj, NULL);
1643         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1644 }
1645 HSTATE_ATTR_RO(resv_hugepages);
1646
1647 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1648                                         struct kobj_attribute *attr, char *buf)
1649 {
1650         struct hstate *h;
1651         unsigned long surplus_huge_pages;
1652         int nid;
1653
1654         h = kobj_to_hstate(kobj, &nid);
1655         if (nid == NUMA_NO_NODE)
1656                 surplus_huge_pages = h->surplus_huge_pages;
1657         else
1658                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1659
1660         return sprintf(buf, "%lu\n", surplus_huge_pages);
1661 }
1662 HSTATE_ATTR_RO(surplus_hugepages);
1663
1664 static struct attribute *hstate_attrs[] = {
1665         &nr_hugepages_attr.attr,
1666         &nr_overcommit_hugepages_attr.attr,
1667         &free_hugepages_attr.attr,
1668         &resv_hugepages_attr.attr,
1669         &surplus_hugepages_attr.attr,
1670 #ifdef CONFIG_NUMA
1671         &nr_hugepages_mempolicy_attr.attr,
1672 #endif
1673         NULL,
1674 };
1675
1676 static struct attribute_group hstate_attr_group = {
1677         .attrs = hstate_attrs,
1678 };
1679
1680 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1681                                     struct kobject **hstate_kobjs,
1682                                     struct attribute_group *hstate_attr_group)
1683 {
1684         int retval;
1685         int hi = hstate_index(h);
1686
1687         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1688         if (!hstate_kobjs[hi])
1689                 return -ENOMEM;
1690
1691         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1692         if (retval)
1693                 kobject_put(hstate_kobjs[hi]);
1694
1695         return retval;
1696 }
1697
1698 static void __init hugetlb_sysfs_init(void)
1699 {
1700         struct hstate *h;
1701         int err;
1702
1703         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1704         if (!hugepages_kobj)
1705                 return;
1706
1707         for_each_hstate(h) {
1708                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1709                                          hstate_kobjs, &hstate_attr_group);
1710                 if (err)
1711                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
1712         }
1713 }
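/*
 * On a typical x86_64 configuration with 2MB huge pages this creates,
 * under mm_kobj (/sys/kernel/mm):
 *
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * (plus nr_hugepages_mempolicy on NUMA builds), one directory per hstate,
 * named by hugetlb_add_hstate().
 */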
1714
1715 #ifdef CONFIG_NUMA
1716
1717 /*
1718  * node_hstate/s - associate per node hstate attributes, via their kobjects,
1719  * with node devices in node_devices[] using a parallel array.  The array
1720  * index of a node device or node_hstate equals the node id.
1721  * This is here to avoid any static dependency of the node device driver, in
1722  * the base kernel, on the hugetlb module.
1723  */
1724 struct node_hstate {
1725         struct kobject          *hugepages_kobj;
1726         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1727 };
1728 struct node_hstate node_hstates[MAX_NUMNODES];
1729
1730 /*
1731  * A subset of global hstate attributes for node devices
1732  */
1733 static struct attribute *per_node_hstate_attrs[] = {
1734         &nr_hugepages_attr.attr,
1735         &free_hugepages_attr.attr,
1736         &surplus_hugepages_attr.attr,
1737         NULL,
1738 };
1739
1740 static struct attribute_group per_node_hstate_attr_group = {
1741         .attrs = per_node_hstate_attrs,
1742 };
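/*
 * These attributes appear under each node device, e.g. for node 0 with
 * 2MB pages:
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * Only this subset is exposed per node; reservations and overcommit
 * remain global-only attributes.
 */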
1743
1744 /*
1745  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1746  * Returns node id via non-NULL nidp.
1747  */
1748 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1749 {
1750         int nid;
1751
1752         for (nid = 0; nid < nr_node_ids; nid++) {
1753                 struct node_hstate *nhs = &node_hstates[nid];
1754                 int i;
1755                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1756                         if (nhs->hstate_kobjs[i] == kobj) {
1757                                 if (nidp)
1758                                         *nidp = nid;
1759                                 return &hstates[i];
1760                         }
1761         }
1762
1763         BUG();
1764         return NULL;
1765 }
1766
1767 /*
1768  * Unregister hstate attributes from a single node device.
1769  * No-op if no hstate attributes attached.
1770  */
1771 static void hugetlb_unregister_node(struct node *node)
1772 {
1773         struct hstate *h;
1774         struct node_hstate *nhs = &node_hstates[node->dev.id];
1775
1776         if (!nhs->hugepages_kobj)
1777                 return;         /* no hstate attributes */
1778
1779         for_each_hstate(h) {
1780                 int idx = hstate_index(h);
1781                 if (nhs->hstate_kobjs[idx]) {
1782                         kobject_put(nhs->hstate_kobjs[idx]);
1783                         nhs->hstate_kobjs[idx] = NULL;
1784                 }
1785         }
1786
1787         kobject_put(nhs->hugepages_kobj);
1788         nhs->hugepages_kobj = NULL;
1789 }
1790
1791 /*
1792  * hugetlb module exit:  unregister hstate attributes from node devices
1793  * that have them.
1794  */
1795 static void hugetlb_unregister_all_nodes(void)
1796 {
1797         int nid;
1798
1799         /*
1800          * disable node device registrations.
1801          */
1802         register_hugetlbfs_with_node(NULL, NULL);
1803
1804         /*
1805          * remove hstate attributes from any nodes that have them.
1806          */
1807         for (nid = 0; nid < nr_node_ids; nid++)
1808                 hugetlb_unregister_node(node_devices[nid]);
1809 }
1810
1811 /*
1812  * Register hstate attributes for a single node device.
1813  * No-op if attributes already registered.
1814  */
1815 static void hugetlb_register_node(struct node *node)
1816 {
1817         struct hstate *h;
1818         struct node_hstate *nhs = &node_hstates[node->dev.id];
1819         int err;
1820
1821         if (nhs->hugepages_kobj)
1822                 return;         /* already allocated */
1823
1824         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1825                                                         &node->dev.kobj);
1826         if (!nhs->hugepages_kobj)
1827                 return;
1828
1829         for_each_hstate(h) {
1830                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1831                                                 nhs->hstate_kobjs,
1832                                                 &per_node_hstate_attr_group);
1833                 if (err) {
1834                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
1835                                 h->name, node->dev.id);
1836                         hugetlb_unregister_node(node);
1837                         break;
1838                 }
1839         }
1840 }
1841
1842 /*
1843  * hugetlb init time:  register hstate attributes for all registered node
1844  * devices of nodes that have memory.  All on-line nodes should have
1845  * registered their associated device by this time.
1846  */
1847 static void hugetlb_register_all_nodes(void)
1848 {
1849         int nid;
1850
1851         for_each_node_state(nid, N_MEMORY) {
1852                 struct node *node = node_devices[nid];
1853                 if (node->dev.id == nid)
1854                         hugetlb_register_node(node);
1855         }
1856
1857         /*
1858          * Let the node device driver know we're here so it can
1859          * [un]register hstate attributes on node hotplug.
1860          */
1861         register_hugetlbfs_with_node(hugetlb_register_node,
1862                                      hugetlb_unregister_node);
1863 }
1864 #else   /* !CONFIG_NUMA */
1865
1866 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1867 {
1868         BUG();
1869         if (nidp)
1870                 *nidp = -1;
1871         return NULL;
1872 }
1873
1874 static void hugetlb_unregister_all_nodes(void) { }
1875
1876 static void hugetlb_register_all_nodes(void) { }
1877
1878 #endif
1879
1880 static void __exit hugetlb_exit(void)
1881 {
1882         struct hstate *h;
1883
1884         hugetlb_unregister_all_nodes();
1885
1886         for_each_hstate(h) {
1887                 kobject_put(hstate_kobjs[hstate_index(h)]);
1888         }
1889
1890         kobject_put(hugepages_kobj);
1891 }
1892 module_exit(hugetlb_exit);
1893
1894 static int __init hugetlb_init(void)
1895 {
1896         /* Some platforms decide whether they support huge pages at boot
1897          * time. On those, such as powerpc, HPAGE_SHIFT is set to 0 when
1898          * there is no such support.
1899          */
1900         if (HPAGE_SHIFT == 0)
1901                 return 0;
1902
1903         if (!size_to_hstate(default_hstate_size)) {
1904                 default_hstate_size = HPAGE_SIZE;
1905                 if (!size_to_hstate(default_hstate_size))
1906                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1907         }
1908         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1909         if (default_hstate_max_huge_pages)
1910                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1911
1912         hugetlb_init_hstates();
1913         gather_bootmem_prealloc();
1914         report_hugepages();
1915
1916         hugetlb_sysfs_init();
1917         hugetlb_register_all_nodes();
1918         hugetlb_cgroup_file_init();
1919
1920         return 0;
1921 }
1922 module_init(hugetlb_init);
1923
1924 /* Should be called when processing a hugepagesz=... option */
1925 void __init hugetlb_add_hstate(unsigned order)
1926 {
1927         struct hstate *h;
1928         unsigned long i;
1929
1930         if (size_to_hstate(PAGE_SIZE << order)) {
1931                 pr_warning("hugepagesz= specified twice, ignoring\n");
1932                 return;
1933         }
1934         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
1935         BUG_ON(order == 0);
1936         h = &hstates[hugetlb_max_hstate++];
1937         h->order = order;
1938         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1939         h->nr_huge_pages = 0;
1940         h->free_huge_pages = 0;
1941         for (i = 0; i < MAX_NUMNODES; ++i)
1942                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1943         INIT_LIST_HEAD(&h->hugepage_activelist);
1944         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
1945         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
1946         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1947                                         huge_page_size(h)/1024);
1948
1949         parsed_hstate = h;
1950 }
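/*
 * Example: "hugepagesz=1G" on x86_64 (PAGE_SHIFT == 12) arrives here with
 * order == 18, so huge_page_size(h) == 1UL << 30, h->mask clears the low
 * 30 bits of an address, and h->name becomes "hugepages-1048576kB".
 */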
1951
1952 static int __init hugetlb_nrpages_setup(char *s)
1953 {
1954         unsigned long *mhp;
1955         static unsigned long *last_mhp;
1956
1957         /*
1958          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
1959          * so this hugepages= parameter goes to the "default hstate".
1960          */
1961         if (!hugetlb_max_hstate)
1962                 mhp = &default_hstate_max_huge_pages;
1963         else
1964                 mhp = &parsed_hstate->max_huge_pages;
1965
1966         if (mhp == last_mhp) {
1967                 pr_warning("hugepages= specified twice without "
1968                            "interleaving hugepagesz=, ignoring\n");
1969                 return 1;
1970         }
1971
1972         if (sscanf(s, "%lu", mhp) <= 0)
1973                 *mhp = 0;
1974
1975         /*
1976          * Global state is always initialized later in hugetlb_init.
1977          * But for hstates of order >= MAX_ORDER we must allocate their
1978          * pages here, early enough to still use the bootmem allocator.
1979          */
1980         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
1981                 hugetlb_hstate_alloc_pages(parsed_hstate);
1982
1983         last_mhp = mhp;
1984
1985         return 1;
1986 }
1987 __setup("hugepages=", hugetlb_nrpages_setup);
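/*
 * Example command line, showing why each hugepages= must follow the
 * hugepagesz= it applies to (or precede all of them for the default
 * hstate):
 *
 *   default_hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * requests 512 2MB pages for the default hstate and 4 1GB pages; the
 * latter are allocated from bootmem right here since, on x86_64, a 1GB
 * page's order (18) is >= MAX_ORDER.
 */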
1988
1989 static int __init hugetlb_default_setup(char *s)
1990 {
1991         default_hstate_size = memparse(s, &s);
1992         return 1;
1993 }
1994 __setup("default_hugepagesz=", hugetlb_default_setup);
1995
1996 static unsigned int cpuset_mems_nr(unsigned int *array)
1997 {
1998         int node;
1999         unsigned int nr = 0;
2000
2001         for_each_node_mask(node, cpuset_current_mems_allowed)
2002                 nr += array[node];
2003
2004         return nr;
2005 }
2006
2007 #ifdef CONFIG_SYSCTL
2008 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2009                          struct ctl_table *table, int write,
2010                          void __user *buffer, size_t *length, loff_t *ppos)
2011 {
2012         struct hstate *h = &default_hstate;
2013         unsigned long tmp;
2014         int ret;
2015
2016         tmp = h->max_huge_pages;
2017
2018         if (write && h->order >= MAX_ORDER)
2019                 return -EINVAL;
2020
2021         table->data = &tmp;
2022         table->maxlen = sizeof(unsigned long);
2023         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2024         if (ret)
2025                 goto out;
2026
2027         if (write) {
2028                 NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2029                                                 GFP_KERNEL | __GFP_NORETRY);
2030                 if (!(obey_mempolicy &&
2031                                init_nodemask_of_mempolicy(nodes_allowed))) {
2032                         NODEMASK_FREE(nodes_allowed);
2033                         nodes_allowed = &node_states[N_MEMORY];
2034                 }
2035                 h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2036
2037                 if (nodes_allowed != &node_states[N_MEMORY])
2038                         NODEMASK_FREE(nodes_allowed);
2039         }
2040 out:
2041         return ret;
2042 }
2043
2044 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2045                           void __user *buffer, size_t *length, loff_t *ppos)
2046 {
2047
2048         return hugetlb_sysctl_handler_common(false, table, write,
2049                                                         buffer, length, ppos);
2050 }
2051
2052 #ifdef CONFIG_NUMA
2053 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2054                           void __user *buffer, size_t *length, loff_t *ppos)
2055 {
2056         return hugetlb_sysctl_handler_common(true, table, write,
2057                                                         buffer, length, ppos);
2058 }
2059 #endif /* CONFIG_NUMA */
2060
2061 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
2062                         void __user *buffer,
2063                         size_t *length, loff_t *ppos)
2064 {
2065         proc_dointvec(table, write, buffer, length, ppos);
2066         if (hugepages_treat_as_movable)
2067                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
2068         else
2069                 htlb_alloc_mask = GFP_HIGHUSER;
2070         return 0;
2071 }
2072
2073 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2074                         void __user *buffer,
2075                         size_t *length, loff_t *ppos)
2076 {
2077         struct hstate *h = &default_hstate;
2078         unsigned long tmp;
2079         int ret;
2080
2081         tmp = h->nr_overcommit_huge_pages;
2082
2083         if (write && h->order >= MAX_ORDER)
2084                 return -EINVAL;
2085
2086         table->data = &tmp;
2087         table->maxlen = sizeof(unsigned long);
2088         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2089         if (ret)
2090                 goto out;
2091
2092         if (write) {
2093                 spin_lock(&hugetlb_lock);
2094                 h->nr_overcommit_huge_pages = tmp;
2095                 spin_unlock(&hugetlb_lock);
2096         }
2097 out:
2098         return ret;
2099 }
2100
2101 #endif /* CONFIG_SYSCTL */
2102
2103 void hugetlb_report_meminfo(struct seq_file *m)
2104 {
2105         struct hstate *h = &default_hstate;
2106         seq_printf(m,
2107                         "HugePages_Total:   %5lu\n"
2108                         "HugePages_Free:    %5lu\n"
2109                         "HugePages_Rsvd:    %5lu\n"
2110                         "HugePages_Surp:    %5lu\n"
2111                         "Hugepagesize:   %8lu kB\n",
2112                         h->nr_huge_pages,
2113                         h->free_huge_pages,
2114                         h->resv_huge_pages,
2115                         h->surplus_huge_pages,
2116                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2117 }
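/*
 * Sample output as rendered by the format strings above (illustrative
 * values, 2MB default hstate):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      256
 *   HugePages_Rsvd:       64
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *
 * Only the default hstate is reported in /proc/meminfo; other sizes are
 * visible through the per-hstate sysfs files.
 */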
2118
2119 int hugetlb_report_node_meminfo(int nid, char *buf)
2120 {
2121         struct hstate *h = &default_hstate;
2122         return sprintf(buf,
2123                 "Node %d HugePages_Total: %5u\n"
2124                 "Node %d HugePages_Free:  %5u\n"
2125                 "Node %d HugePages_Surp:  %5u\n",
2126                 nid, h->nr_huge_pages_node[nid],
2127                 nid, h->free_huge_pages_node[nid],
2128                 nid, h->surplus_huge_pages_node[nid]);
2129 }
2130
2131 void hugetlb_show_meminfo(void)
2132 {
2133         struct hstate *h;
2134         int nid;
2135
2136         for_each_node_state(nid, N_MEMORY)
2137                 for_each_hstate(h)
2138                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2139                                 nid,
2140                                 h->nr_huge_pages_node[nid],
2141                                 h->free_huge_pages_node[nid],
2142                                 h->surplus_huge_pages_node[nid],
2143                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2144 }
2145
2146 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2147 unsigned long hugetlb_total_pages(void)
2148 {
2149         struct hstate *h;
2150         unsigned long nr_total_pages = 0;
2151
2152         for_each_hstate(h)
2153                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2154         return nr_total_pages;
2155 }
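/*
 * Example: 16 huge pages of 2MB on a 4kB PAGE_SIZE system give
 * pages_per_huge_page() == 512, so this returns 16 * 512 = 8192.
 */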
2156
2157 static int hugetlb_acct_memory(struct hstate *h, long delta)
2158 {
2159         int ret = -ENOMEM;
2160
2161         spin_lock(&hugetlb_lock);
2162         /*
2163          * When cpuset is configured, it breaks strict hugetlb page
2164          * reservation as the accounting is done on a global variable. Such
2165          * a reservation is completely rubbish in the presence of cpuset
2166          * because the reservation is not checked against page availability
2167          * for the current cpuset. The application can still be OOM'ed by
2168          * the kernel for lack of free htlb pages in the cpuset the task
2169          * is in. Attempting to enforce strict accounting with cpuset is
2170          * almost impossible (or too ugly) because cpusets are too fluid:
2171          * tasks or memory nodes can be dynamically moved between cpusets.
2172          *
2173          * Changing the semantics of shared hugetlb mappings under cpuset
2174          * is undesirable. However, to preserve some of the semantics, we
2175          * fall back to checking against the current free page availability
2176          * as a best attempt, hopefully minimizing the impact of the
2177          * semantic changes that cpuset brings.
2178          */
2179         if (delta > 0) {
2180                 if (gather_surplus_pages(h, delta) < 0)
2181                         goto out;
2182
2183                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2184                         return_unused_surplus_pages(h, delta);
2185                         goto out;
2186                 }
2187         }
2188
2189         ret = 0;
2190         if (delta < 0)
2191                 return_unused_surplus_pages(h, (unsigned long) -delta);
2192
2193 out:
2194         spin_unlock(&hugetlb_lock);
2195         return ret;
2196 }
2197
2198 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2199 {
2200         struct resv_map *resv = vma_resv_map(vma);
2201
2202         /*
2203          * This new VMA should share its sibling's reservation map if present.
2204          * The VMA will only ever have a valid reservation map pointer where
2205          * it is being copied for another still existing VMA.  As that VMA
2206          * has a reference to the reservation map it cannot disappear until
2207          * after this open call completes.  It is therefore safe to take a
2208          * new reference here without additional locking.
2209          */
2210         if (resv)
2211                 kref_get(&resv->refs);
2212 }
2213
2214 static void resv_map_put(struct vm_area_struct *vma)
2215 {
2216         struct resv_map *resv = vma_resv_map(vma);
2217
2218         if (!resv)
2219                 return;
2220         kref_put(&resv->refs, resv_map_release);
2221 }
2222
2223 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2224 {
2225         struct hstate *h = hstate_vma(vma);
2226         struct resv_map *resv = vma_resv_map(vma);
2227         struct hugepage_subpool *spool = subpool_vma(vma);
2228         unsigned long reserve;
2229         unsigned long start;
2230         unsigned long end;
2231
2232         if (resv) {
2233                 start = vma_hugecache_offset(h, vma, vma->vm_start);
2234                 end = vma_hugecache_offset(h, vma, vma->vm_end);
2235
2236                 reserve = (end - start) -
2237                         region_count(&resv->regions, start, end);
2238
2239                 resv_map_put(vma);
2240
2241                 if (reserve) {
2242                         hugetlb_acct_memory(h, -reserve);
2243                         hugepage_subpool_put_pages(spool, reserve);
2244                 }
2245         }
2246 }
2247
2248 /*
2249  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2250  * handle_mm_fault() to try to instantiate regular-sized pages in the
2251  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2252  * this far.
2253  */
2254 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2255 {
2256         BUG();
2257         return 0;
2258 }
2259
2260 const struct vm_operations_struct hugetlb_vm_ops = {
2261         .fault = hugetlb_vm_op_fault,
2262         .open = hugetlb_vm_op_open,
2263         .close = hugetlb_vm_op_close,
2264 };
2265
2266 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2267                                 int writable)
2268 {
2269         pte_t entry;
2270
2271         if (writable) {
2272                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2273                                          vma->vm_page_prot)));
2274         } else {
2275                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2276                                            vma->vm_page_prot));
2277         }
2278         entry = pte_mkyoung(entry);
2279         entry = pte_mkhuge(entry);
2280         entry = arch_make_huge_pte(entry, vma, page, writable);
2281
2282         return entry;
2283 }
2284
2285 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2286                                    unsigned long address, pte_t *ptep)
2287 {
2288         pte_t entry;
2289
2290         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2291         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2292                 update_mmu_cache(vma, address, ptep);
2293 }
2294
2295
2296 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2297                             struct vm_area_struct *vma)
2298 {
2299         pte_t *src_pte, *dst_pte, entry;
2300         struct page *ptepage;
2301         unsigned long addr;
2302         int cow;
2303         struct hstate *h = hstate_vma(vma);
2304         unsigned long sz = huge_page_size(h);
2305
2306         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2307
2308         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2309                 src_pte = huge_pte_offset(src, addr);
2310                 if (!src_pte)
2311                         continue;
2312                 dst_pte = huge_pte_alloc(dst, addr, sz);
2313                 if (!dst_pte)
2314                         goto nomem;
2315
2316                 /* If the pagetables are shared don't copy or take references */
2317                 if (dst_pte == src_pte)
2318                         continue;
2319
2320                 spin_lock(&dst->page_table_lock);
2321                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2322                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
2323                         if (cow)
2324                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2325                         entry = huge_ptep_get(src_pte);
2326                         ptepage = pte_page(entry);
2327                         get_page(ptepage);
2328                         page_dup_rmap(ptepage);
2329                         set_huge_pte_at(dst, addr, dst_pte, entry);
2330                 }
2331                 spin_unlock(&src->page_table_lock);
2332                 spin_unlock(&dst->page_table_lock);
2333         }
2334         return 0;
2335
2336 nomem:
2337         return -ENOMEM;
2338 }
2339
2340 static int is_hugetlb_entry_migration(pte_t pte)
2341 {
2342         swp_entry_t swp;
2343
2344         if (huge_pte_none(pte) || pte_present(pte))
2345                 return 0;
2346         swp = pte_to_swp_entry(pte);
2347         if (non_swap_entry(swp) && is_migration_entry(swp))
2348                 return 1;
2349         else
2350                 return 0;
2351 }
2352
2353 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2354 {
2355         swp_entry_t swp;
2356
2357         if (huge_pte_none(pte) || pte_present(pte))
2358                 return 0;
2359         swp = pte_to_swp_entry(pte);
2360         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2361                 return 1;
2362         else
2363                 return 0;
2364 }
2365
2366 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2367                             unsigned long start, unsigned long end,
2368                             struct page *ref_page)
2369 {
2370         int force_flush = 0;
2371         struct mm_struct *mm = vma->vm_mm;
2372         unsigned long address;
2373         pte_t *ptep;
2374         pte_t pte;
2375         struct page *page;
2376         struct hstate *h = hstate_vma(vma);
2377         unsigned long sz = huge_page_size(h);
2378         const unsigned long mmun_start = start; /* For mmu_notifiers */
2379         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2380
2381         WARN_ON(!is_vm_hugetlb_page(vma));
2382         BUG_ON(start & ~huge_page_mask(h));
2383         BUG_ON(end & ~huge_page_mask(h));
2384
2385         tlb_start_vma(tlb, vma);
2386         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2387 again:
2388         spin_lock(&mm->page_table_lock);
2389         for (address = start; address < end; address += sz) {
2390                 ptep = huge_pte_offset(mm, address);
2391                 if (!ptep)
2392                         continue;
2393
2394                 if (huge_pmd_unshare(mm, &address, ptep))
2395                         continue;
2396
2397                 pte = huge_ptep_get(ptep);
2398                 if (huge_pte_none(pte))
2399                         continue;
2400
2401                 /*
2402                  * HWPoisoned hugepage is already unmapped and dropped reference
2403                  */
2404                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2405                         huge_pte_clear(mm, address, ptep);
2406                         continue;
2407                 }
2408
2409                 page = pte_page(pte);
2410                 /*
2411                  * If a reference page is supplied, it is because a specific
2412                  * page is being unmapped, not a range. Ensure the page we
2413                  * are about to unmap is the actual page of interest.
2414                  */
2415                 if (ref_page) {
2416                         if (page != ref_page)
2417                                 continue;
2418
2419                         /*
2420                          * Mark the VMA as having unmapped its page so that
2421                          * future faults in this VMA will fail rather than
2422                          * looking like data was lost
2423                          */
2424                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2425                 }
2426
2427                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2428                 tlb_remove_tlb_entry(tlb, ptep, address);
2429                 if (huge_pte_dirty(pte))
2430                         set_page_dirty(page);
2431
2432                 page_remove_rmap(page);
2433                 force_flush = !__tlb_remove_page(tlb, page);
2434                 if (force_flush)
2435                         break;
2436                 /* Bail out after unmapping reference page if supplied */
2437                 if (ref_page)
2438                         break;
2439         }
2440         spin_unlock(&mm->page_table_lock);
2441         /*
2442          * mmu_gather ran out of room to batch pages; we break out of
2443          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2444          * and page-free while holding it.
2445          */
2446         if (force_flush) {
2447                 force_flush = 0;
2448                 tlb_flush_mmu(tlb);
2449                 if (address < end && !ref_page)
2450                         goto again;
2451         }
2452         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2453         tlb_end_vma(tlb, vma);
2454 }
2455
2456 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2457                           struct vm_area_struct *vma, unsigned long start,
2458                           unsigned long end, struct page *ref_page)
2459 {
2460         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2461
2462         /*
2463          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2464          * test will fail on a vma being torn down, and not grab a page table
2465          * on its way out.  We're lucky that the flag has such an appropriate
2466          * name, and can in fact be safely cleared here. We could clear it
2467          * before the __unmap_hugepage_range above, but all that's necessary
2468          * is to clear it before releasing the i_mmap_mutex. This works
2469          * because in the context this is called, the VMA is about to be
2470          * destroyed and the i_mmap_mutex is held.
2471          */
2472         vma->vm_flags &= ~VM_MAYSHARE;
2473 }
2474
2475 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2476                           unsigned long end, struct page *ref_page)
2477 {
2478         struct mm_struct *mm;
2479         struct mmu_gather tlb;
2480
2481         mm = vma->vm_mm;
2482
2483         tlb_gather_mmu(&tlb, mm, start, end);
2484         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2485         tlb_finish_mmu(&tlb, start, end);
2486 }
2487
2488 /*
2489  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2490  * mapping it owns the reserve page for. The intention is to unmap the page
2491  * from other VMAs and let the children be SIGKILLed if they are faulting the
2492  * same region.
2493  */
2494 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2495                                 struct page *page, unsigned long address)
2496 {
2497         struct hstate *h = hstate_vma(vma);
2498         struct vm_area_struct *iter_vma;
2499         struct address_space *mapping;
2500         pgoff_t pgoff;
2501
2502         /*
2503          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2504          * from page cache lookup which is in HPAGE_SIZE units.
2505          */
2506         address = address & huge_page_mask(h);
2507         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2508                         vma->vm_pgoff;
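        /*
         * E.g. with 2MB huge pages and 4kB PAGE_SIZE, a fault address 4MB
         * past vma->vm_start yields pgoff = vma->vm_pgoff + 1024, keeping
         * pgoff in PAGE_SIZE units as the interval tree lookup expects.
         */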
2509         mapping = file_inode(vma->vm_file)->i_mapping;
2510
2511         /*
2512          * Take the mapping lock for the duration of the table walk. As
2513          * this mapping should be shared between all the VMAs,
2514          * __unmap_hugepage_range() is called with the lock already held.
2515          */
2516         mutex_lock(&mapping->i_mmap_mutex);
2517         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2518                 /* Do not unmap the current VMA */
2519                 if (iter_vma == vma)
2520                         continue;
2521
2522                 /*
2523                  * Unmap the page from other VMAs without their own reserves.
2524                  * They get marked to be SIGKILLed if they fault in these
2525                  * areas. This is because a future no-page fault on this VMA
2526                  * could insert a zeroed page instead of the data existing
2527                  * from the time of fork. This would look like data corruption
2528                  */
2529                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2530                         unmap_hugepage_range(iter_vma, address,
2531                                              address + huge_page_size(h), page);
2532         }
2533         mutex_unlock(&mapping->i_mmap_mutex);
2534
2535         return 1;
2536 }
2537
2538 /*
2539  * hugetlb_cow() should be called with the page lock of the original hugepage held.
2540  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2541  * cannot race with other handlers or page migration.
2542  * Keep the pte_same checks anyway to make transition from the mutex easier.
2543  */
2544 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2545                         unsigned long address, pte_t *ptep, pte_t pte,
2546                         struct page *pagecache_page)
2547 {
2548         struct hstate *h = hstate_vma(vma);
2549         struct page *old_page, *new_page;
2550         int outside_reserve = 0;
2551         unsigned long mmun_start;       /* For mmu_notifiers */
2552         unsigned long mmun_end;         /* For mmu_notifiers */
2553
2554         old_page = pte_page(pte);
2555
2556 retry_avoidcopy:
2557         /* If no-one else is actually using this page, avoid the copy
2558          * and just make the page writable */
2559         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2560                 page_move_anon_rmap(old_page, vma, address);
2561                 set_huge_ptep_writable(vma, address, ptep);
2562                 return 0;
2563         }
2564
2565         /*
2566          * If the process that created a MAP_PRIVATE mapping is about to
2567          * perform a COW due to a shared page count, attempt to satisfy
2568          * the allocation without using the existing reserves. The pagecache
2569          * page is used to determine if the reserve at this address was
2570          * consumed or not. If reserves were used, a partial faulted mapping
2571          * at the time of fork() could consume its reserves on COW instead
2572          * of the full address range.
2573          */
2574         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2575                         old_page != pagecache_page)
2576                 outside_reserve = 1;
2577
2578         page_cache_get(old_page);
2579
2580         /* Drop page_table_lock as buddy allocator may be called */
2581         spin_unlock(&mm->page_table_lock);
2582         new_page = alloc_huge_page(vma, address, outside_reserve);
2583
2584         if (IS_ERR(new_page)) {
2585                 long err = PTR_ERR(new_page);
2586                 page_cache_release(old_page);
2587
2588                 /*
2589                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2590                  * it is due to references held by a child and an insufficient
2591                  * huge page pool. To guarantee the original mappers
2592                  * huge page pool. To guarantee the original mapper's
2593                  * may get SIGKILLed if it later faults.
2594                  */
2595                 if (outside_reserve) {
2596                         BUG_ON(huge_pte_none(pte));
2597                         if (unmap_ref_private(mm, vma, old_page, address)) {
2598                                 BUG_ON(huge_pte_none(pte));
2599                                 spin_lock(&mm->page_table_lock);
2600                                 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2601                                 if (likely(pte_same(huge_ptep_get(ptep), pte)))
2602                                         goto retry_avoidcopy;
2603                                 /*
2604                                  * A race occurred while re-acquiring page_table_lock,
2605                                  * and our job is done.
2606                                  */
2607                                 return 0;
2608                         }
2609                         WARN_ON_ONCE(1);
2610                 }
2611
2612                 /* Caller expects lock to be held */
2613                 spin_lock(&mm->page_table_lock);
2614                 if (err == -ENOMEM)
2615                         return VM_FAULT_OOM;
2616                 else
2617                         return VM_FAULT_SIGBUS;
2618         }
2619
2620         /*
2621          * When the original hugepage is a shared one, it does not have
2622          * an anon_vma prepared.
2623          */
2624         if (unlikely(anon_vma_prepare(vma))) {
2625                 page_cache_release(new_page);
2626                 page_cache_release(old_page);
2627                 /* Caller expects lock to be held */
2628                 spin_lock(&mm->page_table_lock);
2629                 return VM_FAULT_OOM;
2630         }
2631
2632         copy_user_huge_page(new_page, old_page, address, vma,
2633                             pages_per_huge_page(h));
2634         __SetPageUptodate(new_page);
2635
2636         mmun_start = address & huge_page_mask(h);
2637         mmun_end = mmun_start + huge_page_size(h);
2638         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2639         /*
2640          * Retake the page_table_lock to check for racing updates
2641          * before the page tables are altered
2642          */
2643         spin_lock(&mm->page_table_lock);
2644         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2645         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2646                 ClearPagePrivate(new_page);
2647
2648                 /* Break COW */
2649                 huge_ptep_clear_flush(vma, address, ptep);
2650                 set_huge_pte_at(mm, address, ptep,
2651                                 make_huge_pte(vma, new_page, 1));
2652                 page_remove_rmap(old_page);
2653                 hugepage_add_new_anon_rmap(new_page, vma, address);
2654                 /* Make the old page be freed below */
2655                 new_page = old_page;
2656         }
2657         spin_unlock(&mm->page_table_lock);
2658         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2659         page_cache_release(new_page);
2660         page_cache_release(old_page);
2661
2662         /* Caller expects lock to be held */
2663         spin_lock(&mm->page_table_lock);
2664         return 0;
2665 }
2666
2667 /* Return the pagecache page at a given address within a VMA */
2668 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2669                         struct vm_area_struct *vma, unsigned long address)
2670 {
2671         struct address_space *mapping;
2672         pgoff_t idx;
2673
2674         mapping = vma->vm_file->f_mapping;
2675         idx = vma_hugecache_offset(h, vma, address);
2676
2677         return find_lock_page(mapping, idx);
2678 }
2679
2680 /*
2681  * Return whether there is a pagecache page to back the given address within VMA.
2682  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2683  */
2684 static bool hugetlbfs_pagecache_present(struct hstate *h,
2685                         struct vm_area_struct *vma, unsigned long address)
2686 {
2687         struct address_space *mapping;
2688         pgoff_t idx;
2689         struct page *page;
2690
2691         mapping = vma->vm_file->f_mapping;
2692         idx = vma_hugecache_offset(h, vma, address);
2693
2694         page = find_get_page(mapping, idx);
2695         if (page)
2696                 put_page(page);
2697         return page != NULL;
2698 }
2699
2700 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2701                         unsigned long address, pte_t *ptep, unsigned int flags)
2702 {
2703         struct hstate *h = hstate_vma(vma);
2704         int ret = VM_FAULT_SIGBUS;
2705         int anon_rmap = 0;
2706         pgoff_t idx;
2707         unsigned long size;
2708         struct page *page;
2709         struct address_space *mapping;
2710         pte_t new_pte;
2711
2712         /*
2713          * Currently, we are forced to kill the process in the event the
2714          * original mapper has unmapped pages from the child due to a failed
2715          * COW. Warn that such a situation has occurred as it may not be obvious.
2716          */
2717         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2718                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
2719                            current->pid);
2720                 return ret;
2721         }
2722
2723         mapping = vma->vm_file->f_mapping;
2724         idx = vma_hugecache_offset(h, vma, address);
2725
2726         /*
2727          * Use page lock to guard against racing truncation
2728          * before we get page_table_lock.
2729          */
2730 retry:
2731         page = find_lock_page(mapping, idx);
2732         if (!page) {
2733                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2734                 if (idx >= size)
2735                         goto out;
2736                 page = alloc_huge_page(vma, address, 0);
2737                 if (IS_ERR(page)) {
2738                         ret = PTR_ERR(page);
2739                         if (ret == -ENOMEM)
2740                                 ret = VM_FAULT_OOM;
2741                         else
2742                                 ret = VM_FAULT_SIGBUS;
2743                         goto out;
2744                 }
2745                 clear_huge_page(page, address, pages_per_huge_page(h));
2746                 __SetPageUptodate(page);
2747
2748                 if (vma->vm_flags & VM_MAYSHARE) {
2749                         int err;
2750                         struct inode *inode = mapping->host;
2751
2752                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2753                         if (err) {
2754                                 put_page(page);
2755                                 if (err == -EEXIST)
2756                                         goto retry;
2757                                 goto out;
2758                         }
2759                         ClearPagePrivate(page);
2760
2761                         spin_lock(&inode->i_lock);
2762                         inode->i_blocks += blocks_per_huge_page(h);
2763                         spin_unlock(&inode->i_lock);
2764                 } else {
2765                         lock_page(page);
2766                         if (unlikely(anon_vma_prepare(vma))) {
2767                                 ret = VM_FAULT_OOM;
2768                                 goto backout_unlocked;
2769                         }
2770                         anon_rmap = 1;
2771                 }
2772         } else {
2773                 /*
2774                  * If a memory error occurs between mmap() and fault, some processes
2775                  * don't have a hwpoisoned swap entry for the errored virtual address.
2776                  * So we need to block hugepage faults with the PG_hwpoison bit check.
2777                  */
2778                 if (unlikely(PageHWPoison(page))) {
2779                         ret = VM_FAULT_HWPOISON |
2780                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2781                         goto backout_unlocked;
2782                 }
2783         }
2784
2785         /*
2786          * If we are going to COW a private mapping later, we examine the
2787          * pending reservations for this page now. This will ensure that
2788          * any allocations necessary to record that reservation occur outside
2789          * the spinlock.
2790          */
2791         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2792                 if (vma_needs_reservation(h, vma, address) < 0) {
2793                         ret = VM_FAULT_OOM;
2794                         goto backout_unlocked;
2795                 }
2796
2797         spin_lock(&mm->page_table_lock);
2798         size = i_size_read(mapping->host) >> huge_page_shift(h);
2799         if (idx >= size)
2800                 goto backout;
2801
2802         ret = 0;
2803         if (!huge_pte_none(huge_ptep_get(ptep)))
2804                 goto backout;
2805
2806         if (anon_rmap) {
2807                 ClearPagePrivate(page);
2808                 hugepage_add_new_anon_rmap(page, vma, address);
2809         }
2810         else
2811                 page_dup_rmap(page);
2812         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2813                                 && (vma->vm_flags & VM_SHARED)));
2814         set_huge_pte_at(mm, address, ptep, new_pte);
2815
2816         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2817                 /* Optimization, do the COW without a second fault */
2818                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2819         }
2820
2821         spin_unlock(&mm->page_table_lock);
2822         unlock_page(page);
2823 out:
2824         return ret;
2825
2826 backout:
2827         spin_unlock(&mm->page_table_lock);
2828 backout_unlocked:
2829         unlock_page(page);
2830         put_page(page);
2831         goto out;
2832 }
2833
2834 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2835                         unsigned long address, unsigned int flags)
2836 {
2837         pte_t *ptep;
2838         pte_t entry;
2839         int ret;
2840         struct page *page = NULL;
2841         struct page *pagecache_page = NULL;
2842         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2843         struct hstate *h = hstate_vma(vma);
2844
2845         address &= huge_page_mask(h);
2846
2847         ptep = huge_pte_offset(mm, address);
2848         if (ptep) {
2849                 entry = huge_ptep_get(ptep);
2850                 if (unlikely(is_hugetlb_entry_migration(entry))) {
2851                         migration_entry_wait_huge(mm, ptep);
2852                         return 0;
2853                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2854                         return VM_FAULT_HWPOISON_LARGE |
2855                                 VM_FAULT_SET_HINDEX(hstate_index(h));
2856         }
2857
2858         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2859         if (!ptep)
2860                 return VM_FAULT_OOM;
2861
2862         /*
2863          * Serialize hugepage allocation and instantiation, so that we don't
2864          * get spurious allocation failures if two CPUs race to instantiate
2865          * the same page in the page cache.
2866          */
2867         mutex_lock(&hugetlb_instantiation_mutex);
2868         entry = huge_ptep_get(ptep);
2869         if (huge_pte_none(entry)) {
2870                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2871                 goto out_mutex;
2872         }
2873
2874         ret = 0;
2875
2876         /*
2877          * If we are going to COW the mapping later, we examine the pending
2878          * reservations for this page now. This will ensure that any
2879          * allocations necessary to record that reservation occur outside the
2880          * spinlock. For private mappings, we also lookup the pagecache
2881          * page now as it is used to determine if a reservation has been
2882          * consumed.
2883          */
2884         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
2885                 if (vma_needs_reservation(h, vma, address) < 0) {
2886                         ret = VM_FAULT_OOM;
2887                         goto out_mutex;
2888                 }
2889
2890                 if (!(vma->vm_flags & VM_MAYSHARE))
2891                         pagecache_page = hugetlbfs_pagecache_page(h,
2892                                                                 vma, address);
2893         }
2894
2895         /*
2896          * hugetlb_cow() requires page locks of pte_page(entry) and
2897          * pagecache_page, so here we need to take the former one
2898          * when page != pagecache_page or !pagecache_page.
2899          * Note that locking order is always pagecache_page -> page,
2900          * so no worry about deadlock.
2901          */
2902         page = pte_page(entry);
2903         get_page(page);
2904         if (page != pagecache_page)
2905                 lock_page(page);
2906
2907         spin_lock(&mm->page_table_lock);
2908         /* Check for a racing update before calling hugetlb_cow */
2909         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2910                 goto out_page_table_lock;
2911
2912
2913         if (flags & FAULT_FLAG_WRITE) {
2914                 if (!huge_pte_write(entry)) {
2915                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2916                                                         pagecache_page);
2917                         goto out_page_table_lock;
2918                 }
2919                 entry = huge_pte_mkdirty(entry);
2920         }
2921         entry = pte_mkyoung(entry);
2922         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2923                                                 flags & FAULT_FLAG_WRITE))
2924                 update_mmu_cache(vma, address, ptep);
2925
2926 out_page_table_lock:
2927         spin_unlock(&mm->page_table_lock);
2928
2929         if (pagecache_page) {
2930                 unlock_page(pagecache_page);
2931                 put_page(pagecache_page);
2932         }
2933         if (page != pagecache_page)
2934                 unlock_page(page);
2935         put_page(page);
2936
2937 out_mutex:
2938         mutex_unlock(&hugetlb_instantiation_mutex);
2939
2940         return ret;
2941 }
2942
2943 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2944                          struct page **pages, struct vm_area_struct **vmas,
2945                          unsigned long *position, unsigned long *nr_pages,
2946                          long i, unsigned int flags)
2947 {
2948         unsigned long pfn_offset;
2949         unsigned long vaddr = *position;
2950         unsigned long remainder = *nr_pages;
2951         struct hstate *h = hstate_vma(vma);
2952
2953         spin_lock(&mm->page_table_lock);
2954         while (vaddr < vma->vm_end && remainder) {
2955                 pte_t *pte;
2956                 int absent;
2957                 struct page *page;
2958
2959                 /*
2960                  * Some archs (sparc64, sh*) have multiple pte_t entries
2961                  * for each hugepage.  We have to make sure we get the
2962                  * first one, for the page indexing below to work.
2963                  */
2964                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2965                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2966
2967                 /*
2968                  * When coredumping, it suits get_dump_page if we just return
2969                  * an error where there's an empty slot with no huge pagecache
2970                  * to back it.  This way, we avoid allocating a hugepage, and
2971                  * the sparse dumpfile avoids allocating disk blocks, but its
2972                  * huge holes still show up with zeroes where they need to be.
2973                  */
2974                 if (absent && (flags & FOLL_DUMP) &&
2975                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2976                         remainder = 0;
2977                         break;
2978                 }
2979
2980                 /*
2981                  * We need to call hugetlb_fault both for hugepages under
2982                  * migration (in which case hugetlb_fault waits for the
2983                  * migration) and for hwpoisoned hugepages (in which case we
2984                  * need to prevent the caller from accessing them). To do
2985                  * this, we use is_swap_pte here instead of
2986                  * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned:
2987                  * it simply covers both cases, and we can't follow pages
2988                  * correctly from any kind of swap entry anyway.
2989                  */
2990                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
2991                     ((flags & FOLL_WRITE) &&
2992                       !huge_pte_write(huge_ptep_get(pte)))) {
2993                         int ret;
2994
2995                         spin_unlock(&mm->page_table_lock);
2996                         ret = hugetlb_fault(mm, vma, vaddr,
2997                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2998                         spin_lock(&mm->page_table_lock);
2999                         if (!(ret & VM_FAULT_ERROR))
3000                                 continue;
3001
3002                         remainder = 0;
3003                         break;
3004                 }
3005
3006                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3007                 page = pte_page(huge_ptep_get(pte));
3008 same_page:
3009                 if (pages) {
3010                         pages[i] = mem_map_offset(page, pfn_offset);
3011                         get_page(pages[i]);
3012                 }
3013
3014                 if (vmas)
3015                         vmas[i] = vma;
3016
3017                 vaddr += PAGE_SIZE;
3018                 ++pfn_offset;
3019                 --remainder;
3020                 ++i;
3021                 if (vaddr < vma->vm_end && remainder &&
3022                                 pfn_offset < pages_per_huge_page(h)) {
3023                         /*
3024                          * We use pfn_offset to avoid touching the pageframes
3025                          * of this compound page.
3026                          */
3027                         goto same_page;
3028                 }
3029         }
3030         spin_unlock(&mm->page_table_lock);
3031         *nr_pages = remainder;
3032         *position = vaddr;
3033
3034         return i ? i : -EFAULT;
3035 }
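/*
 * Worked example for the pfn_offset indexing above (illustrative only,
 * assuming 2MB hugepages and PAGE_SHIFT == 12): a vaddr whose low bits are
 * 0x3000 gives pfn_offset = 0x3000 >> 12 = 3, i.e. the fourth base page of
 * the compound page. The helper below just restates that computation;
 * hugetlb_subpage() is an invented name.
 */
static struct page *hugetlb_subpage(struct hstate *h, struct page *head,
				    unsigned long vaddr)
{
	unsigned long pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;

	return mem_map_offset(head, pfn_offset);
}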
3036
3037 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3038                 unsigned long address, unsigned long end, pgprot_t newprot)
3039 {
3040         struct mm_struct *mm = vma->vm_mm;
3041         unsigned long start = address;
3042         pte_t *ptep;
3043         pte_t pte;
3044         struct hstate *h = hstate_vma(vma);
3045         unsigned long pages = 0;
3046
3047         BUG_ON(address >= end);
3048         flush_cache_range(vma, address, end);
3049
3050         mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3051         spin_lock(&mm->page_table_lock);
3052         for (; address < end; address += huge_page_size(h)) {
3053                 ptep = huge_pte_offset(mm, address);
3054                 if (!ptep)
3055                         continue;
3056                 if (huge_pmd_unshare(mm, &address, ptep)) {
3057                         pages++;
3058                         continue;
3059                 }
3060                 if (!huge_pte_none(huge_ptep_get(ptep))) {
3061                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3062                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3063                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3064                         set_huge_pte_at(mm, address, ptep, pte);
3065                         pages++;
3066                 }
3067         }
3068         spin_unlock(&mm->page_table_lock);
3069         /*
3070          * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3071          * may have cleared our pud entry and done put_page on the page table:
3072          * once we release i_mmap_mutex, another task can do the final put_page
3073          * and that page table could then be reused and filled with junk.
3074          */
3075         flush_tlb_range(vma, start, end);
3076         mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3077
3078         return pages << h->order;
3079 }
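/*
 * Note on the return value above: "pages" counts huge pages whose
 * protection changed, and the shift by h->order converts that into base
 * pages, matching what change_protection() reports for normal pages. A
 * minimal sketch of the conversion (invented helper name):
 */
static inline unsigned long hugetlb_pages_to_base_pages(struct hstate *h,
							unsigned long pages)
{
	/* e.g. 3 x 2MB hugepages (order 9) -> 3 << 9 == 1536 base pages */
	return pages << h->order;
}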
3080
3081 int hugetlb_reserve_pages(struct inode *inode,
3082                                         long from, long to,
3083                                         struct vm_area_struct *vma,
3084                                         vm_flags_t vm_flags)
3085 {
3086         long ret, chg;
3087         struct hstate *h = hstate_inode(inode);
3088         struct hugepage_subpool *spool = subpool_inode(inode);
3089
3090         /*
3091          * Only apply hugepage reservation if asked. At fault time, an
3092          * attempt will be made for VM_NORESERVE mappings to allocate a
3093          * page without using reserves.
3094          */
3095         if (vm_flags & VM_NORESERVE)
3096                 return 0;
3097
3098         /*
3099          * Shared mappings base their reservation on the number of pages that
3100          * are already allocated on behalf of the file. Private mappings need
3101          * to reserve the full area even if read-only as mprotect() may be
3102          * called to make the mapping read-write. Assume !vma is a shm mapping.
3103          */
3104         if (!vma || vma->vm_flags & VM_MAYSHARE)
3105                 chg = region_chg(&inode->i_mapping->private_list, from, to);
3106         else {
3107                 struct resv_map *resv_map = resv_map_alloc();
3108                 if (!resv_map)
3109                         return -ENOMEM;
3110
3111                 chg = to - from;
3112
3113                 set_vma_resv_map(vma, resv_map);
3114                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3115         }
3116
3117         if (chg < 0) {
3118                 ret = chg;
3119                 goto out_err;
3120         }
3121
3122         /* There must be enough pages in the subpool for the mapping */
3123         if (hugepage_subpool_get_pages(spool, chg)) {
3124                 ret = -ENOSPC;
3125                 goto out_err;
3126         }
3127
3128         /*
3129          * Check that enough hugepages are available for the reservation.
3130          * Hand the pages back to the subpool if there are not.
3131          */
3132         ret = hugetlb_acct_memory(h, chg);
3133         if (ret < 0) {
3134                 hugepage_subpool_put_pages(spool, chg);
3135                 goto out_err;
3136         }
3137
3138         /*
3139          * Account for the reservations made. Shared mappings record regions
3140          * that have reservations as they are shared by multiple VMAs.
3141          * When the last VMA disappears, the region map says how large
3142          * the reservation was and the page cache tells how much of
3143          * the reservation was consumed. Private mappings are per-VMA and
3144          * only the consumed reservations are tracked. When the VMA
3145          * disappears, the original reservation is the VMA size and the
3146          * consumed reservations are stored in the map. Hence, nothing
3147          * else has to be done for private mappings here.
3148          */
3149         if (!vma || vma->vm_flags & VM_MAYSHARE)
3150                 region_add(&inode->i_mapping->private_list, from, to);
3151         return 0;
3152 out_err:
3153         if (vma)
3154                 resv_map_put(vma);
3155         return ret;
3156 }
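/*
 * Hedged usage sketch (invented caller, for illustration only): roughly
 * the shape in which a hugetlbfs-style mmap hook drives the reservation
 * above, with "from" and "to" expressed in huge pages. The name
 * my_hugetlb_file_mmap() and the simplified length handling are
 * assumptions, not the real hugetlbfs_file_mmap().
 */
static int my_hugetlb_file_mmap(struct inode *inode,
				struct vm_area_struct *vma)
{
	struct hstate *h = hstate_inode(inode);
	long from = vma->vm_pgoff >> huge_page_order(h);
	long to = from + ((vma->vm_end - vma->vm_start) >> huge_page_shift(h));

	return hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags);
}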
3157
3158 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3159 {
3160         struct hstate *h = hstate_inode(inode);
3161         long chg = region_truncate(&inode->i_mapping->private_list, offset);
3162         struct hugepage_subpool *spool = subpool_inode(inode);
3163
3164         spin_lock(&inode->i_lock);
3165         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3166         spin_unlock(&inode->i_lock);
3167
3168         hugepage_subpool_put_pages(spool, (chg - freed));
3169         hugetlb_acct_memory(h, -(chg - freed));
3170 }
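/*
 * Worked example for the accounting above (illustrative numbers): if a
 * truncate removes a region for which region_truncate() reports chg = 8
 * reserved hugepages while only freed = 5 pages were actually present in
 * the page cache, the chg - freed = 3 unconsumed reservations are handed
 * back to the subpool and subtracted from the global accounting.
 */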
3171
3172 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3173 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3174                                 struct vm_area_struct *vma,
3175                                 unsigned long addr, pgoff_t idx)
3176 {
3177         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3178                                 svma->vm_start;
3179         unsigned long sbase = saddr & PUD_MASK;
3180         unsigned long s_end = sbase + PUD_SIZE;
3181
3182         /* Allow segments to share if only one is marked locked */
3183         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3184         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3185
3186         /*
3187          * Match the virtual addresses, permissions and the alignment of
3188          * the page table page.
3189          */
3190         if (pmd_index(addr) != pmd_index(saddr) ||
3191             vm_flags != svm_flags ||
3192             sbase < svma->vm_start || svma->vm_end < s_end)
3193                 return 0;
3194
3195         return saddr;
3196 }
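/*
 * Worked example for the saddr computation above (illustrative numbers,
 * assuming PAGE_SHIFT == 12): with svma->vm_start = 0x40000000,
 * svma->vm_pgoff = 0 and idx = 512, saddr = (512 << 12) + 0x40000000 =
 * 0x40200000 -- the address at which the same file page is mapped in
 * svma. Sharing is then allowed only if that address passes the pmd
 * index, flags and PUD-alignment checks.
 */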
3197
3198 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3199 {
3200         unsigned long base = addr & PUD_MASK;
3201         unsigned long end = base + PUD_SIZE;
3202
3203         /*
3204          * Check for proper vm_flags and page table alignment.
3205          */
3206         if (vma->vm_flags & VM_MAYSHARE &&
3207             vma->vm_start <= base && end <= vma->vm_end)
3208                 return 1;
3209         return 0;
3210 }
3211
3212 /*
3213  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
3214  * and returns the corresponding pte. While this is not necessary for the
3215  * !shared pmd case because we can allocate the pmd later as well, it makes the
3216  * code much cleaner. pmd allocation is essential for the shared case because
3217  * pud has to be populated inside the same i_mmap_mutex section - otherwise
3218  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3219  * bad pmd for sharing.
3220  */
3221 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3222 {
3223         struct vm_area_struct *vma = find_vma(mm, addr);
3224         struct address_space *mapping = vma->vm_file->f_mapping;
3225         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3226                         vma->vm_pgoff;
3227         struct vm_area_struct *svma;
3228         unsigned long saddr;
3229         pte_t *spte = NULL;
3230         pte_t *pte;
3231
3232         if (!vma_shareable(vma, addr))
3233                 return (pte_t *)pmd_alloc(mm, pud, addr);
3234
3235         mutex_lock(&mapping->i_mmap_mutex);
3236         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3237                 if (svma == vma)
3238                         continue;
3239
3240                 saddr = page_table_shareable(svma, vma, addr, idx);
3241                 if (saddr) {
3242                         spte = huge_pte_offset(svma->vm_mm, saddr);
3243                         if (spte) {
3244                                 get_page(virt_to_page(spte));
3245                                 break;
3246                         }
3247                 }
3248         }
3249
3250         if (!spte)
3251                 goto out;
3252
3253         spin_lock(&mm->page_table_lock);
3254         if (pud_none(*pud))
3255                 pud_populate(mm, pud,
3256                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3257         else
3258                 put_page(virt_to_page(spte));
3259         spin_unlock(&mm->page_table_lock);
3260 out:
3261         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3262         mutex_unlock(&mapping->i_mmap_mutex);
3263         return pte;
3264 }
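/*
 * Hedged sketch of the refcounting above: the pmd page starts at
 * page_count == 1 and every additional mm sharing it takes one reference
 * (the loser of the pud_populate() race drops the one it took). A
 * hypothetical predicate mirroring the check in huge_pmd_unshare() below;
 * my_pmd_is_shared() is an invented name.
 */
static bool my_pmd_is_shared(pte_t *ptep)
{
	return page_count(virt_to_page(ptep)) > 1;
}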
3265
3266 /*
3267  * Unmap a huge page backed by a shared pte.
3268  *
3269  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
3270  * shared, as indicated by page_count > 1, unmapping is achieved by clearing
3271  * the pud and decrementing the refcount. If count == 1, the pte page is not shared.
3272  *
3273  * Called with vma->vm_mm->page_table_lock held.
3274  *
3275  * returns: 1 successfully unmapped a shared pte page
3276  *          0 the underlying pte page is not shared, or it is the last user
3277  */
3278 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3279 {
3280         pgd_t *pgd = pgd_offset(mm, *addr);
3281         pud_t *pud = pud_offset(pgd, *addr);
3282
3283         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3284         if (page_count(virt_to_page(ptep)) == 1)
3285                 return 0;
3286
3287         pud_clear(pud);
3288         put_page(virt_to_page(ptep));
3289         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3290         return 1;
3291 }
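/*
 * Worked example for the *addr rewind above (illustrative, x86_64 with
 * 2MB pages): HPAGE_SIZE * PTRS_PER_PTE == 2MB * 512 == 1GB, so for
 * *addr = 0x40200000 we get ALIGN(0x40200000, 1GB) - 2MB = 0x7fe00000.
 * The caller's subsequent "address += huge_page_size(h)" then lands
 * exactly on the 1GB boundary, restarting the scan just past the
 * now-cleared pud.
 */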
3292 #define want_pmd_share()        (1)
3293 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3294 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3295 {
3296         return NULL;
3297 }
3298 #define want_pmd_share()        (0)
3299 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3300
3301 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3302 pte_t *huge_pte_alloc(struct mm_struct *mm,
3303                         unsigned long addr, unsigned long sz)
3304 {
3305         pgd_t *pgd;
3306         pud_t *pud;
3307         pte_t *pte = NULL;
3308
3309         pgd = pgd_offset(mm, addr);
3310         pud = pud_alloc(mm, pgd, addr);
3311         if (pud) {
3312                 if (sz == PUD_SIZE) {
3313                         pte = (pte_t *)pud;
3314                 } else {
3315                         BUG_ON(sz != PMD_SIZE);
3316                         if (want_pmd_share() && pud_none(*pud))
3317                                 pte = huge_pmd_share(mm, addr, pud);
3318                         else
3319                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3320                 }
3321         }
3322         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3323
3324         return pte;
3325 }
3326
3327 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3328 {
3329         pgd_t *pgd;
3330         pud_t *pud;
3331         pmd_t *pmd = NULL;
3332
3333         pgd = pgd_offset(mm, addr);
3334         if (pgd_present(*pgd)) {
3335                 pud = pud_offset(pgd, addr);
3336                 if (pud_present(*pud)) {
3337                         if (pud_huge(*pud))
3338                                 return (pte_t *)pud;
3339                         pmd = pmd_offset(pud, addr);
3340                 }
3341         }
3342         return (pte_t *) pmd;
3343 }
3344
3345 struct page *
3346 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3347                 pmd_t *pmd, int write)
3348 {
3349         struct page *page;
3350
3351         page = pte_page(*(pte_t *)pmd);
3352         if (page)
3353                 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
3354         return page;
3355 }
3356
3357 struct page *
3358 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3359                 pud_t *pud, int write)
3360 {
3361         struct page *page;
3362
3363         page = pte_page(*(pte_t *)pud);
3364         if (page)
3365                 page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
3366         return page;
3367 }
3368
3369 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3370
3371 /* Can be overridden by architectures */
3372 __attribute__((weak)) struct page *
3373 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3374                pud_t *pud, int write)
3375 {
3376         BUG();
3377         return NULL;
3378 }
3379
3380 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3381
3382 #ifdef CONFIG_MEMORY_FAILURE
3383
3384 /* Should be called with hugetlb_lock held */
3385 static int is_hugepage_on_freelist(struct page *hpage)
3386 {
3387         struct page *page;
3388         struct page *tmp;
3389         struct hstate *h = page_hstate(hpage);
3390         int nid = page_to_nid(hpage);
3391
3392         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3393                 if (page == hpage)
3394                         return 1;
3395         return 0;
3396 }
3397
3398 /*
3399  * This function is called from the memory-failure code.
3400  * It assumes the caller holds the page lock of the head page.
3401  */
3402 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3403 {
3404         struct hstate *h = page_hstate(hpage);
3405         int nid = page_to_nid(hpage);
3406         int ret = -EBUSY;
3407
3408         spin_lock(&hugetlb_lock);
3409         if (is_hugepage_on_freelist(hpage)) {
3410                 /*
3411                  * A hwpoisoned hugepage isn't linked to the activelist or
3412                  * freelist, but a dangling hpage->lru can still trigger
3413                  * list-debug warnings (this happens when we later call
3414                  * unpoison_memory() on it), so make it point to itself with list_del_init().
3415                  */
3416                 list_del_init(&hpage->lru);
3417                 set_page_refcounted(hpage);
3418                 h->free_huge_pages--;
3419                 h->free_huge_pages_node[nid]--;
3420                 ret = 0;
3421         }
3422         spin_unlock(&hugetlb_lock);
3423         return ret;
3424 }
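/*
 * Hedged usage sketch (invented caller): the memory-failure path is
 * expected to hold the head page lock and then try to pull a poisoned
 * hugepage off the free list, roughly as below. my_offline_free_hugepage()
 * is an illustration, not the real memory_failure() code.
 */
static int my_offline_free_hugepage(struct page *hpage)
{
	int ret;

	lock_page(hpage);
	ret = dequeue_hwpoisoned_huge_page(hpage);
	unlock_page(hpage);
	return ret;
}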
3425 #endif
3426
3427 bool isolate_huge_page(struct page *page, struct list_head *list)
3428 {
3429         VM_BUG_ON(!PageHead(page));
3430         if (!get_page_unless_zero(page))
3431                 return false;
3432         spin_lock(&hugetlb_lock);
3433         list_move_tail(&page->lru, list);
3434         spin_unlock(&hugetlb_lock);
3435         return true;
3436 }
3437
3438 void putback_active_hugepage(struct page *page)
3439 {
3440         VM_BUG_ON(!PageHead(page));
3441         spin_lock(&hugetlb_lock);
3442         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3443         spin_unlock(&hugetlb_lock);
3444         put_page(page);
3445 }
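/*
 * Hedged usage sketch (invented caller): this is the shape in which the
 * core migration code is expected to use the two helpers above --
 * isolate the hugepage onto a private list, let migration run, and put
 * back anything that could not be moved. my_migrate_one_hugepage() and
 * its error handling are assumptions for illustration only.
 */
static int my_migrate_one_hugepage(struct page *hpage)
{
	LIST_HEAD(pagelist);

	if (!isolate_huge_page(hpage, &pagelist))
		return -EBUSY;

	/*
	 * ... migrate_pages(&pagelist, ...) would run here; if migration
	 * fails, the page goes back to its hstate's active list:
	 */
	putback_active_hugepage(hpage);
	return 0;
}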