mm/hugetlb.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35
36 #include <asm/page.h>
37 #include <asm/pgalloc.h>
38 #include <asm/tlb.h>
39
40 #include <linux/io.h>
41 #include <linux/hugetlb.h>
42 #include <linux/hugetlb_cgroup.h>
43 #include <linux/node.h>
44 #include <linux/page_owner.h>
45 #include "internal.h"
46 #include "hugetlb_vmemmap.h"
47
48 int hugetlb_max_hstate __read_mostly;
49 unsigned int default_hstate_idx;
50 struct hstate hstates[HUGE_MAX_HSTATE];
51
52 #ifdef CONFIG_CMA
53 static struct cma *hugetlb_cma[MAX_NUMNODES];
54 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
55 static bool hugetlb_cma_page(struct page *page, unsigned int order)
56 {
57         return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
58                                 1 << order);
59 }
60 #else
61 static bool hugetlb_cma_page(struct page *page, unsigned int order)
62 {
63         return false;
64 }
65 #endif
66 static unsigned long hugetlb_cma_size __initdata;
67
68 /*
69  * Minimum page order among possible hugepage sizes, set to a proper value
70  * at boot time.
71  */
72 static unsigned int minimum_order __read_mostly = UINT_MAX;
73
74 __initdata LIST_HEAD(huge_boot_pages);
75
76 /* for command line parsing */
77 static struct hstate * __initdata parsed_hstate;
78 static unsigned long __initdata default_hstate_max_huge_pages;
79 static bool __initdata parsed_valid_hugepagesz = true;
80 static bool __initdata parsed_default_hugepagesz;
81 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
82
83 /*
84  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
85  * free_huge_pages, and surplus_huge_pages.
86  */
87 DEFINE_SPINLOCK(hugetlb_lock);
88
89 /*
90  * Serializes faults on the same logical page.  This is used to
91  * prevent spurious OOMs when the hugepage pool is fully utilized.
92  */
93 static int num_fault_mutexes;
94 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
95
96 /* Forward declaration */
97 static int hugetlb_acct_memory(struct hstate *h, long delta);
98
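/*
 * A subpool may be released only when nothing holds a reference to it.  When
 * a maximum size is configured, the subpool is considered idle once
 * used_hpages has dropped back to zero; otherwise, when a minimum size is
 * configured, it is idle once rsv_hpages has returned to min_hpages.
 */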
99 static inline bool subpool_is_free(struct hugepage_subpool *spool)
100 {
101         if (spool->count)
102                 return false;
103         if (spool->max_hpages != -1)
104                 return spool->used_hpages == 0;
105         if (spool->min_hpages != -1)
106                 return spool->rsv_hpages == spool->min_hpages;
107
108         return true;
109 }
110
111 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
112                                                 unsigned long irq_flags)
113 {
114         spin_unlock_irqrestore(&spool->lock, irq_flags);
115
116         /* If no pages are used, and no other handles to the subpool
117          * remain, give up any reservations based on minimum size and
118          * free the subpool */
119         if (subpool_is_free(spool)) {
120                 if (spool->min_hpages != -1)
121                         hugetlb_acct_memory(spool->hstate,
122                                                 -spool->min_hpages);
123                 kfree(spool);
124         }
125 }
126
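/*
 * Allocate and initialize a subpool with the given maximum and minimum sizes
 * in huge pages (-1 means no limit).  A requested minimum is charged against
 * the global pool up front; the subpool is not created if that charge cannot
 * be satisfied.
 */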
127 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
128                                                 long min_hpages)
129 {
130         struct hugepage_subpool *spool;
131
132         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
133         if (!spool)
134                 return NULL;
135
136         spin_lock_init(&spool->lock);
137         spool->count = 1;
138         spool->max_hpages = max_hpages;
139         spool->hstate = h;
140         spool->min_hpages = min_hpages;
141
142         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
143                 kfree(spool);
144                 return NULL;
145         }
146         spool->rsv_hpages = min_hpages;
147
148         return spool;
149 }
150
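/*
 * Drop one reference on a subpool.  When the last reference goes away and the
 * subpool is idle, any minimum-size reservation is returned to the global
 * pool and the subpool is freed.
 */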
151 void hugepage_put_subpool(struct hugepage_subpool *spool)
152 {
153         unsigned long flags;
154
155         spin_lock_irqsave(&spool->lock, flags);
156         BUG_ON(!spool->count);
157         spool->count--;
158         unlock_or_release_subpool(spool, flags);
159 }
160
161 /*
162  * Subpool accounting for allocating and reserving pages.
163  * Return -ENOMEM if there are not enough resources to satisfy the
164  * request.  Otherwise, return the number of pages by which the
165  * global pools must be adjusted (upward).  The returned value may
166  * only be different than the passed value (delta) in the case where
167  * a subpool minimum size must be maintained.
168  */
169 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
170                                       long delta)
171 {
172         long ret = delta;
173
174         if (!spool)
175                 return ret;
176
177         spin_lock_irq(&spool->lock);
178
179         if (spool->max_hpages != -1) {          /* maximum size accounting */
180                 if ((spool->used_hpages + delta) <= spool->max_hpages)
181                         spool->used_hpages += delta;
182                 else {
183                         ret = -ENOMEM;
184                         goto unlock_ret;
185                 }
186         }
187
188         /* minimum size accounting */
189         if (spool->min_hpages != -1 && spool->rsv_hpages) {
190                 if (delta > spool->rsv_hpages) {
191                         /*
192                          * Asking for more reserves than those already taken on
193                          * behalf of subpool.  Return difference.
194                          */
195                         ret = delta - spool->rsv_hpages;
196                         spool->rsv_hpages = 0;
197                 } else {
198                         ret = 0;        /* reserves already accounted for */
199                         spool->rsv_hpages -= delta;
200                 }
201         }
202
203 unlock_ret:
204         spin_unlock_irq(&spool->lock);
205         return ret;
206 }
207
208 /*
209  * Subpool accounting for freeing and unreserving pages.
210  * Return the number of global page reservations that must be dropped.
211  * The return value may only be different than the passed value (delta)
212  * in the case where a subpool minimum size must be maintained.
213  */
214 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
215                                        long delta)
216 {
217         long ret = delta;
218         unsigned long flags;
219
220         if (!spool)
221                 return delta;
222
223         spin_lock_irqsave(&spool->lock, flags);
224
225         if (spool->max_hpages != -1)            /* maximum size accounting */
226                 spool->used_hpages -= delta;
227
228         /* minimum size accounting */
229         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
230                 if (spool->rsv_hpages + delta <= spool->min_hpages)
231                         ret = 0;
232                 else
233                         ret = spool->rsv_hpages + delta - spool->min_hpages;
234
235                 spool->rsv_hpages += delta;
236                 if (spool->rsv_hpages > spool->min_hpages)
237                         spool->rsv_hpages = spool->min_hpages;
238         }
239
240         /*
241          * If hugetlbfs_put_super couldn't free spool due to an outstanding
242          * quota reference, free it now.
243          */
244         unlock_or_release_subpool(spool, flags);
245
246         return ret;
247 }
248
249 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
250 {
251         return HUGETLBFS_SB(inode->i_sb)->spool;
252 }
253
254 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
255 {
256         return subpool_inode(file_inode(vma->vm_file));
257 }
258
259 /* Helper that removes a struct file_region from the resv_map cache and returns
260  * it for use.
261  */
262 static struct file_region *
263 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
264 {
265         struct file_region *nrg = NULL;
266
267         VM_BUG_ON(resv->region_cache_count <= 0);
268
269         resv->region_cache_count--;
270         nrg = list_first_entry(&resv->region_cache, struct file_region, link);
271         list_del(&nrg->link);
272
273         nrg->from = from;
274         nrg->to = to;
275
276         return nrg;
277 }
278
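/*
 * Copy the hugetlb cgroup uncharge information from one file_region to
 * another, taking an extra css reference for the copy when cgroup accounting
 * is in use.
 */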
279 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
280                                               struct file_region *rg)
281 {
282 #ifdef CONFIG_CGROUP_HUGETLB
283         nrg->reservation_counter = rg->reservation_counter;
284         nrg->css = rg->css;
285         if (rg->css)
286                 css_get(rg->css);
287 #endif
288 }
289
290 /* Helper that records hugetlb_cgroup uncharge info. */
291 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
292                                                 struct hstate *h,
293                                                 struct resv_map *resv,
294                                                 struct file_region *nrg)
295 {
296 #ifdef CONFIG_CGROUP_HUGETLB
297         if (h_cg) {
298                 nrg->reservation_counter =
299                         &h_cg->rsvd_hugepage[hstate_index(h)];
300                 nrg->css = &h_cg->css;
301                 /*
302                  * The caller will hold exactly one h_cg->css reference for the
303                  * whole contiguous reservation region. But this area might be
304                  * scattered when some file_regions already reside in
305                  * it. As a result, many file_regions may share only one css
306                  * reference. In order to ensure that each file_region holds
307                  * exactly one h_cg->css reference, we do css_get() for
308                  * each file_region and leave the reference held by the caller
309                  * untouched.
310                  */
311                 css_get(&h_cg->css);
312                 if (!resv->pages_per_hpage)
313                         resv->pages_per_hpage = pages_per_huge_page(h);
314                 /* pages_per_hpage should be the same for all entries in
315                  * a resv_map.
316                  */
317                 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
318         } else {
319                 nrg->reservation_counter = NULL;
320                 nrg->css = NULL;
321         }
322 #endif
323 }
324
325 static void put_uncharge_info(struct file_region *rg)
326 {
327 #ifdef CONFIG_CGROUP_HUGETLB
328         if (rg->css)
329                 css_put(rg->css);
330 #endif
331 }
332
333 static bool has_same_uncharge_info(struct file_region *rg,
334                                    struct file_region *org)
335 {
336 #ifdef CONFIG_CGROUP_HUGETLB
337         return rg->reservation_counter == org->reservation_counter &&
338                rg->css == org->css;
339
340 #else
341         return true;
342 #endif
343 }
344
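/*
 * Try to merge the newly inserted region 'rg' with its neighbours in the
 * sorted region list.  Regions are merged only when their ranges touch
 * exactly and they carry the same cgroup uncharge info; for example, [0, 2)
 * followed by [2, 5) with identical uncharge info collapses into [0, 5).
 */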
345 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
346 {
347         struct file_region *nrg = NULL, *prg = NULL;
348
349         prg = list_prev_entry(rg, link);
350         if (&prg->link != &resv->regions && prg->to == rg->from &&
351             has_same_uncharge_info(prg, rg)) {
352                 prg->to = rg->to;
353
354                 list_del(&rg->link);
355                 put_uncharge_info(rg);
356                 kfree(rg);
357
358                 rg = prg;
359         }
360
361         nrg = list_next_entry(rg, link);
362         if (&nrg->link != &resv->regions && nrg->from == rg->to &&
363             has_same_uncharge_info(nrg, rg)) {
364                 nrg->from = rg->from;
365
366                 list_del(&rg->link);
367                 put_uncharge_info(rg);
368                 kfree(rg);
369         }
370 }
371
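/*
 * Insert a [from, to) entry ahead of 'rg' using a descriptor taken from the
 * resv_map cache, then merge it with its neighbours where possible.  In
 * counting mode (regions_needed != NULL) the list is left untouched and the
 * caller's counter is bumped instead.  Either way, the number of pages the
 * entry covers (to - from) is returned.
 */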
372 static inline long
373 hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
374                      long to, struct hstate *h, struct hugetlb_cgroup *cg,
375                      long *regions_needed)
376 {
377         struct file_region *nrg;
378
379         if (!regions_needed) {
380                 nrg = get_file_region_entry_from_cache(map, from, to);
381                 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
382                 list_add(&nrg->link, rg->link.prev);
383                 coalesce_file_region(map, nrg);
384         } else
385                 *regions_needed += 1;
386
387         return to - from;
388 }
389
390 /*
391  * Must be called with resv->lock held.
392  *
393  * Calling this with regions_needed != NULL will count the number of pages
394  * to be added but will not modify the linked list. In that case, *regions_needed
395  * will indicate the number of file_regions needed in the cache to carry out
396  * the addition of the regions for this range.
397  */
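/*
 * For example, with existing regions [3, 5) and [7, 10) in the map, a call
 * for [f = 2, t = 9) accounts for the uncovered sub-ranges [2, 3) and [5, 7)
 * and returns 3.
 */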
398 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
399                                      struct hugetlb_cgroup *h_cg,
400                                      struct hstate *h, long *regions_needed)
401 {
402         long add = 0;
403         struct list_head *head = &resv->regions;
404         long last_accounted_offset = f;
405         struct file_region *rg = NULL, *trg = NULL;
406
407         if (regions_needed)
408                 *regions_needed = 0;
409
410         /* In this loop, we essentially handle an entry for the range
411          * [last_accounted_offset, rg->from), at every iteration, with some
412          * bounds checking.
413          */
414         list_for_each_entry_safe(rg, trg, head, link) {
415                 /* Skip irrelevant regions that start before our range. */
416                 if (rg->from < f) {
417                         /* If this region ends after the last accounted offset,
418                          * then we need to update last_accounted_offset.
419                          */
420                         if (rg->to > last_accounted_offset)
421                                 last_accounted_offset = rg->to;
422                         continue;
423                 }
424
425                 /* When we find a region that starts beyond our range, we've
426                  * finished.
427                  */
428                 if (rg->from >= t)
429                         break;
430
431                 /* Add an entry for last_accounted_offset -> rg->from, and
432                  * update last_accounted_offset.
433                  */
434                 if (rg->from > last_accounted_offset)
435                         add += hugetlb_resv_map_add(resv, rg,
436                                                     last_accounted_offset,
437                                                     rg->from, h, h_cg,
438                                                     regions_needed);
439
440                 last_accounted_offset = rg->to;
441         }
442
443         /* Handle the case where our range extends beyond
444          * last_accounted_offset.
445          */
446         if (last_accounted_offset < t)
447                 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
448                                             t, h, h_cg, regions_needed);
449
450         return add;
451 }
452
453 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
454  */
455 static int allocate_file_region_entries(struct resv_map *resv,
456                                         int regions_needed)
457         __must_hold(&resv->lock)
458 {
459         struct list_head allocated_regions;
460         int to_allocate = 0, i = 0;
461         struct file_region *trg = NULL, *rg = NULL;
462
463         VM_BUG_ON(regions_needed < 0);
464
465         INIT_LIST_HEAD(&allocated_regions);
466
467         /*
468          * Check for sufficient descriptors in the cache to accommodate
469          * the number of in progress add operations plus regions_needed.
470          *
471          * This is a while loop because when we drop the lock, some other call
472          * to region_add or region_del may have consumed some region_entries,
473          * so we keep looping here until we finally have enough entries for
474          * (adds_in_progress + regions_needed).
475          */
476         while (resv->region_cache_count <
477                (resv->adds_in_progress + regions_needed)) {
478                 to_allocate = resv->adds_in_progress + regions_needed -
479                               resv->region_cache_count;
480
481                 /* At this point, we should have enough entries in the cache
482                  * for all the existing adds_in_progress. We should only be
483                  * needing to allocate for regions_needed.
484                  */
485                 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
486
487                 spin_unlock(&resv->lock);
488                 for (i = 0; i < to_allocate; i++) {
489                         trg = kmalloc(sizeof(*trg), GFP_KERNEL);
490                         if (!trg)
491                                 goto out_of_memory;
492                         list_add(&trg->link, &allocated_regions);
493                 }
494
495                 spin_lock(&resv->lock);
496
497                 list_splice(&allocated_regions, &resv->region_cache);
498                 resv->region_cache_count += to_allocate;
499         }
500
501         return 0;
502
503 out_of_memory:
504         list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
505                 list_del(&rg->link);
506                 kfree(rg);
507         }
508         return -ENOMEM;
509 }
510
511 /*
512  * Add the huge page range represented by [f, t) to the reserve
513  * map.  Regions will be taken from the cache to fill in this range.
514  * Sufficient regions should exist in the cache due to the previous
515  * call to region_chg with the same range, but in some cases the cache will not
516  * have sufficient entries due to races with other code doing region_add or
517  * region_del.  The extra needed entries will be allocated.
518  *
519  * regions_needed is the out value provided by a previous call to region_chg.
520  *
521  * Return the number of new huge pages added to the map.  This number is greater
522  * than or equal to zero.  If file_region entries needed to be allocated for
523  * this operation and we were not able to allocate, it returns -ENOMEM.
524  * region_add of regions of length 1 never allocates file_regions and cannot
525  * fail; region_chg will always allocate at least 1 entry and a region_add for
526  * 1 page will only require at most 1 entry.
527  */
528 static long region_add(struct resv_map *resv, long f, long t,
529                        long in_regions_needed, struct hstate *h,
530                        struct hugetlb_cgroup *h_cg)
531 {
532         long add = 0, actual_regions_needed = 0;
533
534         spin_lock(&resv->lock);
535 retry:
536
537         /* Count how many regions are actually needed to execute this add. */
538         add_reservation_in_range(resv, f, t, NULL, NULL,
539                                  &actual_regions_needed);
540
541         /*
542          * Check for sufficient descriptors in the cache to accommodate
543          * this add operation. Note that actual_regions_needed may be greater
544          * than in_regions_needed, as the resv_map may have been modified since
545          * the region_chg call. In this case, we need to make sure that we
546          * allocate extra entries, such that we have enough for all the
547          * existing adds_in_progress, plus the excess needed for this
548          * operation.
549          */
550         if (actual_regions_needed > in_regions_needed &&
551             resv->region_cache_count <
552                     resv->adds_in_progress +
553                             (actual_regions_needed - in_regions_needed)) {
554                 /* region_add operation of range 1 should never need to
555                  * allocate file_region entries.
556                  */
557                 VM_BUG_ON(t - f <= 1);
558
559                 if (allocate_file_region_entries(
560                             resv, actual_regions_needed - in_regions_needed)) {
561                         return -ENOMEM;
562                 }
563
564                 goto retry;
565         }
566
567         add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
568
569         resv->adds_in_progress -= in_regions_needed;
570
571         spin_unlock(&resv->lock);
572         return add;
573 }
574
575 /*
576  * Examine the existing reserve map and determine how many
577  * huge pages in the specified range [f, t) are NOT currently
578  * represented.  This routine is called before a subsequent
579  * call to region_add that will actually modify the reserve
580  * map to add the specified range [f, t).  region_chg does
581  * not change the number of huge pages represented by the
582  * map.  A number of new file_region structures are added to the cache as
583  * placeholders, for the subsequent region_add call to use. At least 1
584  * file_region structure is added.
585  *
586  * out_regions_needed is the number of regions added to the
587  * resv->adds_in_progress.  This value needs to be provided to a follow up call
588  * to region_add or region_abort for proper accounting.
589  *
590  * Returns the number of huge pages that need to be added to the existing
591  * reservation map for the range [f, t).  This number is greater than or
592  * equal to zero.  -ENOMEM is returned if a new file_region structure or cache
593  * entry is needed and cannot be allocated.
594  */
595 static long region_chg(struct resv_map *resv, long f, long t,
596                        long *out_regions_needed)
597 {
598         long chg = 0;
599
600         spin_lock(&resv->lock);
601
602         /* Count how many hugepages in this range are NOT represented. */
603         chg = add_reservation_in_range(resv, f, t, NULL, NULL,
604                                        out_regions_needed);
605
606         if (*out_regions_needed == 0)
607                 *out_regions_needed = 1;
608
609         if (allocate_file_region_entries(resv, *out_regions_needed))
610                 return -ENOMEM;
611
612         resv->adds_in_progress += *out_regions_needed;
613
614         spin_unlock(&resv->lock);
615         return chg;
616 }
617
618 /*
619  * Abort the in progress add operation.  The adds_in_progress field
620  * of the resv_map keeps track of the operations in progress between
621  * calls to region_chg and region_add.  Operations are sometimes
622  * aborted after the call to region_chg.  In such cases, region_abort
623  * is called to decrement the adds_in_progress counter. regions_needed
624  * is the value returned by the region_chg call, it is used to decrement
625  * is the value returned by the region_chg call; it is used to decrement
626  *
627  * NOTE: The range arguments [f, t) are not needed or used in this
628  * routine.  They are kept to make reading the calling code easier as
629  * arguments will match the associated region_chg call.
630  */
631 static void region_abort(struct resv_map *resv, long f, long t,
632                          long regions_needed)
633 {
634         spin_lock(&resv->lock);
635         VM_BUG_ON(!resv->region_cache_count);
636         resv->adds_in_progress -= regions_needed;
637         spin_unlock(&resv->lock);
638 }
639
640 /*
641  * Delete the specified range [f, t) from the reserve map.  If the
642  * t parameter is LONG_MAX, this indicates that ALL regions after f
643  * should be deleted.  Locate the regions which intersect [f, t)
644  * and either trim, delete or split the existing regions.
645  *
646  * Returns the number of huge pages deleted from the reserve map.
647  * In the normal case, the return value is zero or more.  In the
648  * case where a region must be split, a new region descriptor must
649  * be allocated.  If the allocation fails, -ENOMEM will be returned.
650  * NOTE: If the parameter t == LONG_MAX, then we will never need to split
651  * a region, so -ENOMEM cannot be returned.  Callers specifying
652  * t == LONG_MAX do not need to check for the -ENOMEM error.
653  */
654 static long region_del(struct resv_map *resv, long f, long t)
655 {
656         struct list_head *head = &resv->regions;
657         struct file_region *rg, *trg;
658         struct file_region *nrg = NULL;
659         long del = 0;
660
661 retry:
662         spin_lock(&resv->lock);
663         list_for_each_entry_safe(rg, trg, head, link) {
664                 /*
665                  * Skip regions before the range to be deleted.  file_region
666                  * ranges are normally of the form [from, to).  However, there
667                  * may be a "placeholder" entry in the map which is of the form
668                  * (from, to) with from == to.  Check for placeholder entries
669                  * at the beginning of the range to be deleted.
670                  */
671                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
672                         continue;
673
674                 if (rg->from >= t)
675                         break;
676
677                 if (f > rg->from && t < rg->to) { /* Must split region */
678                         /*
679                          * Check for an entry in the cache before dropping
680                          * lock and attempting allocation.
681                          */
682                         if (!nrg &&
683                             resv->region_cache_count > resv->adds_in_progress) {
684                                 nrg = list_first_entry(&resv->region_cache,
685                                                         struct file_region,
686                                                         link);
687                                 list_del(&nrg->link);
688                                 resv->region_cache_count--;
689                         }
690
691                         if (!nrg) {
692                                 spin_unlock(&resv->lock);
693                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
694                                 if (!nrg)
695                                         return -ENOMEM;
696                                 goto retry;
697                         }
698
699                         del += t - f;
700                         hugetlb_cgroup_uncharge_file_region(
701                                 resv, rg, t - f, false);
702
703                         /* New entry for end of split region */
704                         nrg->from = t;
705                         nrg->to = rg->to;
706
707                         copy_hugetlb_cgroup_uncharge_info(nrg, rg);
708
709                         INIT_LIST_HEAD(&nrg->link);
710
711                         /* Original entry is trimmed */
712                         rg->to = f;
713
714                         list_add(&nrg->link, &rg->link);
715                         nrg = NULL;
716                         break;
717                 }
718
719                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
720                         del += rg->to - rg->from;
721                         hugetlb_cgroup_uncharge_file_region(resv, rg,
722                                                             rg->to - rg->from, true);
723                         list_del(&rg->link);
724                         kfree(rg);
725                         continue;
726                 }
727
728                 if (f <= rg->from) {    /* Trim beginning of region */
729                         hugetlb_cgroup_uncharge_file_region(resv, rg,
730                                                             t - rg->from, false);
731
732                         del += t - rg->from;
733                         rg->from = t;
734                 } else {                /* Trim end of region */
735                         hugetlb_cgroup_uncharge_file_region(resv, rg,
736                                                             rg->to - f, false);
737
738                         del += rg->to - f;
739                         rg->to = f;
740                 }
741         }
742
743         spin_unlock(&resv->lock);
744         kfree(nrg);
745         return del;
746 }
747
748 /*
749  * A rare out of memory error was encountered which prevented removal of
750  * the reserve map region for a page.  The huge page itself was freed
751  * and removed from the page cache.  This routine will adjust the subpool
752  * usage count, and the global reserve count if needed.  By incrementing
753  * these counts, the reserve map entry which could not be deleted will
754  * appear as a "reserved" entry instead of simply dangling with incorrect
755  * counts.
756  */
757 void hugetlb_fix_reserve_counts(struct inode *inode)
758 {
759         struct hugepage_subpool *spool = subpool_inode(inode);
760         long rsv_adjust;
761         bool reserved = false;
762
763         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
764         if (rsv_adjust > 0) {
765                 struct hstate *h = hstate_inode(inode);
766
767                 if (!hugetlb_acct_memory(h, 1))
768                         reserved = true;
769         } else if (!rsv_adjust) {
770                 reserved = true;
771         }
772
773         if (!reserved)
774                 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
775 }
776
777 /*
778  * Count and return the number of huge pages in the reserve map
779  * that intersect with the range [f, t).
780  */
781 static long region_count(struct resv_map *resv, long f, long t)
782 {
783         struct list_head *head = &resv->regions;
784         struct file_region *rg;
785         long chg = 0;
786
787         spin_lock(&resv->lock);
788         /* Locate each segment we overlap with, and count that overlap. */
789         list_for_each_entry(rg, head, link) {
790                 long seg_from;
791                 long seg_to;
792
793                 if (rg->to <= f)
794                         continue;
795                 if (rg->from >= t)
796                         break;
797
798                 seg_from = max(rg->from, f);
799                 seg_to = min(rg->to, t);
800
801                 chg += seg_to - seg_from;
802         }
803         spin_unlock(&resv->lock);
804
805         return chg;
806 }
807
808 /*
809  * Convert the address within this vma to the page offset within
810  * the mapping, in pagecache page units; huge pages here.
811  */
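/*
 * For example, with 2MB huge pages (huge_page_shift() == 21 and
 * huge_page_order() == 9), an address 4MB past vma->vm_start maps to index
 * (vma->vm_pgoff >> 9) + 2.
 */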
812 static pgoff_t vma_hugecache_offset(struct hstate *h,
813                         struct vm_area_struct *vma, unsigned long address)
814 {
815         return ((address - vma->vm_start) >> huge_page_shift(h)) +
816                         (vma->vm_pgoff >> huge_page_order(h));
817 }
818
819 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
820                                      unsigned long address)
821 {
822         return vma_hugecache_offset(hstate_vma(vma), vma, address);
823 }
824 EXPORT_SYMBOL_GPL(linear_hugepage_index);
825
826 /*
827  * Return the size of the pages allocated when backing a VMA. In the majority
828  * of cases this will be the same size as that used by the page table entries.
829  */
830 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
831 {
832         if (vma->vm_ops && vma->vm_ops->pagesize)
833                 return vma->vm_ops->pagesize(vma);
834         return PAGE_SIZE;
835 }
836 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
837
838 /*
839  * Return the page size being used by the MMU to back a VMA. In the majority
840  * of cases, the page size used by the kernel matches the MMU size. On
841  * architectures where it differs, an architecture-specific 'strong'
842  * version of this symbol is required.
843  */
844 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
845 {
846         return vma_kernel_pagesize(vma);
847 }
848
849 /*
850  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
851  * bits of the reservation map pointer, which are always clear due to
852  * alignment.
853  */
854 #define HPAGE_RESV_OWNER    (1UL << 0)
855 #define HPAGE_RESV_UNMAPPED (1UL << 1)
856 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
857
858 /*
859  * These helpers are used to track how many pages are reserved for
860  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
861  * is guaranteed to have its future faults succeed.
862  *
863  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
864  * the reserve counters are updated with the hugetlb_lock held. It is safe
865  * to reset the VMA at fork() time as it is not in use yet and there is no
866  * chance of the global counters getting corrupted as a result of the values.
867  *
868  * The private mapping reservation is represented in a subtly different
869  * manner to a shared mapping.  A shared mapping has a region map associated
870  * with the underlying file; this region map represents the backing file
871  * pages which have ever had a reservation assigned, and this persists even
872  * after the page is instantiated.  A private mapping has a region map
873  * associated with the original mmap which is attached to all VMAs which
874  * reference it; this region map represents those offsets which have consumed
875  * a reservation, i.e. where pages have been instantiated.
876  */
877 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
878 {
879         return (unsigned long)vma->vm_private_data;
880 }
881
882 static void set_vma_private_data(struct vm_area_struct *vma,
883                                                         unsigned long value)
884 {
885         vma->vm_private_data = (void *)value;
886 }
887
888 static void
889 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
890                                           struct hugetlb_cgroup *h_cg,
891                                           struct hstate *h)
892 {
893 #ifdef CONFIG_CGROUP_HUGETLB
894         if (!h_cg || !h) {
895                 resv_map->reservation_counter = NULL;
896                 resv_map->pages_per_hpage = 0;
897                 resv_map->css = NULL;
898         } else {
899                 resv_map->reservation_counter =
900                         &h_cg->rsvd_hugepage[hstate_index(h)];
901                 resv_map->pages_per_hpage = pages_per_huge_page(h);
902                 resv_map->css = &h_cg->css;
903         }
904 #endif
905 }
906
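/*
 * Allocate and initialize a reserve map.  The region cache is seeded with one
 * pre-allocated file_region, so region_cache_count starts at 1.
 */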
907 struct resv_map *resv_map_alloc(void)
908 {
909         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
910         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
911
912         if (!resv_map || !rg) {
913                 kfree(resv_map);
914                 kfree(rg);
915                 return NULL;
916         }
917
918         kref_init(&resv_map->refs);
919         spin_lock_init(&resv_map->lock);
920         INIT_LIST_HEAD(&resv_map->regions);
921
922         resv_map->adds_in_progress = 0;
923         /*
924          * Initialize these to 0. On shared mappings, 0's here indicate these
925          * fields don't do cgroup accounting. On private mappings, these will be
926          * re-initialized to the proper values, to indicate that hugetlb cgroup
927          * reservations are to be un-charged from here.
928          */
929         resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
930
931         INIT_LIST_HEAD(&resv_map->region_cache);
932         list_add(&rg->link, &resv_map->region_cache);
933         resv_map->region_cache_count = 1;
934
935         return resv_map;
936 }
937
938 void resv_map_release(struct kref *ref)
939 {
940         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
941         struct list_head *head = &resv_map->region_cache;
942         struct file_region *rg, *trg;
943
944         /* Clear out any active regions before we release the map. */
945         region_del(resv_map, 0, LONG_MAX);
946
947         /* ... and any entries left in the cache */
948         list_for_each_entry_safe(rg, trg, head, link) {
949                 list_del(&rg->link);
950                 kfree(rg);
951         }
952
953         VM_BUG_ON(resv_map->adds_in_progress);
954
955         kfree(resv_map);
956 }
957
958 static inline struct resv_map *inode_resv_map(struct inode *inode)
959 {
960         /*
961          * At inode evict time, i_mapping may not point to the original
962          * address space within the inode.  This original address space
963          * contains the pointer to the resv_map.  So, always use the
964          * address space embedded within the inode.
965          * The VERY common case is inode->mapping == &inode->i_data but,
966          * this may not be true for device special inodes.
967          */
968         return (struct resv_map *)(&inode->i_data)->private_data;
969 }
970
971 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
972 {
973         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
974         if (vma->vm_flags & VM_MAYSHARE) {
975                 struct address_space *mapping = vma->vm_file->f_mapping;
976                 struct inode *inode = mapping->host;
977
978                 return inode_resv_map(inode);
979
980         } else {
981                 return (struct resv_map *)(get_vma_private_data(vma) &
982                                                         ~HPAGE_RESV_MASK);
983         }
984 }
985
986 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
987 {
988         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
989         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
990
991         set_vma_private_data(vma, (get_vma_private_data(vma) &
992                                 HPAGE_RESV_MASK) | (unsigned long)map);
993 }
994
995 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
996 {
997         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
998         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
999
1000         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1001 }
1002
1003 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1004 {
1005         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1006
1007         return (get_vma_private_data(vma) & flag) != 0;
1008 }
1009
1010 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
1011 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1012 {
1013         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1014         if (!(vma->vm_flags & VM_MAYSHARE))
1015                 vma->vm_private_data = (void *)0;
1016 }
1017
1018 /*
1019  * Reset and decrement one ref on hugepage private reservation.
1020  * Called with mm->mmap_sem writer semaphore held.
1021  * This function should be only used by move_vma() and operate on
1022  * same sized vma. It should never come here with last ref on the
1023  * reservation.
1024  */
1025 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1026 {
1027         /*
1028          * Clear the old hugetlb private page reservation.
1029          * It has already been transferred to new_vma.
1030          *
1031          * During a mremap() operation of a hugetlb vma we call move_vma()
1032          * which copies vma into new_vma and unmaps vma. After the copy
1033          * operation both new_vma and vma share a reference to the resv_map
1034          * struct, and at that point vma is about to be unmapped. We don't
1035          * want to return the reservation to the pool at unmap of vma because
1036          * the reservation still lives on in new_vma, so simply decrement the
1037          * ref here and remove the resv_map reference from this vma.
1038          */
1039         struct resv_map *reservations = vma_resv_map(vma);
1040
1041         if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1042                 resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1043                 kref_put(&reservations->refs, resv_map_release);
1044         }
1045
1046         reset_vma_resv_huge_pages(vma);
1047 }
1048
1049 /* Returns true if the VMA has associated reserve pages */
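/*
 * 'chg' comes from an earlier reserve map check; a nonzero value means the
 * range has no entry in the reserve map (for example after a hole punch), so
 * no reserve may be consumed for it.
 */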
1050 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1051 {
1052         if (vma->vm_flags & VM_NORESERVE) {
1053                 /*
1054                  * This address is already reserved by another process (chg == 0),
1055                  * so we should decrement the reserved count. Without decrementing,
1056                  * the reserve count would remain after releasing the inode, because
1057                  * this allocated page will go into the page cache and be regarded as
1058                  * coming from the reserved pool in the releasing step.  Currently we
1059                  * don't have any other solution to deal with this situation
1060                  * properly, so add a work-around here.
1061                  */
1062                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1063                         return true;
1064                 else
1065                         return false;
1066         }
1067
1068         /* Shared mappings always use reserves */
1069         if (vma->vm_flags & VM_MAYSHARE) {
1070                 /*
1071                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1072                  * be a region map for all pages.  The only situation where
1073                  * there is no region map is if a hole was punched via
1074                  * fallocate.  In this case, there really are no reserves to
1075                  * use.  This situation is indicated if chg != 0.
1076                  */
1077                 if (chg)
1078                         return false;
1079                 else
1080                         return true;
1081         }
1082
1083         /*
1084          * Only the process that called mmap() has reserves for
1085          * private mappings.
1086          */
1087         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1088                 /*
1089                  * Like the shared case above, a hole punch or truncate
1090                  * could have been performed on the private mapping.
1091                  * Examine the value of chg to determine if reserves
1092                  * actually exist or were previously consumed.
1093                  * Very Subtle - The value of chg comes from a previous
1094                  * call to vma_needs_reserves().  The reserve map for
1095                  * private mappings has different (opposite) semantics
1096                  * than that of shared mappings.  vma_needs_reserves()
1097                  * has already taken this difference in semantics into
1098                  * account.  Therefore, the meaning of chg is the same
1099                  * as in the shared case above.  Code could easily be
1100                  * combined, but keeping it separate draws attention to
1101                  * subtle differences.
1102                  */
1103                 if (chg)
1104                         return false;
1105                 else
1106                         return true;
1107         }
1108
1109         return false;
1110 }
1111
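/*
 * Return an unreferenced huge page to its node's free list and update the
 * free page counters.  Caller must hold hugetlb_lock.
 */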
1112 static void enqueue_huge_page(struct hstate *h, struct page *page)
1113 {
1114         int nid = page_to_nid(page);
1115
1116         lockdep_assert_held(&hugetlb_lock);
1117         VM_BUG_ON_PAGE(page_count(page), page);
1118
1119         list_move(&page->lru, &h->hugepage_freelists[nid]);
1120         h->free_huge_pages++;
1121         h->free_huge_pages_node[nid]++;
1122         SetHPageFreed(page);
1123 }
1124
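/*
 * Take one huge page off the given node's free list, skipping hwpoisoned
 * pages and, for PF_MEMALLOC_PIN allocations, pages that may not be pinned.
 * Returns NULL if no suitable page is available.  Caller must hold
 * hugetlb_lock.
 */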
1125 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1126 {
1127         struct page *page;
1128         bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1129
1130         lockdep_assert_held(&hugetlb_lock);
1131         list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1132                 if (pin && !is_pinnable_page(page))
1133                         continue;
1134
1135                 if (PageHWPoison(page))
1136                         continue;
1137
1138                 list_move(&page->lru, &h->hugepage_activelist);
1139                 set_page_refcounted(page);
1140                 ClearHPageFreed(page);
1141                 h->free_huge_pages--;
1142                 h->free_huge_pages_node[nid]--;
1143                 return page;
1144         }
1145
1146         return NULL;
1147 }
1148
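/*
 * Walk the zonelist for 'nid' and dequeue a free huge page from the first
 * allowed node that has one, honouring cpuset and nodemask constraints.  The
 * walk is retried if the cpuset's mems_allowed changes underneath us.
 */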
1149 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1150                 nodemask_t *nmask)
1151 {
1152         unsigned int cpuset_mems_cookie;
1153         struct zonelist *zonelist;
1154         struct zone *zone;
1155         struct zoneref *z;
1156         int node = NUMA_NO_NODE;
1157
1158         zonelist = node_zonelist(nid, gfp_mask);
1159
1160 retry_cpuset:
1161         cpuset_mems_cookie = read_mems_allowed_begin();
1162         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1163                 struct page *page;
1164
1165                 if (!cpuset_zone_allowed(zone, gfp_mask))
1166                         continue;
1167                 /*
1168                  * no need to ask again on the same node. Pool is node rather than
1169                  * zone aware
1170                  */
1171                 if (zone_to_nid(zone) == node)
1172                         continue;
1173                 node = zone_to_nid(zone);
1174
1175                 page = dequeue_huge_page_node_exact(h, node);
1176                 if (page)
1177                         return page;
1178         }
1179         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1180                 goto retry_cpuset;
1181
1182         return NULL;
1183 }
1184
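/*
 * Dequeue a free huge page for a fault in 'vma' at 'address', applying the
 * vma's NUMA policy and consuming a reservation when one is available and
 * avoid_reserve is not set.
 */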
1185 static struct page *dequeue_huge_page_vma(struct hstate *h,
1186                                 struct vm_area_struct *vma,
1187                                 unsigned long address, int avoid_reserve,
1188                                 long chg)
1189 {
1190         struct page *page = NULL;
1191         struct mempolicy *mpol;
1192         gfp_t gfp_mask;
1193         nodemask_t *nodemask;
1194         int nid;
1195
1196         /*
1197          * A child process with MAP_PRIVATE mappings created by its parent
1198          * has no page reserves. This check ensures that reservations are
1199          * not "stolen". The child may still get SIGKILLed.
1200          */
1201         if (!vma_has_reserves(vma, chg) &&
1202                         h->free_huge_pages - h->resv_huge_pages == 0)
1203                 goto err;
1204
1205         /* If reserves cannot be used, ensure enough pages are in the pool */
1206         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1207                 goto err;
1208
1209         gfp_mask = htlb_alloc_mask(h);
1210         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1211
1212         if (mpol_is_preferred_many(mpol)) {
1213                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1214
1215                 /* Fallback to all nodes if page==NULL */
1216                 nodemask = NULL;
1217         }
1218
1219         if (!page)
1220                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1221
1222         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1223                 SetHPageRestoreReserve(page);
1224                 h->resv_huge_pages--;
1225         }
1226
1227         mpol_cond_put(mpol);
1228         return page;
1229
1230 err:
1231         return NULL;
1232 }
1233
1234 /*
1235  * common helper functions for hstate_next_node_to_{alloc|free}.
1236  * We may have allocated or freed a huge page based on a different
1237  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1238  * be outside of *nodes_allowed.  Ensure that we use an allowed
1239  * node for alloc or free.
1240  */
1241 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1242 {
1243         nid = next_node_in(nid, *nodes_allowed);
1244         VM_BUG_ON(nid >= MAX_NUMNODES);
1245
1246         return nid;
1247 }
1248
1249 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1250 {
1251         if (!node_isset(nid, *nodes_allowed))
1252                 nid = next_node_allowed(nid, nodes_allowed);
1253         return nid;
1254 }
1255
1256 /*
1257  * returns the previously saved node ["this node"] from which to
1258  * allocate a persistent huge page for the pool and advance the
1259  * next node from which to allocate, handling wrap at end of node
1260  * mask.
1261  */
1262 static int hstate_next_node_to_alloc(struct hstate *h,
1263                                         nodemask_t *nodes_allowed)
1264 {
1265         int nid;
1266
1267         VM_BUG_ON(!nodes_allowed);
1268
1269         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1270         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1271
1272         return nid;
1273 }
1274
1275 /*
1276  * helper for remove_pool_huge_page() - return the previously saved
1277  * node ["this node"] from which to free a huge page.  Advance the
1278  * next node id whether or not we find a free huge page to free so
1279  * that the next attempt to free addresses the next node.
1280  */
1281 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1282 {
1283         int nid;
1284
1285         VM_BUG_ON(!nodes_allowed);
1286
1287         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1288         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1289
1290         return nid;
1291 }
1292
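/*
 * Iterate over at most nodes_weight(*mask) candidate nodes, starting from the
 * hstate's saved "next" node, so that persistent huge page allocations and
 * frees are spread round-robin across the allowed nodes.
 */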
1293 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1294         for (nr_nodes = nodes_weight(*mask);                            \
1295                 nr_nodes > 0 &&                                         \
1296                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1297                 nr_nodes--)
1298
1299 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1300         for (nr_nodes = nodes_weight(*mask);                            \
1301                 nr_nodes > 0 &&                                         \
1302                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1303                 nr_nodes--)
1304
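/*
 * Tear a compound page apart into independent base pages: clear the compound
 * metadata in every tail page and in the head page so the range can be freed
 * or, for demote, reassembled into smaller compound pages.
 */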
1305 /* used to demote non-gigantic huge pages as well */
1306 static void __destroy_compound_gigantic_page(struct page *page,
1307                                         unsigned int order, bool demote)
1308 {
1309         int i;
1310         int nr_pages = 1 << order;
1311         struct page *p = page + 1;
1312
1313         atomic_set(compound_mapcount_ptr(page), 0);
1314         atomic_set(compound_pincount_ptr(page), 0);
1315
1316         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1317                 p->mapping = NULL;
1318                 clear_compound_head(p);
1319                 if (!demote)
1320                         set_page_refcounted(p);
1321         }
1322
1323         set_compound_order(page, 0);
1324 #ifdef CONFIG_64BIT
1325         page[1].compound_nr = 0;
1326 #endif
1327         __ClearPageHead(page);
1328 }
1329
1330 static void destroy_compound_hugetlb_page_for_demote(struct page *page,
1331                                         unsigned int order)
1332 {
1333         __destroy_compound_gigantic_page(page, order, true);
1334 }
1335
1336 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1337 static void destroy_compound_gigantic_page(struct page *page,
1338                                         unsigned int order)
1339 {
1340         __destroy_compound_gigantic_page(page, order, false);
1341 }
1342
1343 static void free_gigantic_page(struct page *page, unsigned int order)
1344 {
1345         /*
1346          * If the page isn't allocated using the cma allocator,
1347          * cma_release() returns false.
1348          */
1349 #ifdef CONFIG_CMA
1350         if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1351                 return;
1352 #endif
1353
1354         free_contig_range(page_to_pfn(page), 1 << order);
1355 }
1356
1357 #ifdef CONFIG_CONTIG_ALLOC
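/*
 * Allocate a gigantic page as a physically contiguous range, preferring the
 * per-node hugetlb CMA areas when they are configured and falling back to
 * alloc_contig_pages() otherwise.
 */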
1358 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1359                 int nid, nodemask_t *nodemask)
1360 {
1361         unsigned long nr_pages = pages_per_huge_page(h);
1362         if (nid == NUMA_NO_NODE)
1363                 nid = numa_mem_id();
1364
1365 #ifdef CONFIG_CMA
1366         {
1367                 struct page *page;
1368                 int node;
1369
1370                 if (hugetlb_cma[nid]) {
1371                         page = cma_alloc(hugetlb_cma[nid], nr_pages,
1372                                         huge_page_order(h), true);
1373                         if (page)
1374                                 return page;
1375                 }
1376
1377                 if (!(gfp_mask & __GFP_THISNODE)) {
1378                         for_each_node_mask(node, *nodemask) {
1379                                 if (node == nid || !hugetlb_cma[node])
1380                                         continue;
1381
1382                                 page = cma_alloc(hugetlb_cma[node], nr_pages,
1383                                                 huge_page_order(h), true);
1384                                 if (page)
1385                                         return page;
1386                         }
1387                 }
1388         }
1389 #endif
1390
1391         return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1392 }
1393
1394 #else /* !CONFIG_CONTIG_ALLOC */
1395 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1396                                         int nid, nodemask_t *nodemask)
1397 {
1398         return NULL;
1399 }
1400 #endif /* CONFIG_CONTIG_ALLOC */
1401
1402 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1403 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1404                                         int nid, nodemask_t *nodemask)
1405 {
1406         return NULL;
1407 }
1408 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1409 static inline void destroy_compound_gigantic_page(struct page *page,
1410                                                 unsigned int order) { }
1411 #endif
1412
1413 /*
1414  * Remove hugetlb page from lists, and update dtor so that page appears
1415  * as just a compound page.
1416  *
1417  * A reference is held on the page, except in the case of demote.
1418  *
1419  * Must be called with hugetlb lock held.
1420  */
1421 static void __remove_hugetlb_page(struct hstate *h, struct page *page,
1422                                                         bool adjust_surplus,
1423                                                         bool demote)
1424 {
1425         int nid = page_to_nid(page);
1426
1427         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1428         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1429
1430         lockdep_assert_held(&hugetlb_lock);
1431         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1432                 return;
1433
1434         list_del(&page->lru);
1435
1436         if (HPageFreed(page)) {
1437                 h->free_huge_pages--;
1438                 h->free_huge_pages_node[nid]--;
1439         }
1440         if (adjust_surplus) {
1441                 h->surplus_huge_pages--;
1442                 h->surplus_huge_pages_node[nid]--;
1443         }
1444
1445         /*
1446          * Very subtle
1447          *
1448          * For non-gigantic pages set the destructor to the normal compound
1449          * page dtor.  This is needed in case someone takes an additional
1450          * temporary ref to the page, and freeing is delayed until they drop
1451          * their reference.
1452          *
1453          * For gigantic pages set the destructor to the null dtor.  This
1454          * destructor will never be called.  Before freeing the gigantic
1455          * page destroy_compound_gigantic_page will turn the compound page
1456          * into a simple group of pages.  After this the destructor does not
1457          * apply.
1458          *
1459          * This handles the case where more than one ref is held both
1460          * when and after update_and_free_page is called.
1461          *
1462          * In the case of demote we do not ref count the page as it will soon
1463          * be turned into a page of smaller size.
1464          */
1465         if (!demote)
1466                 set_page_refcounted(page);
1467         if (hstate_is_gigantic(h))
1468                 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1469         else
1470                 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
1471
1472         h->nr_huge_pages--;
1473         h->nr_huge_pages_node[nid]--;
1474 }
1475
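/*
 * Wrappers around __remove_hugetlb_page().  The normal variant leaves the
 * page with a reference so the caller can free it; the demote variant takes
 * no reference because the page is about to be split into smaller hugetlb
 * pages.
 */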
1476 static void remove_hugetlb_page(struct hstate *h, struct page *page,
1477                                                         bool adjust_surplus)
1478 {
1479         __remove_hugetlb_page(h, page, adjust_surplus, false);
1480 }
1481
1482 static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
1483                                                         bool adjust_surplus)
1484 {
1485         __remove_hugetlb_page(h, page, adjust_surplus, true);
1486 }
1487
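/*
 * Put a page that was previously removed from the pool back under hugetlb
 * management: restore the counters and the hugetlb destructor and, unless
 * another reference is still held, enqueue the page on the free list.  Used
 * when freeing the page has to be abandoned (e.g. its vmemmap pages could
 * not be reallocated).
 */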
1488 static void add_hugetlb_page(struct hstate *h, struct page *page,
1489                              bool adjust_surplus)
1490 {
1491         int zeroed;
1492         int nid = page_to_nid(page);
1493
1494         VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1495
1496         lockdep_assert_held(&hugetlb_lock);
1497
1498         INIT_LIST_HEAD(&page->lru);
1499         h->nr_huge_pages++;
1500         h->nr_huge_pages_node[nid]++;
1501
1502         if (adjust_surplus) {
1503                 h->surplus_huge_pages++;
1504                 h->surplus_huge_pages_node[nid]++;
1505         }
1506
1507         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1508         set_page_private(page, 0);
1509         SetHPageVmemmapOptimized(page);
1510
1511         /*
1512          * This page is about to be managed by the hugetlb allocator and
1513          * should have no users.  Drop our reference, and check for others
1514          * just in case.
1515          */
1516         zeroed = put_page_testzero(page);
1517         if (!zeroed)
1518                 /*
1519                  * It is VERY unlikely someone else has taken a ref on
1520                  * the page.  In this case, we simply return as the
1521                  * hugetlb destructor (free_huge_page) will be called
1522                  * when this other ref is dropped.
1523                  */
1524                 return;
1525
1526         arch_clear_hugepage_flags(page);
1527         enqueue_huge_page(h, page);
1528 }
1529
1530 static void __update_and_free_page(struct hstate *h, struct page *page)
1531 {
1532         int i;
1533         struct page *subpage = page;
1534
1535         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1536                 return;
1537
1538         if (alloc_huge_page_vmemmap(h, page)) {
1539                 spin_lock_irq(&hugetlb_lock);
1540                 /*
1541                  * If we cannot allocate vmemmap pages, refuse to free the
1542                  * page; put it back on the hugetlb free list and treat it
1543                  * as a surplus page.
1544                  */
1545                 add_hugetlb_page(h, page, true);
1546                 spin_unlock_irq(&hugetlb_lock);
1547                 return;
1548         }
1549
1550         for (i = 0; i < pages_per_huge_page(h);
1551              i++, subpage = mem_map_next(subpage, page, i)) {
1552                 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1553                                 1 << PG_referenced | 1 << PG_dirty |
1554                                 1 << PG_active | 1 << PG_private |
1555                                 1 << PG_writeback);
1556         }
1557
1558         /*
1559          * Non-gigantic pages demoted from CMA allocated gigantic pages
1560          * need to be given back to CMA in free_gigantic_page.
1561          */
1562         if (hstate_is_gigantic(h) ||
1563             hugetlb_cma_page(page, huge_page_order(h))) {
1564                 destroy_compound_gigantic_page(page, huge_page_order(h));
1565                 free_gigantic_page(page, huge_page_order(h));
1566         } else {
1567                 __free_pages(page, huge_page_order(h));
1568         }
1569 }
1570
1571 /*
1572  * Because update_and_free_page() can be called from any context, we cannot
1573  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1574  * actual freeing to a workqueue so that we do not have to use GFP_ATOMIC
1575  * to allocate the vmemmap pages.
1576  *
1577  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1578  * freed and frees them one-by-one. As the page->mapping pointer is going
1579  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1580  * structure of a lockless linked list of huge pages to be freed.
1581  */
1582 static LLIST_HEAD(hpage_freelist);
1583
1584 static void free_hpage_workfn(struct work_struct *work)
1585 {
1586         struct llist_node *node;
1587
1588         node = llist_del_all(&hpage_freelist);
1589
1590         while (node) {
1591                 struct page *page;
1592                 struct hstate *h;
1593
1594                 page = container_of((struct address_space **)node,
1595                                      struct page, mapping);
1596                 node = node->next;
1597                 page->mapping = NULL;
1598                 /*
1599                  * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1600                  * is going to trigger because a previous call to
1601                  * remove_hugetlb_page() will set_compound_page_dtor(page,
1602                  * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1603                  */
1604                 h = size_to_hstate(page_size(page));
1605
1606                 __update_and_free_page(h, page);
1607
1608                 cond_resched();
1609         }
1610 }
1611 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1612
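/*
 * Wait for any huge pages queued on hpage_freelist to be freed.  Only pages
 * with optimized vmemmap take the deferred path, so there is nothing to
 * flush for hstates without vmemmap optimization.
 */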
1613 static inline void flush_free_hpage_work(struct hstate *h)
1614 {
1615         if (free_vmemmap_pages_per_hpage(h))
1616                 flush_work(&free_hpage_work);
1617 }
1618
1619 static void update_and_free_page(struct hstate *h, struct page *page,
1620                                  bool atomic)
1621 {
1622         if (!HPageVmemmapOptimized(page) || !atomic) {
1623                 __update_and_free_page(h, page);
1624                 return;
1625         }
1626
1627         /*
1628          * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1629          *
1630          * Only call schedule_work() if hpage_freelist is previously
1631          * empty. Otherwise, schedule_work() had been called but the workfn
1632          * hasn't retrieved the list yet.
1633          */
1634         if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1635                 schedule_work(&free_hpage_work);
1636 }
1637
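/*
 * Free a list of pages that were previously removed from the pool with
 * remove_hugetlb_page().  Must be called without holding hugetlb_lock, as
 * the freeing is done synchronously (atomic == false) and may sleep.
 */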
1638 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1639 {
1640         struct page *page, *t_page;
1641
1642         list_for_each_entry_safe(page, t_page, list, lru) {
1643                 update_and_free_page(h, page, false);
1644                 cond_resched();
1645         }
1646 }
1647
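/*
 * Map a huge page size in bytes back to its hstate; returns NULL if no
 * hstate of that size has been registered.
 */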
1648 struct hstate *size_to_hstate(unsigned long size)
1649 {
1650         struct hstate *h;
1651
1652         for_each_hstate(h) {
1653                 if (huge_page_size(h) == size)
1654                         return h;
1655         }
1656         return NULL;
1657 }
1658
1659 void free_huge_page(struct page *page)
1660 {
1661         /*
1662          * Can't pass hstate in here because it is called from the
1663          * compound page destructor.
1664          */
1665         struct hstate *h = page_hstate(page);
1666         int nid = page_to_nid(page);
1667         struct hugepage_subpool *spool = hugetlb_page_subpool(page);
1668         bool restore_reserve;
1669         unsigned long flags;
1670
1671         VM_BUG_ON_PAGE(page_count(page), page);
1672         VM_BUG_ON_PAGE(page_mapcount(page), page);
1673
1674         hugetlb_set_page_subpool(page, NULL);
1675         page->mapping = NULL;
1676         restore_reserve = HPageRestoreReserve(page);
1677         ClearHPageRestoreReserve(page);
1678
1679         /*
1680          * If HPageRestoreReserve was set on page, page allocation consumed a
1681          * reservation.  If the page was associated with a subpool, there
1682          * would have been a page reserved in the subpool before allocation
1683          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1684          * reservation, do not call hugepage_subpool_put_pages() as this will
1685          * remove the reserved page from the subpool.
1686          */
1687         if (!restore_reserve) {
1688                 /*
1689                  * A return code of zero implies that the subpool will be
1690                  * under its minimum size if the reservation is not restored
1691                  * after page is free.  Therefore, force restore_reserve
1692                  * after the page is freed.  Therefore, force the
1693                  * restore_reserve operation.
1694                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1695                         restore_reserve = true;
1696         }
1697
1698         spin_lock_irqsave(&hugetlb_lock, flags);
1699         ClearHPageMigratable(page);
1700         hugetlb_cgroup_uncharge_page(hstate_index(h),
1701                                      pages_per_huge_page(h), page);
1702         hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1703                                           pages_per_huge_page(h), page);
1704         if (restore_reserve)
1705                 h->resv_huge_pages++;
1706
1707         if (HPageTemporary(page)) {
1708                 remove_hugetlb_page(h, page, false);
1709                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1710                 update_and_free_page(h, page, true);
1711         } else if (h->surplus_huge_pages_node[nid]) {
1712                 /* remove the page from active list */
1713                 remove_hugetlb_page(h, page, true);
1714                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1715                 update_and_free_page(h, page, true);
1716         } else {
1717                 arch_clear_hugepage_flags(page);
1718                 enqueue_huge_page(h, page);
1719                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1720         }
1721 }
1722
1723 /*
1724  * Must be called with the hugetlb lock held
1725  */
1726 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1727 {
1728         lockdep_assert_held(&hugetlb_lock);
1729         h->nr_huge_pages++;
1730         h->nr_huge_pages_node[nid]++;
1731 }
1732
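/*
 * Initialize @page as a hugetlb page (vmemmap freeing, destructor, cgroup
 * and subpool pointers) without touching the pool counters.  The accounting
 * half is done separately, under hugetlb_lock, by
 * __prep_account_new_huge_page(), which lets callers such as
 * alloc_and_dissolve_huge_page() do the preparation outside the lock.
 */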
1733 static void __prep_new_huge_page(struct hstate *h, struct page *page)
1734 {
1735         free_huge_page_vmemmap(h, page);
1736         INIT_LIST_HEAD(&page->lru);
1737         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1738         hugetlb_set_page_subpool(page, NULL);
1739         set_hugetlb_cgroup(page, NULL);
1740         set_hugetlb_cgroup_rsvd(page, NULL);
1741 }
1742
1743 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1744 {
1745         __prep_new_huge_page(h, page);
1746         spin_lock_irq(&hugetlb_lock);
1747         __prep_account_new_huge_page(h, nid);
1748         spin_unlock_irq(&hugetlb_lock);
1749 }
1750
1751 static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
1752                                                                 bool demote)
1753 {
1754         int i, j;
1755         int nr_pages = 1 << order;
1756         struct page *p = page + 1;
1757
1758         /* we rely on prep_new_huge_page to set the destructor */
1759         set_compound_order(page, order);
1760         __ClearPageReserved(page);
1761         __SetPageHead(page);
1762         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1763                 /*
1764                  * For gigantic hugepages allocated through bootmem at
1765                  * boot, it's safer to be consistent with the not-gigantic
1766                  * hugepages and clear the PG_reserved bit from all tail pages
1767                  * too.  Otherwise drivers using get_user_pages() to access tail
1768                  * pages may get the reference counting wrong if they see
1769                  * PG_reserved set on a tail page (despite the head page not
1770                  * having PG_reserved set).  Enforcing this consistency between
1771                  * head and tail pages allows drivers to optimize away a check
1772          * on the head page when they need to know if put_page() is needed
1773                  * after get_user_pages().
1774                  */
1775                 __ClearPageReserved(p);
1776                 /*
1777                  * Subtle and very unlikely
1778                  *
1779                  * Gigantic 'page allocators' such as memblock or cma will
1780                  * return a set of pages with each page ref counted.  We need
1781                  * to turn this set of pages into a compound page with tail
1782                  * page ref counts set to zero.  Code such as speculative page
1783                  * cache adding could take a ref on a 'to be' tail page.
1784                  * We need to respect any increased ref count, and only set
1785                  * the ref count to zero if count is currently 1.  If count
1786                  * is not 1, we return an error.  An error return indicates
1787                  * the set of pages can not be converted to a gigantic page.
1788                  * The caller who allocated the pages should then discard the
1789                  * pages using the appropriate free interface.
1790                  *
1791                  * In the case of demote, the ref count will be zero.
1792                  */
1793                 if (!demote) {
1794                         if (!page_ref_freeze(p, 1)) {
1795                                 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1796                                 goto out_error;
1797                         }
1798                 } else {
1799                         VM_BUG_ON_PAGE(page_count(p), p);
1800                 }
1801                 set_compound_head(p, page);
1802         }
1803         atomic_set(compound_mapcount_ptr(page), -1);
1804         atomic_set(compound_pincount_ptr(page), 0);
1805         return true;
1806
1807 out_error:
1808         /* undo tail page modifications made above */
1809         p = page + 1;
1810         for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
1811                 clear_compound_head(p);
1812                 set_page_refcounted(p);
1813         }
1814         /* need to clear PG_reserved on remaining tail pages  */
1815         for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
1816                 __ClearPageReserved(p);
1817         set_compound_order(page, 0);
1818 #ifdef CONFIG_64BIT
1819         page[1].compound_nr = 0;
1820 #endif
1821         __ClearPageHead(page);
1822         return false;
1823 }
1824
1825 static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
1826 {
1827         return __prep_compound_gigantic_page(page, order, false);
1828 }
1829
1830 static bool prep_compound_gigantic_page_for_demote(struct page *page,
1831                                                         unsigned int order)
1832 {
1833         return __prep_compound_gigantic_page(page, order, true);
1834 }
1835
1836 /*
1837  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1838  * transparent huge pages.  See the PageTransHuge() documentation for more
1839  * details.
1840  */
1841 int PageHuge(struct page *page)
1842 {
1843         if (!PageCompound(page))
1844                 return 0;
1845
1846         page = compound_head(page);
1847         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1848 }
1849 EXPORT_SYMBOL_GPL(PageHuge);
1850
1851 /*
1852  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1853  * normal or transparent huge pages.
1854  */
1855 int PageHeadHuge(struct page *page_head)
1856 {
1857         if (!PageHead(page_head))
1858                 return 0;
1859
1860         return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1861 }
1862 EXPORT_SYMBOL_GPL(PageHeadHuge);
1863
1864 /*
1865  * Find and lock address space (mapping) in write mode.
1866  *
1867  * Upon entry, the page is locked which means that page_mapping() is
1868  * stable.  Due to locking order, we can only trylock_write.  If we can
1869  * not get the lock, simply return NULL to caller.
1870  */
1871 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1872 {
1873         struct address_space *mapping = page_mapping(hpage);
1874
1875         if (!mapping)
1876                 return mapping;
1877
1878         if (i_mmap_trylock_write(mapping))
1879                 return mapping;
1880
1881         return NULL;
1882 }
1883
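/*
 * Return the page cache index of @page in base (PAGE_SIZE) page units.
 * For hugepages of order >= MAX_ORDER, pfn arithmetic is used because the
 * tail struct pages of such pages may not be contiguous with the head.
 */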
1884 pgoff_t hugetlb_basepage_index(struct page *page)
1885 {
1886         struct page *page_head = compound_head(page);
1887         pgoff_t index = page_index(page_head);
1888         unsigned long compound_idx;
1889
1890         if (compound_order(page_head) >= MAX_ORDER)
1891                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1892         else
1893                 compound_idx = page - page_head;
1894
1895         return (index << compound_order(page_head)) + compound_idx;
1896 }
1897
1898 static struct page *alloc_buddy_huge_page(struct hstate *h,
1899                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1900                 nodemask_t *node_alloc_noretry)
1901 {
1902         int order = huge_page_order(h);
1903         struct page *page;
1904         bool alloc_try_hard = true;
1905
1906         /*
1907          * By default we always try hard to allocate the page with
1908          * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1909          * a loop (to adjust global huge page counts) and previous allocation
1910          * failed, do not continue to try hard on the same node.  Use the
1911          * node_alloc_noretry bitmap to manage this state information.
1912          */
1913         if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1914                 alloc_try_hard = false;
1915         gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1916         if (alloc_try_hard)
1917                 gfp_mask |= __GFP_RETRY_MAYFAIL;
1918         if (nid == NUMA_NO_NODE)
1919                 nid = numa_mem_id();
1920         page = __alloc_pages(gfp_mask, order, nid, nmask);
1921         if (page)
1922                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1923         else
1924                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1925
1926         /*
1927          * If we did not specify __GFP_RETRY_MAYFAIL but still got a page, this
1928          * indicates an overall state change.  Clear the bit so that we resume
1929          * normal 'try hard' allocations.
1930          */
1931         if (node_alloc_noretry && page && !alloc_try_hard)
1932                 node_clear(nid, *node_alloc_noretry);
1933
1934         /*
1935          * If we tried hard to get a page but failed, set bit so that
1936          * subsequent attempts will not try as hard until there is an
1937          * overall state change.
1938          */
1939         if (node_alloc_noretry && !page && alloc_try_hard)
1940                 node_set(nid, *node_alloc_noretry);
1941
1942         return page;
1943 }
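/*
 * Sketch of the intended node_alloc_noretry usage (the loop and variable
 * names below are illustrative, not an actual caller):
 *
 *	nodemask_t noretry = NODE_MASK_NONE;
 *	unsigned long i;
 *
 *	for (i = 0; i < nr_wanted; i++)
 *		if (!alloc_pool_huge_page(h, &node_states[N_MEMORY],
 *					  &noretry))
 *			break;
 */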
1944
1945 /*
1946  * Common helper to allocate a fresh hugetlb page. All specific allocators
1947  * should use this function to get new hugetlb pages
1948  */
1949 static struct page *alloc_fresh_huge_page(struct hstate *h,
1950                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1951                 nodemask_t *node_alloc_noretry)
1952 {
1953         struct page *page;
1954         bool retry = false;
1955
1956 retry:
1957         if (hstate_is_gigantic(h))
1958                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1959         else
1960                 page = alloc_buddy_huge_page(h, gfp_mask,
1961                                 nid, nmask, node_alloc_noretry);
1962         if (!page)
1963                 return NULL;
1964
1965         if (hstate_is_gigantic(h)) {
1966                 if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
1967                         /*
1968                          * Rare failure to convert pages to compound page.
1969                          * Free pages and try again - ONCE!
1970                          */
1971                         free_gigantic_page(page, huge_page_order(h));
1972                         if (!retry) {
1973                                 retry = true;
1974                                 goto retry;
1975                         }
1976                         return NULL;
1977                 }
1978         }
1979         prep_new_huge_page(h, page, page_to_nid(page));
1980
1981         return page;
1982 }
1983
1984 /*
1985  * Allocate a fresh page to the hugetlb allocator pool in a node-interleaved
1986  * manner.
1987  */
1988 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1989                                 nodemask_t *node_alloc_noretry)
1990 {
1991         struct page *page;
1992         int nr_nodes, node;
1993         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1994
1995         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1996                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1997                                                 node_alloc_noretry);
1998                 if (page)
1999                         break;
2000         }
2001
2002         if (!page)
2003                 return 0;
2004
2005         put_page(page); /* free it into the hugepage allocator */
2006
2007         return 1;
2008 }
2009
2010 /*
2011  * Remove a huge page from the pool, taking it from the next node to free.
2012  * Attempt to keep persistent huge pages more or less balanced over the
2013  * allowed nodes.  This routine only 'removes' the hugetlb page.  The caller
2014  * must make an additional call to free the page to low level allocators.
2015  * Called with hugetlb_lock held.
2016  */
2017 static struct page *remove_pool_huge_page(struct hstate *h,
2018                                                 nodemask_t *nodes_allowed,
2019                                                  bool acct_surplus)
2020 {
2021         int nr_nodes, node;
2022         struct page *page = NULL;
2023
2024         lockdep_assert_held(&hugetlb_lock);
2025         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2026                 /*
2027                  * If we're returning unused surplus pages, only examine
2028                  * nodes with surplus pages.
2029                  */
2030                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2031                     !list_empty(&h->hugepage_freelists[node])) {
2032                         page = list_entry(h->hugepage_freelists[node].next,
2033                                           struct page, lru);
2034                         remove_hugetlb_page(h, page, acct_surplus);
2035                         break;
2036                 }
2037         }
2038
2039         return page;
2040 }
2041
2042 /*
2043  * Dissolve a given free hugepage into free buddy pages. This function does
2044  * nothing for in-use hugepages and non-hugepages.
2045  * Return values:
2046  *
2047  *  -ENOMEM: failed to allocate the vmemmap pages needed to free the
2048  *           hugepage; this can happen under memory pressure when the
2049  *           feature of freeing unused vmemmap pages associated with each
2050  *           hugetlb page is enabled.
2051  *  -EBUSY:  failed to dissolve the free hugepage, or the hugepage is
2052  *           in use (allocated or reserved).
2053  *       0:  successfully dissolved the free hugepage, or the page is not
2054  *           a hugepage (considered as already dissolved).
2055  */
2056 int dissolve_free_huge_page(struct page *page)
2057 {
2058         int rc = -EBUSY;
2059
2060 retry:
2061         /* Don't disrupt the normal path by taking hugetlb_lock in vain */
2062         if (!PageHuge(page))
2063                 return 0;
2064
2065         spin_lock_irq(&hugetlb_lock);
2066         if (!PageHuge(page)) {
2067                 rc = 0;
2068                 goto out;
2069         }
2070
2071         if (!page_count(page)) {
2072                 struct page *head = compound_head(page);
2073                 struct hstate *h = page_hstate(head);
2074                 if (h->free_huge_pages - h->resv_huge_pages == 0)
2075                         goto out;
2076
2077                 /*
2078                  * We should make sure that the page is already on the free list
2079                  * when it is dissolved.
2080                  */
2081                 if (unlikely(!HPageFreed(head))) {
2082                         spin_unlock_irq(&hugetlb_lock);
2083                         cond_resched();
2084
2085                         /*
2086                          * Theoretically, we should return -EBUSY when we
2087                          * encounter this race. In fact, we have a chance
2088                          * to successfully dissolve the page if we retry,
2089                          * because the race window is quite small.  Seizing
2090                          * this opportunity increases the success rate of
2091                          * dissolving the page.
2092                          */
2093                         goto retry;
2094                 }
2095
2096                 remove_hugetlb_page(h, head, false);
2097                 h->max_huge_pages--;
2098                 spin_unlock_irq(&hugetlb_lock);
2099
2100                 /*
2101                  * Normally update_and_free_page will allocate required vmemmap
2102                  * before freeing the page.  update_and_free_page will fail to
2103                  * free the page if it can not allocate required vmemmap.  We
2104                  * need to adjust max_huge_pages if the page is not freed.
2105                  * Attempt to allocate vmemmap here so that we can take
2106                  * appropriate action on failure.
2107                  */
2108                 rc = alloc_huge_page_vmemmap(h, head);
2109                 if (!rc) {
2110                         /*
2111                          * Move PageHWPoison flag from head page to the raw
2112                          * error page, which makes any subpages rather than
2113                          * the error page reusable.
2114                          */
2115                         if (PageHWPoison(head) && page != head) {
2116                                 SetPageHWPoison(page);
2117                                 ClearPageHWPoison(head);
2118                         }
2119                         update_and_free_page(h, head, false);
2120                 } else {
2121                         spin_lock_irq(&hugetlb_lock);
2122                         add_hugetlb_page(h, head, false);
2123                         h->max_huge_pages++;
2124                         spin_unlock_irq(&hugetlb_lock);
2125                 }
2126
2127                 return rc;
2128         }
2129 out:
2130         spin_unlock_irq(&hugetlb_lock);
2131         return rc;
2132 }
2133
2134 /*
2135  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2136  * make specified memory blocks removable from the system.
2137  * Note that this will dissolve a free gigantic hugepage completely, if any
2138  * part of it lies within the given range.
2139  * Also note that if dissolve_free_huge_page() returns with an error, all
2140  * free hugepages that were dissolved before that error are lost.
2141  */
2142 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2143 {
2144         unsigned long pfn;
2145         struct page *page;
2146         int rc = 0;
2147
2148         if (!hugepages_supported())
2149                 return rc;
2150
2151         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
2152                 page = pfn_to_page(pfn);
2153                 rc = dissolve_free_huge_page(page);
2154                 if (rc)
2155                         break;
2156         }
2157
2158         return rc;
2159 }
2160
2161 /*
2162  * Allocates a fresh surplus page from the page allocator.
2163  */
2164 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2165                 int nid, nodemask_t *nmask, bool zero_ref)
2166 {
2167         struct page *page = NULL;
2168         bool retry = false;
2169
2170         if (hstate_is_gigantic(h))
2171                 return NULL;
2172
2173         spin_lock_irq(&hugetlb_lock);
2174         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2175                 goto out_unlock;
2176         spin_unlock_irq(&hugetlb_lock);
2177
2178 retry:
2179         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2180         if (!page)
2181                 return NULL;
2182
2183         spin_lock_irq(&hugetlb_lock);
2184         /*
2185          * We could have raced with a pool size change.
2186          * Double check that and simply deallocate the new page
2187          * if we would end up overcommitting the surplus. Abuse the
2188          * temporary page flag to work around the nasty free_huge_page
2189          * code flow.
2190          */
2191         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2192                 SetHPageTemporary(page);
2193                 spin_unlock_irq(&hugetlb_lock);
2194                 put_page(page);
2195                 return NULL;
2196         }
2197
2198         if (zero_ref) {
2199                 /*
2200                  * Caller requires a page with zero ref count.
2201                  * We will drop ref count here.  If someone else is holding
2202                  * a ref, the page will be freed when they drop it.  Abuse
2203                  * temporary page flag to accomplish this.
2204                  */
2205                 SetHPageTemporary(page);
2206                 if (!put_page_testzero(page)) {
2207                         /*
2208                          * Unexpected inflated ref count on a freshly
2209                          * allocated huge page.  Retry once.
2210                          */
2211                         pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
2212                         spin_unlock_irq(&hugetlb_lock);
2213                         if (retry)
2214                                 return NULL;
2215
2216                         retry = true;
2217                         goto retry;
2218                 }
2219                 ClearHPageTemporary(page);
2220         }
2221
2222         h->surplus_huge_pages++;
2223         h->surplus_huge_pages_node[page_to_nid(page)]++;
2224
2225 out_unlock:
2226         spin_unlock_irq(&hugetlb_lock);
2227
2228         return page;
2229 }
2230
2231 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2232                                      int nid, nodemask_t *nmask)
2233 {
2234         struct page *page;
2235
2236         if (hstate_is_gigantic(h))
2237                 return NULL;
2238
2239         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2240         if (!page)
2241                 return NULL;
2242
2243         /*
2244          * We do not account these pages as surplus because they are only
2245          * temporary and will be released properly on the last reference
2246          */
2247         SetHPageTemporary(page);
2248
2249         return page;
2250 }
2251
2252 /*
2253  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2254  */
2255 static
2256 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2257                 struct vm_area_struct *vma, unsigned long addr)
2258 {
2259         struct page *page = NULL;
2260         struct mempolicy *mpol;
2261         gfp_t gfp_mask = htlb_alloc_mask(h);
2262         int nid;
2263         nodemask_t *nodemask;
2264
2265         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2266         if (mpol_is_preferred_many(mpol)) {
2267                 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2268
2269                 gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2270                 page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
2271
2272                 /* Fallback to all nodes if page==NULL */
2273                 nodemask = NULL;
2274         }
2275
2276         if (!page)
2277                 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2278         mpol_cond_put(mpol);
2279         return page;
2280 }
2281
2282 /* page migration callback function */
2283 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2284                 nodemask_t *nmask, gfp_t gfp_mask)
2285 {
2286         spin_lock_irq(&hugetlb_lock);
2287         if (h->free_huge_pages - h->resv_huge_pages > 0) {
2288                 struct page *page;
2289
2290                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
2291                 if (page) {
2292                         spin_unlock_irq(&hugetlb_lock);
2293                         return page;
2294                 }
2295         }
2296         spin_unlock_irq(&hugetlb_lock);
2297
2298         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
2299 }
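/*
 * Example (sketch, not taken from an actual caller): migrating a hugetlb
 * page of hstate @h to a specific node with no nodemask restriction could
 * look like:
 *
 *	new = alloc_huge_page_nodemask(h, target_nid, NULL,
 *				       htlb_alloc_mask(h));
 */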
2300
2301 /* mempolicy aware migration callback */
2302 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2303                 unsigned long address)
2304 {
2305         struct mempolicy *mpol;
2306         nodemask_t *nodemask;
2307         struct page *page;
2308         gfp_t gfp_mask;
2309         int node;
2310
2311         gfp_mask = htlb_alloc_mask(h);
2312         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2313         page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2314         mpol_cond_put(mpol);
2315
2316         return page;
2317 }
2318
2319 /*
2320  * Increase the hugetlb pool such that it can accommodate a reservation
2321  * of size 'delta'.
2322  */
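/*
 * Worked example (illustrative numbers): with resv_huge_pages == 10,
 * free_huge_pages == 8 and delta == 5, 'needed' starts at 7.  That many
 * surplus pages are allocated with the lock dropped, 'needed' is then
 * recomputed in case the pool changed in the meantime, the required pages
 * are enqueued into the pool and any extras are freed back to the buddy
 * allocator.
 */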
2323 static int gather_surplus_pages(struct hstate *h, long delta)
2324         __must_hold(&hugetlb_lock)
2325 {
2326         struct list_head surplus_list;
2327         struct page *page, *tmp;
2328         int ret;
2329         long i;
2330         long needed, allocated;
2331         bool alloc_ok = true;
2332
2333         lockdep_assert_held(&hugetlb_lock);
2334         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2335         if (needed <= 0) {
2336                 h->resv_huge_pages += delta;
2337                 return 0;
2338         }
2339
2340         allocated = 0;
2341         INIT_LIST_HEAD(&surplus_list);
2342
2343         ret = -ENOMEM;
2344 retry:
2345         spin_unlock_irq(&hugetlb_lock);
2346         for (i = 0; i < needed; i++) {
2347                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2348                                 NUMA_NO_NODE, NULL, true);
2349                 if (!page) {
2350                         alloc_ok = false;
2351                         break;
2352                 }
2353                 list_add(&page->lru, &surplus_list);
2354                 cond_resched();
2355         }
2356         allocated += i;
2357
2358         /*
2359          * After retaking hugetlb_lock, we need to recalculate 'needed'
2360          * because either resv_huge_pages or free_huge_pages may have changed.
2361          */
2362         spin_lock_irq(&hugetlb_lock);
2363         needed = (h->resv_huge_pages + delta) -
2364                         (h->free_huge_pages + allocated);
2365         if (needed > 0) {
2366                 if (alloc_ok)
2367                         goto retry;
2368                 /*
2369                  * We were not able to allocate enough pages to
2370                  * satisfy the entire reservation so we free what
2371                  * we've allocated so far.
2372                  */
2373                 goto free;
2374         }
2375         /*
2376          * The surplus_list now contains _at_least_ the number of extra pages
2377          * needed to accommodate the reservation.  Add the appropriate number
2378          * of pages to the hugetlb pool and free the extras back to the buddy
2379          * allocator.  Commit the entire reservation here to prevent another
2380          * process from stealing the pages as they are added to the pool but
2381          * before they are reserved.
2382          */
2383         needed += allocated;
2384         h->resv_huge_pages += delta;
2385         ret = 0;
2386
2387         /* Free the needed pages to the hugetlb pool */
2388         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2389                 if ((--needed) < 0)
2390                         break;
2391                 /* Add the page to the hugetlb allocator */
2392                 enqueue_huge_page(h, page);
2393         }
2394 free:
2395         spin_unlock_irq(&hugetlb_lock);
2396
2397         /*
2398          * Free unnecessary surplus pages to the buddy allocator.
2399          * These pages have a zero ref count, so call free_huge_page() directly.
2400          */
2401         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2402                 free_huge_page(page);
2403         spin_lock_irq(&hugetlb_lock);
2404
2405         return ret;
2406 }
2407
2408 /*
2409  * This routine has two main purposes:
2410  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2411  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2412  *    to the associated reservation map.
2413  * 2) Free any unused surplus pages that may have been allocated to satisfy
2414  *    the reservation.  As many as unused_resv_pages may be freed.
2415  */
2416 static void return_unused_surplus_pages(struct hstate *h,
2417                                         unsigned long unused_resv_pages)
2418 {
2419         unsigned long nr_pages;
2420         struct page *page;
2421         LIST_HEAD(page_list);
2422
2423         lockdep_assert_held(&hugetlb_lock);
2424         /* Uncommit the reservation */
2425         h->resv_huge_pages -= unused_resv_pages;
2426
2427         /* Cannot return gigantic pages currently */
2428         if (hstate_is_gigantic(h))
2429                 goto out;
2430
2431         /*
2432          * Part (or even all) of the reservation could have been backed
2433          * by pre-allocated pages. Only free surplus pages.
2434          */
2435         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2436
2437         /*
2438          * We want to release as many surplus pages as possible, spread
2439          * evenly across all nodes with memory. Iterate across these nodes
2440          * until we can no longer free unreserved surplus pages. This occurs
2441          * when the nodes with surplus pages have no free pages.
2442          * remove_pool_huge_page() will balance the freed pages across the
2443          * on-line nodes with memory and will handle the hstate accounting.
2444          */
2445         while (nr_pages--) {
2446                 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2447                 if (!page)
2448                         goto out;
2449
2450                 list_add(&page->lru, &page_list);
2451         }
2452
2453 out:
2454         spin_unlock_irq(&hugetlb_lock);
2455         update_and_free_pages_bulk(h, &page_list);
2456         spin_lock_irq(&hugetlb_lock);
2457 }
2458
2459
2460 /*
2461  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2462  * are used by the huge page allocation routines to manage reservations.
2463  *
2464  * vma_needs_reservation is called to determine if the huge page at addr
2465  * within the vma has an associated reservation.  If a reservation is
2466  * needed, the value 1 is returned.  The caller is then responsible for
2467  * managing the global reservation and subpool usage counts.  After
2468  * the huge page has been allocated, vma_commit_reservation is called
2469  * to add the page to the reservation map.  If the page allocation fails,
2470  * the reservation must be ended instead of committed.  vma_end_reservation
2471  * is called in such cases.
2472  *
2473  * In the normal case, vma_commit_reservation returns the same value
2474  * as the preceding vma_needs_reservation call.  The only time this
2475  * is not the case is if a reserve map was changed between calls.  It
2476  * is the responsibility of the caller to notice the difference and
2477  * take appropriate action.
2478  *
2479  * vma_add_reservation is used in error paths where a reservation must
2480  * be restored when a newly allocated huge page must be freed.  It is
2481  * to be called after calling vma_needs_reservation to determine if a
2482  * reservation exists.
2483  *
2484  * vma_del_reservation is used in error paths where an entry in the reserve
2485  * map was created during huge page allocation and must be removed.  It is to
2486  * be called after calling vma_needs_reservation to determine if a reservation
2487  * exists.
2488  */
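/*
 * Concrete example of the shared/private asymmetry (illustrative): for a
 * VM_MAYSHARE mapping an entry in the reserve map means a reservation
 * exists, so vma_needs_reservation() returns 0 when the entry is present
 * and 1 when it is not.  For a private mapping the map instead records
 * pages whose reservation has already been consumed, so the raw result is
 * inverted below: no entry -> reservation still available -> return 0;
 * entry present -> reservation already consumed -> return 1.
 */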
2489 enum vma_resv_mode {
2490         VMA_NEEDS_RESV,
2491         VMA_COMMIT_RESV,
2492         VMA_END_RESV,
2493         VMA_ADD_RESV,
2494         VMA_DEL_RESV,
2495 };
2496 static long __vma_reservation_common(struct hstate *h,
2497                                 struct vm_area_struct *vma, unsigned long addr,
2498                                 enum vma_resv_mode mode)
2499 {
2500         struct resv_map *resv;
2501         pgoff_t idx;
2502         long ret;
2503         long dummy_out_regions_needed;
2504
2505         resv = vma_resv_map(vma);
2506         if (!resv)
2507                 return 1;
2508
2509         idx = vma_hugecache_offset(h, vma, addr);
2510         switch (mode) {
2511         case VMA_NEEDS_RESV:
2512                 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2513                 /* We assume that vma_reservation_* routines always operate on
2514                  * 1 page, and that adding a 1 page entry to the resv map can only
2515                  * ever require 1 region.
2516                  */
2517                 VM_BUG_ON(dummy_out_regions_needed != 1);
2518                 break;
2519         case VMA_COMMIT_RESV:
2520                 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2521                 /* region_add calls of range 1 should never fail. */
2522                 VM_BUG_ON(ret < 0);
2523                 break;
2524         case VMA_END_RESV:
2525                 region_abort(resv, idx, idx + 1, 1);
2526                 ret = 0;
2527                 break;
2528         case VMA_ADD_RESV:
2529                 if (vma->vm_flags & VM_MAYSHARE) {
2530                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2531                         /* region_add calls of range 1 should never fail. */
2532                         VM_BUG_ON(ret < 0);
2533                 } else {
2534                         region_abort(resv, idx, idx + 1, 1);
2535                         ret = region_del(resv, idx, idx + 1);
2536                 }
2537                 break;
2538         case VMA_DEL_RESV:
2539                 if (vma->vm_flags & VM_MAYSHARE) {
2540                         region_abort(resv, idx, idx + 1, 1);
2541                         ret = region_del(resv, idx, idx + 1);
2542                 } else {
2543                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2544                         /* region_add calls of range 1 should never fail. */
2545                         VM_BUG_ON(ret < 0);
2546                 }
2547                 break;
2548         default:
2549                 BUG();
2550         }
2551
2552         if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2553                 return ret;
2554         /*
2555          * We know private mapping must have HPAGE_RESV_OWNER set.
2556          *
2557          * In most cases, reserves always exist for private mappings.
2558          * However, a file associated with the mapping could have been
2559          * hole punched or truncated after reserves were consumed, and
2560          * a subsequent fault on such a range will not use reserves.
2561          * Subtle - The reserve map for private mappings has the
2562          * opposite meaning than that of shared mappings.  If NO
2563          * entry is in the reserve map, it means a reservation exists.
2564          * If an entry exists in the reserve map, it means the
2565          * reservation has already been consumed.  As a result, the
2566          * return value of this routine is the opposite of the
2567          * value returned from reserve map manipulation routines above.
2568          */
2569         if (ret > 0)
2570                 return 0;
2571         if (ret == 0)
2572                 return 1;
2573         return ret;
2574 }
2575
2576 static long vma_needs_reservation(struct hstate *h,
2577                         struct vm_area_struct *vma, unsigned long addr)
2578 {
2579         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2580 }
2581
2582 static long vma_commit_reservation(struct hstate *h,
2583                         struct vm_area_struct *vma, unsigned long addr)
2584 {
2585         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2586 }
2587
2588 static void vma_end_reservation(struct hstate *h,
2589                         struct vm_area_struct *vma, unsigned long addr)
2590 {
2591         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2592 }
2593
2594 static long vma_add_reservation(struct hstate *h,
2595                         struct vm_area_struct *vma, unsigned long addr)
2596 {
2597         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2598 }
2599
2600 static long vma_del_reservation(struct hstate *h,
2601                         struct vm_area_struct *vma, unsigned long addr)
2602 {
2603         return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2604 }
2605
2606 /*
2607  * This routine is called to restore reservation information on error paths.
2608  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2609  * the hugetlb mutex should remain held when calling this routine.
2610  *
2611  * It handles two specific cases:
2612  * 1) A reservation was in place and the page consumed the reservation.
2613  *    HPageRestoreReserve is set in the page.
2614  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2615  *    not set.  However, alloc_huge_page always updates the reserve map.
2616  *
2617  * In case 1, free_huge_page later in the error path will increment the
2618  * global reserve count.  But, free_huge_page does not have enough context
2619  * to adjust the reservation map.  This case deals primarily with private
2620  * mappings.  Adjust the reserve map here to be consistent with global
2621  * reserve count adjustments to be made by free_huge_page.  Make sure the
2622  * reserve map indicates there is a reservation present.
2623  *
2624  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2625  */
2626 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2627                         unsigned long address, struct page *page)
2628 {
2629         long rc = vma_needs_reservation(h, vma, address);
2630
2631         if (HPageRestoreReserve(page)) {
2632                 if (unlikely(rc < 0))
2633                         /*
2634                          * Rare out of memory condition in reserve map
2635                          * manipulation.  Clear HPageRestoreReserve so that
2636                          * global reserve count will not be incremented
2637                          * by free_huge_page.  This will make it appear
2638                          * as though the reservation for this page was
2639                          * consumed.  This may prevent the task from
2640                          * faulting in the page at a later time.  This
2641                          * is better than inconsistent global huge page
2642                          * accounting of reserve counts.
2643                          */
2644                         ClearHPageRestoreReserve(page);
2645                 else if (rc)
2646                         (void)vma_add_reservation(h, vma, address);
2647                 else
2648                         vma_end_reservation(h, vma, address);
2649         } else {
2650                 if (!rc) {
2651                         /*
2652                          * This indicates there is an entry in the reserve map
2653                          * not added by alloc_huge_page.  We know it was added
2654                          * before the alloc_huge_page call, otherwise
2655                          * HPageRestoreReserve would be set on the page.
2656                          * Remove the entry so that a subsequent allocation
2657                          * does not consume a reservation.
2658                          */
2659                         rc = vma_del_reservation(h, vma, address);
2660                         if (rc < 0)
2661                                 /*
2662                                  * VERY rare out of memory condition.  Since
2663                                  * we can not delete the entry, set
2664                                  * HPageRestoreReserve so that the reserve
2665                                  * count will be incremented when the page
2666                                  * is freed.  This reserve will be consumed
2667                                  * on a subsequent allocation.
2668                                  */
2669                                 SetHPageRestoreReserve(page);
2670                 } else if (rc < 0) {
2671                         /*
2672                          * Rare out of memory condition from
2673                          * vma_needs_reservation call.  Memory allocation is
2674                          * only attempted if a new entry is needed.  Therefore,
2675                          * this implies there is not an entry in the
2676                          * reserve map.
2677                          *
2678                          * For shared mappings, no entry in the map indicates
2679                          * no reservation.  We are done.
2680                          */
2681                         if (!(vma->vm_flags & VM_MAYSHARE))
2682                                 /*
2683                                  * For private mappings, no entry indicates
2684                                  * a reservation is present.  Since we can
2685                                  * not add an entry, set HPageRestoreReserve
2686                                  * on the page so reserve count will be
2687                                  * incremented when freed.  This reserve will
2688                                  * be consumed on a subsequent allocation.
2689                                  */
2690                                 SetHPageRestoreReserve(page);
2691                 } else
2692                         /*
2693                          * No reservation present, do nothing
2694                          */
2695                          vma_end_reservation(h, vma, address);
2696         }
2697 }
2698
2699 /*
2700  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2701  * @h: struct hstate old page belongs to
2702  * @old_page: Old page to dissolve
2703  * @list: List to isolate the page in case we need to
2704  * Returns 0 on success, otherwise negated error.
2705  */
2706 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2707                                         struct list_head *list)
2708 {
2709         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2710         int nid = page_to_nid(old_page);
2711         bool alloc_retry = false;
2712         struct page *new_page;
2713         int ret = 0;
2714
2715         /*
2716          * Before dissolving the page, we need to allocate a new one for the
2717          * pool to remain stable.  Here, we allocate the page and 'prep' it
2718          * by doing everything but actually updating counters and adding to
2719          * the pool.  This simplifies things and lets us do most of the processing
2720          * under the lock.
2721          */
2722 alloc_retry:
2723         new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2724         if (!new_page)
2725                 return -ENOMEM;
2726         /*
2727          * If all goes well, this page will be directly added to the free
2728          * list in the pool.  For this the ref count needs to be zero.
2729          * Attempt to drop now, and retry once if needed.  It is VERY
2730          * unlikely there is another ref on the page.
2731          *
2732          * If someone else has a reference to the page, it will be freed
2733          * when they drop their ref.  Abuse temporary page flag to accomplish
2734          * this.  Retry once if there is an inflated ref count.
2735          */
2736         SetHPageTemporary(new_page);
2737         if (!put_page_testzero(new_page)) {
2738                 if (alloc_retry)
2739                         return -EBUSY;
2740
2741                 alloc_retry = true;
2742                 goto alloc_retry;
2743         }
2744         ClearHPageTemporary(new_page);
2745
2746         __prep_new_huge_page(h, new_page);
2747
2748 retry:
2749         spin_lock_irq(&hugetlb_lock);
2750         if (!PageHuge(old_page)) {
2751                 /*
2752                  * Freed from under us. Drop new_page too.
2753                  */
2754                 goto free_new;
2755         } else if (page_count(old_page)) {
2756                 /*
2757                  * Someone has grabbed the page, try to isolate it here.
2758                  * Fail with -EBUSY if not possible.
2759                  */
2760                 spin_unlock_irq(&hugetlb_lock);
2761                 if (!isolate_huge_page(old_page, list))
2762                         ret = -EBUSY;
2763                 spin_lock_irq(&hugetlb_lock);
2764                 goto free_new;
2765         } else if (!HPageFreed(old_page)) {
2766                 /*
2767                  * Page's refcount is 0 but it has not been enqueued in the
2768                  * freelist yet. Race window is small, so we can succeed here if
2769                  * we retry.
2770                  */
2771                 spin_unlock_irq(&hugetlb_lock);
2772                 cond_resched();
2773                 goto retry;
2774         } else {
2775                 /*
2776                  * Ok, old_page is still a genuine free hugepage. Remove it from
2777                  * the freelist and decrease the counters. These will be
2778                  * incremented again when calling __prep_account_new_huge_page()
2779                  * and enqueue_huge_page() for new_page. The counters will remain
2780                  * stable since this happens under the lock.
2781                  */
2782                 remove_hugetlb_page(h, old_page, false);
2783
2784                 /*
2785                  * Ref count on new page is already zero as it was dropped
2786                  * earlier.  It can be directly added to the pool free list.
2787                  */
2788                 __prep_account_new_huge_page(h, nid);
2789                 enqueue_huge_page(h, new_page);
2790
2791                 /*
2792                  * Pages have been replaced, so we can safely free the old one.
2793                  */
2794                 spin_unlock_irq(&hugetlb_lock);
2795                 update_and_free_page(h, old_page, false);
2796         }
2797
2798         return ret;
2799
2800 free_new:
2801         spin_unlock_irq(&hugetlb_lock);
2802         /* Page has a zero ref count, but needs a ref to be freed */
2803         set_page_refcounted(new_page);
2804         update_and_free_page(h, new_page, false);
2805
2806         return ret;
2807 }
2808
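/*
 * isolate_or_dissolve_huge_page - try to get a hugetlb page out of the way
 * @page: page belonging to the range being isolated
 * @list: list to move the page to if it can be isolated
 *
 * In-use pages are isolated onto @list for migration; free pages are
 * replaced via alloc_and_dissolve_huge_page().  Returns 0 on success (or
 * if the page was dissolved under us), -EBUSY if the page could not be
 * isolated, and -ENOMEM for gigantic pages or allocation failures.
 */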
2809 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2810 {
2811         struct hstate *h;
2812         struct page *head;
2813         int ret = -EBUSY;
2814
2815         /*
2816          * The page might have been dissolved from under our feet, so make sure
2817          * to carefully check the state under the lock.
2818          * Return success when racing as if we dissolved the page ourselves.
2819          */
2820         spin_lock_irq(&hugetlb_lock);
2821         if (PageHuge(page)) {
2822                 head = compound_head(page);
2823                 h = page_hstate(head);
2824         } else {
2825                 spin_unlock_irq(&hugetlb_lock);
2826                 return 0;
2827         }
2828         spin_unlock_irq(&hugetlb_lock);
2829
2830         /*
2831          * Fence off gigantic pages as there is a cyclic dependency between
2832          * alloc_contig_range and them. Return -ENOMEM as this has the effect
2833          * of bailing out right away without further retrying.
2834          */
2835         if (hstate_is_gigantic(h))
2836                 return -ENOMEM;
2837
2838         if (page_count(head) && isolate_huge_page(head, list))
2839                 ret = 0;
2840         else if (!page_count(head))
2841                 ret = alloc_and_dissolve_huge_page(h, head, list);
2842
2843         return ret;
2844 }
2845
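/*
 * Allocate a huge page for @vma at @addr, consuming a reservation when one
 * exists (unless @avoid_reserve).  The subpool and hugetlb cgroups are
 * charged as needed; if no pooled page is available, fall back to a surplus
 * allocation.  Returns the page on success or an ERR_PTR() on failure.
 */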
2846 struct page *alloc_huge_page(struct vm_area_struct *vma,
2847                                     unsigned long addr, int avoid_reserve)
2848 {
2849         struct hugepage_subpool *spool = subpool_vma(vma);
2850         struct hstate *h = hstate_vma(vma);
2851         struct page *page;
2852         long map_chg, map_commit;
2853         long gbl_chg;
2854         int ret, idx;
2855         struct hugetlb_cgroup *h_cg;
2856         bool deferred_reserve;
2857
2858         idx = hstate_index(h);
2859         /*
2860          * Examine the region/reserve map to determine if the process
2861          * has a reservation for the page to be allocated.  A return
2862          * code of zero indicates a reservation exists (no change).
2863          */
2864         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2865         if (map_chg < 0)
2866                 return ERR_PTR(-ENOMEM);
2867
2868         /*
2869          * Processes that did not create the mapping will have no
2870          * reserves as indicated by the region/reserve map. Check
2871          * that the allocation will not exceed the subpool limit.
2872          * Allocations for MAP_NORESERVE mappings also need to be
2873          * checked against any subpool limit.
2874          */
2875         if (map_chg || avoid_reserve) {
2876                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2877                 if (gbl_chg < 0) {
2878                         vma_end_reservation(h, vma, addr);
2879                         return ERR_PTR(-ENOSPC);
2880                 }
2881
2882                 /*
2883                  * Even though there was no reservation in the region/reserve
2884                  * map, there could be reservations associated with the
2885                  * subpool that can be used.  This would be indicated if the
2886                  * return value of hugepage_subpool_get_pages() is zero.
2887                  * However, if avoid_reserve is specified we still avoid even
2888                  * the subpool reservations.
2889                  */
2890                 if (avoid_reserve)
2891                         gbl_chg = 1;
2892         }
2893
2894         /* If this allocation is not consuming a reservation, charge it now.
2895          */
2896         deferred_reserve = map_chg || avoid_reserve;
2897         if (deferred_reserve) {
2898                 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2899                         idx, pages_per_huge_page(h), &h_cg);
2900                 if (ret)
2901                         goto out_subpool_put;
2902         }
2903
2904         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2905         if (ret)
2906                 goto out_uncharge_cgroup_reservation;
2907
2908         spin_lock_irq(&hugetlb_lock);
2909         /*
2910          * gbl_chg is passed to indicate whether or not a page must be taken
2911          * from the global free pool (global change).  gbl_chg == 0 indicates
2912          * a reservation exists for the allocation.
2913          */
2914         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2915         if (!page) {
2916                 spin_unlock_irq(&hugetlb_lock);
2917                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2918                 if (!page)
2919                         goto out_uncharge_cgroup;
2920                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2921                         SetHPageRestoreReserve(page);
2922                         h->resv_huge_pages--;
2923                 }
2924                 spin_lock_irq(&hugetlb_lock);
2925                 list_add(&page->lru, &h->hugepage_activelist);
2926                 /* Fall through */
2927         }
2928         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2929         /* If allocation is not consuming a reservation, also store the
2930          * hugetlb_cgroup pointer on the page.
2931          */
2932         if (deferred_reserve) {
2933                 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2934                                                   h_cg, page);
2935         }
2936
2937         spin_unlock_irq(&hugetlb_lock);
2938
2939         hugetlb_set_page_subpool(page, spool);
2940
2941         map_commit = vma_commit_reservation(h, vma, addr);
2942         if (unlikely(map_chg > map_commit)) {
2943                 /*
2944                  * The page was added to the reservation map between
2945                  * vma_needs_reservation and vma_commit_reservation.
2946                  * This indicates a race with hugetlb_reserve_pages.
2947                  * Adjust for the subpool count incremented above AND
2948                  * in hugetlb_reserve_pages for the same page.  Also,
2949                  * the reservation count added in hugetlb_reserve_pages
2950                  * no longer applies.
2951                  */
2952                 long rsv_adjust;
2953
2954                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2955                 hugetlb_acct_memory(h, -rsv_adjust);
2956                 if (deferred_reserve)
2957                         hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2958                                         pages_per_huge_page(h), page);
2959         }
2960         return page;
2961
2962 out_uncharge_cgroup:
2963         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2964 out_uncharge_cgroup_reservation:
2965         if (deferred_reserve)
2966                 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2967                                                     h_cg);
2968 out_subpool_put:
2969         if (map_chg || avoid_reserve)
2970                 hugepage_subpool_put_pages(spool, 1);
2971         vma_end_reservation(h, vma, addr);
2972         return ERR_PTR(-ENOSPC);
2973 }
2974
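/*
 * Allocate a gigantic page from memblock at boot time, either on the
 * requested node or round-robin across nodes with memory, and record it on
 * huge_boot_pages so gather_bootmem_prealloc() can add it to the pool once
 * the mem_map is up.  Returns 1 on success, 0 on failure.  Architectures
 * may override this via the weak alias below.
 */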
2975 int alloc_bootmem_huge_page(struct hstate *h, int nid)
2976         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2977 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
2978 {
2979         struct huge_bootmem_page *m = NULL; /* initialize for clang */
2980         int nr_nodes, node;
2981
2982         if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
2983                 return 0;
2984         /* do node specific alloc */
2985         if (nid != NUMA_NO_NODE) {
2986                 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
2987                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
2988                 if (!m)
2989                         return 0;
2990                 goto found;
2991         }
2992         /* allocate from next node when distributing huge pages */
2993         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2994                 m = memblock_alloc_try_nid_raw(
2995                                 huge_page_size(h), huge_page_size(h),
2996                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2997                 /*
2998                  * Use the beginning of the huge page to store the
2999                  * huge_bootmem_page struct (until gather_bootmem
3000                  * puts them into the mem_map).
3001                  */
3002                 if (!m)
3003                         return 0;
3004                 goto found;
3005         }
3006
3007 found:
3008         /* Put them into a private list first because mem_map is not up yet */
3009         INIT_LIST_HEAD(&m->list);
3010         list_add(&m->list, &huge_boot_pages);
3011         m->hstate = h;
3012         return 1;
3013 }
3014
3015 /*
3016  * Put bootmem huge pages into the standard lists after mem_map is up.
3017  * Note: This only applies to gigantic (order >= MAX_ORDER) pages.
3018  */
3019 static void __init gather_bootmem_prealloc(void)
3020 {
3021         struct huge_bootmem_page *m;
3022
3023         list_for_each_entry(m, &huge_boot_pages, list) {
3024                 struct page *page = virt_to_page(m);
3025                 struct hstate *h = m->hstate;
3026
3027                 VM_BUG_ON(!hstate_is_gigantic(h));
3028                 WARN_ON(page_count(page) != 1);
3029                 if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3030                         WARN_ON(PageReserved(page));
3031                         prep_new_huge_page(h, page, page_to_nid(page));
3032                         put_page(page); /* add to the hugepage allocator */
3033                 } else {
3034                         /* VERY unlikely inflated ref count on a tail page */
3035                         free_gigantic_page(page, huge_page_order(h));
3036                 }
3037
3038                 /*
3039                  * We need to restore the 'stolen' pages to totalram_pages
3040                  * in order to fix confusing memory reports from free(1) and
3041                  * other side-effects, like CommitLimit going negative.
3042                  */
3043                 adjust_managed_page_count(page, pages_per_huge_page(h));
3044                 cond_resched();
3045         }
3046 }
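
/*
 * Boot-time allocation of h->max_huge_pages_node[nid] huge pages on a
 * specific node, as requested via "hugepages=<node>:<count>".  If fewer
 * pages could be allocated, the global and per-node maximums are reduced
 * accordingly and a warning is printed.
 */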
3047 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3048 {
3049         unsigned long i;
3050         char buf[32];
3051
3052         for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3053                 if (hstate_is_gigantic(h)) {
3054                         if (!alloc_bootmem_huge_page(h, nid))
3055                                 break;
3056                 } else {
3057                         struct page *page;
3058                         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3059
3060                         page = alloc_fresh_huge_page(h, gfp_mask, nid,
3061                                         &node_states[N_MEMORY], NULL);
3062                         if (!page)
3063                                 break;
3064                         put_page(page); /* free it into the hugepage allocator */
3065                 }
3066                 cond_resched();
3067         }
3068         if (i == h->max_huge_pages_node[nid])
3069                 return;
3070
3071         string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3072         pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3073                 h->max_huge_pages_node[nid], buf, nid, i);
3074         h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3075         h->max_huge_pages_node[nid] = i;
3076 }
3077
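/*
 * Boot-time allocation of the huge page pool for @h: honor any
 * node-specific requests first, otherwise allocate max_huge_pages balanced
 * across all nodes with memory.
 */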
3078 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3079 {
3080         unsigned long i;
3081         nodemask_t *node_alloc_noretry;
3082         bool node_specific_alloc = false;
3083
3084         /* skip gigantic hugepage allocation if hugetlb_cma is enabled */
3085         if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3086                 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3087                 return;
3088         }
3089
3090         /* do node specific alloc */
3091         for (i = 0; i < nr_online_nodes; i++) {
3092                 if (h->max_huge_pages_node[i] > 0) {
3093                         hugetlb_hstate_alloc_pages_onenode(h, i);
3094                         node_specific_alloc = true;
3095                 }
3096         }
3097
3098         if (node_specific_alloc)
3099                 return;
3100
3101         /* below does node-balanced allocation across all nodes */
3102         if (!hstate_is_gigantic(h)) {
3103                 /*
3104                  * Bit mask controlling how hard we retry per-node allocations.
3105                  * Ignore errors as lower level routines can deal with
3106                  * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3107                  * time, we are likely in bigger trouble.
3108                  */
3109                 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3110                                                 GFP_KERNEL);
3111         } else {
3112                 /* allocations done at boot time */
3113                 node_alloc_noretry = NULL;
3114         }
3115
3116         /* bit mask controlling how hard we retry per-node allocations */
3117         if (node_alloc_noretry)
3118                 nodes_clear(*node_alloc_noretry);
3119
3120         for (i = 0; i < h->max_huge_pages; ++i) {
3121                 if (hstate_is_gigantic(h)) {
3122                         if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3123                                 break;
3124                 } else if (!alloc_pool_huge_page(h,
3125                                          &node_states[N_MEMORY],
3126                                          node_alloc_noretry))
3127                         break;
3128                 cond_resched();
3129         }
3130         if (i < h->max_huge_pages) {
3131                 char buf[32];
3132
3133                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3134                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3135                         h->max_huge_pages, buf, i);
3136                 h->max_huge_pages = i;
3137         }
3138         kfree(node_alloc_noretry);
3139 }
3140
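/*
 * Initialize all registered hstates: record the minimum huge page order,
 * allocate the boot-time pools for non-gigantic sizes, and pick the
 * largest possible demote order for each hstate.
 */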
3141 static void __init hugetlb_init_hstates(void)
3142 {
3143         struct hstate *h, *h2;
3144
3145         for_each_hstate(h) {
3146                 if (minimum_order > huge_page_order(h))
3147                         minimum_order = huge_page_order(h);
3148
3149                 /* oversize hugepages were init'ed in early boot */
3150                 if (!hstate_is_gigantic(h))
3151                         hugetlb_hstate_alloc_pages(h);
3152
3153                 /*
3154                  * Set demote order for each hstate.  Note that
3155                  * h->demote_order is initially 0.
3156                  * - We can not demote gigantic pages if runtime freeing
3157                  *   is not supported, so skip this.
3158                  * - If CMA allocation is possible, we can not demote
3159                  *   HUGETLB_PAGE_ORDER or smaller size pages.
3160                  */
3161                 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3162                         continue;
3163                 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3164                         continue;
3165                 for_each_hstate(h2) {
3166                         if (h2 == h)
3167                                 continue;
3168                         if (h2->order < h->order &&
3169                             h2->order > h->demote_order)
3170                                 h->demote_order = h2->order;
3171                 }
3172         }
3173         VM_BUG_ON(minimum_order == UINT_MAX);
3174 }
3175
3176 static void __init report_hugepages(void)
3177 {
3178         struct hstate *h;
3179
3180         for_each_hstate(h) {
3181                 char buf[32];
3182
3183                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3184                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
3185                         buf, h->free_huge_pages);
3186         }
3187 }
3188
3189 #ifdef CONFIG_HIGHMEM
3190 static void try_to_free_low(struct hstate *h, unsigned long count,
3191                                                 nodemask_t *nodes_allowed)
3192 {
3193         int i;
3194         LIST_HEAD(page_list);
3195
3196         lockdep_assert_held(&hugetlb_lock);
3197         if (hstate_is_gigantic(h))
3198                 return;
3199
3200         /*
3201          * Collect pages to be freed on a list, and free after dropping lock
3202          */
3203         for_each_node_mask(i, *nodes_allowed) {
3204                 struct page *page, *next;
3205                 struct list_head *freel = &h->hugepage_freelists[i];
3206                 list_for_each_entry_safe(page, next, freel, lru) {
3207                         if (count >= h->nr_huge_pages)
3208                                 goto out;
3209                         if (PageHighMem(page))
3210                                 continue;
3211                         remove_hugetlb_page(h, page, false);
3212                         list_add(&page->lru, &page_list);
3213                 }
3214         }
3215
3216 out:
3217         spin_unlock_irq(&hugetlb_lock);
3218         update_and_free_pages_bulk(h, &page_list);
3219         spin_lock_irq(&hugetlb_lock);
3220 }
3221 #else
3222 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3223                                                 nodemask_t *nodes_allowed)
3224 {
3225 }
3226 #endif
3227
3228 /*
3229  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3230  * balanced by operating on them in a round-robin fashion.
3231  * Returns 1 if an adjustment was made.
3232  */
3233 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3234                                 int delta)
3235 {
3236         int nr_nodes, node;
3237
3238         lockdep_assert_held(&hugetlb_lock);
3239         VM_BUG_ON(delta != -1 && delta != 1);
3240
3241         if (delta < 0) {
3242                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3243                         if (h->surplus_huge_pages_node[node])
3244                                 goto found;
3245                 }
3246         } else {
3247                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3248                         if (h->surplus_huge_pages_node[node] <
3249                                         h->nr_huge_pages_node[node])
3250                                 goto found;
3251                 }
3252         }
3253         return 0;
3254
3255 found:
3256         h->surplus_huge_pages += delta;
3257         h->surplus_huge_pages_node[node] += delta;
3258         return 1;
3259 }
3260
3261 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
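/*
 * Resize the persistent huge page pool of @h to @count pages, optionally
 * restricted to a single node.  The pool is grown with fresh allocations
 * and shrunk by freeing pages or marking them surplus.  Serialized against
 * other resizes by h->resize_lock.
 */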
3262 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3263                               nodemask_t *nodes_allowed)
3264 {
3265         unsigned long min_count, ret;
3266         struct page *page;
3267         LIST_HEAD(page_list);
3268         NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3269
3270         /*
3271          * Bit mask controlling how hard we retry per-node allocations.
3272          * If we can not allocate the bit mask, do not attempt to allocate
3273          * the requested huge pages.
3274          */
3275         if (node_alloc_noretry)
3276                 nodes_clear(*node_alloc_noretry);
3277         else
3278                 return -ENOMEM;
3279
3280         /*
3281          * resize_lock mutex prevents concurrent adjustments to number of
3282          * pages in hstate via the proc/sysfs interfaces.
3283          */
3284         mutex_lock(&h->resize_lock);
3285         flush_free_hpage_work(h);
3286         spin_lock_irq(&hugetlb_lock);
3287
3288         /*
3289          * Check for a node specific request.
3290          * Changing node specific huge page count may require a corresponding
3291          * change to the global count.  In any case, the passed node mask
3292          * (nodes_allowed) will restrict alloc/free to the specified node.
3293          */
3294         if (nid != NUMA_NO_NODE) {
3295                 unsigned long old_count = count;
3296
3297                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3298                 /*
3299                  * User may have specified a large count value which caused the
3300                  * above calculation to overflow.  In this case, they wanted
3301                  * to allocate as many huge pages as possible.  Set count to
3302                  * largest possible value to align with their intention.
3303                  */
3304                 if (count < old_count)
3305                         count = ULONG_MAX;
3306         }
3307
3308         /*
3309          * Runtime allocation of gigantic pages depends on the capability for
3310          * large contiguous page range allocation.
3311          * If the system does not provide this feature, return an error when
3312          * the user tries to allocate gigantic pages, but let the user free
3313          * boot-time allocated gigantic pages.
3314          */
3315         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3316                 if (count > persistent_huge_pages(h)) {
3317                         spin_unlock_irq(&hugetlb_lock);
3318                         mutex_unlock(&h->resize_lock);
3319                         NODEMASK_FREE(node_alloc_noretry);
3320                         return -EINVAL;
3321                 }
3322                 /* Fall through to decrease pool */
3323         }
3324
3325         /*
3326          * Increase the pool size
3327          * First take pages out of surplus state.  Then make up the
3328          * remaining difference by allocating fresh huge pages.
3329          *
3330          * We might race with alloc_surplus_huge_page() here and be unable
3331          * to convert a surplus huge page to a normal huge page. That is
3332          * not critical, though, it just means the overall size of the
3333          * pool might be one hugepage larger than it needs to be, but
3334          * within all the constraints specified by the sysctls.
3335          */
3336         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3337                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3338                         break;
3339         }
3340
3341         while (count > persistent_huge_pages(h)) {
3342                 /*
3343                  * If this allocation races such that we no longer need the
3344                  * page, free_huge_page will handle it by freeing the page
3345                  * and reducing the surplus.
3346                  */
3347                 spin_unlock_irq(&hugetlb_lock);
3348
3349                 /* yield cpu to avoid soft lockup */
3350                 cond_resched();
3351
3352                 ret = alloc_pool_huge_page(h, nodes_allowed,
3353                                                 node_alloc_noretry);
3354                 spin_lock_irq(&hugetlb_lock);
3355                 if (!ret)
3356                         goto out;
3357
3358                 /* Bail for signals. Probably ctrl-c from user */
3359                 if (signal_pending(current))
3360                         goto out;
3361         }
3362
3363         /*
3364          * Decrease the pool size
3365          * First return free pages to the buddy allocator (being careful
3366          * to keep enough around to satisfy reservations).  Then place
3367          * pages into surplus state as needed so the pool will shrink
3368          * to the desired size as pages become free.
3369          *
3370          * By placing pages into the surplus state independent of the
3371          * overcommit value, we are allowing the surplus pool size to
3372          * exceed overcommit. There are few sane options here. Since
3373          * alloc_surplus_huge_page() is checking the global counter,
3374          * though, we'll note that we're not allowed to exceed surplus
3375          * and won't grow the pool anywhere else. Not until one of the
3376          * sysctls are changed, or the surplus pages go out of use.
3377          * sysctls is changed, or the surplus pages go out of use.
3378         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3379         min_count = max(count, min_count);
3380         try_to_free_low(h, min_count, nodes_allowed);
3381
3382         /*
3383          * Collect pages to be removed on list without dropping lock
3384          */
3385         while (min_count < persistent_huge_pages(h)) {
3386                 page = remove_pool_huge_page(h, nodes_allowed, 0);
3387                 if (!page)
3388                         break;
3389
3390                 list_add(&page->lru, &page_list);
3391         }
3392         /* free the pages after dropping lock */
3393         spin_unlock_irq(&hugetlb_lock);
3394         update_and_free_pages_bulk(h, &page_list);
3395         flush_free_hpage_work(h);
3396         spin_lock_irq(&hugetlb_lock);
3397
3398         while (count < persistent_huge_pages(h)) {
3399                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3400                         break;
3401         }
3402 out:
3403         h->max_huge_pages = persistent_huge_pages(h);
3404         spin_unlock_irq(&hugetlb_lock);
3405         mutex_unlock(&h->resize_lock);
3406
3407         NODEMASK_FREE(node_alloc_noretry);
3408
3409         return 0;
3410 }
3411
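/*
 * Demote one free huge page of @h into huge pages of size h->demote_order.
 * Called with hugetlb_lock held; the lock is dropped while the vmemmap is
 * restored and the compound page is split, and reacquired before return.
 */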
3412 static int demote_free_huge_page(struct hstate *h, struct page *page)
3413 {
3414         int i, nid = page_to_nid(page);
3415         struct hstate *target_hstate;
3416         int rc = 0;
3417
3418         target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3419
3420         remove_hugetlb_page_for_demote(h, page, false);
3421         spin_unlock_irq(&hugetlb_lock);
3422
3423         rc = alloc_huge_page_vmemmap(h, page);
3424         if (rc) {
3425                 /* Allocation of vmemmap failed, we cannot demote the page */
3426                 spin_lock_irq(&hugetlb_lock);
3427                 set_page_refcounted(page);
3428                 add_hugetlb_page(h, page, false);
3429                 return rc;
3430         }
3431
3432         /*
3433          * Use destroy_compound_hugetlb_page_for_demote for all huge page
3434          * sizes as it will not ref count pages.
3435          */
3436         destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
3437
3438         /*
3439          * Taking target hstate mutex synchronizes with set_max_huge_pages.
3440          * Without the mutex, pages added to target hstate could be marked
3441          * as surplus.
3442          *
3443          * Note that we already hold h->resize_lock.  To prevent deadlock,
3444          * use the convention of always taking larger size hstate mutex first.
3445          */
3446         mutex_lock(&target_hstate->resize_lock);
3447         for (i = 0; i < pages_per_huge_page(h);
3448                                 i += pages_per_huge_page(target_hstate)) {
3449                 if (hstate_is_gigantic(target_hstate))
3450                         prep_compound_gigantic_page_for_demote(page + i,
3451                                                         target_hstate->order);
3452                 else
3453                         prep_compound_page(page + i, target_hstate->order);
3454                 set_page_private(page + i, 0);
3455                 set_page_refcounted(page + i);
3456                 prep_new_huge_page(target_hstate, page + i, nid);
3457                 put_page(page + i);
3458         }
3459         mutex_unlock(&target_hstate->resize_lock);
3460
3461         spin_lock_irq(&hugetlb_lock);
3462
3463         /*
3464          * Not absolutely necessary, but for consistency update max_huge_pages
3465          * based on pool changes for the demoted page.
3466          */
3467         h->max_huge_pages--;
3468         target_hstate->max_huge_pages += pages_per_huge_page(h);
3469
3470         return rc;
3471 }
3472
3473 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3474         __must_hold(&hugetlb_lock)
3475 {
3476         int nr_nodes, node;
3477         struct page *page;
3478         int rc = 0;
3479
3480         lockdep_assert_held(&hugetlb_lock);
3481
3482         /* We should never get here if no demote order */
3483         if (!h->demote_order) {
3484                 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3485                 return -EINVAL;         /* internal error */
3486         }
3487
3488         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3489                 if (!list_empty(&h->hugepage_freelists[node])) {
3490                         page = list_entry(h->hugepage_freelists[node].next,
3491                                         struct page, lru);
3492                         rc = demote_free_huge_page(h, page);
3493                         break;
3494                 }
3495         }
3496
3497         return rc;
3498 }
3499
3500 #define HSTATE_ATTR_RO(_name) \
3501         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3502
3503 #define HSTATE_ATTR_WO(_name) \
3504         static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3505
3506 #define HSTATE_ATTR(_name) \
3507         static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3508
3509 static struct kobject *hugepages_kobj;
3510 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3511
3512 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3513
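/*
 * Map a hugepages sysfs kobject back to its hstate.  *nidp is set to
 * NUMA_NO_NODE for the global attributes, or to the node id for the
 * per-node attributes (via kobj_to_node_hstate()).
 */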
3514 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3515 {
3516         int i;
3517
3518         for (i = 0; i < HUGE_MAX_HSTATE; i++)
3519                 if (hstate_kobjs[i] == kobj) {
3520                         if (nidp)
3521                                 *nidp = NUMA_NO_NODE;
3522                         return &hstates[i];
3523                 }
3524
3525         return kobj_to_node_hstate(kobj, nidp);
3526 }
3527
3528 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3529                                         struct kobj_attribute *attr, char *buf)
3530 {
3531         struct hstate *h;
3532         unsigned long nr_huge_pages;
3533         int nid;
3534
3535         h = kobj_to_hstate(kobj, &nid);
3536         if (nid == NUMA_NO_NODE)
3537                 nr_huge_pages = h->nr_huge_pages;
3538         else
3539                 nr_huge_pages = h->nr_huge_pages_node[nid];
3540
3541         return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3542 }
3543
3544 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3545                                            struct hstate *h, int nid,
3546                                            unsigned long count, size_t len)
3547 {
3548         int err;
3549         nodemask_t nodes_allowed, *n_mask;
3550
3551         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3552                 return -EINVAL;
3553
3554         if (nid == NUMA_NO_NODE) {
3555                 /*
3556                  * global hstate attribute
3557                  */
3558                 if (!(obey_mempolicy &&
3559                                 init_nodemask_of_mempolicy(&nodes_allowed)))
3560                         n_mask = &node_states[N_MEMORY];
3561                 else
3562                         n_mask = &nodes_allowed;
3563         } else {
3564                 /*
3565                  * Node specific request.  count adjustment happens in
3566                  * set_max_huge_pages() after acquiring hugetlb_lock.
3567                  */
3568                 init_nodemask_of_node(&nodes_allowed, nid);
3569                 n_mask = &nodes_allowed;
3570         }
3571
3572         err = set_max_huge_pages(h, count, nid, n_mask);
3573
3574         return err ? err : len;
3575 }
3576
3577 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3578                                          struct kobject *kobj, const char *buf,
3579                                          size_t len)
3580 {
3581         struct hstate *h;
3582         unsigned long count;
3583         int nid;
3584         int err;
3585
3586         err = kstrtoul(buf, 10, &count);
3587         if (err)
3588                 return err;
3589
3590         h = kobj_to_hstate(kobj, &nid);
3591         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3592 }
3593
3594 static ssize_t nr_hugepages_show(struct kobject *kobj,
3595                                        struct kobj_attribute *attr, char *buf)
3596 {
3597         return nr_hugepages_show_common(kobj, attr, buf);
3598 }
3599
3600 static ssize_t nr_hugepages_store(struct kobject *kobj,
3601                struct kobj_attribute *attr, const char *buf, size_t len)
3602 {
3603         return nr_hugepages_store_common(false, kobj, buf, len);
3604 }
3605 HSTATE_ATTR(nr_hugepages);
3606
3607 #ifdef CONFIG_NUMA
3608
3609 /*
3610  * hstate attribute for optionally mempolicy-based constraint on persistent
3611  * huge page alloc/free.
3612  */
3613 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3614                                            struct kobj_attribute *attr,
3615                                            char *buf)
3616 {
3617         return nr_hugepages_show_common(kobj, attr, buf);
3618 }
3619
3620 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3621                struct kobj_attribute *attr, const char *buf, size_t len)
3622 {
3623         return nr_hugepages_store_common(true, kobj, buf, len);
3624 }
3625 HSTATE_ATTR(nr_hugepages_mempolicy);
3626 #endif
3627
3628
3629 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3630                                         struct kobj_attribute *attr, char *buf)
3631 {
3632         struct hstate *h = kobj_to_hstate(kobj, NULL);
3633         return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3634 }
3635
3636 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3637                 struct kobj_attribute *attr, const char *buf, size_t count)
3638 {
3639         int err;
3640         unsigned long input;
3641         struct hstate *h = kobj_to_hstate(kobj, NULL);
3642
3643         if (hstate_is_gigantic(h))
3644                 return -EINVAL;
3645
3646         err = kstrtoul(buf, 10, &input);
3647         if (err)
3648                 return err;
3649
3650         spin_lock_irq(&hugetlb_lock);
3651         h->nr_overcommit_huge_pages = input;
3652         spin_unlock_irq(&hugetlb_lock);
3653
3654         return count;
3655 }
3656 HSTATE_ATTR(nr_overcommit_hugepages);
3657
3658 static ssize_t free_hugepages_show(struct kobject *kobj,
3659                                         struct kobj_attribute *attr, char *buf)
3660 {
3661         struct hstate *h;
3662         unsigned long free_huge_pages;
3663         int nid;
3664
3665         h = kobj_to_hstate(kobj, &nid);
3666         if (nid == NUMA_NO_NODE)
3667                 free_huge_pages = h->free_huge_pages;
3668         else
3669                 free_huge_pages = h->free_huge_pages_node[nid];
3670
3671         return sysfs_emit(buf, "%lu\n", free_huge_pages);
3672 }
3673 HSTATE_ATTR_RO(free_hugepages);
3674
3675 static ssize_t resv_hugepages_show(struct kobject *kobj,
3676                                         struct kobj_attribute *attr, char *buf)
3677 {
3678         struct hstate *h = kobj_to_hstate(kobj, NULL);
3679         return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3680 }
3681 HSTATE_ATTR_RO(resv_hugepages);
3682
3683 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3684                                         struct kobj_attribute *attr, char *buf)
3685 {
3686         struct hstate *h;
3687         unsigned long surplus_huge_pages;
3688         int nid;
3689
3690         h = kobj_to_hstate(kobj, &nid);
3691         if (nid == NUMA_NO_NODE)
3692                 surplus_huge_pages = h->surplus_huge_pages;
3693         else
3694                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
3695
3696         return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3697 }
3698 HSTATE_ATTR_RO(surplus_hugepages);
3699
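/*
 * "demote" sysfs handler: demote up to the requested number of free huge
 * pages (restricted to one node for the per-node attribute), stopping
 * early if no demotable pages remain or demotion fails.
 */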
3700 static ssize_t demote_store(struct kobject *kobj,
3701                struct kobj_attribute *attr, const char *buf, size_t len)
3702 {
3703         unsigned long nr_demote;
3704         unsigned long nr_available;
3705         nodemask_t nodes_allowed, *n_mask;
3706         struct hstate *h;
3707         int err = 0;
3708         int nid;
3709
3710         err = kstrtoul(buf, 10, &nr_demote);
3711         if (err)
3712                 return err;
3713         h = kobj_to_hstate(kobj, &nid);
3714
3715         if (nid != NUMA_NO_NODE) {
3716                 init_nodemask_of_node(&nodes_allowed, nid);
3717                 n_mask = &nodes_allowed;
3718         } else {
3719                 n_mask = &node_states[N_MEMORY];
3720         }
3721
3722         /* Synchronize with other sysfs operations modifying huge pages */
3723         mutex_lock(&h->resize_lock);
3724         spin_lock_irq(&hugetlb_lock);
3725
3726         while (nr_demote) {
3727                 /*
3728                  * Check for available pages to demote each time through the
3729                  * loop as demote_pool_huge_page will drop hugetlb_lock.
3730                  */
3731                 if (nid != NUMA_NO_NODE)
3732                         nr_available = h->free_huge_pages_node[nid];
3733                 else
3734                         nr_available = h->free_huge_pages;
3735                 nr_available -= h->resv_huge_pages;
3736                 if (!nr_available)
3737                         break;
3738
3739                 err = demote_pool_huge_page(h, n_mask);
3740                 if (err)
3741                         break;
3742
3743                 nr_demote--;
3744         }
3745
3746         spin_unlock_irq(&hugetlb_lock);
3747         mutex_unlock(&h->resize_lock);
3748
3749         if (err)
3750                 return err;
3751         return len;
3752 }
3753 HSTATE_ATTR_WO(demote);
3754
3755 static ssize_t demote_size_show(struct kobject *kobj,
3756                                         struct kobj_attribute *attr, char *buf)
3757 {
3758         int nid;
3759         struct hstate *h = kobj_to_hstate(kobj, &nid);
3760         unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3761
3762         return sysfs_emit(buf, "%lukB\n", demote_size);
3763 }
3764
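/*
 * "demote_size" sysfs handler: set h->demote_order to the order of the
 * hstate matching the written size.  The size must map to an existing
 * hstate, be at least HUGETLB_PAGE_ORDER, and be smaller than h's order.
 */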
3765 static ssize_t demote_size_store(struct kobject *kobj,
3766                                         struct kobj_attribute *attr,
3767                                         const char *buf, size_t count)
3768 {
3769         struct hstate *h, *demote_hstate;
3770         unsigned long demote_size;
3771         unsigned int demote_order;
3772         int nid;
3773
3774         demote_size = (unsigned long)memparse(buf, NULL);
3775
3776         demote_hstate = size_to_hstate(demote_size);
3777         if (!demote_hstate)
3778                 return -EINVAL;
3779         demote_order = demote_hstate->order;
3780         if (demote_order < HUGETLB_PAGE_ORDER)
3781                 return -EINVAL;
3782
3783         /* demote order must be smaller than hstate order */
3784         h = kobj_to_hstate(kobj, &nid);
3785         if (demote_order >= h->order)
3786                 return -EINVAL;
3787
3788         /* resize_lock serializes writes to demote_order with other resize operations */
3789         mutex_lock(&h->resize_lock);
3790         h->demote_order = demote_order;
3791         mutex_unlock(&h->resize_lock);
3792
3793         return count;
3794 }
3795 HSTATE_ATTR(demote_size);
3796
3797 static struct attribute *hstate_attrs[] = {
3798         &nr_hugepages_attr.attr,
3799         &nr_overcommit_hugepages_attr.attr,
3800         &free_hugepages_attr.attr,
3801         &resv_hugepages_attr.attr,
3802         &surplus_hugepages_attr.attr,
3803 #ifdef CONFIG_NUMA
3804         &nr_hugepages_mempolicy_attr.attr,
3805 #endif
3806         NULL,
3807 };
3808
3809 static const struct attribute_group hstate_attr_group = {
3810         .attrs = hstate_attrs,
3811 };
3812
3813 static struct attribute *hstate_demote_attrs[] = {
3814         &demote_size_attr.attr,
3815         &demote_attr.attr,
3816         NULL,
3817 };
3818
3819 static const struct attribute_group hstate_demote_attr_group = {
3820         .attrs = hstate_demote_attrs,
3821 };
3822
3823 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3824                                     struct kobject **hstate_kobjs,
3825                                     const struct attribute_group *hstate_attr_group)
3826 {
3827         int retval;
3828         int hi = hstate_index(h);
3829
3830         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3831         if (!hstate_kobjs[hi])
3832                 return -ENOMEM;
3833
3834         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3835         if (retval) {
3836                 kobject_put(hstate_kobjs[hi]);
3837                 hstate_kobjs[hi] = NULL;
3838         }
3839
3840         if (h->demote_order) {
3841                 if (sysfs_create_group(hstate_kobjs[hi],
3842                                         &hstate_demote_attr_group))
3843                         pr_warn("HugeTLB: unable to create demote interfaces for %s\n", h->name);
3844         }
3845
3846         return retval;
3847 }
3848
3849 static void __init hugetlb_sysfs_init(void)
3850 {
3851         struct hstate *h;
3852         int err;
3853
3854         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3855         if (!hugepages_kobj)
3856                 return;
3857
3858         for_each_hstate(h) {
3859                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
3860                                          hstate_kobjs, &hstate_attr_group);
3861                 if (err)
3862                         pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
3863         }
3864 }
3865
3866 #ifdef CONFIG_NUMA
3867
3868 /*
3869  * node_hstate/s - associate per node hstate attributes, via their kobjects,
3870  * with node devices in node_devices[] using a parallel array.  The array
3871  * index of a node device or _hstate == node id.
3872  * This is here to avoid any static dependency of the node device driver, in
3873  * the base kernel, on the hugetlb module.
3874  */
3875 struct node_hstate {
3876         struct kobject          *hugepages_kobj;
3877         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
3878 };
3879 static struct node_hstate node_hstates[MAX_NUMNODES];
3880
3881 /*
3882  * A subset of global hstate attributes for node devices
3883  */
3884 static struct attribute *per_node_hstate_attrs[] = {
3885         &nr_hugepages_attr.attr,
3886         &free_hugepages_attr.attr,
3887         &surplus_hugepages_attr.attr,
3888         NULL,
3889 };
3890
3891 static const struct attribute_group per_node_hstate_attr_group = {
3892         .attrs = per_node_hstate_attrs,
3893 };
3894
3895 /*
3896  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3897  * Returns node id via non-NULL nidp.
3898  */
3899 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3900 {
3901         int nid;
3902
3903         for (nid = 0; nid < nr_node_ids; nid++) {
3904                 struct node_hstate *nhs = &node_hstates[nid];
3905                 int i;
3906                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3907                         if (nhs->hstate_kobjs[i] == kobj) {
3908                                 if (nidp)
3909                                         *nidp = nid;
3910                                 return &hstates[i];
3911                         }
3912         }
3913
3914         BUG();
3915         return NULL;
3916 }
3917
3918 /*
3919  * Unregister hstate attributes from a single node device.
3920  * No-op if no hstate attributes attached.
3921  */
3922 static void hugetlb_unregister_node(struct node *node)
3923 {
3924         struct hstate *h;
3925         struct node_hstate *nhs = &node_hstates[node->dev.id];
3926
3927         if (!nhs->hugepages_kobj)
3928                 return;         /* no hstate attributes */
3929
3930         for_each_hstate(h) {
3931                 int idx = hstate_index(h);
3932                 if (nhs->hstate_kobjs[idx]) {
3933                         kobject_put(nhs->hstate_kobjs[idx]);
3934                         nhs->hstate_kobjs[idx] = NULL;
3935                 }
3936         }
3937
3938         kobject_put(nhs->hugepages_kobj);
3939         nhs->hugepages_kobj = NULL;
3940 }
3941
3942
3943 /*
3944  * Register hstate attributes for a single node device.
3945  * No-op if attributes already registered.
3946  */
3947 static void hugetlb_register_node(struct node *node)
3948 {
3949         struct hstate *h;
3950         struct node_hstate *nhs = &node_hstates[node->dev.id];
3951         int err;
3952
3953         if (nhs->hugepages_kobj)
3954                 return;         /* already allocated */
3955
3956         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3957                                                         &node->dev.kobj);
3958         if (!nhs->hugepages_kobj)
3959                 return;
3960
3961         for_each_hstate(h) {
3962                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3963                                                 nhs->hstate_kobjs,
3964                                                 &per_node_hstate_attr_group);
3965                 if (err) {
3966                         pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3967                                 h->name, node->dev.id);
3968                         hugetlb_unregister_node(node);
3969                         break;
3970                 }
3971         }
3972 }
3973
3974 /*
3975  * hugetlb init time:  register hstate attributes for all registered node
3976  * devices of nodes that have memory.  All on-line nodes should have
3977  * registered their associated device by this time.
3978  */
3979 static void __init hugetlb_register_all_nodes(void)
3980 {
3981         int nid;
3982
3983         for_each_node_state(nid, N_MEMORY) {
3984                 struct node *node = node_devices[nid];
3985                 if (node->dev.id == nid)
3986                         hugetlb_register_node(node);
3987         }
3988
3989         /*
3990          * Let the node device driver know we're here so it can
3991          * [un]register hstate attributes on node hotplug.
3992          */
3993         register_hugetlbfs_with_node(hugetlb_register_node,
3994                                      hugetlb_unregister_node);
3995 }
3996 #else   /* !CONFIG_NUMA */
3997
3998 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3999 {
4000         BUG();
4001         if (nidp)
4002                 *nidp = -1;
4003         return NULL;
4004 }
4005
4006 static void hugetlb_register_all_nodes(void) { }
4007
4008 #endif
4009
4010 static int __init hugetlb_init(void)
4011 {
4012         int i;
4013
4014         BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4015                         __NR_HPAGEFLAGS);
4016
4017         if (!hugepages_supported()) {
4018                 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4019                         pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4020                 return 0;
4021         }
4022
4023         /*
4024          * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4025          * architectures depend on setup being done here.
4026          */
4027         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4028         if (!parsed_default_hugepagesz) {
4029                 /*
4030                  * If we did not parse a default huge page size, set
4031                  * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4032                  * number of huge pages for this default size was implicitly
4033                  * specified, set that here as well.
4034                  * Note that the implicit setting will overwrite an explicit
4035                  * setting.  A warning will be printed in this case.
4036                  */
4037                 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4038                 if (default_hstate_max_huge_pages) {
4039                         if (default_hstate.max_huge_pages) {
4040                                 char buf[32];
4041
4042                                 string_get_size(huge_page_size(&default_hstate),
4043                                         1, STRING_UNITS_2, buf, 32);
4044                                 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4045                                         default_hstate.max_huge_pages, buf);
4046                                 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4047                                         default_hstate_max_huge_pages);
4048                         }
4049                         default_hstate.max_huge_pages =
4050                                 default_hstate_max_huge_pages;
4051
4052                         for (i = 0; i < nr_online_nodes; i++)
4053                                 default_hstate.max_huge_pages_node[i] =
4054                                         default_hugepages_in_node[i];
4055                 }
4056         }
4057
4058         hugetlb_cma_check();
4059         hugetlb_init_hstates();
4060         gather_bootmem_prealloc();
4061         report_hugepages();
4062
4063         hugetlb_sysfs_init();
4064         hugetlb_register_all_nodes();
4065         hugetlb_cgroup_file_init();
4066
4067 #ifdef CONFIG_SMP
4068         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4069 #else
4070         num_fault_mutexes = 1;
4071 #endif
4072         hugetlb_fault_mutex_table =
4073                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4074                               GFP_KERNEL);
4075         BUG_ON(!hugetlb_fault_mutex_table);
4076
4077         for (i = 0; i < num_fault_mutexes; i++)
4078                 mutex_init(&hugetlb_fault_mutex_table[i]);
4079         return 0;
4080 }
4081 subsys_initcall(hugetlb_init);
4082
4083 /* Overwritten by architectures with more huge page sizes */
4084 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4085 {
4086         return size == HPAGE_SIZE;
4087 }
4088
4089 void __init hugetlb_add_hstate(unsigned int order)
4090 {
4091         struct hstate *h;
4092         unsigned long i;
4093
4094         if (size_to_hstate(PAGE_SIZE << order)) {
4095                 return;
4096         }
4097         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4098         BUG_ON(order == 0);
4099         h = &hstates[hugetlb_max_hstate++];
4100         mutex_init(&h->resize_lock);
4101         h->order = order;
4102         h->mask = ~(huge_page_size(h) - 1);
4103         for (i = 0; i < MAX_NUMNODES; ++i)
4104                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4105         INIT_LIST_HEAD(&h->hugepage_activelist);
4106         h->next_nid_to_alloc = first_memory_node;
4107         h->next_nid_to_free = first_memory_node;
4108         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4109                                         huge_page_size(h)/1024);
4110         hugetlb_vmemmap_init(h);
4111
4112         parsed_hstate = h;
4113 }
4114
4115 bool __init __weak hugetlb_node_alloc_supported(void)
4116 {
4117         return true;
4118 }
4119 /*
4120  * hugepages command line processing
4121  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4122  * specification.  If not, ignore the hugepages value.  hugepages can also
4123  * be the first huge page command line option, in which case it implicitly
4124  * specifies the number of huge pages for the default size.
4125  */
4126 static int __init hugepages_setup(char *s)
4127 {
4128         unsigned long *mhp;
4129         static unsigned long *last_mhp;
4130         int node = NUMA_NO_NODE;
4131         int count;
4132         unsigned long tmp;
4133         char *p = s;
4134
4135         if (!parsed_valid_hugepagesz) {
4136                 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4137                 parsed_valid_hugepagesz = true;
4138                 return 0;
4139         }
4140
4141         /*
4142          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4143          * yet, so this hugepages= parameter goes to the "default hstate".
4144          * Otherwise, it goes with the previously parsed hugepagesz or
4145          * default_hugepagesz.
4146          */
4147         else if (!hugetlb_max_hstate)
4148                 mhp = &default_hstate_max_huge_pages;
4149         else
4150                 mhp = &parsed_hstate->max_huge_pages;
4151
4152         if (mhp == last_mhp) {
4153                 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4154                 return 0;
4155         }
4156
4157         while (*p) {
4158                 count = 0;
4159                 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4160                         goto invalid;
4161                 /* Parameter is node format */
4162                 if (p[count] == ':') {
4163                         if (!hugetlb_node_alloc_supported()) {
4164                                 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4165                                 return 0;
4166                         }
4167                         if (tmp >= nr_online_nodes)
4168                                 goto invalid;
4169                         node = array_index_nospec(tmp, nr_online_nodes);
4170                         p += count + 1;
4171                         /* Parse hugepages */
4172                         if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4173                                 goto invalid;
4174                         if (!hugetlb_max_hstate)
4175                                 default_hugepages_in_node[node] = tmp;
4176                         else
4177                                 parsed_hstate->max_huge_pages_node[node] = tmp;
4178                         *mhp += tmp;
4179                         /* Go on to parse the next node */
4180                         if (p[count] == ',')
4181                                 p += count + 1;
4182                         else
4183                                 break;
4184                 } else {
4185                         if (p != s)
4186                                 goto invalid;
4187                         *mhp = tmp;
4188                         break;
4189                 }
4190         }
4191
4192         /*
4193          * Global state is always initialized later in hugetlb_init.
4194          * But we need to allocate gigantic hstates here early to still
4195          * use the bootmem allocator.
4196          */
4197         if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4198                 hugetlb_hstate_alloc_pages(parsed_hstate);
4199
4200         last_mhp = mhp;
4201
4202         return 1;
4203
4204 invalid:
4205         pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4206         return 0;
4207 }
4208 __setup("hugepages=", hugepages_setup);
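
/*
 * Illustrative boot command lines accepted by hugepages_setup() above
 * (example values only, not taken from this file):
 *
 *     hugepages=1024
 *         1024 huge pages of the default size, or of the size given by a
 *         preceding hugepagesz= parameter.
 *
 *     hugepagesz=2M hugepages=0:512,1:256
 *         per-node format "<node>:<count>[,<node>:<count>...]": 512 2MB
 *         pages on node 0 and 256 on node 1, provided that
 *         hugetlb_node_alloc_supported() returns true.
 */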
4209
4210 /*
4211  * hugepagesz command line processing
4212  * A specific huge page size can only be specified once with hugepagesz.
4213  * hugepagesz is followed by hugepages on the command line.  The global
4214  * variable 'parsed_valid_hugepagesz' is used to determine if a prior
4215  * hugepagesz argument was valid.
4216  */
4217 static int __init hugepagesz_setup(char *s)
4218 {
4219         unsigned long size;
4220         struct hstate *h;
4221
4222         parsed_valid_hugepagesz = false;
4223         size = (unsigned long)memparse(s, NULL);
4224
4225         if (!arch_hugetlb_valid_size(size)) {
4226                 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4227                 return 0;
4228         }
4229
4230         h = size_to_hstate(size);
4231         if (h) {
4232                 /*
4233                  * hstate for this size already exists.  This is normally
4234                  * an error, but is allowed if the existing hstate is the
4235                  * default hstate.  More specifically, it is only allowed if
4236                  * the number of huge pages for the default hstate was not
4237                  * previously specified.
4238                  */
4239                 if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4240                     default_hstate.max_huge_pages) {
4241                         pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4242                         return 0;
4243                 }
4244
4245                 /*
4246                  * No need to call hugetlb_add_hstate() as hstate already
4247                  * exists.  But, do set parsed_hstate so that a following
4248                  * hugepages= parameter will be applied to this hstate.
4249                  */
4250                 parsed_hstate = h;
4251                 parsed_valid_hugepagesz = true;
4252                 return 1;
4253         }
4254
4255         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4256         parsed_valid_hugepagesz = true;
4257         return 1;
4258 }
4259 __setup("hugepagesz=", hugepagesz_setup);
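
/*
 * Example of the one allowed "duplicate" above (illustrative): a command
 * line such as "default_hugepagesz=1G hugepagesz=1G hugepages=4" names the
 * 1G size twice, but the second mention only re-selects the existing
 * default hstate so that the following hugepages=4 applies to it.  If the
 * default hstate already had a page count (e.g. a leading "hugepages=2"),
 * the duplicate would be rejected instead.
 */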
4260
4261 /*
4262  * default_hugepagesz command line input
4263  * Only one instance of default_hugepagesz allowed on command line.
4264  */
4265 static int __init default_hugepagesz_setup(char *s)
4266 {
4267         unsigned long size;
4268         int i;
4269
4270         parsed_valid_hugepagesz = false;
4271         if (parsed_default_hugepagesz) {
4272                 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4273                 return 0;
4274         }
4275
4276         size = (unsigned long)memparse(s, NULL);
4277
4278         if (!arch_hugetlb_valid_size(size)) {
4279                 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4280                 return 0;
4281         }
4282
4283         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4284         parsed_valid_hugepagesz = true;
4285         parsed_default_hugepagesz = true;
4286         default_hstate_idx = hstate_index(size_to_hstate(size));
4287
4288         /*
4289          * The number of default huge pages (for this size) could have been
4290          * specified as the first hugetlb parameter: hugepages=X.  If so,
4291          * then default_hstate_max_huge_pages is set.  If the default huge
4292          * page size is gigantic (>= MAX_ORDER), then the pages must be
4293          * allocated here from bootmem allocator.
4294          */
4295         if (default_hstate_max_huge_pages) {
4296                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4297                 for (i = 0; i < nr_online_nodes; i++)
4298                         default_hstate.max_huge_pages_node[i] =
4299                                 default_hugepages_in_node[i];
4300                 if (hstate_is_gigantic(&default_hstate))
4301                         hugetlb_hstate_alloc_pages(&default_hstate);
4302                 default_hstate_max_huge_pages = 0;
4303         }
4304
4305         return 1;
4306 }
4307 __setup("default_hugepagesz=", default_hugepagesz_setup);
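
/*
 * Example of the ordering handled above (illustrative): with a command
 * line such as "hugepages=16 default_hugepagesz=1G", the leading
 * hugepages=16 is stashed in default_hstate_max_huge_pages because no
 * size is known yet; it is applied here once the default hstate is
 * selected, and because 1G pages are gigantic on typical configurations
 * they are allocated from the bootmem allocator right away.
 */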
4308
4309 static unsigned int allowed_mems_nr(struct hstate *h)
4310 {
4311         int node;
4312         unsigned int nr = 0;
4313         nodemask_t *mpol_allowed;
4314         unsigned int *array = h->free_huge_pages_node;
4315         gfp_t gfp_mask = htlb_alloc_mask(h);
4316
4317         mpol_allowed = policy_nodemask_current(gfp_mask);
4318
4319         for_each_node_mask(node, cpuset_current_mems_allowed) {
4320                 if (!mpol_allowed || node_isset(node, *mpol_allowed))
4321                         nr += array[node];
4322         }
4323
4324         return nr;
4325 }
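
/*
 * Illustrative example: if cpuset_current_mems_allowed is {0,1} and the
 * task's memory policy (if any) is restricted to node 0, only
 * free_huge_pages_node[0] is counted; with no policy nodemask, both
 * nodes contribute.
 */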
4326
4327 #ifdef CONFIG_SYSCTL
4328 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4329                                           void *buffer, size_t *length,
4330                                           loff_t *ppos, unsigned long *out)
4331 {
4332         struct ctl_table dup_table;
4333
4334         /*
4335          * In order to avoid races with __do_proc_doulongvec_minmax(), we
4336          * duplicate @table and only alter the duplicate.
4337          */
4338         dup_table = *table;
4339         dup_table.data = out;
4340
4341         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4342 }
4343
4344 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4345                          struct ctl_table *table, int write,
4346                          void *buffer, size_t *length, loff_t *ppos)
4347 {
4348         struct hstate *h = &default_hstate;
4349         unsigned long tmp = h->max_huge_pages;
4350         int ret;
4351
4352         if (!hugepages_supported())
4353                 return -EOPNOTSUPP;
4354
4355         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4356                                              &tmp);
4357         if (ret)
4358                 goto out;
4359
4360         if (write)
4361                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4362                                                   NUMA_NO_NODE, tmp, *length);
4363 out:
4364         return ret;
4365 }
4366
4367 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4368                           void *buffer, size_t *length, loff_t *ppos)
4369 {
4370
4371         return hugetlb_sysctl_handler_common(false, table, write,
4372                                                         buffer, length, ppos);
4373 }
4374
4375 #ifdef CONFIG_NUMA
4376 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4377                           void *buffer, size_t *length, loff_t *ppos)
4378 {
4379         return hugetlb_sysctl_handler_common(true, table, write,
4380                                                         buffer, length, ppos);
4381 }
4382 #endif /* CONFIG_NUMA */
4383
4384 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4385                 void *buffer, size_t *length, loff_t *ppos)
4386 {
4387         struct hstate *h = &default_hstate;
4388         unsigned long tmp;
4389         int ret;
4390
4391         if (!hugepages_supported())
4392                 return -EOPNOTSUPP;
4393
4394         tmp = h->nr_overcommit_huge_pages;
4395
4396         if (write && hstate_is_gigantic(h))
4397                 return -EINVAL;
4398
4399         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4400                                              &tmp);
4401         if (ret)
4402                 goto out;
4403
4404         if (write) {
4405                 spin_lock_irq(&hugetlb_lock);
4406                 h->nr_overcommit_huge_pages = tmp;
4407                 spin_unlock_irq(&hugetlb_lock);
4408         }
4409 out:
4410         return ret;
4411 }
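
/*
 * The handlers above back the hugetlb sysctls (assuming the usual wiring
 * in kernel/sysctl.c): hugetlb_sysctl_handler for vm.nr_hugepages,
 * hugetlb_mempolicy_sysctl_handler for vm.nr_hugepages_mempolicy and
 * hugetlb_overcommit_handler for vm.nr_overcommit_hugepages.  For example:
 *
 *     echo 128 > /proc/sys/vm/nr_hugepages
 *
 * resizes the default-size pool, while writes to nr_overcommit_hugepages
 * only update h->nr_overcommit_huge_pages under hugetlb_lock.
 */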
4412
4413 #endif /* CONFIG_SYSCTL */
4414
4415 void hugetlb_report_meminfo(struct seq_file *m)
4416 {
4417         struct hstate *h;
4418         unsigned long total = 0;
4419
4420         if (!hugepages_supported())
4421                 return;
4422
4423         for_each_hstate(h) {
4424                 unsigned long count = h->nr_huge_pages;
4425
4426                 total += huge_page_size(h) * count;
4427
4428                 if (h == &default_hstate)
4429                         seq_printf(m,
4430                                    "HugePages_Total:   %5lu\n"
4431                                    "HugePages_Free:    %5lu\n"
4432                                    "HugePages_Rsvd:    %5lu\n"
4433                                    "HugePages_Surp:    %5lu\n"
4434                                    "Hugepagesize:   %8lu kB\n",
4435                                    count,
4436                                    h->free_huge_pages,
4437                                    h->resv_huge_pages,
4438                                    h->surplus_huge_pages,
4439                                    huge_page_size(h) / SZ_1K);
4440         }
4441
4442         seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4443 }
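
/*
 * Sample /proc/meminfo fragment produced above (hypothetical values):
 *
 *     HugePages_Total:      16
 *     HugePages_Free:       12
 *     HugePages_Rsvd:        2
 *     HugePages_Surp:        0
 *     Hugepagesize:       2048 kB
 *     Hugetlb:           32768 kB
 *
 * Only the default hstate contributes the HugePages_* lines; the trailing
 * Hugetlb line accounts for the memory of every hstate.
 */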
4444
4445 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4446 {
4447         struct hstate *h = &default_hstate;
4448
4449         if (!hugepages_supported())
4450                 return 0;
4451
4452         return sysfs_emit_at(buf, len,
4453                              "Node %d HugePages_Total: %5u\n"
4454                              "Node %d HugePages_Free:  %5u\n"
4455                              "Node %d HugePages_Surp:  %5u\n",
4456                              nid, h->nr_huge_pages_node[nid],
4457                              nid, h->free_huge_pages_node[nid],
4458                              nid, h->surplus_huge_pages_node[nid]);
4459 }
4460
4461 void hugetlb_show_meminfo(void)
4462 {
4463         struct hstate *h;
4464         int nid;
4465
4466         if (!hugepages_supported())
4467                 return;
4468
4469         for_each_node_state(nid, N_MEMORY)
4470                 for_each_hstate(h)
4471                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4472                                 nid,
4473                                 h->nr_huge_pages_node[nid],
4474                                 h->free_huge_pages_node[nid],
4475                                 h->surplus_huge_pages_node[nid],
4476                                 huge_page_size(h) / SZ_1K);
4477 }
4478
4479 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4480 {
4481         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4482                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4483 }
4484
4485 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4486 unsigned long hugetlb_total_pages(void)
4487 {
4488         struct hstate *h;
4489         unsigned long nr_total_pages = 0;
4490
4491         for_each_hstate(h)
4492                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4493         return nr_total_pages;
4494 }
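
/*
 * Worked example (hypothetical): with 16 huge pages of 2MB and a 4KB base
 * page size, pages_per_huge_page() is 512, so hugetlb_total_pages()
 * returns 16 * 512 = 8192 base pages.
 */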
4495
4496 static int hugetlb_acct_memory(struct hstate *h, long delta)
4497 {
4498         int ret = -ENOMEM;
4499
4500         if (!delta)
4501                 return 0;
4502
4503         spin_lock_irq(&hugetlb_lock);
4504         /*
4505          * When cpuset is configured, it breaks the strict hugetlb page
4506          * reservation as the accounting is done on a global variable. Such
4507          * reservation is completely rubbish in the presence of cpuset because
4508          * the reservation is not checked against page availability for the
4509          * current cpuset. An application can still be OOM'ed by the kernel
4510          * if there is no free hugetlb page in the cpuset that the task is in.
4511          * Attempting to enforce strict accounting with cpuset is almost
4512          * impossible (or too ugly) because cpusets are too fluid: tasks and
4513          * memory nodes can be dynamically moved between cpusets.
4514          *
4515          * The change of semantics for shared hugetlb mapping with cpuset is
4516          * undesirable. However, in order to preserve some of the semantics,
4517          * we fall back to check against current free page availability as
4518          * a best attempt and hopefully to minimize the impact of changing
4519          * semantics that cpuset has.
4520          *
4521          * Apart from cpuset, the memory policy mechanism also determines
4522          * from which node the kernel will allocate memory in a NUMA
4523          * system. So, similar to cpuset, we should also consider the
4524          * memory policy of the current task, for the same reasons
4525          * described above.
4526          */
4527         if (delta > 0) {
4528                 if (gather_surplus_pages(h, delta) < 0)
4529                         goto out;
4530
4531                 if (delta > allowed_mems_nr(h)) {
4532                         return_unused_surplus_pages(h, delta);
4533                         goto out;
4534                 }
4535         }
4536
4537         ret = 0;
4538         if (delta < 0)
4539                 return_unused_surplus_pages(h, (unsigned long) -delta);
4540
4541 out:
4542         spin_unlock_irq(&hugetlb_lock);
4543         return ret;
4544 }
4545
4546 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4547 {
4548         struct resv_map *resv = vma_resv_map(vma);
4549
4550         /*
4551          * This new VMA should share its sibling's reservation map if present.
4552          * The VMA will only ever have a valid reservation map pointer where
4553          * it is being copied for another still existing VMA.  As that VMA
4554          * has a reference to the reservation map it cannot disappear until
4555          * after this open call completes.  It is therefore safe to take a
4556          * new reference here without additional locking.
4557          */
4558         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4559                 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4560                 kref_get(&resv->refs);
4561         }
4562 }
4563
4564 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4565 {
4566         struct hstate *h = hstate_vma(vma);
4567         struct resv_map *resv = vma_resv_map(vma);
4568         struct hugepage_subpool *spool = subpool_vma(vma);
4569         unsigned long reserve, start, end;
4570         long gbl_reserve;
4571
4572         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4573                 return;
4574
4575         start = vma_hugecache_offset(h, vma, vma->vm_start);
4576         end = vma_hugecache_offset(h, vma, vma->vm_end);
4577
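
/*
 * Typical use of the hash above, mirroring the pattern used elsewhere in
 * this file (sketch only):
 *
 *     hash = hugetlb_fault_mutex_hash(mapping, idx);
 *     mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *     ... handle the fault for (mapping, idx) ...
 *     mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Concurrent faults on the same logical page therefore serialize on one
 * mutex, while faults on different pages usually hash to different ones.
 */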
4578         reserve = (end - start) - region_count(resv, start, end);
4579         hugetlb_cgroup_uncharge_counter(resv, start, end);
4580         if (reserve) {
4581                 /*
4582                  * Decrement reserve counts.  The global reserve count may be
4583                  * adjusted if the subpool has a minimum size.
4584                  */
4585                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4586                 hugetlb_acct_memory(h, -gbl_reserve);
4587         }
4588
4589         kref_put(&resv->refs, resv_map_release);
4590 }
4591
4592 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4593 {
4594         if (addr & ~(huge_page_mask(hstate_vma(vma))))
4595                 return -EINVAL;
4596         return 0;
4597 }
4598
4599 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4600 {
4601         return huge_page_size(hstate_vma(vma));
4602 }
4603
4604 /*
4605  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4606  * handle_mm_fault() to try to instantiate regular-sized pages in the
4607  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4608  * this far.
4609  */
4610 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4611 {
4612         BUG();
4613         return 0;
4614 }
4615
4616 /*
4617  * When a new function is introduced to vm_operations_struct and added
4618  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4619  * This is because, under the System V memory model, mappings created via
4620  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4621  * but their original vm_ops are overwritten with shm_vm_ops.
4622  */
4623 const struct vm_operations_struct hugetlb_vm_ops = {
4624         .fault = hugetlb_vm_op_fault,
4625         .open = hugetlb_vm_op_open,
4626         .close = hugetlb_vm_op_close,
4627         .may_split = hugetlb_vm_op_split,
4628         .pagesize = hugetlb_vm_op_pagesize,
4629 };
4630
4631 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
4632                                 int writable)
4633 {
4634         pte_t entry;
4635         unsigned int shift = huge_page_shift(hstate_vma(vma));
4636
4637         if (writable) {
4638                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4639                                          vma->vm_page_prot)));
4640         } else {
4641                 entry = huge_pte_wrprotect(mk_huge_pte(page,
4642                                            vma->vm_page_prot));
4643         }
4644         entry = pte_mkyoung(entry);
4645         entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4646
4647         return entry;
4648 }
4649
4650 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4651                                    unsigned long address, pte_t *ptep)
4652 {
4653         pte_t entry;
4654
4655         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
4656         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4657                 update_mmu_cache(vma, address, ptep);
4658 }
4659
4660 bool is_hugetlb_entry_migration(pte_t pte)
4661 {
4662         swp_entry_t swp;
4663
4664         if (huge_pte_none(pte) || pte_present(pte))
4665                 return false;
4666         swp = pte_to_swp_entry(pte);
4667         if (is_migration_entry(swp))
4668                 return true;
4669         else
4670                 return false;
4671 }
4672
4673 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4674 {
4675         swp_entry_t swp;
4676
4677         if (huge_pte_none(pte) || pte_present(pte))
4678                 return false;
4679         swp = pte_to_swp_entry(pte);
4680         if (is_hwpoison_entry(swp))
4681                 return true;
4682         else
4683                 return false;
4684 }
4685
4686 static void
4687 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4688                      struct page *new_page)
4689 {
4690         __SetPageUptodate(new_page);
4691         hugepage_add_new_anon_rmap(new_page, vma, addr);
4692         set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
4693         hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4694         ClearHPageRestoreReserve(new_page);
4695         SetHPageMigratable(new_page);
4696 }
4697
4698 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4699                             struct vm_area_struct *vma)
4700 {
4701         pte_t *src_pte, *dst_pte, entry, dst_entry;
4702         struct page *ptepage;
4703         unsigned long addr;
4704         bool cow = is_cow_mapping(vma->vm_flags);
4705         struct hstate *h = hstate_vma(vma);
4706         unsigned long sz = huge_page_size(h);
4707         unsigned long npages = pages_per_huge_page(h);
4708         struct address_space *mapping = vma->vm_file->f_mapping;
4709         struct mmu_notifier_range range;
4710         int ret = 0;
4711
4712         if (cow) {
4713                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
4714                                         vma->vm_start,
4715                                         vma->vm_end);
4716                 mmu_notifier_invalidate_range_start(&range);
4717         } else {
4718                 /*
4719                  * For shared mappings i_mmap_rwsem must be held to call
4720                  * huge_pte_alloc, otherwise the returned ptep could go
4721                  * away if part of a shared pmd and another thread calls
4722                  * huge_pmd_unshare.
4723                  */
4724                 i_mmap_lock_read(mapping);
4725         }
4726
4727         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
4728                 spinlock_t *src_ptl, *dst_ptl;
4729                 src_pte = huge_pte_offset(src, addr, sz);
4730                 if (!src_pte)
4731                         continue;
4732                 dst_pte = huge_pte_alloc(dst, vma, addr, sz);
4733                 if (!dst_pte) {
4734                         ret = -ENOMEM;
4735                         break;
4736                 }
4737
4738                 /*
4739                  * If the pagetables are shared don't copy or take references.
4740                  * dst_pte == src_pte is the common case of src/dest sharing.
4741                  *
4742                  * However, src could have 'unshared' and dst shares with
4743                  * another vma.  If dst_pte !none, this implies sharing.
4744                  * Check here before taking page table lock, and once again
4745                  * after taking the lock below.
4746                  */
4747                 dst_entry = huge_ptep_get(dst_pte);
4748                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4749                         continue;
4750
4751                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4752                 src_ptl = huge_pte_lockptr(h, src, src_pte);
4753                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4754                 entry = huge_ptep_get(src_pte);
4755                 dst_entry = huge_ptep_get(dst_pte);
4756 again:
4757                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
4758                         /*
4759                          * Skip if src entry none.  Also, skip in the
4760                          * unlikely case dst entry !none as this implies
4761                          * sharing with another vma.
4762                          */
4763                         ;
4764                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
4765                                     is_hugetlb_entry_hwpoisoned(entry))) {
4766                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
4767
4768                         if (is_writable_migration_entry(swp_entry) && cow) {
4769                                 /*
4770                                  * COW mappings require pages in both
4771                                  * parent and child to be set to read.
4772                                  */
4773                                 swp_entry = make_readable_migration_entry(
4774                                                         swp_offset(swp_entry));
4775                                 entry = swp_entry_to_pte(swp_entry);
4776                                 set_huge_swap_pte_at(src, addr, src_pte,
4777                                                      entry, sz);
4778                         }
4779                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4780                 } else {
4781                         entry = huge_ptep_get(src_pte);
4782                         ptepage = pte_page(entry);
4783                         get_page(ptepage);
4784
4785                         /*
4786                          * This is a rare case where we see pinned hugetlb
4787                          * pages while they're prone to COW.  We need to do the
4788                          * COW earlier during fork.
4789                          *
4790                          * When pre-allocating the page or copying data, we
4791                          * need to be without the pgtable locks since we could
4792                          * sleep during the process.
4793                          */
4794                         if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
4795                                 pte_t src_pte_old = entry;
4796                                 struct page *new;
4797
4798                                 spin_unlock(src_ptl);
4799                                 spin_unlock(dst_ptl);
4800                                 /* Do not use the reserve as it is privately owned */
4801                                 new = alloc_huge_page(vma, addr, 1);
4802                                 if (IS_ERR(new)) {
4803                                         put_page(ptepage);
4804                                         ret = PTR_ERR(new);
4805                                         break;
4806                                 }
4807                                 copy_user_huge_page(new, ptepage, addr, vma,
4808                                                     npages);
4809                                 put_page(ptepage);
4810
4811                                 /* Install the new huge page if src pte stable */
4812                                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
4813                                 src_ptl = huge_pte_lockptr(h, src, src_pte);
4814                                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4815                                 entry = huge_ptep_get(src_pte);
4816                                 if (!pte_same(src_pte_old, entry)) {
4817                                         restore_reserve_on_error(h, vma, addr,
4818                                                                 new);
4819                                         put_page(new);
4820                                         /* dst_entry won't change as in child */
4821                                         goto again;
4822                                 }
4823                                 hugetlb_install_page(vma, dst_pte, addr, new);
4824                                 spin_unlock(src_ptl);
4825                                 spin_unlock(dst_ptl);
4826                                 continue;
4827                         }
4828
4829                         if (cow) {
4830                                 /*
4831                                  * No need to notify as we are downgrading page
4832                                  * table protection not changing it to point
4833                                  * to a new page.
4834                                  *
4835                                  * See Documentation/vm/mmu_notifier.rst
4836                                  */
4837                                 huge_ptep_set_wrprotect(src, addr, src_pte);
4838                                 entry = huge_pte_wrprotect(entry);
4839                         }
4840
4841                         page_dup_rmap(ptepage, true);
4842                         set_huge_pte_at(dst, addr, dst_pte, entry);
4843                         hugetlb_count_add(npages, dst);
4844                 }
4845                 spin_unlock(src_ptl);
4846                 spin_unlock(dst_ptl);
4847         }
4848
4849         if (cow)
4850                 mmu_notifier_invalidate_range_end(&range);
4851         else
4852                 i_mmap_unlock_read(mapping);
4853
4854         return ret;
4855 }
4856
4857 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4858                           unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4859 {
4860         struct hstate *h = hstate_vma(vma);
4861         struct mm_struct *mm = vma->vm_mm;
4862         spinlock_t *src_ptl, *dst_ptl;
4863         pte_t pte;
4864
4865         dst_ptl = huge_pte_lock(h, mm, dst_pte);
4866         src_ptl = huge_pte_lockptr(h, mm, src_pte);
4867
4868         /*
4869          * We don't have to worry about the ordering of src and dst ptlocks
4870          * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4871          */
4872         if (src_ptl != dst_ptl)
4873                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4874
4875         pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4876         set_huge_pte_at(mm, new_addr, dst_pte, pte);
4877
4878         if (src_ptl != dst_ptl)
4879                 spin_unlock(src_ptl);
4880         spin_unlock(dst_ptl);
4881 }
4882
4883 int move_hugetlb_page_tables(struct vm_area_struct *vma,
4884                              struct vm_area_struct *new_vma,
4885                              unsigned long old_addr, unsigned long new_addr,
4886                              unsigned long len)
4887 {
4888         struct hstate *h = hstate_vma(vma);
4889         struct address_space *mapping = vma->vm_file->f_mapping;
4890         unsigned long sz = huge_page_size(h);
4891         struct mm_struct *mm = vma->vm_mm;
4892         unsigned long old_end = old_addr + len;
4893         unsigned long old_addr_copy;
4894         pte_t *src_pte, *dst_pte;
4895         struct mmu_notifier_range range;
4896
4897         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
4898                                 old_end);
4899         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4900         mmu_notifier_invalidate_range_start(&range);
4901         /* Prevent race with file truncation */
4902         i_mmap_lock_write(mapping);
4903         for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
4904                 src_pte = huge_pte_offset(mm, old_addr, sz);
4905                 if (!src_pte)
4906                         continue;
4907                 if (huge_pte_none(huge_ptep_get(src_pte)))
4908                         continue;
4909
4910                 /* old_addr arg to huge_pmd_unshare() is a pointer and so the
4911                  * arg may be modified. Pass a copy instead to preserve the
4912                  * value in old_addr.
4913                  */
4914                 old_addr_copy = old_addr;
4915
4916                 if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte))
4917                         continue;
4918
4919                 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
4920                 if (!dst_pte)
4921                         break;
4922
4923                 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
4924         }
4925         flush_tlb_range(vma, old_end - len, old_end);
4926         mmu_notifier_invalidate_range_end(&range);
4927         i_mmap_unlock_write(mapping);
4928
4929         return len + old_addr - old_end;
4930 }
4931
4932 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
4933                                    unsigned long start, unsigned long end,
4934                                    struct page *ref_page)
4935 {
4936         struct mm_struct *mm = vma->vm_mm;
4937         unsigned long address;
4938         pte_t *ptep;
4939         pte_t pte;
4940         spinlock_t *ptl;
4941         struct page *page;
4942         struct hstate *h = hstate_vma(vma);
4943         unsigned long sz = huge_page_size(h);
4944         struct mmu_notifier_range range;
4945         bool force_flush = false;
4946
4947         WARN_ON(!is_vm_hugetlb_page(vma));
4948         BUG_ON(start & ~huge_page_mask(h));
4949         BUG_ON(end & ~huge_page_mask(h));
4950
4951         /*
4952          * This is a hugetlb vma; all the pte entries should point
4953          * to huge pages.
4954          */
4955         tlb_change_page_size(tlb, sz);
4956         tlb_start_vma(tlb, vma);
4957
4958         /*
4959          * If sharing is possible, alert mmu notifiers of the worst case.
4960          */
4961         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
4962                                 end);
4963         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4964         mmu_notifier_invalidate_range_start(&range);
4965         address = start;
4966         for (; address < end; address += sz) {
4967                 ptep = huge_pte_offset(mm, address, sz);
4968                 if (!ptep)
4969                         continue;
4970
4971                 ptl = huge_pte_lock(h, mm, ptep);
4972                 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
4973                         spin_unlock(ptl);
4974                         tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
4975                         force_flush = true;
4976                         continue;
4977                 }
4978
4979                 pte = huge_ptep_get(ptep);
4980                 if (huge_pte_none(pte)) {
4981                         spin_unlock(ptl);
4982                         continue;
4983                 }
4984
4985                 /*
4986                  * Migrating hugepage or HWPoisoned hugepage is already
4987                  * unmapped and its refcount is dropped, so just clear pte here.
4988                  */
4989                 if (unlikely(!pte_present(pte))) {
4990                         huge_pte_clear(mm, address, ptep, sz);
4991                         spin_unlock(ptl);
4992                         continue;
4993                 }
4994
4995                 page = pte_page(pte);
4996                 /*
4997                  * If a reference page is supplied, it is because a specific
4998                  * page is being unmapped, not a range. Ensure the page we
4999                  * are about to unmap is the actual page of interest.
5000                  */
5001                 if (ref_page) {
5002                         if (page != ref_page) {
5003                                 spin_unlock(ptl);
5004                                 continue;
5005                         }
5006                         /*
5007                          * Mark the VMA as having unmapped its page so that
5008                          * future faults in this VMA will fail rather than
5009                          * looking like data was lost
5010                          */
5011                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5012                 }
5013
5014                 pte = huge_ptep_get_and_clear(mm, address, ptep);
5015                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5016                 if (huge_pte_dirty(pte))
5017                         set_page_dirty(page);
5018
5019                 hugetlb_count_sub(pages_per_huge_page(h), mm);
5020                 page_remove_rmap(page, vma, true);
5021
5022                 spin_unlock(ptl);
5023                 tlb_remove_page_size(tlb, page, huge_page_size(h));
5024                 /*
5025                  * Bail out after unmapping reference page if supplied
5026                  */
5027                 if (ref_page)
5028                         break;
5029         }
5030         mmu_notifier_invalidate_range_end(&range);
5031         tlb_end_vma(tlb, vma);
5032
5033         /*
5034          * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5035          * could defer the flush until now, since by holding i_mmap_rwsem we
5036          * guaranteed that the last reference would not be dropped. But we must
5037          * do the flushing before we return, as otherwise i_mmap_rwsem will be
5038          * dropped and the last reference to the shared PMDs page might be
5039          * dropped as well.
5040          *
5041          * In theory we could defer the freeing of the PMD pages as well, but
5042          * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5043          * detect sharing, so we cannot defer the release of the page either.
5044          * Instead, do flush now.
5045          */
5046         if (force_flush)
5047                 tlb_flush_mmu_tlbonly(tlb);
5048 }
5049
5050 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5051                           struct vm_area_struct *vma, unsigned long start,
5052                           unsigned long end, struct page *ref_page)
5053 {
5054         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
5055
5056         /*
5057          * Clear this flag so that x86's huge_pmd_share page_table_shareable
5058          * test will fail on a vma being torn down, and not grab a page table
5059          * on its way out.  We're lucky that the flag has such an appropriate
5060          * name, and can in fact be safely cleared here. We could clear it
5061          * before the __unmap_hugepage_range above, but all that's necessary
5062          * is to clear it before releasing the i_mmap_rwsem. This works
5063          * because in the context this is called, the VMA is about to be
5064          * destroyed and the i_mmap_rwsem is held.
5065          */
5066         vma->vm_flags &= ~VM_MAYSHARE;
5067 }
5068
5069 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5070                           unsigned long end, struct page *ref_page)
5071 {
5072         struct mmu_gather tlb;
5073
5074         tlb_gather_mmu(&tlb, vma->vm_mm);
5075         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
5076         tlb_finish_mmu(&tlb);
5077 }
5078
5079 /*
5080  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5081  * mapping it owns the reserve page for. The intention is to unmap the page
5082  * from other VMAs and let the children be SIGKILLed if they are faulting the
5083  * same region.
5084  */
5085 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5086                               struct page *page, unsigned long address)
5087 {
5088         struct hstate *h = hstate_vma(vma);
5089         struct vm_area_struct *iter_vma;
5090         struct address_space *mapping;
5091         pgoff_t pgoff;
5092
5093         /*
5094          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5095          * from page cache lookup which is in HPAGE_SIZE units.
5096          */
5097         address = address & huge_page_mask(h);
5098         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5099                         vma->vm_pgoff;
5100         mapping = vma->vm_file->f_mapping;
5101
5102         /*
5103          * Take the mapping lock for the duration of the table walk. As
5104          * this mapping should be shared between all the VMAs,
5105          * __unmap_hugepage_range() is called as the lock is already held
5106          */
5107         i_mmap_lock_write(mapping);
5108         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5109                 /* Do not unmap the current VMA */
5110                 if (iter_vma == vma)
5111                         continue;
5112
5113                 /*
5114                  * Shared VMAs have their own reserves and do not affect
5115                  * MAP_PRIVATE accounting but it is possible that a shared
5116                  * VMA is using the same page so check and skip such VMAs.
5117                  */
5118                 if (iter_vma->vm_flags & VM_MAYSHARE)
5119                         continue;
5120
5121                 /*
5122                  * Unmap the page from other VMAs without their own reserves.
5123                  * They get marked to be SIGKILLed if they fault in these
5124                  * areas. This is because a future no-page fault on this VMA
5125                  * could insert a zeroed page instead of the data existing
5126                  * from the time of fork. This would look like data corruption
5127                  */
5128                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5129                         unmap_hugepage_range(iter_vma, address,
5130                                              address + huge_page_size(h), page);
5131         }
5132         i_mmap_unlock_write(mapping);
5133 }
5134
5135 /*
5136  * Hugetlb_cow() should be called with page lock of the original hugepage held.
5137  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5138  * cannot race with other handlers or page migration.
5139  * Keep the pte_same checks anyway to make transition from the mutex easier.
5140  */
5141 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
5142                        unsigned long address, pte_t *ptep,
5143                        struct page *pagecache_page, spinlock_t *ptl)
5144 {
5145         pte_t pte;
5146         struct hstate *h = hstate_vma(vma);
5147         struct page *old_page, *new_page;
5148         int outside_reserve = 0;
5149         vm_fault_t ret = 0;
5150         unsigned long haddr = address & huge_page_mask(h);
5151         struct mmu_notifier_range range;
5152
5153         pte = huge_ptep_get(ptep);
5154         old_page = pte_page(pte);
5155
5156 retry_avoidcopy:
5157         /* If no-one else is actually using this page, avoid the copy
5158          * and just make the page writable */
5159         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5160                 page_move_anon_rmap(old_page, vma);
5161                 set_huge_ptep_writable(vma, haddr, ptep);
5162                 return 0;
5163         }
5164
5165         /*
5166          * If the process that created a MAP_PRIVATE mapping is about to
5167          * perform a COW due to a shared page count, attempt to satisfy
5168          * the allocation without using the existing reserves. The pagecache
5169          * page is used to determine if the reserve at this address was
5170          * consumed or not. If reserves were used, a partial faulted mapping
5171          * at the time of fork() could consume its reserves on COW instead
5172          * of the full address range.
5173          */
5174         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5175                         old_page != pagecache_page)
5176                 outside_reserve = 1;
5177
5178         get_page(old_page);
5179
5180         /*
5181          * Drop page table lock as buddy allocator may be called. It will
5182          * be acquired again before returning to the caller, as expected.
5183          */
5184         spin_unlock(ptl);
5185         new_page = alloc_huge_page(vma, haddr, outside_reserve);
5186
5187         if (IS_ERR(new_page)) {
5188                 /*
5189                  * If a process owning a MAP_PRIVATE mapping fails to COW,
5190                  * it is due to references held by a child and an insufficient
5191                  * huge page pool. To guarantee the original mappers
5192                  * reliability, unmap the page from child processes. The child
5193                  * may get SIGKILLed if it later faults.
5194                  */
5195                 if (outside_reserve) {
5196                         struct address_space *mapping = vma->vm_file->f_mapping;
5197                         pgoff_t idx;
5198                         u32 hash;
5199
5200                         put_page(old_page);
5201                         BUG_ON(huge_pte_none(pte));
5202                         /*
5203                          * Drop hugetlb_fault_mutex and i_mmap_rwsem before
5204                          * unmapping.  unmapping needs to hold i_mmap_rwsem
5205                          * in write mode.  Dropping i_mmap_rwsem in read mode
5206                          * here is OK as COW mappings do not interact with
5207                          * PMD sharing.
5208                          *
5209                          * Reacquire both after unmap operation.
5210                          */
5211                         idx = vma_hugecache_offset(h, vma, haddr);
5212                         hash = hugetlb_fault_mutex_hash(mapping, idx);
5213                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5214                         i_mmap_unlock_read(mapping);
5215
5216                         unmap_ref_private(mm, vma, old_page, haddr);
5217
5218                         i_mmap_lock_read(mapping);
5219                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
5220                         spin_lock(ptl);
5221                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5222                         if (likely(ptep &&
5223                                    pte_same(huge_ptep_get(ptep), pte)))
5224                                 goto retry_avoidcopy;
5225                         /*
5226                          * A race occurred while re-acquiring the page
5227                          * table lock, and our job is done.
5228                          */
5229                         return 0;
5230                 }
5231
5232                 ret = vmf_error(PTR_ERR(new_page));
5233                 goto out_release_old;
5234         }
5235
5236         /*
5237          * When the original hugepage is a shared one, it does not have
5238          * an anon_vma prepared.
5239          */
5240         if (unlikely(anon_vma_prepare(vma))) {
5241                 ret = VM_FAULT_OOM;
5242                 goto out_release_all;
5243         }
5244
5245         copy_user_huge_page(new_page, old_page, address, vma,
5246                             pages_per_huge_page(h));
5247         __SetPageUptodate(new_page);
5248
5249         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
5250                                 haddr + huge_page_size(h));
5251         mmu_notifier_invalidate_range_start(&range);
5252
5253         /*
5254          * Retake the page table lock to check for racing updates
5255          * before the page tables are altered
5256          */
5257         spin_lock(ptl);
5258         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5259         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5260                 ClearHPageRestoreReserve(new_page);
5261
5262                 /* Break COW */
5263                 huge_ptep_clear_flush(vma, haddr, ptep);
5264                 mmu_notifier_invalidate_range(mm, range.start, range.end);
5265                 page_remove_rmap(old_page, vma, true);
5266                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
5267                 set_huge_pte_at(mm, haddr, ptep,
5268                                 make_huge_pte(vma, new_page, 1));
5269                 SetHPageMigratable(new_page);
5270                 /* Make the old page be freed below */
5271                 new_page = old_page;
5272         }
5273         spin_unlock(ptl);
5274         mmu_notifier_invalidate_range_end(&range);
5275 out_release_all:
5276         /* No restore in case of successful pagetable update (Break COW) */
5277         if (new_page != old_page)
5278                 restore_reserve_on_error(h, vma, haddr, new_page);
5279         put_page(new_page);
5280 out_release_old:
5281         put_page(old_page);
5282
5283         spin_lock(ptl); /* Caller expects lock to be held */
5284         return ret;
5285 }
5286
5287 /* Return the pagecache page at a given address within a VMA */
5288 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
5289                         struct vm_area_struct *vma, unsigned long address)
5290 {
5291         struct address_space *mapping;
5292         pgoff_t idx;
5293
5294         mapping = vma->vm_file->f_mapping;
5295         idx = vma_hugecache_offset(h, vma, address);
5296
5297         return find_lock_page(mapping, idx);
5298 }
5299
5300 /*
5301  * Return whether there is a pagecache page to back given address within VMA.
5302  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
5303  */
5304 static bool hugetlbfs_pagecache_present(struct hstate *h,
5305                         struct vm_area_struct *vma, unsigned long address)
5306 {
5307         struct address_space *mapping;
5308         pgoff_t idx;
5309         struct page *page;
5310
5311         mapping = vma->vm_file->f_mapping;
5312         idx = vma_hugecache_offset(h, vma, address);
5313
5314         page = find_get_page(mapping, idx);
5315         if (page)
5316                 put_page(page);
5317         return page != NULL;
5318 }
5319
5320 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
5321                            pgoff_t idx)
5322 {
5323         struct inode *inode = mapping->host;
5324         struct hstate *h = hstate_inode(inode);
5325         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
5326
5327         if (err)
5328                 return err;
5329         ClearHPageRestoreReserve(page);
5330
5331         /*
5332          * set page dirty so that it will not be removed from cache/file
5333          * by non-hugetlbfs specific code paths.
5334          */
5335         set_page_dirty(page);
5336
5337         spin_lock(&inode->i_lock);
5338         inode->i_blocks += blocks_per_huge_page(h);
5339         spin_unlock(&inode->i_lock);
5340         return 0;
5341 }
5342
5343 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
5344                                                   struct address_space *mapping,
5345                                                   pgoff_t idx,
5346                                                   unsigned int flags,
5347                                                   unsigned long haddr,
5348                                                   unsigned long addr,
5349                                                   unsigned long reason)
5350 {
5351         vm_fault_t ret;
5352         u32 hash;
5353         struct vm_fault vmf = {
5354                 .vma = vma,
5355                 .address = haddr,
5356                 .real_address = addr,
5357                 .flags = flags,
5358
5359                 /*
5360                  * Hard to debug if it ends up being
5361                  * used by a callee that assumes
5362                  * something about the other
5363                  * uninitialized fields... same as in
5364                  * memory.c
5365                  */
5366         };
5367
5368         /*
5369          * hugetlb_fault_mutex and i_mmap_rwsem must be
5370          * dropped before handling userfault.  Reacquire
5371          * after handling fault to make calling code simpler.
5372          */
5373         hash = hugetlb_fault_mutex_hash(mapping, idx);
5374         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5375         i_mmap_unlock_read(mapping);
5376         ret = handle_userfault(&vmf, reason);
5377         i_mmap_lock_read(mapping);
5378         mutex_lock(&hugetlb_fault_mutex_table[hash]);
5379
5380         return ret;
5381 }
5382
5383 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
5384                         struct vm_area_struct *vma,
5385                         struct address_space *mapping, pgoff_t idx,
5386                         unsigned long address, pte_t *ptep, unsigned int flags)
5387 {
5388         struct hstate *h = hstate_vma(vma);
5389         vm_fault_t ret = VM_FAULT_SIGBUS;
5390         int anon_rmap = 0;
5391         unsigned long size;
5392         struct page *page;
5393         pte_t new_pte;
5394         spinlock_t *ptl;
5395         unsigned long haddr = address & huge_page_mask(h);
5396         bool new_page, new_pagecache_page = false;
5397
5398         /*
5399          * Currently, we are forced to kill the process in the event the
5400          * original mapper has unmapped pages from the child due to a failed
5401          * COW. Warn that such a situation has occurred as it may not be obvious
5402          */
5403         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5404                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5405                            current->pid);
5406                 return ret;
5407         }
5408
5409         /*
5410          * We can not race with truncation due to holding i_mmap_rwsem.
5411          * i_size is modified when holding i_mmap_rwsem, so check here
5412          * once for faults beyond end of file.
5413          */
5414         size = i_size_read(mapping->host) >> huge_page_shift(h);
5415         if (idx >= size)
5416                 goto out;
5417
5418 retry:
5419         new_page = false;
5420         page = find_lock_page(mapping, idx);
5421         if (!page) {
5422                 /* Check for page in userfault range */
5423                 if (userfaultfd_missing(vma)) {
5424                         ret = hugetlb_handle_userfault(vma, mapping, idx,
5425                                                        flags, haddr, address,
5426                                                        VM_UFFD_MISSING);
5427                         goto out;
5428                 }
5429
5430                 page = alloc_huge_page(vma, haddr, 0);
5431                 if (IS_ERR(page)) {
5432                         /*
5433                          * Returning error will result in faulting task being
5434                          * sent SIGBUS.  The hugetlb fault mutex prevents two
5435                          * tasks from racing to fault in the same page which
5436                          * could result in false unable to allocate errors.
5437                          * Page migration does not take the fault mutex, but
5438                          * does a clear then write of pte's under page table
5439                          * lock.  Page fault code could race with migration,
5440                          * notice the clear pte and try to allocate a page
5441                          * here.  Before returning error, get ptl and make
5442                          * sure there really is no pte entry.
5443                          */
5444                         ptl = huge_pte_lock(h, mm, ptep);
5445                         ret = 0;
5446                         if (huge_pte_none(huge_ptep_get(ptep)))
5447                                 ret = vmf_error(PTR_ERR(page));
5448                         spin_unlock(ptl);
5449                         goto out;
5450                 }
5451                 clear_huge_page(page, address, pages_per_huge_page(h));
5452                 __SetPageUptodate(page);
5453                 new_page = true;
5454
5455                 if (vma->vm_flags & VM_MAYSHARE) {
5456                         int err = huge_add_to_page_cache(page, mapping, idx);
5457                         if (err) {
5458                                 put_page(page);
5459                                 if (err == -EEXIST)
5460                                         goto retry;
5461                                 goto out;
5462                         }
5463                         new_pagecache_page = true;
5464                 } else {
5465                         lock_page(page);
5466                         if (unlikely(anon_vma_prepare(vma))) {
5467                                 ret = VM_FAULT_OOM;
5468                                 goto backout_unlocked;
5469                         }
5470                         anon_rmap = 1;
5471                 }
5472         } else {
5473                 /*
5474                  * If a memory error occurs between mmap() and fault, some processes
5475                  * don't have a hwpoisoned swap entry for the errored virtual address.
5476                  * So we need to block hugepage faults with a PG_hwpoison bit check.
5477                  */
5478                 if (unlikely(PageHWPoison(page))) {
5479                         ret = VM_FAULT_HWPOISON_LARGE |
5480                                 VM_FAULT_SET_HINDEX(hstate_index(h));
5481                         goto backout_unlocked;
5482                 }
5483
5484                 /* Check for page in userfault range. */
5485                 if (userfaultfd_minor(vma)) {
5486                         unlock_page(page);
5487                         put_page(page);
5488                         ret = hugetlb_handle_userfault(vma, mapping, idx,
5489                                                        flags, haddr, address,
5490                                                        VM_UFFD_MINOR);
5491                         goto out;
5492                 }
5493         }
5494
5495         /*
5496          * If we are going to COW a private mapping later, we examine the
5497          * pending reservations for this page now. This will ensure that
5498          * any allocations necessary to record that reservation occur outside
5499          * the spinlock.
5500          */
5501         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5502                 if (vma_needs_reservation(h, vma, haddr) < 0) {
5503                         ret = VM_FAULT_OOM;
5504                         goto backout_unlocked;
5505                 }
5506                 /* Just decrements count, does not deallocate */
5507                 vma_end_reservation(h, vma, haddr);
5508         }
5509
5510         ptl = huge_pte_lock(h, mm, ptep);
5511         ret = 0;
5512         if (!huge_pte_none(huge_ptep_get(ptep)))
5513                 goto backout;
5514
5515         if (anon_rmap) {
5516                 ClearHPageRestoreReserve(page);
5517                 hugepage_add_new_anon_rmap(page, vma, haddr);
5518         } else
5519                 page_dup_rmap(page, true);
5520         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
5521                                 && (vma->vm_flags & VM_SHARED)));
5522         set_huge_pte_at(mm, haddr, ptep, new_pte);
5523
5524         hugetlb_count_add(pages_per_huge_page(h), mm);
5525         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5526                 /* Optimization, do the COW without a second fault */
5527                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
5528         }
5529
5530         spin_unlock(ptl);
5531
5532         /*
5533          * Only set HPageMigratable in newly allocated pages.  Existing pages
5534          * found in the pagecache may not have HPageMigratable set if they have
5535          * been isolated for migration.
5536          */
5537         if (new_page)
5538                 SetHPageMigratable(page);
5539
5540         unlock_page(page);
5541 out:
5542         return ret;
5543
5544 backout:
5545         spin_unlock(ptl);
5546 backout_unlocked:
5547         unlock_page(page);
5548         /* restore reserve for newly allocated pages not in page cache */
5549         if (new_page && !new_pagecache_page)
5550                 restore_reserve_on_error(h, vma, haddr, page);
5551         put_page(page);
5552         goto out;
5553 }
5554
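/*
 * Map a (mapping, index) pair to one of the fault mutexes.  On SMP the pair
 * is hashed with jhash2() and masked by (num_fault_mutexes - 1), which
 * assumes num_fault_mutexes is a power of two.  A typical caller pattern,
 * as in hugetlb_fault() below:
 *
 *	idx = vma_hugecache_offset(h, vma, haddr);
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...fault in or instantiate the page...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */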
5555 #ifdef CONFIG_SMP
5556 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5557 {
5558         unsigned long key[2];
5559         u32 hash;
5560
5561         key[0] = (unsigned long) mapping;
5562         key[1] = idx;
5563
5564         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5565
5566         return hash & (num_fault_mutexes - 1);
5567 }
5568 #else
5569 /*
5570  * For uniprocessor systems we always use a single mutex, so just
5571  * return 0 and avoid the hashing overhead.
5572  */
5573 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5574 {
5575         return 0;
5576 }
5577 #endif
5578
5579 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5580                         unsigned long address, unsigned int flags)
5581 {
5582         pte_t *ptep, entry;
5583         spinlock_t *ptl;
5584         vm_fault_t ret;
5585         u32 hash;
5586         pgoff_t idx;
5587         struct page *page = NULL;
5588         struct page *pagecache_page = NULL;
5589         struct hstate *h = hstate_vma(vma);
5590         struct address_space *mapping;
5591         int need_wait_lock = 0;
5592         unsigned long haddr = address & huge_page_mask(h);
5593
5594         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5595         if (ptep) {
5596                 /*
5597                  * Since we hold no locks, ptep could be stale.  That is
5598                  * OK as we are only making decisions based on content and
5599                  * not actually modifying content here.
5600                  */
5601                 entry = huge_ptep_get(ptep);
5602                 if (unlikely(is_hugetlb_entry_migration(entry))) {
5603                         migration_entry_wait_huge(vma, mm, ptep);
5604                         return 0;
5605                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5606                         return VM_FAULT_HWPOISON_LARGE |
5607                                 VM_FAULT_SET_HINDEX(hstate_index(h));
5608         }
5609
5610         /*
5611          * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
5612          * until finished with ptep.  This serves two purposes:
5613          * 1) It prevents huge_pmd_unshare from being called elsewhere
5614          *    and making the ptep no longer valid.
5615          * 2) It synchronizes us with i_size modifications during truncation.
5616          *
5617          * ptep could have already been assigned via huge_pte_offset.  That
5618          * is OK, as huge_pte_alloc will return the same value unless
5619          * something has changed.
5620          */
5621         mapping = vma->vm_file->f_mapping;
5622         i_mmap_lock_read(mapping);
5623         ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5624         if (!ptep) {
5625                 i_mmap_unlock_read(mapping);
5626                 return VM_FAULT_OOM;
5627         }
5628
5629         /*
5630          * Serialize hugepage allocation and instantiation, so that we don't
5631          * get spurious allocation failures if two CPUs race to instantiate
5632          * the same page in the page cache.
5633          */
5634         idx = vma_hugecache_offset(h, vma, haddr);
5635         hash = hugetlb_fault_mutex_hash(mapping, idx);
5636         mutex_lock(&hugetlb_fault_mutex_table[hash]);
5637
5638         entry = huge_ptep_get(ptep);
5639         if (huge_pte_none(entry)) {
5640                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
5641                 goto out_mutex;
5642         }
5643
5644         ret = 0;
5645
5646         /*
5647          * entry could be a migration/hwpoison entry at this point, so this
5648          * check prevents the code below from assuming that we have an
5649          * active hugepage in the pagecache. This goto expects a second page
5650          * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check
5651          * will handle it properly.
5652          */
5653         if (!pte_present(entry))
5654                 goto out_mutex;
5655
5656         /*
5657          * If we are going to COW the mapping later, we examine the pending
5658          * reservations for this page now. This will ensure that any
5659          * allocations necessary to record that reservation occur outside the
5660          * spinlock. For private mappings, we also look up the pagecache
5661          * page now as it is used to determine if a reservation has been
5662          * consumed.
5663          */
5664         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5665                 if (vma_needs_reservation(h, vma, haddr) < 0) {
5666                         ret = VM_FAULT_OOM;
5667                         goto out_mutex;
5668                 }
5669                 /* Just decrements count, does not deallocate */
5670                 vma_end_reservation(h, vma, haddr);
5671
5672                 if (!(vma->vm_flags & VM_MAYSHARE))
5673                         pagecache_page = hugetlbfs_pagecache_page(h,
5674                                                                 vma, haddr);
5675         }
5676
5677         ptl = huge_pte_lock(h, mm, ptep);
5678
5679         /* Check for a racing update before calling hugetlb_cow */
5680         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5681                 goto out_ptl;
5682
5683         /*
5684          * hugetlb_cow() requires page locks of pte_page(entry) and
5685          * pagecache_page, so here we need to take the former one
5686          * when page != pagecache_page or !pagecache_page.
5687          */
5688         page = pte_page(entry);
5689         if (page != pagecache_page)
5690                 if (!trylock_page(page)) {
5691                         need_wait_lock = 1;
5692                         goto out_ptl;
5693                 }
5694
5695         get_page(page);
5696
5697         if (flags & FAULT_FLAG_WRITE) {
5698                 if (!huge_pte_write(entry)) {
5699                         ret = hugetlb_cow(mm, vma, address, ptep,
5700                                           pagecache_page, ptl);
5701                         goto out_put_page;
5702                 }
5703                 entry = huge_pte_mkdirty(entry);
5704         }
5705         entry = pte_mkyoung(entry);
5706         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5707                                                 flags & FAULT_FLAG_WRITE))
5708                 update_mmu_cache(vma, haddr, ptep);
5709 out_put_page:
5710         if (page != pagecache_page)
5711                 unlock_page(page);
5712         put_page(page);
5713 out_ptl:
5714         spin_unlock(ptl);
5715
5716         if (pagecache_page) {
5717                 unlock_page(pagecache_page);
5718                 put_page(pagecache_page);
5719         }
5720 out_mutex:
5721         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5722         i_mmap_unlock_read(mapping);
5723         /*
5724          * Generally it's safe to hold a refcount while waiting for a page lock.
5725          * Here, however, we only wait to defer the next page fault and avoid a
5726          * busy loop; the page is not touched again after it is unlocked and
5727          * before we return from the current page fault. So we are safe from
5728          * accessing a freed page even though we wait here without a refcount.
5729          */
5730         if (need_wait_lock)
5731                 wait_on_page_locked(page);
5732         return ret;
5733 }
5734
5735 #ifdef CONFIG_USERFAULTFD
5736 /*
5737  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
5738  * modifications for huge pages.
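 *
 * On success returns 0 with the new pte installed.  If the initial
 * copy_huge_page_from_user() fails, a temporary page is allocated and
 * returned via @pagep together with -ENOENT; the caller is expected to
 * perform the copy outside mmap_lock and call again with *pagep set, in
 * which case the contents are copied into a freshly allocated huge page.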
5739  */
5740 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
5741                             pte_t *dst_pte,
5742                             struct vm_area_struct *dst_vma,
5743                             unsigned long dst_addr,
5744                             unsigned long src_addr,
5745                             enum mcopy_atomic_mode mode,
5746                             struct page **pagep)
5747 {
5748         bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
5749         struct hstate *h = hstate_vma(dst_vma);
5750         struct address_space *mapping = dst_vma->vm_file->f_mapping;
5751         pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
5752         unsigned long size;
5753         int vm_shared = dst_vma->vm_flags & VM_SHARED;
5754         pte_t _dst_pte;
5755         spinlock_t *ptl;
5756         int ret = -ENOMEM;
5757         struct page *page;
5758         int writable;
5759         bool page_in_pagecache = false;
5760
5761         if (is_continue) {
5762                 ret = -EFAULT;
5763                 page = find_lock_page(mapping, idx);
5764                 if (!page)
5765                         goto out;
5766                 page_in_pagecache = true;
5767         } else if (!*pagep) {
5768                 /* If a page already exists, then it's UFFDIO_COPY for
5769                  * a non-missing case. Return -EEXIST.
5770                  */
5771                 if (vm_shared &&
5772                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5773                         ret = -EEXIST;
5774                         goto out;
5775                 }
5776
5777                 page = alloc_huge_page(dst_vma, dst_addr, 0);
5778                 if (IS_ERR(page)) {
5779                         ret = -ENOMEM;
5780                         goto out;
5781                 }
5782
5783                 ret = copy_huge_page_from_user(page,
5784                                                 (const void __user *) src_addr,
5785                                                 pages_per_huge_page(h), false);
5786
5787                 /* fallback to copy_from_user outside mmap_lock */
5788                 if (unlikely(ret)) {
5789                         ret = -ENOENT;
5790                         /* Free the allocated page which may have
5791                          * consumed a reservation.
5792                          */
5793                         restore_reserve_on_error(h, dst_vma, dst_addr, page);
5794                         put_page(page);
5795
5796                         /* Allocate a temporary page to hold the copied
5797                          * contents.
5798                          */
5799                         page = alloc_huge_page_vma(h, dst_vma, dst_addr);
5800                         if (!page) {
5801                                 ret = -ENOMEM;
5802                                 goto out;
5803                         }
5804                         *pagep = page;
5805                         /* Set the outparam pagep and return to the caller to
5806                          * copy the contents outside the lock. Don't free the
5807                          * page.
5808                          */
5809                         goto out;
5810                 }
5811         } else {
5812                 if (vm_shared &&
5813                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5814                         put_page(*pagep);
5815                         ret = -EEXIST;
5816                         *pagep = NULL;
5817                         goto out;
5818                 }
5819
5820                 page = alloc_huge_page(dst_vma, dst_addr, 0);
5821                 if (IS_ERR(page)) {
5822                         ret = -ENOMEM;
5823                         *pagep = NULL;
5824                         goto out;
5825                 }
5826                 copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
5827                                     pages_per_huge_page(h));
5828                 put_page(*pagep);
5829                 *pagep = NULL;
5830         }
5831
5832         /*
5833          * The memory barrier inside __SetPageUptodate makes sure that
5834          * preceding stores to the page contents become visible before
5835          * the set_pte_at() write.
5836          */
5837         __SetPageUptodate(page);
5838
5839         /* Add shared, newly allocated pages to the page cache. */
5840         if (vm_shared && !is_continue) {
5841                 size = i_size_read(mapping->host) >> huge_page_shift(h);
5842                 ret = -EFAULT;
5843                 if (idx >= size)
5844                         goto out_release_nounlock;
5845
5846                 /*
5847                  * Serialization between remove_inode_hugepages() and
5848                  * huge_add_to_page_cache() below happens through the
5849                  * hugetlb_fault_mutex_table, which must be held by
5850                  * the caller here.
5851                  */
5852                 ret = huge_add_to_page_cache(page, mapping, idx);
5853                 if (ret)
5854                         goto out_release_nounlock;
5855                 page_in_pagecache = true;
5856         }
5857
5858         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
5859         spin_lock(ptl);
5860
5861         /*
5862          * Recheck the i_size after holding PT lock to make sure not
5863          * to leave any page mapped (as page_mapped()) beyond the end
5864          * of the i_size (remove_inode_hugepages() is strict about
5865          * enforcing that). If we bail out here, we'll also leave a
5866          * page in the radix tree in the vm_shared case beyond the end
5867          * of the i_size, but remove_inode_hugepages() will take care
5868          * of it as soon as we drop the hugetlb_fault_mutex_table.
5869          */
5870         size = i_size_read(mapping->host) >> huge_page_shift(h);
5871         ret = -EFAULT;
5872         if (idx >= size)
5873                 goto out_release_unlock;
5874
5875         ret = -EEXIST;
5876         if (!huge_pte_none(huge_ptep_get(dst_pte)))
5877                 goto out_release_unlock;
5878
5879         if (vm_shared) {
5880                 page_dup_rmap(page, true);
5881         } else {
5882                 ClearHPageRestoreReserve(page);
5883                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
5884         }
5885
5886         /* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
5887         if (is_continue && !vm_shared)
5888                 writable = 0;
5889         else
5890                 writable = dst_vma->vm_flags & VM_WRITE;
5891
5892         _dst_pte = make_huge_pte(dst_vma, page, writable);
5893         if (writable)
5894                 _dst_pte = huge_pte_mkdirty(_dst_pte);
5895         _dst_pte = pte_mkyoung(_dst_pte);
5896
5897         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
5898
5899         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
5900                                         dst_vma->vm_flags & VM_WRITE);
5901         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
5902
5903         /* No need to invalidate - it was non-present before */
5904         update_mmu_cache(dst_vma, dst_addr, dst_pte);
5905
5906         spin_unlock(ptl);
5907         if (!is_continue)
5908                 SetHPageMigratable(page);
5909         if (vm_shared || is_continue)
5910                 unlock_page(page);
5911         ret = 0;
5912 out:
5913         return ret;
5914 out_release_unlock:
5915         spin_unlock(ptl);
5916         if (vm_shared || is_continue)
5917                 unlock_page(page);
5918 out_release_nounlock:
5919         if (!page_in_pagecache)
5920                 restore_reserve_on_error(h, dst_vma, dst_addr, page);
5921         put_page(page);
5922         goto out;
5923 }
5924 #endif /* CONFIG_USERFAULTFD */
5925
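/*
 * Record @refs consecutive subpages of @page, and the owning @vma, into the
 * @pages and @vmas arrays (either may be NULL), one entry per base page.
 * Helper for follow_hugetlb_page() below.
 */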
5926 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
5927                                  int refs, struct page **pages,
5928                                  struct vm_area_struct **vmas)
5929 {
5930         int nr;
5931
5932         for (nr = 0; nr < refs; nr++) {
5933                 if (likely(pages))
5934                         pages[nr] = mem_map_offset(page, nr);
5935                 if (vmas)
5936                         vmas[nr] = vma;
5937         }
5938 }
5939
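/*
 * Walk the huge pages backing [*position, *position + *nr_pages pages) of a
 * hugetlb vma, faulting them in as needed, and optionally record each base
 * page and its vma via record_subpages_vmas().  Returns the updated number
 * of pages processed (i), or a negative errno if none could be processed;
 * *position and *nr_pages are updated for the caller.
 */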
5940 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
5941                          struct page **pages, struct vm_area_struct **vmas,
5942                          unsigned long *position, unsigned long *nr_pages,
5943                          long i, unsigned int flags, int *locked)
5944 {
5945         unsigned long pfn_offset;
5946         unsigned long vaddr = *position;
5947         unsigned long remainder = *nr_pages;
5948         struct hstate *h = hstate_vma(vma);
5949         int err = -EFAULT, refs;
5950
5951         while (vaddr < vma->vm_end && remainder) {
5952                 pte_t *pte;
5953                 spinlock_t *ptl = NULL;
5954                 int absent;
5955                 struct page *page;
5956
5957                 /*
5958                  * If we have a pending SIGKILL, don't keep faulting pages and
5959                  * potentially allocating memory.
5960                  */
5961                 if (fatal_signal_pending(current)) {
5962                         remainder = 0;
5963                         break;
5964                 }
5965
5966                 /*
5967                  * Some archs (sparc64, sh*) have multiple pte_t entries
5968                  * for each hugepage.  We have to make sure we get the
5969                  * first, for the page indexing below to work.
5970                  *
5971                  * Note that page table lock is not held when pte is null.
5972                  */
5973                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
5974                                       huge_page_size(h));
5975                 if (pte)
5976                         ptl = huge_pte_lock(h, mm, pte);
5977                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
5978
5979                 /*
5980                  * When coredumping, it suits get_dump_page if we just return
5981                  * an error where there's an empty slot with no huge pagecache
5982                  * to back it.  This way, we avoid allocating a hugepage, and
5983                  * the sparse dumpfile avoids allocating disk blocks, but its
5984                  * huge holes still show up with zeroes where they need to be.
5985                  */
5986                 if (absent && (flags & FOLL_DUMP) &&
5987                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
5988                         if (pte)
5989                                 spin_unlock(ptl);
5990                         remainder = 0;
5991                         break;
5992                 }
5993
5994                 /*
5995                  * We need to call hugetlb_fault for both hugepages under migration
5996                  * (in which case hugetlb_fault waits for the migration) and
5997                  * hwpoisoned hugepages (in which case we need to prevent the
5998                  * caller from accessing them).  To do this, we use is_swap_pte
5999                  * here instead of is_hugetlb_entry_migration and
6000                  * is_hugetlb_entry_hwpoisoned, because it simply covers both
6001                  * cases, and because we can't follow correct pages directly
6002                  * from any kind of swap entry.
6003                  */
6004                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
6005                     ((flags & FOLL_WRITE) &&
6006                       !huge_pte_write(huge_ptep_get(pte)))) {
6007                         vm_fault_t ret;
6008                         unsigned int fault_flags = 0;
6009
6010                         if (pte)
6011                                 spin_unlock(ptl);
6012                         if (flags & FOLL_WRITE)
6013                                 fault_flags |= FAULT_FLAG_WRITE;
6014                         if (locked)
6015                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6016                                         FAULT_FLAG_KILLABLE;
6017                         if (flags & FOLL_NOWAIT)
6018                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
6019                                         FAULT_FLAG_RETRY_NOWAIT;
6020                         if (flags & FOLL_TRIED) {
6021                                 /*
6022                                  * Note: FAULT_FLAG_ALLOW_RETRY and
6023                                  * FAULT_FLAG_TRIED can co-exist
6024                                  */
6025                                 fault_flags |= FAULT_FLAG_TRIED;
6026                         }
6027                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
6028                         if (ret & VM_FAULT_ERROR) {
6029                                 err = vm_fault_to_errno(ret, flags);
6030                                 remainder = 0;
6031                                 break;
6032                         }
6033                         if (ret & VM_FAULT_RETRY) {
6034                                 if (locked &&
6035                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
6036                                         *locked = 0;
6037                                 *nr_pages = 0;
6038                                 /*
6039                                  * VM_FAULT_RETRY must not return an
6040                                  * error, it will return zero
6041                                  * instead.
6042                                  *
6043                                  * No need to update "position" as the
6044                                  * caller will not check it after
6045                                  * *nr_pages is set to 0.
6046                                  */
6047                                 return i;
6048                         }
6049                         continue;
6050                 }
6051
6052                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
6053                 page = pte_page(huge_ptep_get(pte));
6054
6055                 /*
6056                  * If subpage information is not requested, update counters
6057                  * and skip the same_page loop below.
6058                  */
6059                 if (!pages && !vmas && !pfn_offset &&
6060                     (vaddr + huge_page_size(h) < vma->vm_end) &&
6061                     (remainder >= pages_per_huge_page(h))) {
6062                         vaddr += huge_page_size(h);
6063                         remainder -= pages_per_huge_page(h);
6064                         i += pages_per_huge_page(h);
6065                         spin_unlock(ptl);
6066                         continue;
6067                 }
6068
6069                 /* vaddr may not be aligned to PAGE_SIZE */
6070                 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6071                     (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
6072
6073                 if (pages || vmas)
6074                         record_subpages_vmas(mem_map_offset(page, pfn_offset),
6075                                              vma, refs,
6076                                              likely(pages) ? pages + i : NULL,
6077                                              vmas ? vmas + i : NULL);
6078
6079                 if (pages) {
6080                         /*
6081                          * try_grab_folio() should always succeed here,
6082                          * because: a) we hold the ptl lock, and b) we've just
6083                          * checked that the huge page is present in the page
6084                          * tables. If the huge page is present, then the tail
6085                          * pages must also be present. The ptl prevents the
6086                          * head page and tail pages from being rearranged in
6087                          * any way. So this page must be available at this
6088                          * point, unless the page refcount overflowed:
6089                          */
6090                         if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
6091                                                          flags))) {
6092                                 spin_unlock(ptl);
6093                                 remainder = 0;
6094                                 err = -ENOMEM;
6095                                 break;
6096                         }
6097                 }
6098
6099                 vaddr += (refs << PAGE_SHIFT);
6100                 remainder -= refs;
6101                 i += refs;
6102
6103                 spin_unlock(ptl);
6104         }
6105         *nr_pages = remainder;
6106         /*
6107          * Setting position is actually required only if remainder is
6108          * not zero, but it's faster not to add an "if (remainder)"
6109          * branch.
6110          */
6111         *position = vaddr;
6112
6113         return i ? i : err;
6114 }
6115
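/*
 * Change the protection of the hugetlb mappings in [address, end) to
 * @newprot.  Returns the count of changed huge ptes, scaled to base pages
 * (pages << h->order).
 */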
6116 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
6117                 unsigned long address, unsigned long end, pgprot_t newprot)
6118 {
6119         struct mm_struct *mm = vma->vm_mm;
6120         unsigned long start = address;
6121         pte_t *ptep;
6122         pte_t pte;
6123         struct hstate *h = hstate_vma(vma);
6124         unsigned long pages = 0;
6125         bool shared_pmd = false;
6126         struct mmu_notifier_range range;
6127
6128         /*
6129          * In the case of shared PMDs, the area to flush could be beyond
6130          * start/end.  Set range.start/range.end to cover the maximum possible
6131          * range if PMD sharing is possible.
6132          */
6133         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6134                                 0, vma, mm, start, end);
6135         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6136
6137         BUG_ON(address >= end);
6138         flush_cache_range(vma, range.start, range.end);
6139
6140         mmu_notifier_invalidate_range_start(&range);
6141         i_mmap_lock_write(vma->vm_file->f_mapping);
6142         for (; address < end; address += huge_page_size(h)) {
6143                 spinlock_t *ptl;
6144                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
6145                 if (!ptep)
6146                         continue;
6147                 ptl = huge_pte_lock(h, mm, ptep);
6148                 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
6149                         pages++;
6150                         spin_unlock(ptl);
6151                         shared_pmd = true;
6152                         continue;
6153                 }
6154                 pte = huge_ptep_get(ptep);
6155                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6156                         spin_unlock(ptl);
6157                         continue;
6158                 }
6159                 if (unlikely(is_hugetlb_entry_migration(pte))) {
6160                         swp_entry_t entry = pte_to_swp_entry(pte);
6161
6162                         if (is_writable_migration_entry(entry)) {
6163                                 pte_t newpte;
6164
6165                                 entry = make_readable_migration_entry(
6166                                                         swp_offset(entry));
6167                                 newpte = swp_entry_to_pte(entry);
6168                                 set_huge_swap_pte_at(mm, address, ptep,
6169                                                      newpte, huge_page_size(h));
6170                                 pages++;
6171                         }
6172                         spin_unlock(ptl);
6173                         continue;
6174                 }
6175                 if (!huge_pte_none(pte)) {
6176                         pte_t old_pte;
6177                         unsigned int shift = huge_page_shift(hstate_vma(vma));
6178
6179                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6180                         pte = huge_pte_modify(old_pte, newprot);
6181                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6182                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6183                         pages++;
6184                 }
6185                 spin_unlock(ptl);
6186         }
6187         /*
6188          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6189          * may have cleared our pud entry and done put_page on the page table:
6190          * once we release i_mmap_rwsem, another task can do the final put_page
6191          * and that page table may be reused and filled with junk.  If we actually
6192          * did unshare a page of pmds, flush the range corresponding to the pud.
6193          */
6194         if (shared_pmd)
6195                 flush_hugetlb_tlb_range(vma, range.start, range.end);
6196         else
6197                 flush_hugetlb_tlb_range(vma, start, end);
6198         /*
6199          * No need to call mmu_notifier_invalidate_range() as we are downgrading
6200          * page table protection, not changing it to point to a new page.
6201          *
6202          * See Documentation/vm/mmu_notifier.rst
6203          */
6204         i_mmap_unlock_write(vma->vm_file->f_mapping);
6205         mmu_notifier_invalidate_range_end(&range);
6206
6207         return pages << h->order;
6208 }
6209
6210 /* Return true if reservation was successful, false otherwise.  */
6211 bool hugetlb_reserve_pages(struct inode *inode,
6212                                         long from, long to,
6213                                         struct vm_area_struct *vma,
6214                                         vm_flags_t vm_flags)
6215 {
6216         long chg, add = -1;
6217         struct hstate *h = hstate_inode(inode);
6218         struct hugepage_subpool *spool = subpool_inode(inode);
6219         struct resv_map *resv_map;
6220         struct hugetlb_cgroup *h_cg = NULL;
6221         long gbl_reserve, regions_needed = 0;
6222
6223         /* This should never happen */
6224         if (from > to) {
6225                 VM_WARN(1, "%s called with a negative range\n", __func__);
6226                 return false;
6227         }
6228
6229         /*
6230          * Only apply hugepage reservation if asked. At fault time, an
6231          * attempt will be made for VM_NORESERVE to allocate a page
6232          * without using reserves.
6233          */
6234         if (vm_flags & VM_NORESERVE)
6235                 return true;
6236
6237         /*
6238          * Shared mappings base their reservation on the number of pages that
6239          * are already allocated on behalf of the file. Private mappings need
6240          * to reserve the full area even if read-only as mprotect() may be
6241          * called to make the mapping read-write. Assume !vma is a shm mapping
6242          */
6243         if (!vma || vma->vm_flags & VM_MAYSHARE) {
6244                 /*
6245                  * resv_map can not be NULL as hugetlb_reserve_pages is only
6246                  * called for inodes for which resv_maps were created (see
6247                  * hugetlbfs_get_inode).
6248                  */
6249                 resv_map = inode_resv_map(inode);
6250
6251                 chg = region_chg(resv_map, from, to, &regions_needed);
6252
6253         } else {
6254                 /* Private mapping. */
6255                 resv_map = resv_map_alloc();
6256                 if (!resv_map)
6257                         return false;
6258
6259                 chg = to - from;
6260
6261                 set_vma_resv_map(vma, resv_map);
6262                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6263         }
6264
6265         if (chg < 0)
6266                 goto out_err;
6267
6268         if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6269                                 chg * pages_per_huge_page(h), &h_cg) < 0)
6270                 goto out_err;
6271
6272         if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6273                 /* For private mappings, the hugetlb_cgroup uncharge info hangs
6274                  * off the resv_map.
6275                  */
6276                 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6277         }
6278
6279         /*
6280          * There must be enough pages in the subpool for the mapping. If
6281          * the subpool has a minimum size, there may be some global
6282          * reservations already in place (gbl_reserve).
6283          */
6284         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6285         if (gbl_reserve < 0)
6286                 goto out_uncharge_cgroup;
6287
6288         /*
6289          * Check that enough hugepages are available for the reservation.
6290          * Hand the pages back to the subpool if there are not.
6291          */
6292         if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6293                 goto out_put_pages;
6294
6295         /*
6296          * Account for the reservations made. Shared mappings record regions
6297          * that have reservations as they are shared by multiple VMAs.
6298          * When the last VMA disappears, the region map says how much
6299          * the reservation was and the page cache tells how much of
6300          * the reservation was consumed. Private mappings are per-VMA and
6301          * only the consumed reservations are tracked. When the VMA
6302          * disappears, the original reservation is the VMA size and the
6303          * consumed reservations are stored in the map. Hence, nothing
6304          * else has to be done for private mappings here
6305          */
6306         if (!vma || vma->vm_flags & VM_MAYSHARE) {
6307                 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6308
6309                 if (unlikely(add < 0)) {
6310                         hugetlb_acct_memory(h, -gbl_reserve);
6311                         goto out_put_pages;
6312                 } else if (unlikely(chg > add)) {
6313                         /*
6314                          * pages in this range were added to the reserve
6315                          * map between region_chg and region_add.  This
6316                          * indicates a race with alloc_huge_page.  Adjust
6317                          * the subpool and reserve counts modified above
6318                          * based on the difference.
6319                          */
6320                         long rsv_adjust;
6321
6322                         /*
6323                          * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6324                          * reference to h_cg->css. See comment below for detail.
6325                          */
6326                         hugetlb_cgroup_uncharge_cgroup_rsvd(
6327                                 hstate_index(h),
6328                                 (chg - add) * pages_per_huge_page(h), h_cg);
6329
6330                         rsv_adjust = hugepage_subpool_put_pages(spool,
6331                                                                 chg - add);
6332                         hugetlb_acct_memory(h, -rsv_adjust);
6333                 } else if (h_cg) {
6334                         /*
6335                          * The file_regions will hold their own reference to
6336                          * h_cg->css. So we should release the reference held
6337                          * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6338                          * done.
6339                          */
6340                         hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6341                 }
6342         }
6343         return true;
6344
6345 out_put_pages:
6346         /* put back original number of pages, chg */
6347         (void)hugepage_subpool_put_pages(spool, chg);
6348 out_uncharge_cgroup:
6349         hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6350                                             chg * pages_per_huge_page(h), h_cg);
6351 out_err:
6352         if (!vma || vma->vm_flags & VM_MAYSHARE)
6353                 /* Only call region_abort if the region_chg succeeded but the
6354                  * region_add failed or didn't run.
6355                  */
6356                 if (chg >= 0 && add < 0)
6357                         region_abort(resv_map, from, to, regions_needed);
6358         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6359                 kref_put(&resv_map->refs, resv_map_release);
6360         return false;
6361 }
6362
6363 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6364                                                                 long freed)
6365 {
6366         struct hstate *h = hstate_inode(inode);
6367         struct resv_map *resv_map = inode_resv_map(inode);
6368         long chg = 0;
6369         struct hugepage_subpool *spool = subpool_inode(inode);
6370         long gbl_reserve;
6371
6372         /*
6373          * Since this routine can be called in the evict inode path for all
6374          * hugetlbfs inodes, resv_map could be NULL.
6375          */
6376         if (resv_map) {
6377                 chg = region_del(resv_map, start, end);
6378                 /*
6379                  * region_del() can fail in the rare case where a region
6380                  * must be split and another region descriptor can not be
6381                  * allocated.  If end == LONG_MAX, it will not fail.
6382                  */
6383                 if (chg < 0)
6384                         return chg;
6385         }
6386
6387         spin_lock(&inode->i_lock);
6388         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6389         spin_unlock(&inode->i_lock);
6390
6391         /*
6392          * If the subpool has a minimum size, the number of global
6393          * reservations to be released may be adjusted.
6394          *
6395          * Note that !resv_map implies freed == 0. So (chg - freed)
6396          * won't go negative.
6397          */
6398         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6399         hugetlb_acct_memory(h, -gbl_reserve);
6400
6401         return 0;
6402 }
6403
6404 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
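/*
 * If @vma at @addr and @svma can share a PMD page table for the PUD-aligned
 * region containing @addr, return the corresponding address in @svma;
 * return 0 when sharing is not possible (different pmd index, mismatched
 * flags, or the region is not fully inside @svma).
 */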
6405 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6406                                 struct vm_area_struct *vma,
6407                                 unsigned long addr, pgoff_t idx)
6408 {
6409         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6410                                 svma->vm_start;
6411         unsigned long sbase = saddr & PUD_MASK;
6412         unsigned long s_end = sbase + PUD_SIZE;
6413
6414         /* Allow segments to share if only one is marked locked */
6415         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6416         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
6417
6418         /*
6419          * match the virtual addresses, permissions and the alignment of the
6420          * page table page.
6421          */
6422         if (pmd_index(addr) != pmd_index(saddr) ||
6423             vm_flags != svm_flags ||
6424             !range_in_vma(svma, sbase, s_end))
6425                 return 0;
6426
6427         return saddr;
6428 }
6429
6430 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
6431 {
6432         unsigned long base = addr & PUD_MASK;
6433         unsigned long end = base + PUD_SIZE;
6434
6435         /*
6436          * check on proper vm_flags and page table alignment
6437          */
6438         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
6439                 return true;
6440         return false;
6441 }
6442
6443 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6444 {
6445 #ifdef CONFIG_USERFAULTFD
6446         if (uffd_disable_huge_pmd_share(vma))
6447                 return false;
6448 #endif
6449         return vma_shareable(vma, addr);
6450 }
6451
6452 /*
6453  * Determine if start,end range within vma could be mapped by shared pmd.
6454  * If yes, adjust start and end to cover range associated with possible
6455  * shared pmd mappings.
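 *
 * For example, with a 1GB PUD_SIZE and a VM_MAYSHARE vma covering
 * [1GB, 4GB), a requested range of [1.5GB, 2.5GB) is widened to [1GB, 3GB)
 * so that it covers every PUD that might hold a shared PMD page table.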
6456  */
6457 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6458                                 unsigned long *start, unsigned long *end)
6459 {
6460         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6461                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6462
6463         /*
6464          * vma needs to span at least one aligned PUD size, and the range
6465          * must be at least partially within it.
6466          */
6467         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6468                 (*end <= v_start) || (*start >= v_end))
6469                 return;
6470
6471         /* Extend the range to be PUD aligned for a worst case scenario */
6472         if (*start > v_start)
6473                 *start = ALIGN_DOWN(*start, PUD_SIZE);
6474
6475         if (*end < v_end)
6476                 *end = ALIGN(*end, PUD_SIZE);
6477 }
6478
6479 /*
6480  * Search for a shareable pmd page for hugetlb. In any case, it calls
6481  * pmd_alloc() and returns the corresponding pte. While this is not necessary
6482  * for the !shared pmd case, because we could allocate the pmd later as well,
6483  * it makes the code much cleaner.
6484  *
6485  * This routine must be called with i_mmap_rwsem held in at least read mode if
6486  * sharing is possible.  For hugetlbfs, this prevents removal of any page
6487  * table entries associated with the address space.  This is important as we
6488  * are setting up sharing based on existing page table entries (mappings).
6489  */
6490 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6491                       unsigned long addr, pud_t *pud)
6492 {
6493         struct address_space *mapping = vma->vm_file->f_mapping;
6494         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6495                         vma->vm_pgoff;
6496         struct vm_area_struct *svma;
6497         unsigned long saddr;
6498         pte_t *spte = NULL;
6499         pte_t *pte;
6500         spinlock_t *ptl;
6501
6502         i_mmap_assert_locked(mapping);
6503         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6504                 if (svma == vma)
6505                         continue;
6506
6507                 saddr = page_table_shareable(svma, vma, addr, idx);
6508                 if (saddr) {
6509                         spte = huge_pte_offset(svma->vm_mm, saddr,
6510                                                vma_mmu_pagesize(svma));
6511                         if (spte) {
6512                                 get_page(virt_to_page(spte));
6513                                 break;
6514                         }
6515                 }
6516         }
6517
6518         if (!spte)
6519                 goto out;
6520
6521         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
6522         if (pud_none(*pud)) {
6523                 pud_populate(mm, pud,
6524                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
6525                 mm_inc_nr_pmds(mm);
6526         } else {
6527                 put_page(virt_to_page(spte));
6528         }
6529         spin_unlock(ptl);
6530 out:
6531         pte = (pte_t *)pmd_alloc(mm, pud, addr);
6532         return pte;
6533 }
6534
6535 /*
6536  * Unmap a huge page backed by a shared pte.
6537  *
6538  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
6539  * shared, as indicated by page_count > 1, unmapping is achieved by clearing
6540  * the pud and decrementing the refcount.  If count == 1, the page is not shared.
6541  *
6542  * Called with page table lock held and i_mmap_rwsem held in write mode.
6543  *
6544  * returns: 1 successfully unmapped a shared pte page
6545  *          0 the underlying pte page is not shared, or it is the last user
6546  */
6547 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6548                                         unsigned long *addr, pte_t *ptep)
6549 {
6550         pgd_t *pgd = pgd_offset(mm, *addr);
6551         p4d_t *p4d = p4d_offset(pgd, *addr);
6552         pud_t *pud = pud_offset(p4d, *addr);
6553
6554         i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6555         BUG_ON(page_count(virt_to_page(ptep)) == 0);
6556         if (page_count(virt_to_page(ptep)) == 1)
6557                 return 0;
6558
6559         pud_clear(pud);
6560         put_page(virt_to_page(ptep));
6561         mm_dec_nr_pmds(mm);
6562         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
6563         return 1;
6564 }
6565
6566 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6567 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6568                       unsigned long addr, pud_t *pud)
6569 {
6570         return NULL;
6571 }
6572
6573 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
6574                                 unsigned long *addr, pte_t *ptep)
6575 {
6576         return 0;
6577 }
6578
6579 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6580                                 unsigned long *start, unsigned long *end)
6581 {
6582 }
6583
6584 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6585 {
6586         return false;
6587 }
6588 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6589
6590 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
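/*
 * Generic huge_pte_alloc(): return the page table entry used to map a huge
 * page of size @sz at @addr, allocating intermediate levels as needed.
 * PUD-sized pages use the pud entry directly; PMD-sized pages may share a
 * pmd page table with other mappings of the same file via huge_pmd_share().
 */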
6591 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
6592                         unsigned long addr, unsigned long sz)
6593 {
6594         pgd_t *pgd;
6595         p4d_t *p4d;
6596         pud_t *pud;
6597         pte_t *pte = NULL;
6598
6599         pgd = pgd_offset(mm, addr);
6600         p4d = p4d_alloc(mm, pgd, addr);
6601         if (!p4d)
6602                 return NULL;
6603         pud = pud_alloc(mm, p4d, addr);
6604         if (pud) {
6605                 if (sz == PUD_SIZE) {
6606                         pte = (pte_t *)pud;
6607                 } else {
6608                         BUG_ON(sz != PMD_SIZE);
6609                         if (want_pmd_share(vma, addr) && pud_none(*pud))
6610                                 pte = huge_pmd_share(mm, vma, addr, pud);
6611                         else
6612                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
6613                 }
6614         }
6615         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
6616
6617         return pte;
6618 }
6619
6620 /*
6621  * huge_pte_offset() - Walk the page table to resolve the hugepage
6622  * entry at address @addr
6623  *
6624  * Return: Pointer to page table entry (PUD or PMD) for
6625  * address @addr, or NULL if a !p*d_present() entry is encountered and the
6626  * size @sz doesn't match the hugepage size at this level of the page
6627  * table.
6628  */
6629 pte_t *huge_pte_offset(struct mm_struct *mm,
6630                        unsigned long addr, unsigned long sz)
6631 {
6632         pgd_t *pgd;
6633         p4d_t *p4d;
6634         pud_t *pud;
6635         pmd_t *pmd;
6636
6637         pgd = pgd_offset(mm, addr);
6638         if (!pgd_present(*pgd))
6639                 return NULL;
6640         p4d = p4d_offset(pgd, addr);
6641         if (!p4d_present(*p4d))
6642                 return NULL;
6643
6644         pud = pud_offset(p4d, addr);
6645         if (sz == PUD_SIZE)
6646                 /* must be pud huge, non-present or none */
6647                 return (pte_t *)pud;
6648         if (!pud_present(*pud))
6649                 return NULL;
6650         /* must have a valid entry and size to go further */
6651
6652         pmd = pmd_offset(pud, addr);
6653         /* must be pmd huge, non-present or none */
6654         return (pte_t *)pmd;
6655 }
6656
6657 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
6658
6659 /*
6660  * These functions are declared __weak so that an architecture can override
6661  * them with its own behavior.
6662  */
6663 struct page * __weak
6664 follow_huge_addr(struct mm_struct *mm, unsigned long address,
6665                               int write)
6666 {
6667         return ERR_PTR(-EINVAL);
6668 }
6669
6670 struct page * __weak
6671 follow_huge_pd(struct vm_area_struct *vma,
6672                unsigned long address, hugepd_t hpd, int flags, int pdshift)
6673 {
6674         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
6675         return NULL;
6676 }
6677
6678 struct page * __weak
6679 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
6680                 pmd_t *pmd, int flags)
6681 {
6682         struct page *page = NULL;
6683         spinlock_t *ptl;
6684         pte_t pte;
6685
6686         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
6687         if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
6688                          (FOLL_PIN | FOLL_GET)))
6689                 return NULL;
6690
6691 retry:
6692         ptl = pmd_lockptr(mm, pmd);
6693         spin_lock(ptl);
6694         /*
6695          * make sure that the address range covered by this pmd is not
6696          * unmapped by other threads.
6697          */
6698         if (!pmd_huge(*pmd))
6699                 goto out;
6700         pte = huge_ptep_get((pte_t *)pmd);
6701         if (pte_present(pte)) {
6702                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
6703                 /*
6704                  * try_grab_page() should always succeed here, because: a) we
6705                  * hold the pmd (ptl) lock, and b) we've just checked that the
6706                  * huge pmd (head) page is present in the page tables. The ptl
6707                  * prevents the head page and tail pages from being rearranged
6708                  * in any way. So this page must be available at this point,
6709                  * unless the page refcount overflowed:
6710                  */
6711                 if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
6712                         page = NULL;
6713                         goto out;
6714                 }
6715         } else {
6716                 if (is_hugetlb_entry_migration(pte)) {
6717                         spin_unlock(ptl);
6718                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
6719                         goto retry;
6720                 }
6721                 /*
6722                  * hwpoisoned entry is treated as no_page_table in
6723                  * follow_page_mask().
6724                  */
6725         }
6726 out:
6727         spin_unlock(ptl);
6728         return page;
6729 }
6730
6731 struct page * __weak
6732 follow_huge_pud(struct mm_struct *mm, unsigned long address,
6733                 pud_t *pud, int flags)
6734 {
6735         if (flags & (FOLL_GET | FOLL_PIN))
6736                 return NULL;
6737
6738         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
6739 }
6740
6741 struct page * __weak
6742 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
6743 {
6744         if (flags & (FOLL_GET | FOLL_PIN))
6745                 return NULL;
6746
6747         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
6748 }
6749
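/*
 * Isolate an in-use huge page for migration: grab a reference, clear
 * HPageMigratable and move the page to the caller's @list.  Returns false
 * if @page is not a huge page head, is not migratable, or its refcount
 * could not be raised.
 */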
6750 bool isolate_huge_page(struct page *page, struct list_head *list)
6751 {
6752         bool ret = true;
6753
6754         spin_lock_irq(&hugetlb_lock);
6755         if (!PageHeadHuge(page) ||
6756             !HPageMigratable(page) ||
6757             !get_page_unless_zero(page)) {
6758                 ret = false;
6759                 goto unlock;
6760         }
6761         ClearHPageMigratable(page);
6762         list_move_tail(&page->lru, list);
6763 unlock:
6764         spin_unlock_irq(&hugetlb_lock);
6765         return ret;
6766 }
6767
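/*
 * Take a reference on a huge page for memory-failure handling.  *hugetlb is
 * set when @page is a huge page head.  Returns 1 if a reference was taken,
 * 0 if @page is not a huge page (or its refcount was already zero), and
 * -EBUSY if the huge page is neither free nor migratable.
 */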
6768 int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
6769 {
6770         int ret = 0;
6771
6772         *hugetlb = false;
6773         spin_lock_irq(&hugetlb_lock);
6774         if (PageHeadHuge(page)) {
6775                 *hugetlb = true;
6776                 if (HPageFreed(page) || HPageMigratable(page))
6777                         ret = get_page_unless_zero(page);
6778                 else
6779                         ret = -EBUSY;
6780         }
6781         spin_unlock_irq(&hugetlb_lock);
6782         return ret;
6783 }
6784
6785 void putback_active_hugepage(struct page *page)
6786 {
6787         spin_lock_irq(&hugetlb_lock);
6788         SetHPageMigratable(page);
6789         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
6790         spin_unlock_irq(&hugetlb_lock);
6791         put_page(page);
6792 }
6793
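/*
 * Called after a huge page has been migrated: move the cgroup charge and
 * page_owner migrate reason to @newpage.  If @newpage was allocated as a
 * temporary page, the temporary status is handed over to @oldpage (which is
 * about to be freed) and any per-node surplus accounting follows the page
 * to its new node.
 */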
6794 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
6795 {
6796         struct hstate *h = page_hstate(oldpage);
6797
6798         hugetlb_cgroup_migrate(oldpage, newpage);
6799         set_page_owner_migrate_reason(newpage, reason);
6800
6801         /*
6802          * Transfer the temporary state of the new huge page. This is the
6803          * reverse of other transitions because the newpage is going to
6804          * be final while the old one will be freed, so it takes over
6805          * the temporary status.
6806          *
6807          * Also note that we have to transfer the per-node surplus state
6808          * here as well, otherwise the global surplus count will not match
6809          * the per-node counts.
6810          */
6811         if (HPageTemporary(newpage)) {
6812                 int old_nid = page_to_nid(oldpage);
6813                 int new_nid = page_to_nid(newpage);
6814
6815                 SetHPageTemporary(oldpage);
6816                 ClearHPageTemporary(newpage);
6817
6818                 /*
6819                  * There is no need to transfer the per-node surplus state
6820                  * when we do not cross the node.
6821                  */
6822                 if (new_nid == old_nid)
6823                         return;
6824                 spin_lock_irq(&hugetlb_lock);
6825                 if (h->surplus_huge_pages_node[old_nid]) {
6826                         h->surplus_huge_pages_node[old_nid]--;
6827                         h->surplus_huge_pages_node[new_nid]++;
6828                 }
6829                 spin_unlock_irq(&hugetlb_lock);
6830         }
6831 }
6832
6833 /*
6834  * Unconditionally unshare all shared pmd page table entries within the
6835  * given vma's hugetlbfs memory range.
6836  */
6837 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
6838 {
6839         struct hstate *h = hstate_vma(vma);
6840         unsigned long sz = huge_page_size(h);
6841         struct mm_struct *mm = vma->vm_mm;
6842         struct mmu_notifier_range range;
6843         unsigned long address, start, end;
6844         spinlock_t *ptl;
6845         pte_t *ptep;
6846
6847         if (!(vma->vm_flags & VM_MAYSHARE))
6848                 return;
6849
6850         start = ALIGN(vma->vm_start, PUD_SIZE);
6851         end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6852
6853         if (start >= end)
6854                 return;
6855
6856         /*
6857          * No need to call adjust_range_if_pmd_sharing_possible(), because
6858          * we have already done the PUD_SIZE alignment.
6859          */
6860         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
6861                                 start, end);
6862         mmu_notifier_invalidate_range_start(&range);
6863         i_mmap_lock_write(vma->vm_file->f_mapping);
6864         for (address = start; address < end; address += PUD_SIZE) {
6865                 unsigned long tmp = address;
6866
6867                 ptep = huge_pte_offset(mm, address, sz);
6868                 if (!ptep)
6869                         continue;
6870                 ptl = huge_pte_lock(h, mm, ptep);
6871                 /* We don't want 'address' to be changed */
6872                 huge_pmd_unshare(mm, vma, &tmp, ptep);
6873                 spin_unlock(ptl);
6874         }
6875         flush_hugetlb_tlb_range(vma, start, end);
6876         i_mmap_unlock_write(vma->vm_file->f_mapping);
6877         /*
6878          * No need to call mmu_notifier_invalidate_range(), see
6879          * Documentation/vm/mmu_notifier.rst.
6880          */
6881         mmu_notifier_invalidate_range_end(&range);
6882 }
6883
6884 #ifdef CONFIG_CMA
6885 static bool cma_reserve_called __initdata;
6886
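/*
 * Parse the "hugetlb_cma=" boot parameter.  Both a single global size
 * ("hugetlb_cma=4G") and a comma separated list of per-node sizes
 * ("hugetlb_cma=0:2G,1:2G") are accepted.
 */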
6887 static int __init cmdline_parse_hugetlb_cma(char *p)
6888 {
6889         int nid, count = 0;
6890         unsigned long tmp;
6891         char *s = p;
6892
6893         while (*s) {
6894                 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
6895                         break;
6896
6897                 if (s[count] == ':') {
6898                         if (tmp >= MAX_NUMNODES)
6899                                 break;
6900                         nid = array_index_nospec(tmp, MAX_NUMNODES);
6901
6902                         s += count + 1;
6903                         tmp = memparse(s, &s);
6904                         hugetlb_cma_size_in_node[nid] = tmp;
6905                         hugetlb_cma_size += tmp;
6906
6907                         /*
6908                          * Skip the separator if we have one, otherwise
6909                          * stop parsing.
6910                          */
6911                         if (*s == ',')
6912                                 s++;
6913                         else
6914                                 break;
6915                 } else {
6916                         hugetlb_cma_size = memparse(p, &p);
6917                         break;
6918                 }
6919         }
6920
6921         return 0;
6922 }
6923
6924 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
6925
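/*
 * Reserve the CMA areas requested via "hugetlb_cma=".  @order is the
 * gigantic page order; areas smaller than one such page are rejected,
 * and a global request is spread evenly across the online nodes unless
 * per-node sizes were given.
 */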
6926 void __init hugetlb_cma_reserve(int order)
6927 {
6928         unsigned long size, reserved, per_node;
6929         bool node_specific_cma_alloc = false;
6930         int nid;
6931
6932         cma_reserve_called = true;
6933
6934         if (!hugetlb_cma_size)
6935                 return;
6936
6937         for (nid = 0; nid < MAX_NUMNODES; nid++) {
6938                 if (hugetlb_cma_size_in_node[nid] == 0)
6939                         continue;
6940
6941                 if (!node_state(nid, N_ONLINE)) {
6942                         pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
6943                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
6944                         hugetlb_cma_size_in_node[nid] = 0;
6945                         continue;
6946                 }
6947
6948                 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
6949                         pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
6950                                 nid, (PAGE_SIZE << order) / SZ_1M);
6951                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
6952                         hugetlb_cma_size_in_node[nid] = 0;
6953                 } else {
6954                         node_specific_cma_alloc = true;
6955                 }
6956         }
6957
6958         /* Validate the CMA size again in case invalid nodes were specified. */
6959         if (!hugetlb_cma_size)
6960                 return;
6961
6962         if (hugetlb_cma_size < (PAGE_SIZE << order)) {
6963                 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
6964                         (PAGE_SIZE << order) / SZ_1M);
6965                 hugetlb_cma_size = 0;
6966                 return;
6967         }
6968
6969         if (!node_specific_cma_alloc) {
6970                 /*
6971                  * If a 3 GB area is requested on a machine with 4 NUMA nodes,
6972                  * allocate 1 GB on the first three nodes and ignore the last one.
6973                  */
6974                 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
6975                 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
6976                         hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
6977         }
6978
6979         reserved = 0;
6980         for_each_node_state(nid, N_ONLINE) {
6981                 int res;
6982                 char name[CMA_MAX_NAME];
6983
6984                 if (node_specific_cma_alloc) {
6985                         if (hugetlb_cma_size_in_node[nid] == 0)
6986                                 continue;
6987
6988                         size = hugetlb_cma_size_in_node[nid];
6989                 } else {
6990                         size = min(per_node, hugetlb_cma_size - reserved);
6991                 }
6992
6993                 size = round_up(size, PAGE_SIZE << order);
6994
6995                 snprintf(name, sizeof(name), "hugetlb%d", nid);
6996                 /*
6997                  * Note that the 'order per bit' is based on the smallest
6998                  * size that may be returned to the CMA allocator in the
6999                  * case of huge page demotion.
7000                  */
7001                 res = cma_declare_contiguous_nid(0, size, 0,
7002                                                 PAGE_SIZE << HUGETLB_PAGE_ORDER,
7003                                                  0, false, name,
7004                                                  &hugetlb_cma[nid], nid);
7005                 if (res) {
7006                         pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
7007                                 res, nid);
7008                         continue;
7009                 }
7010
7011                 reserved += size;
7012                 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7013                         size / SZ_1M, nid);
7014
7015                 if (reserved >= hugetlb_cma_size)
7016                         break;
7017         }
7018
7019         if (!reserved)
7020                 /*
7021                  * hugetlb_cma_size is used to determine if allocations from
7022                  * cma are possible.  Set to zero if no cma regions are set up.
7023                  */
7024                 hugetlb_cma_size = 0;
7025 }
7026
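/*
 * Warn if "hugetlb_cma=" was specified but hugetlb_cma_reserve() was
 * never called, i.e. the architecture does not support hugetlb CMA
 * reservations.
 */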
7027 void __init hugetlb_cma_check(void)
7028 {
7029         if (!hugetlb_cma_size || cma_reserve_called)
7030                 return;
7031
7032         pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7033 }
7034
7035 #endif /* CONFIG_CMA */