hugetlbfs: clear resv_map pointer if mmap fails
mm/hugetlb.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35 #include <linux/delayacct.h>
36 #include <linux/memory.h>
37 #include <linux/mm_inline.h>
38
39 #include <asm/page.h>
40 #include <asm/pgalloc.h>
41 #include <asm/tlb.h>
42
43 #include <linux/io.h>
44 #include <linux/hugetlb.h>
45 #include <linux/hugetlb_cgroup.h>
46 #include <linux/node.h>
47 #include <linux/page_owner.h>
48 #include "internal.h"
49 #include "hugetlb_vmemmap.h"
50
51 int hugetlb_max_hstate __read_mostly;
52 unsigned int default_hstate_idx;
53 struct hstate hstates[HUGE_MAX_HSTATE];
54
55 #ifdef CONFIG_CMA
56 static struct cma *hugetlb_cma[MAX_NUMNODES];
57 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
58 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
59 {
60         return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
61                                 1 << order);
62 }
63 #else
64 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
65 {
66         return false;
67 }
68 #endif
69 static unsigned long hugetlb_cma_size __initdata;
70
71 __initdata LIST_HEAD(huge_boot_pages);
72
73 /* for command line parsing */
74 static struct hstate * __initdata parsed_hstate;
75 static unsigned long __initdata default_hstate_max_huge_pages;
76 static bool __initdata parsed_valid_hugepagesz = true;
77 static bool __initdata parsed_default_hugepagesz;
78 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
79
80 /*
81  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
82  * free_huge_pages, and surplus_huge_pages.
83  */
84 DEFINE_SPINLOCK(hugetlb_lock);
85
86 /*
87  * Serializes faults on the same logical page.  This is used to
88  * prevent spurious OOMs when the hugepage pool is fully utilized.
89  */
90 static int num_fault_mutexes;
91 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
92
93 /* Forward declaration */
94 static int hugetlb_acct_memory(struct hstate *h, long delta);
95 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
96 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
97 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
98 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
99                 unsigned long start, unsigned long end);
100
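/*
 * A subpool may be released once it has no remaining users and, depending
 * on its limits, no pages in use (max limit) and its full minimum
 * reservation available again (min limit).
 */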
101 static inline bool subpool_is_free(struct hugepage_subpool *spool)
102 {
103         if (spool->count)
104                 return false;
105         if (spool->max_hpages != -1)
106                 return spool->used_hpages == 0;
107         if (spool->min_hpages != -1)
108                 return spool->rsv_hpages == spool->min_hpages;
109
110         return true;
111 }
112
113 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
114                                                 unsigned long irq_flags)
115 {
116         spin_unlock_irqrestore(&spool->lock, irq_flags);
117
118         /* If no pages are used, and no other handles to the subpool
119          * remain, give up any reservations based on minimum size and
120          * free the subpool */
121         if (subpool_is_free(spool)) {
122                 if (spool->min_hpages != -1)
123                         hugetlb_acct_memory(spool->hstate,
124                                                 -spool->min_hpages);
125                 kfree(spool);
126         }
127 }
128
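/*
 * Allocate a new subpool with the given maximum and minimum size limits,
 * expressed in huge pages.  A minimum size is charged against the global
 * pool up front; returns NULL on allocation failure or if the minimum
 * reservation cannot be satisfied.
 */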
129 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
130                                                 long min_hpages)
131 {
132         struct hugepage_subpool *spool;
133
134         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
135         if (!spool)
136                 return NULL;
137
138         spin_lock_init(&spool->lock);
139         spool->count = 1;
140         spool->max_hpages = max_hpages;
141         spool->hstate = h;
142         spool->min_hpages = min_hpages;
143
144         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
145                 kfree(spool);
146                 return NULL;
147         }
148         spool->rsv_hpages = min_hpages;
149
150         return spool;
151 }
152
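/*
 * Drop a reference on the subpool.  Once no users remain and no pages are
 * outstanding, any minimum size reservation is returned to the global pool
 * and the subpool is freed (see unlock_or_release_subpool()).
 */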
153 void hugepage_put_subpool(struct hugepage_subpool *spool)
154 {
155         unsigned long flags;
156
157         spin_lock_irqsave(&spool->lock, flags);
158         BUG_ON(!spool->count);
159         spool->count--;
160         unlock_or_release_subpool(spool, flags);
161 }
162
163 /*
164  * Subpool accounting for allocating and reserving pages.
165  * Return -ENOMEM if there are not enough resources to satisfy the
166  * request.  Otherwise, return the number of pages by which the
167  * global pools must be adjusted (upward).  The returned value may
168  * only be different than the passed value (delta) in the case where
169  * a subpool minimum size must be maintained.
170  */
171 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
172                                       long delta)
173 {
174         long ret = delta;
175
176         if (!spool)
177                 return ret;
178
179         spin_lock_irq(&spool->lock);
180
181         if (spool->max_hpages != -1) {          /* maximum size accounting */
182                 if ((spool->used_hpages + delta) <= spool->max_hpages)
183                         spool->used_hpages += delta;
184                 else {
185                         ret = -ENOMEM;
186                         goto unlock_ret;
187                 }
188         }
189
190         /* minimum size accounting */
191         if (spool->min_hpages != -1 && spool->rsv_hpages) {
192                 if (delta > spool->rsv_hpages) {
193                         /*
194                          * Asking for more reserves than those already taken on
195                          * behalf of subpool.  Return difference.
196                          */
197                         ret = delta - spool->rsv_hpages;
198                         spool->rsv_hpages = 0;
199                 } else {
200                         ret = 0;        /* reserves already accounted for */
201                         spool->rsv_hpages -= delta;
202                 }
203         }
204
205 unlock_ret:
206         spin_unlock_irq(&spool->lock);
207         return ret;
208 }
209
210 /*
211  * Subpool accounting for freeing and unreserving pages.
212  * Return the number of global page reservations that must be dropped.
213  * The return value may only be different than the passed value (delta)
214  * in the case where a subpool minimum size must be maintained.
215  */
216 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
217                                        long delta)
218 {
219         long ret = delta;
220         unsigned long flags;
221
222         if (!spool)
223                 return delta;
224
225         spin_lock_irqsave(&spool->lock, flags);
226
227         if (spool->max_hpages != -1)            /* maximum size accounting */
228                 spool->used_hpages -= delta;
229
230          /* minimum size accounting */
231         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
232                 if (spool->rsv_hpages + delta <= spool->min_hpages)
233                         ret = 0;
234                 else
235                         ret = spool->rsv_hpages + delta - spool->min_hpages;
236
237                 spool->rsv_hpages += delta;
238                 if (spool->rsv_hpages > spool->min_hpages)
239                         spool->rsv_hpages = spool->min_hpages;
240         }
241
242         /*
243          * If hugetlbfs_put_super couldn't free spool due to an outstanding
244          * quota reference, free it now.
245          */
246         unlock_or_release_subpool(spool, flags);
247
248         return ret;
249 }
250
251 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
252 {
253         return HUGETLBFS_SB(inode->i_sb)->spool;
254 }
255
256 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
257 {
258         return subpool_inode(file_inode(vma->vm_file));
259 }
260
261 /*
262  * hugetlb vma_lock helper routines
263  */
264 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
265 {
266         if (__vma_shareable_lock(vma)) {
267                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
268
269                 down_read(&vma_lock->rw_sema);
270         }
271 }
272
273 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
274 {
275         if (__vma_shareable_lock(vma)) {
276                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
277
278                 up_read(&vma_lock->rw_sema);
279         }
280 }
281
282 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
283 {
284         if (__vma_shareable_lock(vma)) {
285                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
286
287                 down_write(&vma_lock->rw_sema);
288         }
289 }
290
291 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
292 {
293         if (__vma_shareable_lock(vma)) {
294                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
295
296                 up_write(&vma_lock->rw_sema);
297         }
298 }
299
300 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
301 {
302         struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
303
304         if (!__vma_shareable_lock(vma))
305                 return 1;
306
307         return down_write_trylock(&vma_lock->rw_sema);
308 }
309
310 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
311 {
312         if (__vma_shareable_lock(vma)) {
313                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
314
315                 lockdep_assert_held(&vma_lock->rw_sema);
316         }
317 }
318
319 void hugetlb_vma_lock_release(struct kref *kref)
320 {
321         struct hugetlb_vma_lock *vma_lock = container_of(kref,
322                         struct hugetlb_vma_lock, refs);
323
324         kfree(vma_lock);
325 }
326
327 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
328 {
329         struct vm_area_struct *vma = vma_lock->vma;
330
331         /*
332          * vma_lock structure may or may not be released as a result of put,
333          * it certainly will no longer be attached to vma so clear pointer.
334          * Semaphore synchronizes access to vma_lock->vma field.
335          */
336         vma_lock->vma = NULL;
337         vma->vm_private_data = NULL;
338         up_write(&vma_lock->rw_sema);
339         kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
340 }
341
342 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
343 {
344         if (__vma_shareable_lock(vma)) {
345                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
346
347                 __hugetlb_vma_unlock_write_put(vma_lock);
348         }
349 }
350
351 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
352 {
353         /*
354          * Only present in sharable vmas.
355          */
356         if (!vma || !__vma_shareable_lock(vma))
357                 return;
358
359         if (vma->vm_private_data) {
360                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
361
362                 down_write(&vma_lock->rw_sema);
363                 __hugetlb_vma_unlock_write_put(vma_lock);
364         }
365 }
366
367 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
368 {
369         struct hugetlb_vma_lock *vma_lock;
370
371         /* Only establish in shareable (VM_MAYSHARE) vmas */
372         if (!vma || !(vma->vm_flags & VM_MAYSHARE))
373                 return;
374
375         /* Should never get here with non-NULL vm_private_data */
376         if (vma->vm_private_data)
377                 return;
378
379         vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
380         if (!vma_lock) {
381                 /*
382                  * If we cannot allocate the structure, then the vma cannot
383                  * participate in pmd sharing.  This is only a possible
384                  * performance enhancement and memory saving issue.
385                  * However, the lock is also used to synchronize page
386                  * faults with truncation.  If the lock is not present,
387                  * unlikely races could leave pages in a file past i_size
388                  * until the file is removed.  Warn in the unlikely case of
389                  * allocation failure.
390                  */
391                 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
392                 return;
393         }
394
395         kref_init(&vma_lock->refs);
396         init_rwsem(&vma_lock->rw_sema);
397         vma_lock->vma = vma;
398         vma->vm_private_data = vma_lock;
399 }
400
401 /* Helper that removes a struct file_region from the resv_map cache and returns
402  * it for use.
403  */
404 static struct file_region *
405 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
406 {
407         struct file_region *nrg;
408
409         VM_BUG_ON(resv->region_cache_count <= 0);
410
411         resv->region_cache_count--;
412         nrg = list_first_entry(&resv->region_cache, struct file_region, link);
413         list_del(&nrg->link);
414
415         nrg->from = from;
416         nrg->to = to;
417
418         return nrg;
419 }
420
421 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
422                                               struct file_region *rg)
423 {
424 #ifdef CONFIG_CGROUP_HUGETLB
425         nrg->reservation_counter = rg->reservation_counter;
426         nrg->css = rg->css;
427         if (rg->css)
428                 css_get(rg->css);
429 #endif
430 }
431
432 /* Helper that records hugetlb_cgroup uncharge info. */
433 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
434                                                 struct hstate *h,
435                                                 struct resv_map *resv,
436                                                 struct file_region *nrg)
437 {
438 #ifdef CONFIG_CGROUP_HUGETLB
439         if (h_cg) {
440                 nrg->reservation_counter =
441                         &h_cg->rsvd_hugepage[hstate_index(h)];
442                 nrg->css = &h_cg->css;
443                 /*
444                  * The caller will hold exactly one h_cg->css reference for the
445                  * whole contiguous reservation region. But this area might be
446                  * scattered when there are already some file_regions reside in
447                  * scattered when some file_regions already reside in it.  As a
448                  * result, many file_regions may share only one css
449                  * exactly one h_cg->css reference, we should do css_get for
450                  * each file_region and leave the reference held by caller
451                  * untouched.
452                  */
453                 css_get(&h_cg->css);
454                 if (!resv->pages_per_hpage)
455                         resv->pages_per_hpage = pages_per_huge_page(h);
456                 /* pages_per_hpage should be the same for all entries in
457                  * a resv_map.
458                  */
459                 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
460         } else {
461                 nrg->reservation_counter = NULL;
462                 nrg->css = NULL;
463         }
464 #endif
465 }
466
467 static void put_uncharge_info(struct file_region *rg)
468 {
469 #ifdef CONFIG_CGROUP_HUGETLB
470         if (rg->css)
471                 css_put(rg->css);
472 #endif
473 }
474
475 static bool has_same_uncharge_info(struct file_region *rg,
476                                    struct file_region *org)
477 {
478 #ifdef CONFIG_CGROUP_HUGETLB
479         return rg->reservation_counter == org->reservation_counter &&
480                rg->css == org->css;
481
482 #else
483         return true;
484 #endif
485 }
486
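/*
 * Merge the given file_region with its neighbours in the reserve map when
 * the ranges are contiguous and the cgroup uncharge information matches.
 */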
487 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
488 {
489         struct file_region *nrg, *prg;
490
491         prg = list_prev_entry(rg, link);
492         if (&prg->link != &resv->regions && prg->to == rg->from &&
493             has_same_uncharge_info(prg, rg)) {
494                 prg->to = rg->to;
495
496                 list_del(&rg->link);
497                 put_uncharge_info(rg);
498                 kfree(rg);
499
500                 rg = prg;
501         }
502
503         nrg = list_next_entry(rg, link);
504         if (&nrg->link != &resv->regions && nrg->from == rg->to &&
505             has_same_uncharge_info(nrg, rg)) {
506                 nrg->from = rg->from;
507
508                 list_del(&rg->link);
509                 put_uncharge_info(rg);
510                 kfree(rg);
511         }
512 }
513
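/*
 * Add a file_region covering [from, to) at position @rg in the reserve map,
 * using a pre-allocated entry from the resv_map cache.  If @regions_needed
 * is non-NULL, only count the entry instead of inserting it.  Returns the
 * number of pages covered.
 */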
514 static inline long
515 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
516                      long to, struct hstate *h, struct hugetlb_cgroup *cg,
517                      long *regions_needed)
518 {
519         struct file_region *nrg;
520
521         if (!regions_needed) {
522                 nrg = get_file_region_entry_from_cache(map, from, to);
523                 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
524                 list_add(&nrg->link, rg);
525                 coalesce_file_region(map, nrg);
526         } else
527                 *regions_needed += 1;
528
529         return to - from;
530 }
531
532 /*
533  * Must be called with resv->lock held.
534  *
535  * Calling this with regions_needed != NULL will count the number of pages
536  * to be added but will not modify the linked list; regions_needed will
537  * indicate the number of file_regions needed in the cache to add the
538  * regions for this range.
539  */
540 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
541                                      struct hugetlb_cgroup *h_cg,
542                                      struct hstate *h, long *regions_needed)
543 {
544         long add = 0;
545         struct list_head *head = &resv->regions;
546         long last_accounted_offset = f;
547         struct file_region *iter, *trg = NULL;
548         struct list_head *rg = NULL;
549
550         if (regions_needed)
551                 *regions_needed = 0;
552
553         /* In this loop, we essentially handle an entry for the range
554          * [last_accounted_offset, iter->from), at every iteration, with some
555          * bounds checking.
556          */
557         list_for_each_entry_safe(iter, trg, head, link) {
558                 /* Skip irrelevant regions that start before our range. */
559                 if (iter->from < f) {
560                         /* If this region ends after the last accounted offset,
561                          * then we need to update last_accounted_offset.
562                          */
563                         if (iter->to > last_accounted_offset)
564                                 last_accounted_offset = iter->to;
565                         continue;
566                 }
567
568                 /* When we find a region that starts beyond our range, we've
569                  * finished.
570                  */
571                 if (iter->from >= t) {
572                         rg = iter->link.prev;
573                         break;
574                 }
575
576                 /* Add an entry for last_accounted_offset -> iter->from, and
577                  * update last_accounted_offset.
578                  */
579                 if (iter->from > last_accounted_offset)
580                         add += hugetlb_resv_map_add(resv, iter->link.prev,
581                                                     last_accounted_offset,
582                                                     iter->from, h, h_cg,
583                                                     regions_needed);
584
585                 last_accounted_offset = iter->to;
586         }
587
588         /* Handle the case where our range extends beyond
589          * last_accounted_offset.
590          */
591         if (!rg)
592                 rg = head->prev;
593         if (last_accounted_offset < t)
594                 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
595                                             t, h, h_cg, regions_needed);
596
597         return add;
598 }
599
600 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
601  */
602 static int allocate_file_region_entries(struct resv_map *resv,
603                                         int regions_needed)
604         __must_hold(&resv->lock)
605 {
606         LIST_HEAD(allocated_regions);
607         int to_allocate = 0, i = 0;
608         struct file_region *trg = NULL, *rg = NULL;
609
610         VM_BUG_ON(regions_needed < 0);
611
612         /*
613          * Check for sufficient descriptors in the cache to accommodate
614          * the number of in progress add operations plus regions_needed.
615          *
616          * This is a while loop because when we drop the lock, some other call
617          * to region_add or region_del may have consumed some region_entries,
618          * so we keep looping here until we finally have enough entries for
619          * (adds_in_progress + regions_needed).
620          */
621         while (resv->region_cache_count <
622                (resv->adds_in_progress + regions_needed)) {
623                 to_allocate = resv->adds_in_progress + regions_needed -
624                               resv->region_cache_count;
625
626                 /* At this point, we should have enough entries in the cache
627                  * for all the existing adds_in_progress. We should only be
628                  * needing to allocate for regions_needed.
629                  */
630                 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
631
632                 spin_unlock(&resv->lock);
633                 for (i = 0; i < to_allocate; i++) {
634                         trg = kmalloc(sizeof(*trg), GFP_KERNEL);
635                         if (!trg)
636                                 goto out_of_memory;
637                         list_add(&trg->link, &allocated_regions);
638                 }
639
640                 spin_lock(&resv->lock);
641
642                 list_splice(&allocated_regions, &resv->region_cache);
643                 resv->region_cache_count += to_allocate;
644         }
645
646         return 0;
647
648 out_of_memory:
649         list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
650                 list_del(&rg->link);
651                 kfree(rg);
652         }
653         return -ENOMEM;
654 }
655
656 /*
657  * Add the huge page range represented by [f, t) to the reserve
658  * map.  Regions will be taken from the cache to fill in this range.
659  * Sufficient regions should exist in the cache due to the previous
660  * call to region_chg with the same range, but in some cases the cache will not
661  * have sufficient entries due to races with other code doing region_add or
662  * region_del.  The extra needed entries will be allocated.
663  *
664  * regions_needed is the out value provided by a previous call to region_chg.
665  *
666  * Return the number of new huge pages added to the map.  This number is greater
667  * than or equal to zero.  If file_region entries needed to be allocated for
668  * this operation and we were not able to allocate, it returns -ENOMEM.
669  * region_add of regions of length 1 never allocates file_regions and cannot
670  * fail; region_chg will always allocate at least 1 entry and a region_add for
671  * 1 page will only require at most 1 entry.
672  */
673 static long region_add(struct resv_map *resv, long f, long t,
674                        long in_regions_needed, struct hstate *h,
675                        struct hugetlb_cgroup *h_cg)
676 {
677         long add = 0, actual_regions_needed = 0;
678
679         spin_lock(&resv->lock);
680 retry:
681
682         /* Count how many regions are actually needed to execute this add. */
683         add_reservation_in_range(resv, f, t, NULL, NULL,
684                                  &actual_regions_needed);
685
686         /*
687          * Check for sufficient descriptors in the cache to accommodate
688          * this add operation. Note that actual_regions_needed may be greater
689          * than in_regions_needed, as the resv_map may have been modified since
690          * the region_chg call. In this case, we need to make sure that we
691          * allocate extra entries, such that we have enough for all the
692          * existing adds_in_progress, plus the excess needed for this
693          * operation.
694          */
695         if (actual_regions_needed > in_regions_needed &&
696             resv->region_cache_count <
697                     resv->adds_in_progress +
698                             (actual_regions_needed - in_regions_needed)) {
699                 /* region_add operation of range 1 should never need to
700                  * allocate file_region entries.
701                  */
702                 VM_BUG_ON(t - f <= 1);
703
704                 if (allocate_file_region_entries(
705                             resv, actual_regions_needed - in_regions_needed)) {
706                         return -ENOMEM;
707                 }
708
709                 goto retry;
710         }
711
712         add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
713
714         resv->adds_in_progress -= in_regions_needed;
715
716         spin_unlock(&resv->lock);
717         return add;
718 }
719
720 /*
721  * Examine the existing reserve map and determine how many
722  * huge pages in the specified range [f, t) are NOT currently
723  * represented.  This routine is called before a subsequent
724  * call to region_add that will actually modify the reserve
725  * map to add the specified range [f, t).  region_chg does
726  * not change the number of huge pages represented by the
727  * map.  A number of new file_region structures are added to the cache as
728  * placeholders for the subsequent region_add call to use.  At least 1
729  * file_region structure is added.
730  *
731  * out_regions_needed is the number of regions added to the
732  * resv->adds_in_progress.  This value needs to be provided to a follow up call
733  * to region_add or region_abort for proper accounting.
734  *
735  * Returns the number of huge pages that need to be added to the existing
736  * reservation map for the range [f, t).  This number is greater than or
737  * equal to zero.  -ENOMEM is returned if a new file_region structure or
738  * cache entry is needed and cannot be allocated.
739  */
740 static long region_chg(struct resv_map *resv, long f, long t,
741                        long *out_regions_needed)
742 {
743         long chg = 0;
744
745         spin_lock(&resv->lock);
746
747         /* Count how many hugepages in this range are NOT represented. */
748         chg = add_reservation_in_range(resv, f, t, NULL, NULL,
749                                        out_regions_needed);
750
751         if (*out_regions_needed == 0)
752                 *out_regions_needed = 1;
753
754         if (allocate_file_region_entries(resv, *out_regions_needed))
755                 return -ENOMEM;
756
757         resv->adds_in_progress += *out_regions_needed;
758
759         spin_unlock(&resv->lock);
760         return chg;
761 }
762
763 /*
764  * Abort the in progress add operation.  The adds_in_progress field
765  * of the resv_map keeps track of the operations in progress between
766  * calls to region_chg and region_add.  Operations are sometimes
767  * aborted after the call to region_chg.  In such cases, region_abort
768  * is called to decrement the adds_in_progress counter. regions_needed
769  * is the value returned by the region_chg call; it is used to decrement
770  * the adds_in_progress counter.
771  *
772  * NOTE: The range arguments [f, t) are not needed or used in this
773  * routine.  They are kept to make reading the calling code easier as
774  * arguments will match the associated region_chg call.
775  */
776 static void region_abort(struct resv_map *resv, long f, long t,
777                          long regions_needed)
778 {
779         spin_lock(&resv->lock);
780         VM_BUG_ON(!resv->region_cache_count);
781         resv->adds_in_progress -= regions_needed;
782         spin_unlock(&resv->lock);
783 }
784
785 /*
786  * Delete the specified range [f, t) from the reserve map.  If the
787  * t parameter is LONG_MAX, this indicates that ALL regions after f
788  * should be deleted.  Locate the regions which intersect [f, t)
789  * and either trim, delete or split the existing regions.
790  *
791  * Returns the number of huge pages deleted from the reserve map.
792  * In the normal case, the return value is zero or more.  In the
793  * case where a region must be split, a new region descriptor must
794  * be allocated.  If the allocation fails, -ENOMEM will be returned.
795  * NOTE: If the parameter t == LONG_MAX, then we will never split
796  * a region and possibly return -ENOMEM.  Callers specifying
797  * t == LONG_MAX do not need to check for -ENOMEM error.
798  */
799 static long region_del(struct resv_map *resv, long f, long t)
800 {
801         struct list_head *head = &resv->regions;
802         struct file_region *rg, *trg;
803         struct file_region *nrg = NULL;
804         long del = 0;
805
806 retry:
807         spin_lock(&resv->lock);
808         list_for_each_entry_safe(rg, trg, head, link) {
809                 /*
810                  * Skip regions before the range to be deleted.  file_region
811                  * ranges are normally of the form [from, to).  However, there
812                  * may be a "placeholder" entry in the map which is of the form
813                  * (from, to) with from == to.  Check for placeholder entries
814                  * at the beginning of the range to be deleted.
815                  */
816                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
817                         continue;
818
819                 if (rg->from >= t)
820                         break;
821
822                 if (f > rg->from && t < rg->to) { /* Must split region */
823                         /*
824                          * Check for an entry in the cache before dropping
825                          * lock and attempting allocation.
826                          */
827                         if (!nrg &&
828                             resv->region_cache_count > resv->adds_in_progress) {
829                                 nrg = list_first_entry(&resv->region_cache,
830                                                         struct file_region,
831                                                         link);
832                                 list_del(&nrg->link);
833                                 resv->region_cache_count--;
834                         }
835
836                         if (!nrg) {
837                                 spin_unlock(&resv->lock);
838                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
839                                 if (!nrg)
840                                         return -ENOMEM;
841                                 goto retry;
842                         }
843
844                         del += t - f;
845                         hugetlb_cgroup_uncharge_file_region(
846                                 resv, rg, t - f, false);
847
848                         /* New entry for end of split region */
849                         nrg->from = t;
850                         nrg->to = rg->to;
851
852                         copy_hugetlb_cgroup_uncharge_info(nrg, rg);
853
854                         INIT_LIST_HEAD(&nrg->link);
855
856                         /* Original entry is trimmed */
857                         rg->to = f;
858
859                         list_add(&nrg->link, &rg->link);
860                         nrg = NULL;
861                         break;
862                 }
863
864                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
865                         del += rg->to - rg->from;
866                         hugetlb_cgroup_uncharge_file_region(resv, rg,
867                                                             rg->to - rg->from, true);
868                         list_del(&rg->link);
869                         kfree(rg);
870                         continue;
871                 }
872
873                 if (f <= rg->from) {    /* Trim beginning of region */
874                         hugetlb_cgroup_uncharge_file_region(resv, rg,
875                                                             t - rg->from, false);
876
877                         del += t - rg->from;
878                         rg->from = t;
879                 } else {                /* Trim end of region */
880                         hugetlb_cgroup_uncharge_file_region(resv, rg,
881                                                             rg->to - f, false);
882
883                         del += rg->to - f;
884                         rg->to = f;
885                 }
886         }
887
888         spin_unlock(&resv->lock);
889         kfree(nrg);
890         return del;
891 }
892
893 /*
894  * A rare out-of-memory error was encountered which prevented removal of
895  * the reserve map region for a page.  The huge page itself was freed
896  * and removed from the page cache.  This routine will adjust the subpool
897  * usage count, and the global reserve count if needed.  By incrementing
898  * these counts, the reserve map entry which could not be deleted will
899  * appear as a "reserved" entry instead of simply dangling with incorrect
900  * counts.
901  */
902 void hugetlb_fix_reserve_counts(struct inode *inode)
903 {
904         struct hugepage_subpool *spool = subpool_inode(inode);
905         long rsv_adjust;
906         bool reserved = false;
907
908         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
909         if (rsv_adjust > 0) {
910                 struct hstate *h = hstate_inode(inode);
911
912                 if (!hugetlb_acct_memory(h, 1))
913                         reserved = true;
914         } else if (!rsv_adjust) {
915                 reserved = true;
916         }
917
918         if (!reserved)
919                 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
920 }
921
922 /*
923  * Count and return the number of huge pages in the reserve map
924  * that intersect with the range [f, t).
925  */
926 static long region_count(struct resv_map *resv, long f, long t)
927 {
928         struct list_head *head = &resv->regions;
929         struct file_region *rg;
930         long chg = 0;
931
932         spin_lock(&resv->lock);
933         /* Locate each segment we overlap with, and count that overlap. */
934         list_for_each_entry(rg, head, link) {
935                 long seg_from;
936                 long seg_to;
937
938                 if (rg->to <= f)
939                         continue;
940                 if (rg->from >= t)
941                         break;
942
943                 seg_from = max(rg->from, f);
944                 seg_to = min(rg->to, t);
945
946                 chg += seg_to - seg_from;
947         }
948         spin_unlock(&resv->lock);
949
950         return chg;
951 }
952
953 /*
954  * Convert the address within this vma to the page offset within
955  * the mapping, in pagecache page units; huge pages here.
956  */
957 static pgoff_t vma_hugecache_offset(struct hstate *h,
958                         struct vm_area_struct *vma, unsigned long address)
959 {
960         return ((address - vma->vm_start) >> huge_page_shift(h)) +
961                         (vma->vm_pgoff >> huge_page_order(h));
962 }
963
964 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
965                                      unsigned long address)
966 {
967         return vma_hugecache_offset(hstate_vma(vma), vma, address);
968 }
969 EXPORT_SYMBOL_GPL(linear_hugepage_index);
970
971 /**
972  * vma_kernel_pagesize - Page size granularity for this VMA.
973  * @vma: The user mapping.
974  *
975  * Folios in this VMA will be aligned to, and at least the size of, the
976  * number of bytes returned by this function.
977  *
978  * Return: The default size of the folios allocated when backing a VMA.
979  */
980 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
981 {
982         if (vma->vm_ops && vma->vm_ops->pagesize)
983                 return vma->vm_ops->pagesize(vma);
984         return PAGE_SIZE;
985 }
986 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
987
988 /*
989  * Return the page size being used by the MMU to back a VMA. In the majority
990  * of cases, the page size used by the kernel matches the MMU size. On
991  * architectures where it differs, an architecture-specific 'strong'
992  * version of this symbol is required.
993  */
994 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
995 {
996         return vma_kernel_pagesize(vma);
997 }
998
999 /*
1000  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
1001  * bits of the reservation map pointer, which are always clear due to
1002  * alignment.
1003  */
1004 #define HPAGE_RESV_OWNER    (1UL << 0)
1005 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1006 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1007
1008 /*
1009  * These helpers are used to track how many pages are reserved for
1010  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1011  * is guaranteed to have its future faults succeed.
1012  *
1013  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1014  * the reserve counters are updated with the hugetlb_lock held. It is safe
1015  * to reset the VMA at fork() time as it is not in use yet and there is no
1016  * chance of the global counters getting corrupted as a result.
1017  *
1018  * The private mapping reservation is represented in a subtly different
1019  * manner to a shared mapping.  A shared mapping has a region map associated
1020  * with the underlying file; this region map represents the backing file
1021  * pages which have ever had a reservation assigned, and it persists even
1022  * after the page is instantiated.  A private mapping has a region map
1023  * associated with the original mmap which is attached to all VMAs that
1024  * reference it; this region map represents those offsets which have consumed
1025  * reservation, i.e. where pages have been instantiated.
1026  */
1027 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1028 {
1029         return (unsigned long)vma->vm_private_data;
1030 }
1031
1032 static void set_vma_private_data(struct vm_area_struct *vma,
1033                                                         unsigned long value)
1034 {
1035         vma->vm_private_data = (void *)value;
1036 }
1037
1038 static void
1039 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1040                                           struct hugetlb_cgroup *h_cg,
1041                                           struct hstate *h)
1042 {
1043 #ifdef CONFIG_CGROUP_HUGETLB
1044         if (!h_cg || !h) {
1045                 resv_map->reservation_counter = NULL;
1046                 resv_map->pages_per_hpage = 0;
1047                 resv_map->css = NULL;
1048         } else {
1049                 resv_map->reservation_counter =
1050                         &h_cg->rsvd_hugepage[hstate_index(h)];
1051                 resv_map->pages_per_hpage = pages_per_huge_page(h);
1052                 resv_map->css = &h_cg->css;
1053         }
1054 #endif
1055 }
1056
1057 struct resv_map *resv_map_alloc(void)
1058 {
1059         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1060         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1061
1062         if (!resv_map || !rg) {
1063                 kfree(resv_map);
1064                 kfree(rg);
1065                 return NULL;
1066         }
1067
1068         kref_init(&resv_map->refs);
1069         spin_lock_init(&resv_map->lock);
1070         INIT_LIST_HEAD(&resv_map->regions);
1071
1072         resv_map->adds_in_progress = 0;
1073         /*
1074          * Initialize these to 0. On shared mappings, 0's here indicate these
1075          * fields don't do cgroup accounting. On private mappings, these will be
1076          * re-initialized to the proper values, to indicate that hugetlb cgroup
1077          * reservations are to be un-charged from here.
1078          */
1079         resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1080
1081         INIT_LIST_HEAD(&resv_map->region_cache);
1082         list_add(&rg->link, &resv_map->region_cache);
1083         resv_map->region_cache_count = 1;
1084
1085         return resv_map;
1086 }
1087
1088 void resv_map_release(struct kref *ref)
1089 {
1090         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1091         struct list_head *head = &resv_map->region_cache;
1092         struct file_region *rg, *trg;
1093
1094         /* Clear out any active regions before we release the map. */
1095         region_del(resv_map, 0, LONG_MAX);
1096
1097         /* ... and any entries left in the cache */
1098         list_for_each_entry_safe(rg, trg, head, link) {
1099                 list_del(&rg->link);
1100                 kfree(rg);
1101         }
1102
1103         VM_BUG_ON(resv_map->adds_in_progress);
1104
1105         kfree(resv_map);
1106 }
1107
1108 static inline struct resv_map *inode_resv_map(struct inode *inode)
1109 {
1110         /*
1111          * At inode evict time, i_mapping may not point to the original
1112          * address space within the inode.  This original address space
1113          * contains the pointer to the resv_map.  So, always use the
1114          * address space embedded within the inode.
1115  * The VERY common case is inode->mapping == &inode->i_data, but
1116  * this may not be true for device special inodes.
1117          */
1118         return (struct resv_map *)(&inode->i_data)->private_data;
1119 }
1120
1121 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1122 {
1123         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1124         if (vma->vm_flags & VM_MAYSHARE) {
1125                 struct address_space *mapping = vma->vm_file->f_mapping;
1126                 struct inode *inode = mapping->host;
1127
1128                 return inode_resv_map(inode);
1129
1130         } else {
1131                 return (struct resv_map *)(get_vma_private_data(vma) &
1132                                                         ~HPAGE_RESV_MASK);
1133         }
1134 }
1135
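/*
 * The helpers below manipulate the reserve map pointer and the
 * HPAGE_RESV_* flags stored in vm_private_data of MAP_PRIVATE hugetlb vmas.
 */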
1136 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1137 {
1138         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1139         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1140
1141         set_vma_private_data(vma, (unsigned long)map);
1142 }
1143
1144 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1145 {
1146         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1147         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1148
1149         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1150 }
1151
1152 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1153 {
1154         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1155
1156         return (get_vma_private_data(vma) & flag) != 0;
1157 }
1158
1159 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1160 {
1161         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1162         /*
1163          * Clear vm_private_data
1164          * - For shared mappings this is a per-vma semaphore that may be
1165          *   allocated in a subsequent call to hugetlb_vm_op_open.
1166          *   Before clearing, make sure pointer is not associated with vma
1167          *   as this will leak the structure.  This is the case when called
1168          *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1169          *   been called to allocate a new structure.
1170          * - For MAP_PRIVATE mappings, this is the reserve map which does
1171          *   not apply to children.  Faults generated by the children are
1172          *   not guaranteed to succeed, even if read-only.
1173          */
1174         if (vma->vm_flags & VM_MAYSHARE) {
1175                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1176
1177                 if (vma_lock && vma_lock->vma != vma)
1178                         vma->vm_private_data = NULL;
1179         } else
1180                 vma->vm_private_data = NULL;
1181 }
1182
1183 /*
1184  * Reset and decrement one ref on hugepage private reservation.
1185  * Called with mm->mmap_lock writer semaphore held.
1186  * This function should only be used by move_vma() and operates on a
1187  * same-sized vma.  It should never be called with the last ref on the
1188  * reservation.
1189  */
1190 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1191 {
1192         /*
1193          * Clear the old hugetlb private page reservation.
1194          * It has already been transferred to new_vma.
1195          *
1196          * During a mremap() operation of a hugetlb vma we call move_vma()
1197          * which copies vma into new_vma and unmaps vma. After the copy
1198          * operation both new_vma and vma share a reference to the resv_map
1199          * struct, and at that point vma is about to be unmapped. We don't
1200          * want to return the reservation to the pool at unmap of vma because
1201          * the reservation still lives on in new_vma, so simply decrement the
1202          * ref here and remove the resv_map reference from this vma.
1203          */
1204         struct resv_map *reservations = vma_resv_map(vma);
1205
1206         if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1207                 resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1208                 kref_put(&reservations->refs, resv_map_release);
1209         }
1210
1211         hugetlb_dup_vma_private(vma);
1212 }
1213
1214 /* Returns true if the VMA has associated reserve pages */
1215 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1216 {
1217         if (vma->vm_flags & VM_NORESERVE) {
1218                 /*
1219                  * This address is already reserved by another process (chg == 0),
1220                  * so we should decrement the reserved count. Without decrementing,
1221                  * the reserve count remains after releasing the inode, because the
1222                  * allocated page will go into the page cache and is regarded as
1223                  * coming from the reserved pool in the release step.  Currently, we
1224                  * don't have any other solution to deal with this situation
1225                  * properly, so add a work-around here.
1226                  */
1227                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1228                         return true;
1229                 else
1230                         return false;
1231         }
1232
1233         /* Shared mappings always use reserves */
1234         if (vma->vm_flags & VM_MAYSHARE) {
1235                 /*
1236                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1237                  * be a region map for all pages.  The only situation where
1238                  * there is no region map is if a hole was punched via
1239                  * fallocate.  In this case, there really are no reserves to
1240                  * use.  This situation is indicated if chg != 0.
1241                  */
1242                 if (chg)
1243                         return false;
1244                 else
1245                         return true;
1246         }
1247
1248         /*
1249          * Only the process that called mmap() has reserves for
1250          * private mappings.
1251          */
1252         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1253                 /*
1254                  * Like the shared case above, a hole punch or truncate
1255                  * could have been performed on the private mapping.
1256                  * Examine the value of chg to determine if reserves
1257                  * actually exist or were previously consumed.
1258                  * Very Subtle - The value of chg comes from a previous
1259                  * call to vma_needs_reserves().  The reserve map for
1260                  * private mappings has different (opposite) semantics
1261                  * than that of shared mappings.  vma_needs_reserves()
1262                  * has already taken this difference in semantics into
1263                  * account.  Therefore, the meaning of chg is the same
1264                  * as in the shared case above.  Code could easily be
1265                  * combined, but keeping it separate draws attention to
1266                  * subtle differences.
1267                  */
1268                 if (chg)
1269                         return false;
1270                 else
1271                         return true;
1272         }
1273
1274         return false;
1275 }
1276
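/*
 * Place an unused hugetlb folio on its node's free list.  The caller must
 * hold hugetlb_lock and the folio must have a zero reference count.
 */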
1277 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1278 {
1279         int nid = folio_nid(folio);
1280
1281         lockdep_assert_held(&hugetlb_lock);
1282         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1283
1284         list_move(&folio->lru, &h->hugepage_freelists[nid]);
1285         h->free_huge_pages++;
1286         h->free_huge_pages_node[nid]++;
1287         folio_set_hugetlb_freed(folio);
1288 }
1289
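/*
 * Remove a free folio from the given node's free list, skipping hwpoisoned
 * folios and, for long-term pins (PF_MEMALLOC_PIN), folios that are not
 * long-term pinnable.  Returns NULL if nothing suitable is free.
 */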
1290 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1291                                                                 int nid)
1292 {
1293         struct folio *folio;
1294         bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1295
1296         lockdep_assert_held(&hugetlb_lock);
1297         list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1298                 if (pin && !folio_is_longterm_pinnable(folio))
1299                         continue;
1300
1301                 if (folio_test_hwpoison(folio))
1302                         continue;
1303
1304                 list_move(&folio->lru, &h->hugepage_activelist);
1305                 folio_ref_unfreeze(folio, 1);
1306                 folio_clear_hugetlb_freed(folio);
1307                 h->free_huge_pages--;
1308                 h->free_huge_pages_node[nid]--;
1309                 return folio;
1310         }
1311
1312         return NULL;
1313 }
1314
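/*
 * Walk the zonelist for @nid and try to dequeue a free folio from each
 * allowed node in turn, retrying if the cpuset mems_allowed cookie changes
 * underneath us.
 */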
1315 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1316                                                         int nid, nodemask_t *nmask)
1317 {
1318         unsigned int cpuset_mems_cookie;
1319         struct zonelist *zonelist;
1320         struct zone *zone;
1321         struct zoneref *z;
1322         int node = NUMA_NO_NODE;
1323
1324         zonelist = node_zonelist(nid, gfp_mask);
1325
1326 retry_cpuset:
1327         cpuset_mems_cookie = read_mems_allowed_begin();
1328         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1329                 struct folio *folio;
1330
1331                 if (!cpuset_zone_allowed(zone, gfp_mask))
1332                         continue;
1333                 /*
1334                  * No need to ask again on the same node: the pool is node
1335                  * rather than zone aware.
1336                  */
1337                 if (zone_to_nid(zone) == node)
1338                         continue;
1339                 node = zone_to_nid(zone);
1340
1341                 folio = dequeue_hugetlb_folio_node_exact(h, node);
1342                 if (folio)
1343                         return folio;
1344         }
1345         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1346                 goto retry_cpuset;
1347
1348         return NULL;
1349 }
1350
1351 static unsigned long available_huge_pages(struct hstate *h)
1352 {
1353         return h->free_huge_pages - h->resv_huge_pages;
1354 }
1355
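/*
 * Dequeue a free folio for a fault on @vma at @address, honouring the
 * vma's memory policy and consuming a reservation when one exists and
 * @avoid_reserve is not set.
 */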
1356 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1357                                 struct vm_area_struct *vma,
1358                                 unsigned long address, int avoid_reserve,
1359                                 long chg)
1360 {
1361         struct folio *folio = NULL;
1362         struct mempolicy *mpol;
1363         gfp_t gfp_mask;
1364         nodemask_t *nodemask;
1365         int nid;
1366
1367         /*
1368          * A child process with MAP_PRIVATE mappings created by its parent
1369          * has no page reserves. This check ensures that reservations are
1370          * not "stolen".  The child may still get SIGKILLed.
1371          */
1372         if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
1373                 goto err;
1374
1375         /* If reserves cannot be used, ensure enough pages are in the pool */
1376         if (avoid_reserve && !available_huge_pages(h))
1377                 goto err;
1378
1379         gfp_mask = htlb_alloc_mask(h);
1380         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1381
1382         if (mpol_is_preferred_many(mpol)) {
1383                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1384                                                         nid, nodemask);
1385
1386                 /* Fall back to all nodes if folio == NULL */
1387                 nodemask = NULL;
1388         }
1389
1390         if (!folio)
1391                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1392                                                         nid, nodemask);
1393
1394         if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
1395                 folio_set_hugetlb_restore_reserve(folio);
1396                 h->resv_huge_pages--;
1397         }
1398
1399         mpol_cond_put(mpol);
1400         return folio;
1401
1402 err:
1403         return NULL;
1404 }
1405
1406 /*
1407  * common helper functions for hstate_next_node_to_{alloc|free}.
1408  * We may have allocated or freed a huge page based on a different
1409  * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
1410  * be outside of *nodes_allowed.  Ensure that we use an allowed
1411  * node for alloc or free.
1412  */
1413 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1414 {
1415         nid = next_node_in(nid, *nodes_allowed);
1416         VM_BUG_ON(nid >= MAX_NUMNODES);
1417
1418         return nid;
1419 }
1420
1421 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1422 {
1423         if (!node_isset(nid, *nodes_allowed))
1424                 nid = next_node_allowed(nid, nodes_allowed);
1425         return nid;
1426 }
1427
1428 /*
1429  * returns the previously saved node ["this node"] from which to
1430  * allocate a persistent huge page for the pool and advance the
1431  * next node from which to allocate, handling wrap at end of node
1432  * mask.
1433  */
1434 static int hstate_next_node_to_alloc(struct hstate *h,
1435                                         nodemask_t *nodes_allowed)
1436 {
1437         int nid;
1438
1439         VM_BUG_ON(!nodes_allowed);
1440
1441         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1442         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1443
1444         return nid;
1445 }
1446
1447 /*
1448  * helper for remove_pool_huge_page() - return the previously saved
1449  * node ["this node"] from which to free a huge page.  Advance the
1450  * next node id whether or not we find a free huge page to free so
1451  * that the next attempt to free addresses the next node.
1452  */
1453 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1454 {
1455         int nid;
1456
1457         VM_BUG_ON(!nodes_allowed);
1458
1459         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1460         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1461
1462         return nid;
1463 }
1464
1465 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1466         for (nr_nodes = nodes_weight(*mask);                            \
1467                 nr_nodes > 0 &&                                         \
1468                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1469                 nr_nodes--)
1470
1471 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1472         for (nr_nodes = nodes_weight(*mask);                            \
1473                 nr_nodes > 0 &&                                         \
1474                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1475                 nr_nodes--)
1476
1477 /* used to demote non-gigantic huge pages as well */
1478 static void __destroy_compound_gigantic_folio(struct folio *folio,
1479                                         unsigned int order, bool demote)
1480 {
1481         int i;
1482         int nr_pages = 1 << order;
1483         struct page *p;
1484
1485         atomic_set(&folio->_entire_mapcount, 0);
1486         atomic_set(&folio->_nr_pages_mapped, 0);
1487         atomic_set(&folio->_pincount, 0);
1488
1489         for (i = 1; i < nr_pages; i++) {
1490                 p = folio_page(folio, i);
1491                 p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
1492                 p->mapping = NULL;
1493                 clear_compound_head(p);
1494                 if (!demote)
1495                         set_page_refcounted(p);
1496         }
1497
1498         __folio_clear_head(folio);
1499 }
1500
1501 static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
1502                                         unsigned int order)
1503 {
1504         __destroy_compound_gigantic_folio(folio, order, true);
1505 }
1506
1507 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1508 static void destroy_compound_gigantic_folio(struct folio *folio,
1509                                         unsigned int order)
1510 {
1511         __destroy_compound_gigantic_folio(folio, order, false);
1512 }
1513
1514 static void free_gigantic_folio(struct folio *folio, unsigned int order)
1515 {
1516         /*
1517          * If the page isn't allocated using the cma allocator,
1518          * cma_release() returns false.
1519          */
1520 #ifdef CONFIG_CMA
1521         int nid = folio_nid(folio);
1522
1523         if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
1524                 return;
1525 #endif
1526
1527         free_contig_range(folio_pfn(folio), 1 << order);
1528 }
1529
1530 #ifdef CONFIG_CONTIG_ALLOC
1531 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1532                 int nid, nodemask_t *nodemask)
1533 {
1534         struct page *page;
1535         unsigned long nr_pages = pages_per_huge_page(h);
1536         if (nid == NUMA_NO_NODE)
1537                 nid = numa_mem_id();
1538
1539 #ifdef CONFIG_CMA
1540         {
1541                 int node;
1542
1543                 if (hugetlb_cma[nid]) {
1544                         page = cma_alloc(hugetlb_cma[nid], nr_pages,
1545                                         huge_page_order(h), true);
1546                         if (page)
1547                                 return page_folio(page);
1548                 }
1549
1550                 if (!(gfp_mask & __GFP_THISNODE)) {
1551                         for_each_node_mask(node, *nodemask) {
1552                                 if (node == nid || !hugetlb_cma[node])
1553                                         continue;
1554
1555                                 page = cma_alloc(hugetlb_cma[node], nr_pages,
1556                                                 huge_page_order(h), true);
1557                                 if (page)
1558                                         return page_folio(page);
1559                         }
1560                 }
1561         }
1562 #endif
1563
1564         page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1565         return page ? page_folio(page) : NULL;
1566 }
1567
1568 #else /* !CONFIG_CONTIG_ALLOC */
1569 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1570                                         int nid, nodemask_t *nodemask)
1571 {
1572         return NULL;
1573 }
1574 #endif /* CONFIG_CONTIG_ALLOC */
1575
1576 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1577 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1578                                         int nid, nodemask_t *nodemask)
1579 {
1580         return NULL;
1581 }
1582 static inline void free_gigantic_folio(struct folio *folio,
1583                                                 unsigned int order) { }
1584 static inline void destroy_compound_gigantic_folio(struct folio *folio,
1585                                                 unsigned int order) { }
1586 #endif
1587
1588 static inline void __clear_hugetlb_destructor(struct hstate *h,
1589                                                 struct folio *folio)
1590 {
1591         lockdep_assert_held(&hugetlb_lock);
1592
1593         folio_clear_hugetlb(folio);
1594 }
1595
1596 /*
1597  * Remove hugetlb folio from lists.
1598  * If vmemmap exists for the folio, update dtor so that the folio appears
1599  * as just a compound page.  Otherwise, wait until after allocating vmemmap
1600  * to update dtor.
1601  *
1602  * A reference is held on the folio, except in the case of demote.
1603  *
1604  * Must be called with hugetlb lock held.
1605  */
1606 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1607                                                         bool adjust_surplus,
1608                                                         bool demote)
1609 {
1610         int nid = folio_nid(folio);
1611
1612         VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1613         VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1614
1615         lockdep_assert_held(&hugetlb_lock);
1616         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1617                 return;
1618
1619         list_del(&folio->lru);
1620
1621         if (folio_test_hugetlb_freed(folio)) {
1622                 h->free_huge_pages--;
1623                 h->free_huge_pages_node[nid]--;
1624         }
1625         if (adjust_surplus) {
1626                 h->surplus_huge_pages--;
1627                 h->surplus_huge_pages_node[nid]--;
1628         }
1629
1630         /*
1631          * We can only clear the hugetlb destructor after allocating vmemmap
1632          * pages.  Otherwise, someone (memory error handling) may try to write
1633          * to tail struct pages.
1634          */
1635         if (!folio_test_hugetlb_vmemmap_optimized(folio))
1636                 __clear_hugetlb_destructor(h, folio);
1637
1638         /*
1639          * In the case of demote we do not ref count the page as it will soon
1640          * be turned into a page of smaller size.
1641          */
1642         if (!demote)
1643                 folio_ref_unfreeze(folio, 1);
1644
1645         h->nr_huge_pages--;
1646         h->nr_huge_pages_node[nid]--;
1647 }
1648
1649 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1650                                                         bool adjust_surplus)
1651 {
1652         __remove_hugetlb_folio(h, folio, adjust_surplus, false);
1653 }
1654
1655 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
1656                                                         bool adjust_surplus)
1657 {
1658         __remove_hugetlb_folio(h, folio, adjust_surplus, true);
1659 }
1660
1661 static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1662                              bool adjust_surplus)
1663 {
1664         int zeroed;
1665         int nid = folio_nid(folio);
1666
1667         VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1668
1669         lockdep_assert_held(&hugetlb_lock);
1670
1671         INIT_LIST_HEAD(&folio->lru);
1672         h->nr_huge_pages++;
1673         h->nr_huge_pages_node[nid]++;
1674
1675         if (adjust_surplus) {
1676                 h->surplus_huge_pages++;
1677                 h->surplus_huge_pages_node[nid]++;
1678         }
1679
1680         folio_set_hugetlb(folio);
1681         folio_change_private(folio, NULL);
1682         /*
1683          * We have to set hugetlb_vmemmap_optimized again, as the
1684          * folio_change_private(folio, NULL) call above cleared it.
1685          */
1686         folio_set_hugetlb_vmemmap_optimized(folio);
1687
1688         /*
1689          * This folio is about to be managed by the hugetlb allocator and
1690          * should have no users.  Drop our reference, and check for others
1691          * just in case.
1692          */
1693         zeroed = folio_put_testzero(folio);
1694         if (unlikely(!zeroed))
1695                 /*
1696                  * It is VERY unlikely someone else has taken a ref
1697                  * on the folio.  In this case, we simply return as
1698                  * free_huge_folio() will be called when this other ref
1699                  * is dropped.
1700                  */
1701                 return;
1702
1703         arch_clear_hugepage_flags(&folio->page);
1704         enqueue_hugetlb_folio(h, folio);
1705 }
1706
1707 static void __update_and_free_hugetlb_folio(struct hstate *h,
1708                                                 struct folio *folio)
1709 {
1710         bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
1711
1712         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1713                 return;
1714
1715         /*
1716          * If we don't know which subpages are hwpoisoned, we can't free
1717          * the hugepage, so it's leaked intentionally.
1718          */
1719         if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1720                 return;
1721
1722         if (hugetlb_vmemmap_restore(h, &folio->page)) {
1723                 spin_lock_irq(&hugetlb_lock);
1724                 /*
1725                  * If we cannot allocate vmemmap pages, just refuse to free the
1726                  * page, put it back on the hugetlb free list, and treat it
1727                  * as a surplus page.
1728                  */
1729                 add_hugetlb_folio(h, folio, true);
1730                 spin_unlock_irq(&hugetlb_lock);
1731                 return;
1732         }
1733
1734         /*
1735          * Move PageHWPoison flag from head page to the raw error pages,
1736          * which makes any healthy subpages reusable.
1737          */
1738         if (unlikely(folio_test_hwpoison(folio)))
1739                 folio_clear_hugetlb_hwpoison(folio);
1740
1741         /*
1742          * If vmemmap pages were allocated above, then we need to clear the
1743          * hugetlb destructor under the hugetlb lock.
1744          */
1745         if (clear_dtor) {
1746                 spin_lock_irq(&hugetlb_lock);
1747                 __clear_hugetlb_destructor(h, folio);
1748                 spin_unlock_irq(&hugetlb_lock);
1749         }
1750
1751         /*
1752          * Non-gigantic pages demoted from CMA allocated gigantic pages
1753          * need to be given back to CMA in free_gigantic_folio.
1754          */
1755         if (hstate_is_gigantic(h) ||
1756             hugetlb_cma_folio(folio, huge_page_order(h))) {
1757                 destroy_compound_gigantic_folio(folio, huge_page_order(h));
1758                 free_gigantic_folio(folio, huge_page_order(h));
1759         } else {
1760                 __free_pages(&folio->page, huge_page_order(h));
1761         }
1762 }
1763
1764 /*
1765  * Since update_and_free_hugetlb_folio() can be called from any context, we
1766  * cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1767  * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate the
1768  * vmemmap pages.
1769  *
1770  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1771  * freed and frees them one-by-one. As the page->mapping pointer is going
1772  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1773  * structure of a lockless linked list of huge pages to be freed.
1774  */
1775 static LLIST_HEAD(hpage_freelist);
1776
1777 static void free_hpage_workfn(struct work_struct *work)
1778 {
1779         struct llist_node *node;
1780
1781         node = llist_del_all(&hpage_freelist);
1782
1783         while (node) {
1784                 struct page *page;
1785                 struct hstate *h;
1786
1787                 page = container_of((struct address_space **)node,
1788                                      struct page, mapping);
1789                 node = node->next;
1790                 page->mapping = NULL;
1791                 /*
1792                  * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1793                  * folio_hstate() is going to trigger because a previous call to
1794                  * remove_hugetlb_folio() will clear the hugetlb bit, so do
1795                  * not use folio_hstate() directly.
1796                  */
1797                 h = size_to_hstate(page_size(page));
1798
1799                 __update_and_free_hugetlb_folio(h, page_folio(page));
1800
1801                 cond_resched();
1802         }
1803 }
1804 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1805
1806 static inline void flush_free_hpage_work(struct hstate *h)
1807 {
1808         if (hugetlb_vmemmap_optimizable(h))
1809                 flush_work(&free_hpage_work);
1810 }
1811
1812 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1813                                  bool atomic)
1814 {
1815         if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1816                 __update_and_free_hugetlb_folio(h, folio);
1817                 return;
1818         }
1819
1820         /*
1821          * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1822          *
1823          * Only call schedule_work() if hpage_freelist was previously
1824          * empty. Otherwise, schedule_work() has already been called but
1825          * the workfn hasn't retrieved the list yet.
1826          */
1827         if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1828                 schedule_work(&free_hpage_work);
1829 }
1830
1831 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1832 {
1833         struct page *page, *t_page;
1834         struct folio *folio;
1835
1836         list_for_each_entry_safe(page, t_page, list, lru) {
1837                 folio = page_folio(page);
1838                 update_and_free_hugetlb_folio(h, folio, false);
1839                 cond_resched();
1840         }
1841 }
1842
1843 struct hstate *size_to_hstate(unsigned long size)
1844 {
1845         struct hstate *h;
1846
1847         for_each_hstate(h) {
1848                 if (huge_page_size(h) == size)
1849                         return h;
1850         }
1851         return NULL;
1852 }
1853
1854 void free_huge_folio(struct folio *folio)
1855 {
1856         /*
1857          * Can't pass hstate in here because it is called from the
1858          * compound page destructor.
1859          */
1860         struct hstate *h = folio_hstate(folio);
1861         int nid = folio_nid(folio);
1862         struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1863         bool restore_reserve;
1864         unsigned long flags;
1865
1866         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1867         VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1868
1869         hugetlb_set_folio_subpool(folio, NULL);
1870         if (folio_test_anon(folio))
1871                 __ClearPageAnonExclusive(&folio->page);
1872         folio->mapping = NULL;
1873         restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1874         folio_clear_hugetlb_restore_reserve(folio);
1875
1876         /*
1877          * If HPageRestoreReserve was set on the page, the page allocation
1878          * consumed a reservation.  If the page was associated with a subpool,
1879          * there would have been a page reserved in the subpool before allocation
1880          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1881          * reservation, do not call hugepage_subpool_put_pages() as this will
1882          * remove the reserved page from the subpool.
1883          */
1884         if (!restore_reserve) {
1885                 /*
1886                  * A return code of zero implies that the subpool will be
1887                  * under its minimum size if the reservation is not restored
1888                  * after the page is freed.  Therefore, force the
1889                  * restore_reserve operation.
1890                  */
1891                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1892                         restore_reserve = true;
1893         }
1894
1895         spin_lock_irqsave(&hugetlb_lock, flags);
1896         folio_clear_hugetlb_migratable(folio);
1897         hugetlb_cgroup_uncharge_folio(hstate_index(h),
1898                                      pages_per_huge_page(h), folio);
1899         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1900                                           pages_per_huge_page(h), folio);
1901         if (restore_reserve)
1902                 h->resv_huge_pages++;
1903
1904         if (folio_test_hugetlb_temporary(folio)) {
1905                 remove_hugetlb_folio(h, folio, false);
1906                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1907                 update_and_free_hugetlb_folio(h, folio, true);
1908         } else if (h->surplus_huge_pages_node[nid]) {
1909                 /* remove the page from active list */
1910                 remove_hugetlb_folio(h, folio, true);
1911                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1912                 update_and_free_hugetlb_folio(h, folio, true);
1913         } else {
1914                 arch_clear_hugepage_flags(&folio->page);
1915                 enqueue_hugetlb_folio(h, folio);
1916                 spin_unlock_irqrestore(&hugetlb_lock, flags);
1917         }
1918 }
1919
1920 /*
1921  * Must be called with the hugetlb lock held
1922  */
1923 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1924 {
1925         lockdep_assert_held(&hugetlb_lock);
1926         h->nr_huge_pages++;
1927         h->nr_huge_pages_node[nid]++;
1928 }
1929
1930 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1931 {
1932         hugetlb_vmemmap_optimize(h, &folio->page);
1933         INIT_LIST_HEAD(&folio->lru);
1934         folio_set_hugetlb(folio);
1935         hugetlb_set_folio_subpool(folio, NULL);
1936         set_hugetlb_cgroup(folio, NULL);
1937         set_hugetlb_cgroup_rsvd(folio, NULL);
1938 }
1939
1940 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1941 {
1942         __prep_new_hugetlb_folio(h, folio);
1943         spin_lock_irq(&hugetlb_lock);
1944         __prep_account_new_huge_page(h, nid);
1945         spin_unlock_irq(&hugetlb_lock);
1946 }
1947
1948 static bool __prep_compound_gigantic_folio(struct folio *folio,
1949                                         unsigned int order, bool demote)
1950 {
1951         int i, j;
1952         int nr_pages = 1 << order;
1953         struct page *p;
1954
1955         __folio_clear_reserved(folio);
1956         for (i = 0; i < nr_pages; i++) {
1957                 p = folio_page(folio, i);
1958
1959                 /*
1960                  * For gigantic hugepages allocated through bootmem at
1961                  * boot, it's safer to be consistent with the not-gigantic
1962                  * hugepages and clear the PG_reserved bit from all tail pages
1963                  * too.  Otherwise drivers using get_user_pages() to access tail
1964                  * pages may get the reference counting wrong if they see
1965                  * PG_reserved set on a tail page (despite the head page not
1966                  * having PG_reserved set).  Enforcing this consistency between
1967                  * head and tail pages allows drivers to optimize away a check
1968                  * on the head page when they need to know if put_page() is needed
1969                  * after get_user_pages().
1970                  */
1971                 if (i != 0)     /* head page cleared above */
1972                         __ClearPageReserved(p);
1973                 /*
1974                  * Subtle and very unlikely
1975                  *
1976                  * Gigantic 'page allocators' such as memblock or cma will
1977                  * return a set of pages with each page ref counted.  We need
1978                  * to turn this set of pages into a compound page with tail
1979                  * page ref counts set to zero.  Code such as speculative page
1980                  * cache adding could take a ref on a 'to be' tail page.
1981                  * We need to respect any increased ref count, and only set
1982                  * the ref count to zero if count is currently 1.  If count
1983                  * is not 1, we return an error.  An error return indicates
1984                  * the set of pages can not be converted to a gigantic page.
1985                  * The caller who allocated the pages should then discard the
1986                  * pages using the appropriate free interface.
1987                  *
1988                  * In the case of demote, the ref count will be zero.
1989                  */
1990                 if (!demote) {
1991                         if (!page_ref_freeze(p, 1)) {
1992                                 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
1993                                 goto out_error;
1994                         }
1995                 } else {
1996                         VM_BUG_ON_PAGE(page_count(p), p);
1997                 }
1998                 if (i != 0)
1999                         set_compound_head(p, &folio->page);
2000         }
2001         __folio_set_head(folio);
2002         /* we rely on prep_new_hugetlb_folio to set the destructor */
2003         folio_set_order(folio, order);
2004         atomic_set(&folio->_entire_mapcount, -1);
2005         atomic_set(&folio->_nr_pages_mapped, 0);
2006         atomic_set(&folio->_pincount, 0);
2007         return true;
2008
2009 out_error:
2010         /* undo page modifications made above */
2011         for (j = 0; j < i; j++) {
2012                 p = folio_page(folio, j);
2013                 if (j != 0)
2014                         clear_compound_head(p);
2015                 set_page_refcounted(p);
2016         }
2017         /* need to clear PG_reserved on remaining tail pages  */
2018         for (; j < nr_pages; j++) {
2019                 p = folio_page(folio, j);
2020                 __ClearPageReserved(p);
2021         }
2022         return false;
2023 }
2024
2025 static bool prep_compound_gigantic_folio(struct folio *folio,
2026                                                         unsigned int order)
2027 {
2028         return __prep_compound_gigantic_folio(folio, order, false);
2029 }
2030
2031 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
2032                                                         unsigned int order)
2033 {
2034         return __prep_compound_gigantic_folio(folio, order, true);
2035 }
2036
2037 /*
2038  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
2039  * transparent huge pages.  See the PageTransHuge() documentation for more
2040  * details.
2041  */
2042 int PageHuge(struct page *page)
2043 {
2044         struct folio *folio;
2045
2046         if (!PageCompound(page))
2047                 return 0;
2048         folio = page_folio(page);
2049         return folio_test_hugetlb(folio);
2050 }
2051 EXPORT_SYMBOL_GPL(PageHuge);
2052
2053 /*
2054  * Find and lock address space (mapping) in write mode.
2055  *
2056  * Upon entry, the page is locked which means that page_mapping() is
2057  * stable.  Due to locking order, we can only trylock_write.  If we
2058  * cannot get the lock, simply return NULL to the caller.
2059  */
2060 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
2061 {
2062         struct address_space *mapping = page_mapping(hpage);
2063
2064         if (!mapping)
2065                 return mapping;
2066
2067         if (i_mmap_trylock_write(mapping))
2068                 return mapping;
2069
2070         return NULL;
2071 }
2072
2073 pgoff_t hugetlb_basepage_index(struct page *page)
2074 {
2075         struct page *page_head = compound_head(page);
2076         pgoff_t index = page_index(page_head);
2077         unsigned long compound_idx;
2078
2079         if (compound_order(page_head) > MAX_ORDER)
2080                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
2081         else
2082                 compound_idx = page - page_head;
2083
2084         return (index << compound_order(page_head)) + compound_idx;
2085 }
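/*
 * Worked example (editorial note, assuming 4 KiB base pages and a 2 MiB
 * hugepage, i.e. compound order 9): for a head page at file index 3, the
 * base page located 5 pages into the compound page has basepage index
 * (3 << 9) + 5 = 1541.
 */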
2086
2087 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2088                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2089                 nodemask_t *node_alloc_noretry)
2090 {
2091         int order = huge_page_order(h);
2092         struct page *page;
2093         bool alloc_try_hard = true;
2094         bool retry = true;
2095
2096         /*
2097          * By default we always try hard to allocate the page with the
2098          * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages
2099          * in a loop (to adjust global huge page counts) and the previous
2100          * allocation failed, do not continue to try hard on the same node.
2101          * Use the node_alloc_noretry bitmap to manage this state information.
2102          */
2103         if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2104                 alloc_try_hard = false;
2105         gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2106         if (alloc_try_hard)
2107                 gfp_mask |= __GFP_RETRY_MAYFAIL;
2108         if (nid == NUMA_NO_NODE)
2109                 nid = numa_mem_id();
2110 retry:
2111         page = __alloc_pages(gfp_mask, order, nid, nmask);
2112
2113         /* Freeze head page */
2114         if (page && !page_ref_freeze(page, 1)) {
2115                 __free_pages(page, order);
2116                 if (retry) {    /* retry once */
2117                         retry = false;
2118                         goto retry;
2119                 }
2120                 /* WOW!  twice in a row. */
2121                 pr_warn("HugeTLB head page unexpected inflated ref count\n");
2122                 page = NULL;
2123         }
2124
2125         /*
2126          * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
2127          * this indicates an overall state change.  Clear the bit so that
2128          * we resume normal 'try hard' allocations.
2129          */
2130         if (node_alloc_noretry && page && !alloc_try_hard)
2131                 node_clear(nid, *node_alloc_noretry);
2132
2133         /*
2134          * If we tried hard to get a page but failed, set the bit so that
2135          * subsequent attempts will not try as hard until there is an
2136          * overall state change.
2137          */
2138         if (node_alloc_noretry && !page && alloc_try_hard)
2139                 node_set(nid, *node_alloc_noretry);
2140
2141         if (!page) {
2142                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2143                 return NULL;
2144         }
2145
2146         __count_vm_event(HTLB_BUDDY_PGALLOC);
2147         return page_folio(page);
2148 }
2149
2150 /*
2151  * Common helper to allocate a fresh hugetlb page. All specific allocators
2152  * should use this function to get new hugetlb pages.
2153  *
2154  * Note that the returned page is 'frozen': the ref count of the head page
2155  * and all tail pages is zero.
2156  */
2157 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2158                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2159                 nodemask_t *node_alloc_noretry)
2160 {
2161         struct folio *folio;
2162         bool retry = false;
2163
2164 retry:
2165         if (hstate_is_gigantic(h))
2166                 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2167         else
2168                 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2169                                 nid, nmask, node_alloc_noretry);
2170         if (!folio)
2171                 return NULL;
2172         if (hstate_is_gigantic(h)) {
2173                 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2174                         /*
2175                          * Rare failure to convert pages to compound page.
2176                          * Free pages and try again - ONCE!
2177                          */
2178                         free_gigantic_folio(folio, huge_page_order(h));
2179                         if (!retry) {
2180                                 retry = true;
2181                                 goto retry;
2182                         }
2183                         return NULL;
2184                 }
2185         }
2186         prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2187
2188         return folio;
2189 }
2190
2191 /*
2192  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
2193  * manner.
2194  */
2195 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2196                                 nodemask_t *node_alloc_noretry)
2197 {
2198         struct folio *folio;
2199         int nr_nodes, node;
2200         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2201
2202         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2203                 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2204                                         nodes_allowed, node_alloc_noretry);
2205                 if (folio) {
2206                         free_huge_folio(folio); /* free it into the hugepage allocator */
2207                         return 1;
2208                 }
2209         }
2210
2211         return 0;
2212 }
2213
2214 /*
2215  * Remove a huge page from the pool, starting from the next node to free.
2216  * Attempt to keep persistent huge pages more or less balanced over allowed
2217  * nodes.  This routine only 'removes' the hugetlb page.  The caller must
2218  * make an additional call to free the page to the low level allocators.
2219  * Called with hugetlb_lock locked.
2220  */
2221 static struct page *remove_pool_huge_page(struct hstate *h,
2222                                                 nodemask_t *nodes_allowed,
2223                                                  bool acct_surplus)
2224 {
2225         int nr_nodes, node;
2226         struct page *page = NULL;
2227         struct folio *folio;
2228
2229         lockdep_assert_held(&hugetlb_lock);
2230         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2231                 /*
2232                  * If we're returning unused surplus pages, only examine
2233                  * nodes with surplus pages.
2234                  */
2235                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2236                     !list_empty(&h->hugepage_freelists[node])) {
2237                         page = list_entry(h->hugepage_freelists[node].next,
2238                                           struct page, lru);
2239                         folio = page_folio(page);
2240                         remove_hugetlb_folio(h, folio, acct_surplus);
2241                         break;
2242                 }
2243         }
2244
2245         return page;
2246 }
2247
2248 /*
2249  * Dissolve a given free hugepage into free buddy pages. This function does
2250  * nothing for in-use hugepages and non-hugepages.
2251  * This function returns one of the following values:
2252  *
2253  *  -ENOMEM: failed to allocate the vmemmap pages needed to free the
2254  *           hugepage when the system is under memory pressure and the
2255  *           feature of freeing unused vmemmap pages associated with each
2256  *           hugetlb page is enabled.
2257  *  -EBUSY:  failed to dissolve the free hugepage, or the hugepage is in-use
2258  *           (allocated or reserved).
2259  *       0:  successfully dissolved the free hugepage, or the page is not a
2260  *           hugepage (considered as already dissolved).
2261  */
2262 int dissolve_free_huge_page(struct page *page)
2263 {
2264         int rc = -EBUSY;
2265         struct folio *folio = page_folio(page);
2266
2267 retry:
2268         /* Not to disrupt normal path by vainly holding hugetlb_lock */
2269         if (!folio_test_hugetlb(folio))
2270                 return 0;
2271
2272         spin_lock_irq(&hugetlb_lock);
2273         if (!folio_test_hugetlb(folio)) {
2274                 rc = 0;
2275                 goto out;
2276         }
2277
2278         if (!folio_ref_count(folio)) {
2279                 struct hstate *h = folio_hstate(folio);
2280                 if (!available_huge_pages(h))
2281                         goto out;
2282
2283                 /*
2284                  * We should make sure that the page is already on the free list
2285                  * when it is dissolved.
2286                  */
2287                 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2288                         spin_unlock_irq(&hugetlb_lock);
2289                         cond_resched();
2290
2291                         /*
2292                          * Theoretically, we should return -EBUSY when we
2293                          * encounter this race. In fact, we have a chance
2294                          * to successfully dissolve the page if we retry,
2295                          * because the race window is quite small. Seizing
2296                          * this opportunity is an optimization that increases
2297                          * the success rate of dissolving the page.
2298                          */
2299                         goto retry;
2300                 }
2301
2302                 remove_hugetlb_folio(h, folio, false);
2303                 h->max_huge_pages--;
2304                 spin_unlock_irq(&hugetlb_lock);
2305
2306                 /*
2307                  * Normally update_and_free_hugetlb_folio will allocate the required
2308                  * vmemmap before freeing the page.  update_and_free_hugetlb_folio
2309                  * will fail to free the page if it cannot allocate the required
2310                  * vmemmap.  We need to adjust max_huge_pages if the page is not
2311                  * freed.  Attempt to allocate vmemmap here so that we can take
2312                  * appropriate action on failure.
2313                  */
2314                 rc = hugetlb_vmemmap_restore(h, &folio->page);
2315                 if (!rc) {
2316                         update_and_free_hugetlb_folio(h, folio, false);
2317                 } else {
2318                         spin_lock_irq(&hugetlb_lock);
2319                         add_hugetlb_folio(h, folio, false);
2320                         h->max_huge_pages++;
2321                         spin_unlock_irq(&hugetlb_lock);
2322                 }
2323
2324                 return rc;
2325         }
2326 out:
2327         spin_unlock_irq(&hugetlb_lock);
2328         return rc;
2329 }
2330
2331 /*
2332  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2333  * make specified memory blocks removable from the system.
2334  * Note that this will dissolve a free gigantic hugepage completely, if any
2335  * part of it lies within the given range.
2336  * Also note that if dissolve_free_huge_page() returns with an error, all
2337  * free hugepages that were dissolved before that error are lost.
2338  */
2339 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2340 {
2341         unsigned long pfn;
2342         struct page *page;
2343         int rc = 0;
2344         unsigned int order;
2345         struct hstate *h;
2346
2347         if (!hugepages_supported())
2348                 return rc;
2349
2350         order = huge_page_order(&default_hstate);
2351         for_each_hstate(h)
2352                 order = min(order, huge_page_order(h));
2353
2354         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2355                 page = pfn_to_page(pfn);
2356                 rc = dissolve_free_huge_page(page);
2357                 if (rc)
2358                         break;
2359         }
2360
2361         return rc;
2362 }
2363
2364 /*
2365  * Allocates a fresh surplus page from the page allocator.
2366  */
2367 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2368                                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2369 {
2370         struct folio *folio = NULL;
2371
2372         if (hstate_is_gigantic(h))
2373                 return NULL;
2374
2375         spin_lock_irq(&hugetlb_lock);
2376         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2377                 goto out_unlock;
2378         spin_unlock_irq(&hugetlb_lock);
2379
2380         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2381         if (!folio)
2382                 return NULL;
2383
2384         spin_lock_irq(&hugetlb_lock);
2385         /*
2386          * We could have raced with the pool size change.
2387          * Double check that and simply deallocate the new page
2388          * if we would end up overcommitting the surpluses. Abuse the
2389          * temporary page flag to work around the nasty free_huge_folio
2390          * code flow.
2391          */
2392         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2393                 folio_set_hugetlb_temporary(folio);
2394                 spin_unlock_irq(&hugetlb_lock);
2395                 free_huge_folio(folio);
2396                 return NULL;
2397         }
2398
2399         h->surplus_huge_pages++;
2400         h->surplus_huge_pages_node[folio_nid(folio)]++;
2401
2402 out_unlock:
2403         spin_unlock_irq(&hugetlb_lock);
2404
2405         return folio;
2406 }
2407
2408 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2409                                      int nid, nodemask_t *nmask)
2410 {
2411         struct folio *folio;
2412
2413         if (hstate_is_gigantic(h))
2414                 return NULL;
2415
2416         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2417         if (!folio)
2418                 return NULL;
2419
2420         /* fresh huge pages are frozen */
2421         folio_ref_unfreeze(folio, 1);
2422         /*
2423          * We do not account these pages as surplus because they are only
2424          * temporary and will be released properly on the last reference
2425          */
2426         folio_set_hugetlb_temporary(folio);
2427
2428         return folio;
2429 }
2430
2431 /*
2432  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2433  */
2434 static
2435 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2436                 struct vm_area_struct *vma, unsigned long addr)
2437 {
2438         struct folio *folio = NULL;
2439         struct mempolicy *mpol;
2440         gfp_t gfp_mask = htlb_alloc_mask(h);
2441         int nid;
2442         nodemask_t *nodemask;
2443
2444         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2445         if (mpol_is_preferred_many(mpol)) {
2446                 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2447
2448                 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2449                 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2450
2451                 /* Fallback to all nodes if folio == NULL */
2452                 nodemask = NULL;
2453         }
2454
2455         if (!folio)
2456                 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2457         mpol_cond_put(mpol);
2458         return folio;
2459 }
2460
2461 /* folio migration callback function */
2462 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2463                 nodemask_t *nmask, gfp_t gfp_mask)
2464 {
2465         spin_lock_irq(&hugetlb_lock);
2466         if (available_huge_pages(h)) {
2467                 struct folio *folio;
2468
2469                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2470                                                 preferred_nid, nmask);
2471                 if (folio) {
2472                         spin_unlock_irq(&hugetlb_lock);
2473                         return folio;
2474                 }
2475         }
2476         spin_unlock_irq(&hugetlb_lock);
2477
2478         return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2479 }
2480
2481 /* mempolicy aware migration callback */
2482 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
2483                 unsigned long address)
2484 {
2485         struct mempolicy *mpol;
2486         nodemask_t *nodemask;
2487         struct folio *folio;
2488         gfp_t gfp_mask;
2489         int node;
2490
2491         gfp_mask = htlb_alloc_mask(h);
2492         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2493         folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
2494         mpol_cond_put(mpol);
2495
2496         return folio;
2497 }
2498
2499 /*
2500  * Increase the hugetlb pool such that it can accommodate a reservation
2501  * of size 'delta'.
2502  */
2503 static int gather_surplus_pages(struct hstate *h, long delta)
2504         __must_hold(&hugetlb_lock)
2505 {
2506         LIST_HEAD(surplus_list);
2507         struct folio *folio, *tmp;
2508         int ret;
2509         long i;
2510         long needed, allocated;
2511         bool alloc_ok = true;
2512
2513         lockdep_assert_held(&hugetlb_lock);
2514         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2515         if (needed <= 0) {
2516                 h->resv_huge_pages += delta;
2517                 return 0;
2518         }
2519
2520         allocated = 0;
2521
2522         ret = -ENOMEM;
2523 retry:
2524         spin_unlock_irq(&hugetlb_lock);
2525         for (i = 0; i < needed; i++) {
2526                 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2527                                 NUMA_NO_NODE, NULL);
2528                 if (!folio) {
2529                         alloc_ok = false;
2530                         break;
2531                 }
2532                 list_add(&folio->lru, &surplus_list);
2533                 cond_resched();
2534         }
2535         allocated += i;
2536
2537         /*
2538          * After retaking hugetlb_lock, we need to recalculate 'needed'
2539          * because either resv_huge_pages or free_huge_pages may have changed.
2540          */
2541         spin_lock_irq(&hugetlb_lock);
2542         needed = (h->resv_huge_pages + delta) -
2543                         (h->free_huge_pages + allocated);
2544         if (needed > 0) {
2545                 if (alloc_ok)
2546                         goto retry;
2547                 /*
2548                  * We were not able to allocate enough pages to
2549                  * satisfy the entire reservation so we free what
2550                  * we've allocated so far.
2551                  */
2552                 goto free;
2553         }
2554         /*
2555          * The surplus_list now contains _at_least_ the number of extra pages
2556          * needed to accommodate the reservation.  Add the appropriate number
2557          * of pages to the hugetlb pool and free the extras back to the buddy
2558          * allocator.  Commit the entire reservation here to prevent another
2559          * process from stealing the pages as they are added to the pool but
2560          * before they are reserved.
2561          */
2562         needed += allocated;
2563         h->resv_huge_pages += delta;
2564         ret = 0;
2565
2566         /* Free the needed pages to the hugetlb pool */
2567         list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2568                 if ((--needed) < 0)
2569                         break;
2570                 /* Add the page to the hugetlb allocator */
2571                 enqueue_hugetlb_folio(h, folio);
2572         }
2573 free:
2574         spin_unlock_irq(&hugetlb_lock);
2575
2576         /*
2577          * Free unnecessary surplus pages to the buddy allocator.
2578          * Pages have no ref count, call free_huge_folio directly.
2579          */
2580         list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2581                 free_huge_folio(folio);
2582         spin_lock_irq(&hugetlb_lock);
2583
2584         return ret;
2585 }
2586
2587 /*
2588  * This routine has two main purposes:
2589  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2590  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2591  *    to the associated reservation map.
2592  * 2) Free any unused surplus pages that may have been allocated to satisfy
2593  *    the reservation.  As many as unused_resv_pages may be freed.
2594  */
2595 static void return_unused_surplus_pages(struct hstate *h,
2596                                         unsigned long unused_resv_pages)
2597 {
2598         unsigned long nr_pages;
2599         struct page *page;
2600         LIST_HEAD(page_list);
2601
2602         lockdep_assert_held(&hugetlb_lock);
2603         /* Uncommit the reservation */
2604         h->resv_huge_pages -= unused_resv_pages;
2605
2606         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2607                 goto out;
2608
2609         /*
2610          * Part (or even all) of the reservation could have been backed
2611          * by pre-allocated pages. Only free surplus pages.
2612          */
2613         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2614
2615         /*
2616          * We want to release as many surplus pages as possible, spread
2617          * evenly across all nodes with memory. Iterate across these nodes
2618          * until we can no longer free unreserved surplus pages. This occurs
2619          * when the nodes with surplus pages have no free pages.
2620          * remove_pool_huge_page() will balance the freed pages across the
2621          * on-line nodes with memory and will handle the hstate accounting.
2622          */
2623         while (nr_pages--) {
2624                 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2625                 if (!page)
2626                         goto out;
2627
2628                 list_add(&page->lru, &page_list);
2629         }
2630
2631 out:
2632         spin_unlock_irq(&hugetlb_lock);
2633         update_and_free_pages_bulk(h, &page_list);
2634         spin_lock_irq(&hugetlb_lock);
2635 }
2636
2637
2638 /*
2639  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2640  * are used by the huge page allocation routines to manage reservations.
2641  *
2642  * vma_needs_reservation is called to determine if the huge page at addr
2643  * within the vma has an associated reservation.  If a reservation is
2644  * needed, the value 1 is returned.  The caller is then responsible for
2645  * managing the global reservation and subpool usage counts.  After
2646  * the huge page has been allocated, vma_commit_reservation is called
2647  * to add the page to the reservation map.  If the page allocation fails,
2648  * the reservation must be ended instead of committed.  vma_end_reservation
2649  * is called in such cases.
2650  *
2651  * In the normal case, vma_commit_reservation returns the same value
2652  * as the preceding vma_needs_reservation call.  The only time this
2653  * is not the case is if a reserve map was changed between calls.  It
2654  * is the responsibility of the caller to notice the difference and
2655  * take appropriate action.
2656  *
2657  * vma_add_reservation is used in error paths where a reservation must
2658  * be restored when a newly allocated huge page must be freed.  It is
2659  * to be called after calling vma_needs_reservation to determine if a
2660  * reservation exists.
2661  *
2662  * vma_del_reservation is used in error paths where an entry in the reserve
2663  * map was created during huge page allocation and must be removed.  It is to
2664  * be called after calling vma_needs_reservation to determine if a reservation
2665  * exists.
2666  */
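/*
 * Editorial illustration, not kernel code: under the scheme documented above,
 * an allocation path would typically pair these helpers roughly as sketched
 * below.  "allocate_huge_folio" is a hypothetical stand-in for the actual
 * allocation performed elsewhere in this file; error handling is omitted.
 *
 *        if (vma_needs_reservation(h, vma, addr) < 0)
 *                return ERR_PTR(-ENOMEM);
 *        folio = allocate_huge_folio(...);
 *        if (folio)
 *                vma_commit_reservation(h, vma, addr);
 *        else
 *                vma_end_reservation(h, vma, addr);
 */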
2667 enum vma_resv_mode {
2668         VMA_NEEDS_RESV,
2669         VMA_COMMIT_RESV,
2670         VMA_END_RESV,
2671         VMA_ADD_RESV,
2672         VMA_DEL_RESV,
2673 };
2674 static long __vma_reservation_common(struct hstate *h,
2675                                 struct vm_area_struct *vma, unsigned long addr,
2676                                 enum vma_resv_mode mode)
2677 {
2678         struct resv_map *resv;
2679         pgoff_t idx;
2680         long ret;
2681         long dummy_out_regions_needed;
2682
2683         resv = vma_resv_map(vma);
2684         if (!resv)
2685                 return 1;
2686
2687         idx = vma_hugecache_offset(h, vma, addr);
2688         switch (mode) {
2689         case VMA_NEEDS_RESV:
2690                 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2691                 /* We assume that vma_reservation_* routines always operate on
2692                  * 1 page, and that adding to resv map a 1 page entry can only
2693                  * ever require 1 region.
2694                  */
2695                 VM_BUG_ON(dummy_out_regions_needed != 1);
2696                 break;
2697         case VMA_COMMIT_RESV:
2698                 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2699                 /* region_add calls of range 1 should never fail. */
2700                 VM_BUG_ON(ret < 0);
2701                 break;
2702         case VMA_END_RESV:
2703                 region_abort(resv, idx, idx + 1, 1);
2704                 ret = 0;
2705                 break;
2706         case VMA_ADD_RESV:
2707                 if (vma->vm_flags & VM_MAYSHARE) {
2708                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2709                         /* region_add calls of range 1 should never fail. */
2710                         VM_BUG_ON(ret < 0);
2711                 } else {
2712                         region_abort(resv, idx, idx + 1, 1);
2713                         ret = region_del(resv, idx, idx + 1);
2714                 }
2715                 break;
2716         case VMA_DEL_RESV:
2717                 if (vma->vm_flags & VM_MAYSHARE) {
2718                         region_abort(resv, idx, idx + 1, 1);
2719                         ret = region_del(resv, idx, idx + 1);
2720                 } else {
2721                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2722                         /* region_add calls of range 1 should never fail. */
2723                         VM_BUG_ON(ret < 0);
2724                 }
2725                 break;
2726         default:
2727                 BUG();
2728         }
2729
2730         if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2731                 return ret;
2732         /*
2733          * We know the private mapping must have HPAGE_RESV_OWNER set.
2734          *
2735          * In most cases, reserves exist for private mappings.
2736          * However, the file associated with the mapping could have been
2737          * hole punched or truncated after reserves were consumed; a
2738          * subsequent fault on such a range will not use reserves.
2739          * Subtle - The reserve map for private mappings has the
2740          * opposite meaning to that of shared mappings.  If NO
2741          * entry is in the reserve map, it means a reservation exists.
2742          * If an entry exists in the reserve map, it means the
2743          * reservation has already been consumed.  As a result, the
2744          * return value of this routine is the opposite of the
2745          * value returned from reserve map manipulation routines above.
2746          */
2747         if (ret > 0)
2748                 return 0;
2749         if (ret == 0)
2750                 return 1;
2751         return ret;
2752 }
2753
2754 static long vma_needs_reservation(struct hstate *h,
2755                         struct vm_area_struct *vma, unsigned long addr)
2756 {
2757         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2758 }
2759
2760 static long vma_commit_reservation(struct hstate *h,
2761                         struct vm_area_struct *vma, unsigned long addr)
2762 {
2763         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2764 }
2765
2766 static void vma_end_reservation(struct hstate *h,
2767                         struct vm_area_struct *vma, unsigned long addr)
2768 {
2769         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2770 }
2771
2772 static long vma_add_reservation(struct hstate *h,
2773                         struct vm_area_struct *vma, unsigned long addr)
2774 {
2775         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2776 }
2777
2778 static long vma_del_reservation(struct hstate *h,
2779                         struct vm_area_struct *vma, unsigned long addr)
2780 {
2781         return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2782 }
2783
2784 /*
2785  * This routine is called to restore reservation information on error paths.
2786  * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2787  * and the hugetlb mutex should remain held when calling this routine.
2788  *
2789  * It handles two specific cases:
2790  * 1) A reservation was in place and the folio consumed the reservation.
2791  *    hugetlb_restore_reserve is set in the folio.
2792  * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2793  *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
2794  *
2795  * In case 1, free_huge_folio later in the error path will increment the
2796  * global reserve count.  But, free_huge_folio does not have enough context
2797  * to adjust the reservation map.  This case deals primarily with private
2798  * mappings.  Adjust the reserve map here to be consistent with global
2799  * reserve count adjustments to be made by free_huge_folio.  Make sure the
2800  * reserve map indicates there is a reservation present.
2801  *
2802  * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2803  */
2804 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2805                         unsigned long address, struct folio *folio)
2806 {
2807         long rc = vma_needs_reservation(h, vma, address);
2808
2809         if (folio_test_hugetlb_restore_reserve(folio)) {
2810                 if (unlikely(rc < 0))
2811                         /*
2812                          * Rare out of memory condition in reserve map
2813                          * manipulation.  Clear hugetlb_restore_reserve so
2814                          * that global reserve count will not be incremented
2815                          * by free_huge_folio.  This will make it appear
2816                          * as though the reservation for this folio was
2817                          * consumed.  This may prevent the task from
2818                          * faulting in the folio at a later time.  This
2819                          * is better than inconsistent global huge page
2820                          * accounting of reserve counts.
2821                          */
2822                         folio_clear_hugetlb_restore_reserve(folio);
2823                 else if (rc)
2824                         (void)vma_add_reservation(h, vma, address);
2825                 else
2826                         vma_end_reservation(h, vma, address);
2827         } else {
2828                 if (!rc) {
2829                         /*
2830                          * This indicates there is an entry in the reserve map
2831                          * not added by alloc_hugetlb_folio.  We know it was added
2832                          * before the alloc_hugetlb_folio call, otherwise
2833                          * hugetlb_restore_reserve would be set on the folio.
2834                          * Remove the entry so that a subsequent allocation
2835                          * does not consume a reservation.
2836                          */
2837                         rc = vma_del_reservation(h, vma, address);
2838                         if (rc < 0)
2839                                 /*
2840                                  * VERY rare out of memory condition.  Since
2841                                  * we can not delete the entry, set
2842                                  * hugetlb_restore_reserve so that the reserve
2843                                  * count will be incremented when the folio
2844                                  * is freed.  This reserve will be consumed
2845                                  * on a subsequent allocation.
2846                                  */
2847                                 folio_set_hugetlb_restore_reserve(folio);
2848                 } else if (rc < 0) {
2849                         /*
2850                          * Rare out of memory condition from
2851                          * vma_needs_reservation call.  Memory allocation is
2852                          * only attempted if a new entry is needed.  Therefore,
2853                          * this implies there is not an entry in the
2854                          * reserve map.
2855                          *
2856                          * For shared mappings, no entry in the map indicates
2857                          * no reservation.  We are done.
2858                          */
2859                         if (!(vma->vm_flags & VM_MAYSHARE))
2860                                 /*
2861                                  * For private mappings, no entry indicates
2862                                  * a reservation is present.  Since we can
2863                                  * not add an entry, set hugetlb_restore_reserve
2864                                  * on the folio so reserve count will be
2865                                  * incremented when freed.  This reserve will
2866                                  * be consumed on a subsequent allocation.
2867                                  */
2868                                 folio_set_hugetlb_restore_reserve(folio);
2869                 } else
2870                         /*
2871                          * No reservation present, do nothing
2872                          */
2873                          vma_end_reservation(h, vma, address);
2874         }
2875 }
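/*
 * Illustrative sketch of a caller (hypothetical; real callers such as the
 * hugetlb fault paths differ in detail): an error path pairs
 * alloc_hugetlb_folio() with restore_reserve_on_error() before dropping the
 * last reference, so the reserve map and the global reserve count stay
 * consistent.
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	if (some_later_step_fails()) {		// hypothetical failure point
 *		restore_reserve_on_error(h, vma, addr, folio);
 *		folio_put(folio);
 *		return -EIO;			// hypothetical error code
 *	}
 */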
2876
2877 /*
2878  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2879  * the old one
2880  * @h: struct hstate old page belongs to
2881  * @old_folio: Old folio to dissolve
2882  * @list: List to isolate the page in case we need to
2883  * Returns 0 on success, otherwise negated error.
2884  */
2885 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
2886                         struct folio *old_folio, struct list_head *list)
2887 {
2888         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2889         int nid = folio_nid(old_folio);
2890         struct folio *new_folio;
2891         int ret = 0;
2892
2893         /*
2894          * Before dissolving the folio, we need to allocate a new one for the
2895          * pool to remain stable.  Here, we allocate the folio and 'prep' it
2896          * by doing everything but actually updating counters and adding to
2897          * the pool.  This simplifies and let us do most of the processing
2898          * under the lock.
2899          */
2900         new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
2901         if (!new_folio)
2902                 return -ENOMEM;
2903         __prep_new_hugetlb_folio(h, new_folio);
2904
2905 retry:
2906         spin_lock_irq(&hugetlb_lock);
2907         if (!folio_test_hugetlb(old_folio)) {
2908                 /*
2909                  * Freed from under us. Drop new_folio too.
2910                  */
2911                 goto free_new;
2912         } else if (folio_ref_count(old_folio)) {
2913                 bool isolated;
2914
2915                 /*
2916                  * Someone has grabbed the folio, try to isolate it here.
2917                  * Fail with -EBUSY if not possible.
2918                  */
2919                 spin_unlock_irq(&hugetlb_lock);
2920                 isolated = isolate_hugetlb(old_folio, list);
2921                 ret = isolated ? 0 : -EBUSY;
2922                 spin_lock_irq(&hugetlb_lock);
2923                 goto free_new;
2924         } else if (!folio_test_hugetlb_freed(old_folio)) {
2925                 /*
2926                  * Folio's refcount is 0 but it has not been enqueued in the
2927                  * freelist yet. Race window is small, so we can succeed here if
2928                  * we retry.
2929                  */
2930                 spin_unlock_irq(&hugetlb_lock);
2931                 cond_resched();
2932                 goto retry;
2933         } else {
2934                 /*
2935                  * Ok, old_folio is still a genuine free hugepage. Remove it from
2936                  * the freelist and decrease the counters. These will be
2937                  * incremented again when calling __prep_account_new_huge_page()
2938                  * and enqueue_hugetlb_folio() for new_folio. The counters will
2939                  * remain stable since this happens under the lock.
2940                  */
2941                 remove_hugetlb_folio(h, old_folio, false);
2942
2943                 /*
2944                  * Ref count on new_folio is already zero as it was dropped
2945                  * earlier.  It can be directly added to the pool free list.
2946                  */
2947                 __prep_account_new_huge_page(h, nid);
2948                 enqueue_hugetlb_folio(h, new_folio);
2949
2950                 /*
2951                  * Folio has been replaced, we can safely free the old one.
2952                  */
2953                 spin_unlock_irq(&hugetlb_lock);
2954                 update_and_free_hugetlb_folio(h, old_folio, false);
2955         }
2956
2957         return ret;
2958
2959 free_new:
2960         spin_unlock_irq(&hugetlb_lock);
2961         /* Folio has a zero ref count, but needs a ref to be freed */
2962         folio_ref_unfreeze(new_folio, 1);
2963         update_and_free_hugetlb_folio(h, new_folio, false);
2964
2965         return ret;
2966 }
2967
2968 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2969 {
2970         struct hstate *h;
2971         struct folio *folio = page_folio(page);
2972         int ret = -EBUSY;
2973
2974         /*
2975          * The page might have been dissolved from under our feet, so make sure
2976          * to carefully check the state under the lock.
2977          * Return success when racing as if we dissolved the page ourselves.
2978          */
2979         spin_lock_irq(&hugetlb_lock);
2980         if (folio_test_hugetlb(folio)) {
2981                 h = folio_hstate(folio);
2982         } else {
2983                 spin_unlock_irq(&hugetlb_lock);
2984                 return 0;
2985         }
2986         spin_unlock_irq(&hugetlb_lock);
2987
2988         /*
2989          * Fence off gigantic pages as there is a cyclic dependency between
2990          * alloc_contig_range and them. Return -ENOMEM as this has the effect
2991          * of bailing out right away without further retrying.
2992          */
2993         if (hstate_is_gigantic(h))
2994                 return -ENOMEM;
2995
2996         if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
2997                 ret = 0;
2998         else if (!folio_ref_count(folio))
2999                 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
3000
3001         return ret;
3002 }
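/*
 * Return-value contract, as implemented above: 0 means the caller may treat
 * the page as handled (it was either isolated onto @list or replaced and
 * dissolved), -EBUSY means isolation failed because someone holds a reference
 * (a retry may succeed later), and -ENOMEM means give up (gigantic page or no
 * replacement folio).  A hypothetical caller scanning a range could do:
 *
 *	ret = isolate_or_dissolve_huge_page(page, &migrate_list);
 *	if (ret == -EBUSY)
 *		goto retry_later;	// transient failure
 *	if (ret)
 *		goto fail;		// hard failure, e.g. -ENOMEM
 */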
3003
3004 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
3005                                     unsigned long addr, int avoid_reserve)
3006 {
3007         struct hugepage_subpool *spool = subpool_vma(vma);
3008         struct hstate *h = hstate_vma(vma);
3009         struct folio *folio;
3010         long map_chg, map_commit;
3011         long gbl_chg;
3012         int ret, idx;
3013         struct hugetlb_cgroup *h_cg = NULL;
3014         bool deferred_reserve;
3015
3016         idx = hstate_index(h);
3017         /*
3018          * Examine the region/reserve map to determine if the process
3019          * has a reservation for the page to be allocated.  A return
3020          * code of zero indicates a reservation exists (no change).
3021          */
3022         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3023         if (map_chg < 0)
3024                 return ERR_PTR(-ENOMEM);
3025
3026         /*
3027          * Processes that did not create the mapping will have no
3028          * reserves as indicated by the region/reserve map. Check
3029          * that the allocation will not exceed the subpool limit.
3030          * Allocations for MAP_NORESERVE mappings also need to be
3031          * checked against any subpool limit.
3032          */
3033         if (map_chg || avoid_reserve) {
3034                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
3035                 if (gbl_chg < 0) {
3036                         vma_end_reservation(h, vma, addr);
3037                         return ERR_PTR(-ENOSPC);
3038                 }
3039
3040                 /*
3041                  * Even though there was no reservation in the region/reserve
3042                  * map, there could be reservations associated with the
3043                  * subpool that can be used.  This would be indicated if the
3044                  * return value of hugepage_subpool_get_pages() is zero.
3045                  * However, if avoid_reserve is specified we still avoid even
3046                  * the subpool reservations.
3047                  */
3048                 if (avoid_reserve)
3049                         gbl_chg = 1;
3050         }
3051
3052         /* If this allocation is not consuming a reservation, charge it now.
3053          */
3054         deferred_reserve = map_chg || avoid_reserve;
3055         if (deferred_reserve) {
3056                 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3057                         idx, pages_per_huge_page(h), &h_cg);
3058                 if (ret)
3059                         goto out_subpool_put;
3060         }
3061
3062         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3063         if (ret)
3064                 goto out_uncharge_cgroup_reservation;
3065
3066         spin_lock_irq(&hugetlb_lock);
3067         /*
3068          * gbl_chg is passed to indicate whether or not a page must be taken
3069          * from the global free pool (global change).  gbl_chg == 0 indicates
3070          * a reservation exists for the allocation.
3071          */
3072         folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3073         if (!folio) {
3074                 spin_unlock_irq(&hugetlb_lock);
3075                 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3076                 if (!folio)
3077                         goto out_uncharge_cgroup;
3078                 spin_lock_irq(&hugetlb_lock);
3079                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3080                         folio_set_hugetlb_restore_reserve(folio);
3081                         h->resv_huge_pages--;
3082                 }
3083                 list_add(&folio->lru, &h->hugepage_activelist);
3084                 folio_ref_unfreeze(folio, 1);
3085                 /* Fall through */
3086         }
3087
3088         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3089         /* If allocation is not consuming a reservation, also store the
3090          * hugetlb_cgroup pointer on the page.
3091          */
3092         if (deferred_reserve) {
3093                 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3094                                                   h_cg, folio);
3095         }
3096
3097         spin_unlock_irq(&hugetlb_lock);
3098
3099         hugetlb_set_folio_subpool(folio, spool);
3100
3101         map_commit = vma_commit_reservation(h, vma, addr);
3102         if (unlikely(map_chg > map_commit)) {
3103                 /*
3104                  * The page was added to the reservation map between
3105                  * vma_needs_reservation and vma_commit_reservation.
3106                  * This indicates a race with hugetlb_reserve_pages.
3107                  * Adjust for the subpool count incremented above AND
3108                  * in hugetlb_reserve_pages for the same page.  Also,
3109                  * the reservation count added in hugetlb_reserve_pages
3110                  * no longer applies.
3111                  */
3112                 long rsv_adjust;
3113
3114                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3115                 hugetlb_acct_memory(h, -rsv_adjust);
3116                 if (deferred_reserve)
3117                         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3118                                         pages_per_huge_page(h), folio);
3119         }
3120         return folio;
3121
3122 out_uncharge_cgroup:
3123         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3124 out_uncharge_cgroup_reservation:
3125         if (deferred_reserve)
3126                 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3127                                                     h_cg);
3128 out_subpool_put:
3129         if (map_chg || avoid_reserve)
3130                 hugepage_subpool_put_pages(spool, 1);
3131         vma_end_reservation(h, vma, addr);
3132         return ERR_PTR(-ENOSPC);
3133 }
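/*
 * Caller-facing behaviour, summarized from the code above: the return value
 * is either a folio or an ERR_PTR().  -ENOMEM is returned when the reserve
 * map cannot be queried/extended; -ENOSPC when the subpool or hugetlb cgroup
 * limits are hit, or no page can be dequeued or freshly allocated.  A minimal
 * hypothetical caller pattern:
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);	// -ENOMEM or -ENOSPC
 *	...				// map the folio, or
 *	folio_put(folio);		// drop it on failure paths
 */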
3134
3135 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3136         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3137 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3138 {
3139         struct huge_bootmem_page *m = NULL; /* initialize for clang */
3140         int nr_nodes, node;
3141
3142         /* do node specific alloc */
3143         if (nid != NUMA_NO_NODE) {
3144                 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3145                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3146                 if (!m)
3147                         return 0;
3148                 goto found;
3149         }
3150         /* allocate from next node when distributing huge pages */
3151         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3152                 m = memblock_alloc_try_nid_raw(
3153                                 huge_page_size(h), huge_page_size(h),
3154                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3155                 /*
3156                  * Use the beginning of the huge page to store the
3157                  * huge_bootmem_page struct (until gather_bootmem
3158                  * puts them into the mem_map).
3159                  */
3160                 if (!m)
3161                         return 0;
3162                 goto found;
3163         }
3164
3165 found:
3166         /* Put them into a private list first because mem_map is not up yet */
3167         INIT_LIST_HEAD(&m->list);
3168         list_add(&m->list, &huge_boot_pages);
3169         m->hstate = h;
3170         return 1;
3171 }
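/*
 * Usage note (illustrative): this bootmem path backs boot-time requests for
 * gigantic pages, e.g. a kernel command line such as
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=8
 *
 * or, with the per-node form, hugepages=0:4,1:4.  The memblock allocations
 * made here are handed to the hugepage pool later by
 * gather_bootmem_prealloc() below.
 */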
3172
3173 /*
3174  * Put bootmem huge pages into the standard lists after mem_map is up.
3175  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
3176  */
3177 static void __init gather_bootmem_prealloc(void)
3178 {
3179         struct huge_bootmem_page *m;
3180
3181         list_for_each_entry(m, &huge_boot_pages, list) {
3182                 struct page *page = virt_to_page(m);
3183                 struct folio *folio = page_folio(page);
3184                 struct hstate *h = m->hstate;
3185
3186                 VM_BUG_ON(!hstate_is_gigantic(h));
3187                 WARN_ON(folio_ref_count(folio) != 1);
3188                 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3189                         WARN_ON(folio_test_reserved(folio));
3190                         prep_new_hugetlb_folio(h, folio, folio_nid(folio));
3191                         free_huge_folio(folio); /* add to the hugepage allocator */
3192                 } else {
3193                         /* VERY unlikely inflated ref count on a tail page */
3194                         free_gigantic_folio(folio, huge_page_order(h));
3195                 }
3196
3197                 /*
3198                  * We need to restore the 'stolen' pages to totalram_pages
3199                  * in order to fix confusing memory reports from free(1) and
3200                  * other side-effects, like CommitLimit going negative.
3201                  */
3202                 adjust_managed_page_count(page, pages_per_huge_page(h));
3203                 cond_resched();
3204         }
3205 }
3206 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3207 {
3208         unsigned long i;
3209         char buf[32];
3210
3211         for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3212                 if (hstate_is_gigantic(h)) {
3213                         if (!alloc_bootmem_huge_page(h, nid))
3214                                 break;
3215                 } else {
3216                         struct folio *folio;
3217                         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3218
3219                         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3220                                         &node_states[N_MEMORY], NULL);
3221                         if (!folio)
3222                                 break;
3223                         free_huge_folio(folio); /* free it into the hugepage allocator */
3224                 }
3225                 cond_resched();
3226         }
3227         if (i == h->max_huge_pages_node[nid])
3228                 return;
3229
3230         string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3231         pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3232                 h->max_huge_pages_node[nid], buf, nid, i);
3233         h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3234         h->max_huge_pages_node[nid] = i;
3235 }
3236
3237 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3238 {
3239         unsigned long i;
3240         nodemask_t *node_alloc_noretry;
3241         bool node_specific_alloc = false;
3242
3243         /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3244         if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3245                 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3246                 return;
3247         }
3248
3249         /* do node specific alloc */
3250         for_each_online_node(i) {
3251                 if (h->max_huge_pages_node[i] > 0) {
3252                         hugetlb_hstate_alloc_pages_onenode(h, i);
3253                         node_specific_alloc = true;
3254                 }
3255         }
3256
3257         if (node_specific_alloc)
3258                 return;
3259
3260         /* below will do node-balanced allocation across all allowed nodes */
3261         if (!hstate_is_gigantic(h)) {
3262                 /*
3263                  * Bit mask controlling how hard we retry per-node allocations.
3264                  * Ignore errors as lower level routines can deal with
3265                  * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3266                  * time, we are likely in bigger trouble.
3267                  */
3268                 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3269                                                 GFP_KERNEL);
3270         } else {
3271                 /* allocations done at boot time */
3272                 node_alloc_noretry = NULL;
3273         }
3274
3275         /* bit mask controlling how hard we retry per-node allocations */
3276         if (node_alloc_noretry)
3277                 nodes_clear(*node_alloc_noretry);
3278
3279         for (i = 0; i < h->max_huge_pages; ++i) {
3280                 if (hstate_is_gigantic(h)) {
3281                         if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3282                                 break;
3283                 } else if (!alloc_pool_huge_page(h,
3284                                          &node_states[N_MEMORY],
3285                                          node_alloc_noretry))
3286                         break;
3287                 cond_resched();
3288         }
3289         if (i < h->max_huge_pages) {
3290                 char buf[32];
3291
3292                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3293                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3294                         h->max_huge_pages, buf, i);
3295                 h->max_huge_pages = i;
3296         }
3297         kfree(node_alloc_noretry);
3298 }
3299
3300 static void __init hugetlb_init_hstates(void)
3301 {
3302         struct hstate *h, *h2;
3303
3304         for_each_hstate(h) {
3305                 /* oversize hugepages were init'ed in early boot */
3306                 if (!hstate_is_gigantic(h))
3307                         hugetlb_hstate_alloc_pages(h);
3308
3309                 /*
3310                  * Set demote order for each hstate.  Note that
3311                  * h->demote_order is initially 0.
3312                  * - We can not demote gigantic pages if runtime freeing
3313                  *   is not supported, so skip this.
3314                  * - If CMA allocation is possible, we can not demote
3315                  *   HUGETLB_PAGE_ORDER or smaller size pages.
3316                  */
3317                 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3318                         continue;
3319                 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3320                         continue;
3321                 for_each_hstate(h2) {
3322                         if (h2 == h)
3323                                 continue;
3324                         if (h2->order < h->order &&
3325                             h2->order > h->demote_order)
3326                                 h->demote_order = h2->order;
3327                 }
3328         }
3329 }
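/*
 * Worked example (illustrative, assuming the usual x86_64 hstates of 2 MB and
 * 1 GB): the loop above leaves the 2 MB hstate with demote_order == 0 (there
 * is no smaller hstate) and gives the 1 GB hstate a demote_order equal to the
 * 2 MB order, i.e. the largest smaller hstate.  When hugetlb_cma is in use,
 * hstates at or below HUGETLB_PAGE_ORDER keep demote_order == 0 because of
 * the "continue" above.
 */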
3330
3331 static void __init report_hugepages(void)
3332 {
3333         struct hstate *h;
3334
3335         for_each_hstate(h) {
3336                 char buf[32];
3337
3338                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3339                 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3340                         buf, h->free_huge_pages);
3341                 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3342                         hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3343         }
3344 }
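/*
 * Example boot log output produced by the format strings above (illustrative;
 * exact numbers depend on the configuration):
 *
 *	HugeTLB: registered 2.00 MiB page size, pre-allocated 512 pages
 *	HugeTLB: 28 KiB vmemmap can be freed for a 2.00 MiB page
 */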
3345
3346 #ifdef CONFIG_HIGHMEM
3347 static void try_to_free_low(struct hstate *h, unsigned long count,
3348                                                 nodemask_t *nodes_allowed)
3349 {
3350         int i;
3351         LIST_HEAD(page_list);
3352
3353         lockdep_assert_held(&hugetlb_lock);
3354         if (hstate_is_gigantic(h))
3355                 return;
3356
3357         /*
3358          * Collect pages to be freed on a list, and free after dropping lock
3359          */
3360         for_each_node_mask(i, *nodes_allowed) {
3361                 struct page *page, *next;
3362                 struct list_head *freel = &h->hugepage_freelists[i];
3363                 list_for_each_entry_safe(page, next, freel, lru) {
3364                         if (count >= h->nr_huge_pages)
3365                                 goto out;
3366                         if (PageHighMem(page))
3367                                 continue;
3368                         remove_hugetlb_folio(h, page_folio(page), false);
3369                         list_add(&page->lru, &page_list);
3370                 }
3371         }
3372
3373 out:
3374         spin_unlock_irq(&hugetlb_lock);
3375         update_and_free_pages_bulk(h, &page_list);
3376         spin_lock_irq(&hugetlb_lock);
3377 }
3378 #else
3379 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3380                                                 nodemask_t *nodes_allowed)
3381 {
3382 }
3383 #endif
3384
3385 /*
3386  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3387  * balanced by operating on them in a round-robin fashion.
3388  * Returns 1 if an adjustment was made.
3389  */
3390 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3391                                 int delta)
3392 {
3393         int nr_nodes, node;
3394
3395         lockdep_assert_held(&hugetlb_lock);
3396         VM_BUG_ON(delta != -1 && delta != 1);
3397
3398         if (delta < 0) {
3399                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3400                         if (h->surplus_huge_pages_node[node])
3401                                 goto found;
3402                 }
3403         } else {
3404                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3405                         if (h->surplus_huge_pages_node[node] <
3406                                         h->nr_huge_pages_node[node])
3407                                 goto found;
3408                 }
3409         }
3410         return 0;
3411
3412 found:
3413         h->surplus_huge_pages += delta;
3414         h->surplus_huge_pages_node[node] += delta;
3415         return 1;
3416 }
3417
3418 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3419 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3420                               nodemask_t *nodes_allowed)
3421 {
3422         unsigned long min_count, ret;
3423         struct page *page;
3424         LIST_HEAD(page_list);
3425         NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3426
3427         /*
3428          * Bit mask controlling how hard we retry per-node allocations.
3429          * If we can not allocate the bit mask, do not attempt to allocate
3430          * the requested huge pages.
3431          */
3432         if (node_alloc_noretry)
3433                 nodes_clear(*node_alloc_noretry);
3434         else
3435                 return -ENOMEM;
3436
3437         /*
3438          * resize_lock mutex prevents concurrent adjustments to number of
3439          * pages in hstate via the proc/sysfs interfaces.
3440          */
3441         mutex_lock(&h->resize_lock);
3442         flush_free_hpage_work(h);
3443         spin_lock_irq(&hugetlb_lock);
3444
3445         /*
3446          * Check for a node specific request.
3447          * Changing node specific huge page count may require a corresponding
3448          * change to the global count.  In any case, the passed node mask
3449          * (nodes_allowed) will restrict alloc/free to the specified node.
3450          */
3451         if (nid != NUMA_NO_NODE) {
3452                 unsigned long old_count = count;
3453
3454                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3455                 /*
3456                  * User may have specified a large count value which caused the
3457                  * above calculation to overflow.  In this case, they wanted
3458                  * to allocate as many huge pages as possible.  Set count to
3459                  * largest possible value to align with their intention.
3460                  */
3461                 if (count < old_count)
3462                         count = ULONG_MAX;
3463         }
3464
3465         /*
3466          * Runtime allocation of gigantic pages depends on the capability for
3467          * large page range allocation.
3468          * If the system does not provide this feature, return an error when
3469          * the user tries to allocate gigantic pages, but still let the user
3470          * free boot-time allocated gigantic pages.
3471          */
3472         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3473                 if (count > persistent_huge_pages(h)) {
3474                         spin_unlock_irq(&hugetlb_lock);
3475                         mutex_unlock(&h->resize_lock);
3476                         NODEMASK_FREE(node_alloc_noretry);
3477                         return -EINVAL;
3478                 }
3479                 /* Fall through to decrease pool */
3480         }
3481
3482         /*
3483          * Increase the pool size
3484          * First take pages out of surplus state.  Then make up the
3485          * remaining difference by allocating fresh huge pages.
3486          *
3487          * We might race with alloc_surplus_hugetlb_folio() here and be unable
3488          * to convert a surplus huge page to a normal huge page. That is
3489          * not critical, though, it just means the overall size of the
3490          * pool might be one hugepage larger than it needs to be, but
3491          * within all the constraints specified by the sysctls.
3492          */
3493         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3494                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3495                         break;
3496         }
3497
3498         while (count > persistent_huge_pages(h)) {
3499                 /*
3500                  * If this allocation races such that we no longer need the
3501                  * page, free_huge_folio will handle it by freeing the page
3502                  * and reducing the surplus.
3503                  */
3504                 spin_unlock_irq(&hugetlb_lock);
3505
3506                 /* yield cpu to avoid soft lockup */
3507                 cond_resched();
3508
3509                 ret = alloc_pool_huge_page(h, nodes_allowed,
3510                                                 node_alloc_noretry);
3511                 spin_lock_irq(&hugetlb_lock);
3512                 if (!ret)
3513                         goto out;
3514
3515                 /* Bail for signals. Probably ctrl-c from user */
3516                 if (signal_pending(current))
3517                         goto out;
3518         }
3519
3520         /*
3521          * Decrease the pool size
3522          * First return free pages to the buddy allocator (being careful
3523          * to keep enough around to satisfy reservations).  Then place
3524          * pages into surplus state as needed so the pool will shrink
3525          * to the desired size as pages become free.
3526          *
3527          * By placing pages into the surplus state independent of the
3528          * overcommit value, we are allowing the surplus pool size to
3529          * exceed overcommit. There are few sane options here. Since
3530          * alloc_surplus_hugetlb_folio() is checking the global counter,
3531          * though, we'll note that we're not allowed to exceed surplus
3532          * and won't grow the pool anywhere else. Not until one of the
3533          * sysctls is changed, or the surplus pages go out of use.
3534          */
3535         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3536         min_count = max(count, min_count);
3537         try_to_free_low(h, min_count, nodes_allowed);
3538
3539         /*
3540          * Collect pages to be removed on list without dropping lock
3541          */
3542         while (min_count < persistent_huge_pages(h)) {
3543                 page = remove_pool_huge_page(h, nodes_allowed, 0);
3544                 if (!page)
3545                         break;
3546
3547                 list_add(&page->lru, &page_list);
3548         }
3549         /* free the pages after dropping lock */
3550         spin_unlock_irq(&hugetlb_lock);
3551         update_and_free_pages_bulk(h, &page_list);
3552         flush_free_hpage_work(h);
3553         spin_lock_irq(&hugetlb_lock);
3554
3555         while (count < persistent_huge_pages(h)) {
3556                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3557                         break;
3558         }
3559 out:
3560         h->max_huge_pages = persistent_huge_pages(h);
3561         spin_unlock_irq(&hugetlb_lock);
3562         mutex_unlock(&h->resize_lock);
3563
3564         NODEMASK_FREE(node_alloc_noretry);
3565
3566         return 0;
3567 }
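/*
 * Usage note: set_max_huge_pages() is the common sink for pool resizing
 * requests, e.g. writes to /proc/sys/vm/nr_hugepages and to the per-hstate
 * sysfs nr_hugepages files shown further below, such as (illustrative,
 * assuming 2 MB huge pages):
 *
 *	echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */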
3568
3569 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
3570 {
3571         int i, nid = folio_nid(folio);
3572         struct hstate *target_hstate;
3573         struct page *subpage;
3574         struct folio *inner_folio;
3575         int rc = 0;
3576
3577         target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3578
3579         remove_hugetlb_folio_for_demote(h, folio, false);
3580         spin_unlock_irq(&hugetlb_lock);
3581
3582         rc = hugetlb_vmemmap_restore(h, &folio->page);
3583         if (rc) {
3584                 /* Allocation of vmemmap failed, we cannot demote the folio */
3585                 spin_lock_irq(&hugetlb_lock);
3586                 folio_ref_unfreeze(folio, 1);
3587                 add_hugetlb_folio(h, folio, false);
3588                 return rc;
3589         }
3590
3591         /*
3592          * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3593          * sizes as it will not ref count folios.
3594          */
3595         destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3596
3597         /*
3598          * Taking target hstate mutex synchronizes with set_max_huge_pages.
3599          * Without the mutex, pages added to target hstate could be marked
3600          * as surplus.
3601          *
3602          * Note that we already hold h->resize_lock.  To prevent deadlock,
3603          * use the convention of always taking larger size hstate mutex first.
3604          */
3605         mutex_lock(&target_hstate->resize_lock);
3606         for (i = 0; i < pages_per_huge_page(h);
3607                                 i += pages_per_huge_page(target_hstate)) {
3608                 subpage = folio_page(folio, i);
3609                 inner_folio = page_folio(subpage);
3610                 if (hstate_is_gigantic(target_hstate))
3611                         prep_compound_gigantic_folio_for_demote(inner_folio,
3612                                                         target_hstate->order);
3613                 else
3614                         prep_compound_page(subpage, target_hstate->order);
3615                 folio_change_private(inner_folio, NULL);
3616                 prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
3617                 free_huge_folio(inner_folio);
3618         }
3619         mutex_unlock(&target_hstate->resize_lock);
3620
3621         spin_lock_irq(&hugetlb_lock);
3622
3623         /*
3624          * Not absolutely necessary, but for consistency update max_huge_pages
3625          * based on pool changes for the demoted page.
3626          */
3627         h->max_huge_pages--;
3628         target_hstate->max_huge_pages +=
3629                 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3630
3631         return rc;
3632 }
3633
3634 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3635         __must_hold(&hugetlb_lock)
3636 {
3637         int nr_nodes, node;
3638         struct folio *folio;
3639
3640         lockdep_assert_held(&hugetlb_lock);
3641
3642         /* We should never get here if no demote order */
3643         if (!h->demote_order) {
3644                 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3645                 return -EINVAL;         /* internal error */
3646         }
3647
3648         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3649                 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3650                         if (folio_test_hwpoison(folio))
3651                                 continue;
3652                         return demote_free_hugetlb_folio(h, folio);
3653                 }
3654         }
3655
3656         /*
3657          * Only way to get here is if all pages on free lists are poisoned.
3658          * Return -EBUSY so that caller will not retry.
3659          */
3660         return -EBUSY;
3661 }
3662
3663 #define HSTATE_ATTR_RO(_name) \
3664         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3665
3666 #define HSTATE_ATTR_WO(_name) \
3667         static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3668
3669 #define HSTATE_ATTR(_name) \
3670         static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3671
3672 static struct kobject *hugepages_kobj;
3673 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3674
3675 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3676
3677 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3678 {
3679         int i;
3680
3681         for (i = 0; i < HUGE_MAX_HSTATE; i++)
3682                 if (hstate_kobjs[i] == kobj) {
3683                         if (nidp)
3684                                 *nidp = NUMA_NO_NODE;
3685                         return &hstates[i];
3686                 }
3687
3688         return kobj_to_node_hstate(kobj, nidp);
3689 }
3690
3691 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3692                                         struct kobj_attribute *attr, char *buf)
3693 {
3694         struct hstate *h;
3695         unsigned long nr_huge_pages;
3696         int nid;
3697
3698         h = kobj_to_hstate(kobj, &nid);
3699         if (nid == NUMA_NO_NODE)
3700                 nr_huge_pages = h->nr_huge_pages;
3701         else
3702                 nr_huge_pages = h->nr_huge_pages_node[nid];
3703
3704         return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3705 }
3706
3707 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3708                                            struct hstate *h, int nid,
3709                                            unsigned long count, size_t len)
3710 {
3711         int err;
3712         nodemask_t nodes_allowed, *n_mask;
3713
3714         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3715                 return -EINVAL;
3716
3717         if (nid == NUMA_NO_NODE) {
3718                 /*
3719                  * global hstate attribute
3720                  */
3721                 if (!(obey_mempolicy &&
3722                                 init_nodemask_of_mempolicy(&nodes_allowed)))
3723                         n_mask = &node_states[N_MEMORY];
3724                 else
3725                         n_mask = &nodes_allowed;
3726         } else {
3727                 /*
3728                  * Node specific request.  count adjustment happens in
3729                  * set_max_huge_pages() after acquiring hugetlb_lock.
3730                  */
3731                 init_nodemask_of_node(&nodes_allowed, nid);
3732                 n_mask = &nodes_allowed;
3733         }
3734
3735         err = set_max_huge_pages(h, count, nid, n_mask);
3736
3737         return err ? err : len;
3738 }
3739
3740 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3741                                          struct kobject *kobj, const char *buf,
3742                                          size_t len)
3743 {
3744         struct hstate *h;
3745         unsigned long count;
3746         int nid;
3747         int err;
3748
3749         err = kstrtoul(buf, 10, &count);
3750         if (err)
3751                 return err;
3752
3753         h = kobj_to_hstate(kobj, &nid);
3754         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3755 }
3756
3757 static ssize_t nr_hugepages_show(struct kobject *kobj,
3758                                        struct kobj_attribute *attr, char *buf)
3759 {
3760         return nr_hugepages_show_common(kobj, attr, buf);
3761 }
3762
3763 static ssize_t nr_hugepages_store(struct kobject *kobj,
3764                struct kobj_attribute *attr, const char *buf, size_t len)
3765 {
3766         return nr_hugepages_store_common(false, kobj, buf, len);
3767 }
3768 HSTATE_ATTR(nr_hugepages);
3769
3770 #ifdef CONFIG_NUMA
3771
3772 /*
3773  * hstate attribute for an optional mempolicy-based constraint on persistent
3774  * huge page alloc/free.
3775  */
3776 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3777                                            struct kobj_attribute *attr,
3778                                            char *buf)
3779 {
3780         return nr_hugepages_show_common(kobj, attr, buf);
3781 }
3782
3783 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
3784                struct kobj_attribute *attr, const char *buf, size_t len)
3785 {
3786         return nr_hugepages_store_common(true, kobj, buf, len);
3787 }
3788 HSTATE_ATTR(nr_hugepages_mempolicy);
3789 #endif
3790
3791
3792 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3793                                         struct kobj_attribute *attr, char *buf)
3794 {
3795         struct hstate *h = kobj_to_hstate(kobj, NULL);
3796         return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3797 }
3798
3799 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3800                 struct kobj_attribute *attr, const char *buf, size_t count)
3801 {
3802         int err;
3803         unsigned long input;
3804         struct hstate *h = kobj_to_hstate(kobj, NULL);
3805
3806         if (hstate_is_gigantic(h))
3807                 return -EINVAL;
3808
3809         err = kstrtoul(buf, 10, &input);
3810         if (err)
3811                 return err;
3812
3813         spin_lock_irq(&hugetlb_lock);
3814         h->nr_overcommit_huge_pages = input;
3815         spin_unlock_irq(&hugetlb_lock);
3816
3817         return count;
3818 }
3819 HSTATE_ATTR(nr_overcommit_hugepages);
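/*
 * Usage note: nr_overcommit_hugepages bounds how many surplus huge pages may
 * be allocated on demand beyond the persistent pool, e.g. (illustrative; the
 * 2048kB directory name assumes 2 MB huge pages):
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 */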
3820
3821 static ssize_t free_hugepages_show(struct kobject *kobj,
3822                                         struct kobj_attribute *attr, char *buf)
3823 {
3824         struct hstate *h;
3825         unsigned long free_huge_pages;
3826         int nid;
3827
3828         h = kobj_to_hstate(kobj, &nid);
3829         if (nid == NUMA_NO_NODE)
3830                 free_huge_pages = h->free_huge_pages;
3831         else
3832                 free_huge_pages = h->free_huge_pages_node[nid];
3833
3834         return sysfs_emit(buf, "%lu\n", free_huge_pages);
3835 }
3836 HSTATE_ATTR_RO(free_hugepages);
3837
3838 static ssize_t resv_hugepages_show(struct kobject *kobj,
3839                                         struct kobj_attribute *attr, char *buf)
3840 {
3841         struct hstate *h = kobj_to_hstate(kobj, NULL);
3842         return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3843 }
3844 HSTATE_ATTR_RO(resv_hugepages);
3845
3846 static ssize_t surplus_hugepages_show(struct kobject *kobj,
3847                                         struct kobj_attribute *attr, char *buf)
3848 {
3849         struct hstate *h;
3850         unsigned long surplus_huge_pages;
3851         int nid;
3852
3853         h = kobj_to_hstate(kobj, &nid);
3854         if (nid == NUMA_NO_NODE)
3855                 surplus_huge_pages = h->surplus_huge_pages;
3856         else
3857                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
3858
3859         return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3860 }
3861 HSTATE_ATTR_RO(surplus_hugepages);
3862
3863 static ssize_t demote_store(struct kobject *kobj,
3864                struct kobj_attribute *attr, const char *buf, size_t len)
3865 {
3866         unsigned long nr_demote;
3867         unsigned long nr_available;
3868         nodemask_t nodes_allowed, *n_mask;
3869         struct hstate *h;
3870         int err;
3871         int nid;
3872
3873         err = kstrtoul(buf, 10, &nr_demote);
3874         if (err)
3875                 return err;
3876         h = kobj_to_hstate(kobj, &nid);
3877
3878         if (nid != NUMA_NO_NODE) {
3879                 init_nodemask_of_node(&nodes_allowed, nid);
3880                 n_mask = &nodes_allowed;
3881         } else {
3882                 n_mask = &node_states[N_MEMORY];
3883         }
3884
3885         /* Synchronize with other sysfs operations modifying huge pages */
3886         mutex_lock(&h->resize_lock);
3887         spin_lock_irq(&hugetlb_lock);
3888
3889         while (nr_demote) {
3890                 /*
3891                  * Check for available pages to demote each time through the
3892                  * loop as demote_pool_huge_page will drop hugetlb_lock.
3893                  */
3894                 if (nid != NUMA_NO_NODE)
3895                         nr_available = h->free_huge_pages_node[nid];
3896                 else
3897                         nr_available = h->free_huge_pages;
3898                 nr_available -= h->resv_huge_pages;
3899                 if (!nr_available)
3900                         break;
3901
3902                 err = demote_pool_huge_page(h, n_mask);
3903                 if (err)
3904                         break;
3905
3906                 nr_demote--;
3907         }
3908
3909         spin_unlock_irq(&hugetlb_lock);
3910         mutex_unlock(&h->resize_lock);
3911
3912         if (err)
3913                 return err;
3914         return len;
3915 }
3916 HSTATE_ATTR_WO(demote);
3917
3918 static ssize_t demote_size_show(struct kobject *kobj,
3919                                         struct kobj_attribute *attr, char *buf)
3920 {
3921         struct hstate *h = kobj_to_hstate(kobj, NULL);
3922         unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3923
3924         return sysfs_emit(buf, "%lukB\n", demote_size);
3925 }
3926
3927 static ssize_t demote_size_store(struct kobject *kobj,
3928                                         struct kobj_attribute *attr,
3929                                         const char *buf, size_t count)
3930 {
3931         struct hstate *h, *demote_hstate;
3932         unsigned long demote_size;
3933         unsigned int demote_order;
3934
3935         demote_size = (unsigned long)memparse(buf, NULL);
3936
3937         demote_hstate = size_to_hstate(demote_size);
3938         if (!demote_hstate)
3939                 return -EINVAL;
3940         demote_order = demote_hstate->order;
3941         if (demote_order < HUGETLB_PAGE_ORDER)
3942                 return -EINVAL;
3943
3944         /* demote order must be smaller than hstate order */
3945         h = kobj_to_hstate(kobj, NULL);
3946         if (demote_order >= h->order)
3947                 return -EINVAL;
3948
3949         /* resize_lock synchronizes access to demote size and writes */
3950         mutex_lock(&h->resize_lock);
3951         h->demote_order = demote_order;
3952         mutex_unlock(&h->resize_lock);
3953
3954         return count;
3955 }
3956 HSTATE_ATTR(demote_size);
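/*
 * Usage note: demote_size selects the target hstate and demote triggers the
 * operation, e.g. (illustrative, assuming 1 GB and 2 MB hstates):
 *
 *	echo 2048kB > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *	echo 1      > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 *
 * which asks for one free 1 GB page to be split into free 2 MB pages.
 */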
3957
3958 static struct attribute *hstate_attrs[] = {
3959         &nr_hugepages_attr.attr,
3960         &nr_overcommit_hugepages_attr.attr,
3961         &free_hugepages_attr.attr,
3962         &resv_hugepages_attr.attr,
3963         &surplus_hugepages_attr.attr,
3964 #ifdef CONFIG_NUMA
3965         &nr_hugepages_mempolicy_attr.attr,
3966 #endif
3967         NULL,
3968 };
3969
3970 static const struct attribute_group hstate_attr_group = {
3971         .attrs = hstate_attrs,
3972 };
3973
3974 static struct attribute *hstate_demote_attrs[] = {
3975         &demote_size_attr.attr,
3976         &demote_attr.attr,
3977         NULL,
3978 };
3979
3980 static const struct attribute_group hstate_demote_attr_group = {
3981         .attrs = hstate_demote_attrs,
3982 };
3983
3984 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
3985                                     struct kobject **hstate_kobjs,
3986                                     const struct attribute_group *hstate_attr_group)
3987 {
3988         int retval;
3989         int hi = hstate_index(h);
3990
3991         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
3992         if (!hstate_kobjs[hi])
3993                 return -ENOMEM;
3994
3995         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3996         if (retval) {
3997                 kobject_put(hstate_kobjs[hi]);
3998                 hstate_kobjs[hi] = NULL;
3999                 return retval;
4000         }
4001
4002         if (h->demote_order) {
4003                 retval = sysfs_create_group(hstate_kobjs[hi],
4004                                             &hstate_demote_attr_group);
4005                 if (retval) {
4006                         pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
4007                         sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4008                         kobject_put(hstate_kobjs[hi]);
4009                         hstate_kobjs[hi] = NULL;
4010                         return retval;
4011                 }
4012         }
4013
4014         return 0;
4015 }
4016
4017 #ifdef CONFIG_NUMA
4018 static bool hugetlb_sysfs_initialized __ro_after_init;
4019
4020 /*
4021  * node_hstate/s - associate per node hstate attributes, via their kobjects,
4022  * with node devices in node_devices[] using a parallel array.  The array
4023  * index of a node device or _hstate == node id.
4024  * This is here to avoid any static dependency of the node device driver, in
4025  * the base kernel, on the hugetlb module.
4026  */
4027 struct node_hstate {
4028         struct kobject          *hugepages_kobj;
4029         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
4030 };
4031 static struct node_hstate node_hstates[MAX_NUMNODES];
4032
4033 /*
4034  * A subset of global hstate attributes for node devices
4035  */
4036 static struct attribute *per_node_hstate_attrs[] = {
4037         &nr_hugepages_attr.attr,
4038         &free_hugepages_attr.attr,
4039         &surplus_hugepages_attr.attr,
4040         NULL,
4041 };
4042
4043 static const struct attribute_group per_node_hstate_attr_group = {
4044         .attrs = per_node_hstate_attrs,
4045 };
4046
4047 /*
4048  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4049  * Returns node id via non-NULL nidp.
4050  */
4051 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4052 {
4053         int nid;
4054
4055         for (nid = 0; nid < nr_node_ids; nid++) {
4056                 struct node_hstate *nhs = &node_hstates[nid];
4057                 int i;
4058                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
4059                         if (nhs->hstate_kobjs[i] == kobj) {
4060                                 if (nidp)
4061                                         *nidp = nid;
4062                                 return &hstates[i];
4063                         }
4064         }
4065
4066         BUG();
4067         return NULL;
4068 }
4069
4070 /*
4071  * Unregister hstate attributes from a single node device.
4072  * No-op if no hstate attributes attached.
4073  */
4074 void hugetlb_unregister_node(struct node *node)
4075 {
4076         struct hstate *h;
4077         struct node_hstate *nhs = &node_hstates[node->dev.id];
4078
4079         if (!nhs->hugepages_kobj)
4080                 return;         /* no hstate attributes */
4081
4082         for_each_hstate(h) {
4083                 int idx = hstate_index(h);
4084                 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4085
4086                 if (!hstate_kobj)
4087                         continue;
4088                 if (h->demote_order)
4089                         sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4090                 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4091                 kobject_put(hstate_kobj);
4092                 nhs->hstate_kobjs[idx] = NULL;
4093         }
4094
4095         kobject_put(nhs->hugepages_kobj);
4096         nhs->hugepages_kobj = NULL;
4097 }
4098
4099
4100 /*
4101  * Register hstate attributes for a single node device.
4102  * No-op if attributes already registered.
4103  */
4104 void hugetlb_register_node(struct node *node)
4105 {
4106         struct hstate *h;
4107         struct node_hstate *nhs = &node_hstates[node->dev.id];
4108         int err;
4109
4110         if (!hugetlb_sysfs_initialized)
4111                 return;
4112
4113         if (nhs->hugepages_kobj)
4114                 return;         /* already allocated */
4115
4116         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
4117                                                         &node->dev.kobj);
4118         if (!nhs->hugepages_kobj)
4119                 return;
4120
4121         for_each_hstate(h) {
4122                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4123                                                 nhs->hstate_kobjs,
4124                                                 &per_node_hstate_attr_group);
4125                 if (err) {
4126                         pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4127                                 h->name, node->dev.id);
4128                         hugetlb_unregister_node(node);
4129                         break;
4130                 }
4131         }
4132 }
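/*
 * The per-node attributes registered here appear under the node devices, e.g.
 * (illustrative path for node 0 with 2 MB huge pages):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * As per_node_hstate_attrs above shows, only nr_hugepages, free_hugepages and
 * surplus_hugepages are exposed per node.
 */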
4133
4134 /*
4135  * hugetlb init time:  register hstate attributes for all registered node
4136  * devices of nodes that have memory.  All on-line nodes should have
4137  * registered their associated device by this time.
4138  */
4139 static void __init hugetlb_register_all_nodes(void)
4140 {
4141         int nid;
4142
4143         for_each_online_node(nid)
4144                 hugetlb_register_node(node_devices[nid]);
4145 }
4146 #else   /* !CONFIG_NUMA */
4147
4148 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4149 {
4150         BUG();
4151         if (nidp)
4152                 *nidp = -1;
4153         return NULL;
4154 }
4155
4156 static void hugetlb_register_all_nodes(void) { }
4157
4158 #endif
4159
4160 #ifdef CONFIG_CMA
4161 static void __init hugetlb_cma_check(void);
4162 #else
4163 static inline __init void hugetlb_cma_check(void)
4164 {
4165 }
4166 #endif
4167
4168 static void __init hugetlb_sysfs_init(void)
4169 {
4170         struct hstate *h;
4171         int err;
4172
4173         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4174         if (!hugepages_kobj)
4175                 return;
4176
4177         for_each_hstate(h) {
4178                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4179                                          hstate_kobjs, &hstate_attr_group);
4180                 if (err)
4181                         pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
4182         }
4183
4184 #ifdef CONFIG_NUMA
4185         hugetlb_sysfs_initialized = true;
4186 #endif
4187         hugetlb_register_all_nodes();
4188 }
4189
4190 #ifdef CONFIG_SYSCTL
4191 static void hugetlb_sysctl_init(void);
4192 #else
4193 static inline void hugetlb_sysctl_init(void) { }
4194 #endif
4195
4196 static int __init hugetlb_init(void)
4197 {
4198         int i;
4199
4200         BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4201                         __NR_HPAGEFLAGS);
4202
4203         if (!hugepages_supported()) {
4204                 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4205                         pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4206                 return 0;
4207         }
4208
4209         /*
4210          * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4211          * architectures depend on setup being done here.
4212          */
4213         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4214         if (!parsed_default_hugepagesz) {
4215                 /*
4216                  * If we did not parse a default huge page size, set
4217                  * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4218                  * number of huge pages for this default size was implicitly
4219                  * specified, set that here as well.
4220                  * Note that the implicit setting will overwrite an explicit
4221                  * setting.  A warning will be printed in this case.
4222                  */
4223                 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4224                 if (default_hstate_max_huge_pages) {
4225                         if (default_hstate.max_huge_pages) {
4226                                 char buf[32];
4227
4228                                 string_get_size(huge_page_size(&default_hstate),
4229                                         1, STRING_UNITS_2, buf, 32);
4230                                 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4231                                         default_hstate.max_huge_pages, buf);
4232                                 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4233                                         default_hstate_max_huge_pages);
4234                         }
4235                         default_hstate.max_huge_pages =
4236                                 default_hstate_max_huge_pages;
4237
4238                         for_each_online_node(i)
4239                                 default_hstate.max_huge_pages_node[i] =
4240                                         default_hugepages_in_node[i];
4241                 }
4242         }
4243
4244         hugetlb_cma_check();
4245         hugetlb_init_hstates();
4246         gather_bootmem_prealloc();
4247         report_hugepages();
4248
4249         hugetlb_sysfs_init();
4250         hugetlb_cgroup_file_init();
4251         hugetlb_sysctl_init();
4252
4253 #ifdef CONFIG_SMP
4254         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4255 #else
4256         num_fault_mutexes = 1;
4257 #endif
4258         hugetlb_fault_mutex_table =
4259                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4260                               GFP_KERNEL);
4261         BUG_ON(!hugetlb_fault_mutex_table);
4262
4263         for (i = 0; i < num_fault_mutexes; i++)
4264                 mutex_init(&hugetlb_fault_mutex_table[i]);
4265         return 0;
4266 }
4267 subsys_initcall(hugetlb_init);
4268
4269 /* Overwritten by architectures with more huge page sizes */
4270 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4271 {
4272         return size == HPAGE_SIZE;
4273 }
4274
4275 void __init hugetlb_add_hstate(unsigned int order)
4276 {
4277         struct hstate *h;
4278         unsigned long i;
4279
4280         if (size_to_hstate(PAGE_SIZE << order)) {
4281                 return;
4282         }
4283         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4284         BUG_ON(order == 0);
4285         h = &hstates[hugetlb_max_hstate++];
4286         mutex_init(&h->resize_lock);
4287         h->order = order;
4288         h->mask = ~(huge_page_size(h) - 1);
4289         for (i = 0; i < MAX_NUMNODES; ++i)
4290                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4291         INIT_LIST_HEAD(&h->hugepage_activelist);
4292         h->next_nid_to_alloc = first_memory_node;
4293         h->next_nid_to_free = first_memory_node;
4294         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4295                                         huge_page_size(h)/SZ_1K);
4296
4297         parsed_hstate = h;
4298 }
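/*
 * For example, on x86_64 HUGETLB_PAGE_ORDER is 9 (HPAGE_SHIFT - PAGE_SHIFT),
 * so the hugetlb_add_hstate(HUGETLB_PAGE_ORDER) call in hugetlb_init() sets
 * up a 2 MB hstate named "hugepages-2048kB".  A "hugepagesz=1G" boot
 * parameter would likewise add an order-18 hstate named
 * "hugepages-1048576kB".
 */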
4299
4300 bool __init __weak hugetlb_node_alloc_supported(void)
4301 {
4302         return true;
4303 }
4304
4305 static void __init hugepages_clear_pages_in_node(void)
4306 {
4307         if (!hugetlb_max_hstate) {
4308                 default_hstate_max_huge_pages = 0;
4309                 memset(default_hugepages_in_node, 0,
4310                         sizeof(default_hugepages_in_node));
4311         } else {
4312                 parsed_hstate->max_huge_pages = 0;
4313                 memset(parsed_hstate->max_huge_pages_node, 0,
4314                         sizeof(parsed_hstate->max_huge_pages_node));
4315         }
4316 }
4317
4318 /*
4319  * hugepages command line processing
4320  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4321  * specification.  If not, ignore the hugepages value.  hugepages can also
4322  * be the first huge page command line option, in which case it implicitly
4323  * specifies the number of huge pages for the default size.
4324  */
4325 static int __init hugepages_setup(char *s)
4326 {
4327         unsigned long *mhp;
4328         static unsigned long *last_mhp;
4329         int node = NUMA_NO_NODE;
4330         int count;
4331         unsigned long tmp;
4332         char *p = s;
4333
4334         if (!parsed_valid_hugepagesz) {
4335                 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4336                 parsed_valid_hugepagesz = true;
4337                 return 1;
4338         }
4339
4340         /*
4341          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4342          * yet, so this hugepages= parameter goes to the "default hstate".
4343          * Otherwise, it goes with the previously parsed hugepagesz or
4344          * default_hugepagesz.
4345          */
4346         else if (!hugetlb_max_hstate)
4347                 mhp = &default_hstate_max_huge_pages;
4348         else
4349                 mhp = &parsed_hstate->max_huge_pages;
4350
4351         if (mhp == last_mhp) {
4352                 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4353                 return 1;
4354         }
4355
4356         while (*p) {
4357                 count = 0;
4358                 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4359                         goto invalid;
4360                 /* Parameter is node format */
4361                 if (p[count] == ':') {
4362                         if (!hugetlb_node_alloc_supported()) {
4363                                 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4364                                 return 1;
4365                         }
4366                         if (tmp >= MAX_NUMNODES || !node_online(tmp))
4367                                 goto invalid;
4368                         node = array_index_nospec(tmp, MAX_NUMNODES);
4369                         p += count + 1;
4370                         /* Parse hugepages */
4371                         if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4372                                 goto invalid;
4373                         if (!hugetlb_max_hstate)
4374                                 default_hugepages_in_node[node] = tmp;
4375                         else
4376                                 parsed_hstate->max_huge_pages_node[node] = tmp;
4377                         *mhp += tmp;
4378                         /* Go to parse next node */
4379                         if (p[count] == ',')
4380                                 p += count + 1;
4381                         else
4382                                 break;
4383                 } else {
4384                         if (p != s)
4385                                 goto invalid;
4386                         *mhp = tmp;
4387                         break;
4388                 }
4389         }
4390
4391         /*
4392          * Global state is always initialized later in hugetlb_init.
4393          * But we need to allocate pages for gigantic hstates here, early
4394          * enough to still use the bootmem allocator.
4395          */
4396         if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4397                 hugetlb_hstate_alloc_pages(parsed_hstate);
4398
4399         last_mhp = mhp;
4400
4401         return 1;
4402
4403 invalid:
4404         pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4405         hugepages_clear_pages_in_node();
4406         return 1;
4407 }
4408 __setup("hugepages=", hugepages_setup);
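/*
 * Example command lines accepted by hugepages_setup() (illustrative only):
 *
 *   hugepages=1024            allocate 1024 pages of the default size, or of
 *                             the size given by a preceding hugepagesz=
 *   hugepages=0:512,1:512     node format: 512 pages on node 0 and 512 on
 *                             node 1, if the architecture supports
 *                             node-specific allocation
 */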
4409
4410 /*
4411  * hugepagesz command line processing
4412  * A specific huge page size can only be specified once with hugepagesz.
4413  * hugepagesz is followed by hugepages on the command line.  The global
4414  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4415  * variable 'parsed_valid_hugepagesz' is used to determine if the prior
4416  * hugepagesz argument was valid.
4417 static int __init hugepagesz_setup(char *s)
4418 {
4419         unsigned long size;
4420         struct hstate *h;
4421
4422         parsed_valid_hugepagesz = false;
4423         size = (unsigned long)memparse(s, NULL);
4424
4425         if (!arch_hugetlb_valid_size(size)) {
4426                 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4427                 return 1;
4428         }
4429
4430         h = size_to_hstate(size);
4431         if (h) {
4432                 /*
4433                  * hstate for this size already exists.  This is normally
4434                  * an error, but is allowed if the existing hstate is the
4435                  * default hstate.  More specifically, it is only allowed if
4436                  * the number of huge pages for the default hstate was not
4437                  * previously specified.
4438                  */
4439                 if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4440                     default_hstate.max_huge_pages) {
4441                         pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4442                         return 1;
4443                 }
4444
4445                 /*
4446                  * No need to call hugetlb_add_hstate() as hstate already
4447                  * exists.  But, do set parsed_hstate so that a following
4448                  * hugepages= parameter will be applied to this hstate.
4449                  */
4450                 parsed_hstate = h;
4451                 parsed_valid_hugepagesz = true;
4452                 return 1;
4453         }
4454
4455         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4456         parsed_valid_hugepagesz = true;
4457         return 1;
4458 }
4459 __setup("hugepagesz=", hugepagesz_setup);
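/*
 * Each hugepages= value applies to the most recently parsed hugepagesz=, so
 * a command line such as (illustrative):
 *
 *   hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * requests two 1 GB pages and 512 2 MB pages.
 */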
4460
4461 /*
4462  * default_hugepagesz command line input
4463  * Only one instance of default_hugepagesz allowed on command line.
4464  * Only one instance of default_hugepagesz is allowed on the command line.
4465 static int __init default_hugepagesz_setup(char *s)
4466 {
4467         unsigned long size;
4468         int i;
4469
4470         parsed_valid_hugepagesz = false;
4471         if (parsed_default_hugepagesz) {
4472                 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4473                 return 1;
4474         }
4475
4476         size = (unsigned long)memparse(s, NULL);
4477
4478         if (!arch_hugetlb_valid_size(size)) {
4479                 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4480                 return 1;
4481         }
4482
4483         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4484         parsed_valid_hugepagesz = true;
4485         parsed_default_hugepagesz = true;
4486         default_hstate_idx = hstate_index(size_to_hstate(size));
4487
4488         /*
4489          * The number of default huge pages (for this size) could have been
4490          * specified as the first hugetlb parameter: hugepages=X.  If so,
4491          * then default_hstate_max_huge_pages is set.  If the default huge
4492          * page size is gigantic (> MAX_ORDER), then the pages must be
4493  * allocated here from the bootmem allocator.
4494          */
4495         if (default_hstate_max_huge_pages) {
4496                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4497                 for_each_online_node(i)
4498                         default_hstate.max_huge_pages_node[i] =
4499                                 default_hugepages_in_node[i];
4500                 if (hstate_is_gigantic(&default_hstate))
4501                         hugetlb_hstate_alloc_pages(&default_hstate);
4502                 default_hstate_max_huge_pages = 0;
4503         }
4504
4505         return 1;
4506 }
4507 __setup("default_hugepagesz=", default_hugepagesz_setup);
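/*
 * Illustrative use of default_hugepagesz=: "default_hugepagesz=1G hugepages=2"
 * makes 1 GB the default huge page size and requests two such pages.  As the
 * comment above notes, "hugepages=2 default_hugepagesz=1G" behaves the same
 * way: the early hugepages= count is carried in default_hstate_max_huge_pages
 * and applied once the default size is known.
 */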
4508
4509 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4510 {
4511 #ifdef CONFIG_NUMA
4512         struct mempolicy *mpol = get_task_policy(current);
4513
4514         /*
4515          * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4516          * (from policy_nodemask) specifically for hugetlb case
4517          * (from policy_nodemask) specifically for the hugetlb case
4518         if (mpol->mode == MPOL_BIND &&
4519                 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4520                  cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4521                 return &mpol->nodes;
4522 #endif
4523         return NULL;
4524 }
4525
4526 static unsigned int allowed_mems_nr(struct hstate *h)
4527 {
4528         int node;
4529         unsigned int nr = 0;
4530         nodemask_t *mbind_nodemask;
4531         unsigned int *array = h->free_huge_pages_node;
4532         gfp_t gfp_mask = htlb_alloc_mask(h);
4533
4534         mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4535         for_each_node_mask(node, cpuset_current_mems_allowed) {
4536                 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4537                         nr += array[node];
4538         }
4539
4540         return nr;
4541 }
4542
4543 #ifdef CONFIG_SYSCTL
4544 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4545                                           void *buffer, size_t *length,
4546                                           loff_t *ppos, unsigned long *out)
4547 {
4548         struct ctl_table dup_table;
4549
4550         /*
4551          * To avoid races with __do_proc_doulongvec_minmax(), we duplicate
4552          * @table and modify the copy instead of the original.
4553          */
4554         dup_table = *table;
4555         dup_table.data = out;
4556
4557         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4558 }
4559
4560 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4561                          struct ctl_table *table, int write,
4562                          void *buffer, size_t *length, loff_t *ppos)
4563 {
4564         struct hstate *h = &default_hstate;
4565         unsigned long tmp = h->max_huge_pages;
4566         int ret;
4567
4568         if (!hugepages_supported())
4569                 return -EOPNOTSUPP;
4570
4571         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4572                                              &tmp);
4573         if (ret)
4574                 goto out;
4575
4576         if (write)
4577                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4578                                                   NUMA_NO_NODE, tmp, *length);
4579 out:
4580         return ret;
4581 }
4582
4583 static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4584                           void *buffer, size_t *length, loff_t *ppos)
4585 {
4586
4587         return hugetlb_sysctl_handler_common(false, table, write,
4588                                                         buffer, length, ppos);
4589 }
4590
4591 #ifdef CONFIG_NUMA
4592 static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4593                           void *buffer, size_t *length, loff_t *ppos)
4594 {
4595         return hugetlb_sysctl_handler_common(true, table, write,
4596                                                         buffer, length, ppos);
4597 }
4598 #endif /* CONFIG_NUMA */
4599
4600 static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4601                 void *buffer, size_t *length, loff_t *ppos)
4602 {
4603         struct hstate *h = &default_hstate;
4604         unsigned long tmp;
4605         int ret;
4606
4607         if (!hugepages_supported())
4608                 return -EOPNOTSUPP;
4609
4610         tmp = h->nr_overcommit_huge_pages;
4611
4612         if (write && hstate_is_gigantic(h))
4613                 return -EINVAL;
4614
4615         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4616                                              &tmp);
4617         if (ret)
4618                 goto out;
4619
4620         if (write) {
4621                 spin_lock_irq(&hugetlb_lock);
4622                 h->nr_overcommit_huge_pages = tmp;
4623                 spin_unlock_irq(&hugetlb_lock);
4624         }
4625 out:
4626         return ret;
4627 }
4628
4629 static struct ctl_table hugetlb_table[] = {
4630         {
4631                 .procname       = "nr_hugepages",
4632                 .data           = NULL,
4633                 .maxlen         = sizeof(unsigned long),
4634                 .mode           = 0644,
4635                 .proc_handler   = hugetlb_sysctl_handler,
4636         },
4637 #ifdef CONFIG_NUMA
4638         {
4639                 .procname       = "nr_hugepages_mempolicy",
4640                 .data           = NULL,
4641                 .maxlen         = sizeof(unsigned long),
4642                 .mode           = 0644,
4643                 .proc_handler   = &hugetlb_mempolicy_sysctl_handler,
4644         },
4645 #endif
4646         {
4647                 .procname       = "hugetlb_shm_group",
4648                 .data           = &sysctl_hugetlb_shm_group,
4649                 .maxlen         = sizeof(gid_t),
4650                 .mode           = 0644,
4651                 .proc_handler   = proc_dointvec,
4652         },
4653         {
4654                 .procname       = "nr_overcommit_hugepages",
4655                 .data           = NULL,
4656                 .maxlen         = sizeof(unsigned long),
4657                 .mode           = 0644,
4658                 .proc_handler   = hugetlb_overcommit_handler,
4659         },
4660         { }
4661 };
4662
4663 static void hugetlb_sysctl_init(void)
4664 {
4665         register_sysctl_init("vm", hugetlb_table);
4666 }
4667 #endif /* CONFIG_SYSCTL */
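/*
 * With CONFIG_SYSCTL, the table above is registered under "vm", so the knobs
 * show up as /proc/sys/vm/nr_hugepages, /proc/sys/vm/nr_overcommit_hugepages,
 * /proc/sys/vm/hugetlb_shm_group and (with CONFIG_NUMA)
 * /proc/sys/vm/nr_hugepages_mempolicy.  For example, "sysctl vm.nr_hugepages=64"
 * resizes the default-size pool through hugetlb_sysctl_handler().
 */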
4668
4669 void hugetlb_report_meminfo(struct seq_file *m)
4670 {
4671         struct hstate *h;
4672         unsigned long total = 0;
4673
4674         if (!hugepages_supported())
4675                 return;
4676
4677         for_each_hstate(h) {
4678                 unsigned long count = h->nr_huge_pages;
4679
4680                 total += huge_page_size(h) * count;
4681
4682                 if (h == &default_hstate)
4683                         seq_printf(m,
4684                                    "HugePages_Total:   %5lu\n"
4685                                    "HugePages_Free:    %5lu\n"
4686                                    "HugePages_Rsvd:    %5lu\n"
4687                                    "HugePages_Surp:    %5lu\n"
4688                                    "Hugepagesize:   %8lu kB\n",
4689                                    count,
4690                                    h->free_huge_pages,
4691                                    h->resv_huge_pages,
4692                                    h->surplus_huge_pages,
4693                                    huge_page_size(h) / SZ_1K);
4694         }
4695
4696         seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4697 }
4698
4699 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4700 {
4701         struct hstate *h = &default_hstate;
4702
4703         if (!hugepages_supported())
4704                 return 0;
4705
4706         return sysfs_emit_at(buf, len,
4707                              "Node %d HugePages_Total: %5u\n"
4708                              "Node %d HugePages_Free:  %5u\n"
4709                              "Node %d HugePages_Surp:  %5u\n",
4710                              nid, h->nr_huge_pages_node[nid],
4711                              nid, h->free_huge_pages_node[nid],
4712                              nid, h->surplus_huge_pages_node[nid]);
4713 }
4714
4715 void hugetlb_show_meminfo_node(int nid)
4716 {
4717         struct hstate *h;
4718
4719         if (!hugepages_supported())
4720                 return;
4721
4722         for_each_hstate(h)
4723                 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4724                         nid,
4725                         h->nr_huge_pages_node[nid],
4726                         h->free_huge_pages_node[nid],
4727                         h->surplus_huge_pages_node[nid],
4728                         huge_page_size(h) / SZ_1K);
4729 }
4730
4731 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4732 {
4733         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4734                    K(atomic_long_read(&mm->hugetlb_usage)));
4735 }
4736
4737 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4738 unsigned long hugetlb_total_pages(void)
4739 {
4740         struct hstate *h;
4741         unsigned long nr_total_pages = 0;
4742
4743         for_each_hstate(h)
4744                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4745         return nr_total_pages;
4746 }
4747
4748 static int hugetlb_acct_memory(struct hstate *h, long delta)
4749 {
4750         int ret = -ENOMEM;
4751
4752         if (!delta)
4753                 return 0;
4754
4755         spin_lock_irq(&hugetlb_lock);
4756         /*
4757          * When cpuset is configured, it breaks the strict hugetlb page
4758          * reservation because the accounting is done against a global
4759          * variable.  Such a reservation is essentially meaningless in the
4760          * presence of cpusets: it is never checked against the page
4761          * availability of the cpuset the task runs in, so the application
4762          * can still be OOM killed by the kernel if that cpuset runs out of
4763          * free hugetlb pages.  Enforcing strict accounting together with
4764          * cpusets is practically impossible (or at least very ugly) because
4765          * cpusets are fluid: tasks and memory nodes can be moved between
4766          * them at any time.
4767          *
4768          * Changing the semantics of shared hugetlb mappings under cpusets
4769          * is undesirable.  To preserve some of the semantics, we fall back
4770          * to checking against the current free page availability, as a
4771          * best-effort attempt to minimize the impact of the semantic
4772          * change that cpusets introduce.
4773          *
4774          * Apart from cpusets, the memory policy mechanism also determines
4775          * from which node the kernel allocates memory on a NUMA system.
4776          * So, similar to cpusets, we should also take the memory policy of
4777          * the current task into account, for the reasons described above.
4778          */
4779         if (delta > 0) {
4780                 if (gather_surplus_pages(h, delta) < 0)
4781                         goto out;
4782
4783                 if (delta > allowed_mems_nr(h)) {
4784                         return_unused_surplus_pages(h, delta);
4785                         goto out;
4786                 }
4787         }
4788
4789         ret = 0;
4790         if (delta < 0)
4791                 return_unused_surplus_pages(h, (unsigned long) -delta);
4792
4793 out:
4794         spin_unlock_irq(&hugetlb_lock);
4795         return ret;
4796 }
4797
4798 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4799 {
4800         struct resv_map *resv = vma_resv_map(vma);
4801
4802         /*
4803          * HPAGE_RESV_OWNER indicates a private mapping.
4804          * This new VMA should share its siblings reservation map if present.
4805          * This new VMA should share its sibling's reservation map if present.
4806          * it is being copied for another still existing VMA.  As that VMA
4807          * has a reference to the reservation map it cannot disappear until
4808          * after this open call completes.  It is therefore safe to take a
4809          * new reference here without additional locking.
4810          */
4811         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4812                 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4813                 kref_get(&resv->refs);
4814         }
4815
4816         /*
4817          * vma_lock structure for sharable mappings is vma specific.
4818          * Clear old pointer (if copied via vm_area_dup) and allocate
4819          * new structure.  Before clearing, make sure vma_lock is not
4820          * for this vma.
4821          */
4822         if (vma->vm_flags & VM_MAYSHARE) {
4823                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4824
4825                 if (vma_lock) {
4826                         if (vma_lock->vma != vma) {
4827                                 vma->vm_private_data = NULL;
4828                                 hugetlb_vma_lock_alloc(vma);
4829                         } else
4830                                 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4831                 } else
4832                         hugetlb_vma_lock_alloc(vma);
4833         }
4834 }
4835
4836 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4837 {
4838         struct hstate *h = hstate_vma(vma);
4839         struct resv_map *resv;
4840         struct hugepage_subpool *spool = subpool_vma(vma);
4841         unsigned long reserve, start, end;
4842         long gbl_reserve;
4843
4844         hugetlb_vma_lock_free(vma);
4845
4846         resv = vma_resv_map(vma);
4847         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4848                 return;
4849
4850         start = vma_hugecache_offset(h, vma, vma->vm_start);
4851         end = vma_hugecache_offset(h, vma, vma->vm_end);
4852
4853         reserve = (end - start) - region_count(resv, start, end);
4854         hugetlb_cgroup_uncharge_counter(resv, start, end);
4855         if (reserve) {
4856                 /*
4857                  * Decrement reserve counts.  The global reserve count may be
4858                  * adjusted if the subpool has a minimum size.
4859                  */
4860                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4861                 hugetlb_acct_memory(h, -gbl_reserve);
4862         }
4863
4864         kref_put(&resv->refs, resv_map_release);
4865 }
4866
4867 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4868 {
4869         if (addr & ~(huge_page_mask(hstate_vma(vma))))
4870                 return -EINVAL;
4871
4872         /*
4873          * PMD sharing is only possible for PUD_SIZE-aligned address ranges
4874          * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4875          * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4876          */
4877         if (addr & ~PUD_MASK) {
4878                 /*
4879                  * hugetlb_vm_op_split is called right before we attempt to
4880                  * split the VMA. We will need to unshare PMDs in the old and
4881                  * new VMAs, so let's unshare before we split.
4882                  */
4883                 unsigned long floor = addr & PUD_MASK;
4884                 unsigned long ceil = floor + PUD_SIZE;
4885
4886                 if (floor >= vma->vm_start && ceil <= vma->vm_end)
4887                         hugetlb_unshare_pmds(vma, floor, ceil);
4888         }
4889
4890         return 0;
4891 }
4892
4893 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4894 {
4895         return huge_page_size(hstate_vma(vma));
4896 }
4897
4898 /*
4899  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4900  * handle_mm_fault() to try to instantiate regular-sized pages in the
4901  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4902  * this far.
4903  */
4904 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4905 {
4906         BUG();
4907         return 0;
4908 }
4909
4910 /*
4911  * When a new function is introduced to vm_operations_struct and added
4912  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4913  * This is because, under the System V memory model, mappings created via
4914  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4915  * but their original vm_ops are overwritten with shm_vm_ops.
4916  */
4917 const struct vm_operations_struct hugetlb_vm_ops = {
4918         .fault = hugetlb_vm_op_fault,
4919         .open = hugetlb_vm_op_open,
4920         .close = hugetlb_vm_op_close,
4921         .may_split = hugetlb_vm_op_split,
4922         .pagesize = hugetlb_vm_op_pagesize,
4923 };
4924
4925 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
4926                                 int writable)
4927 {
4928         pte_t entry;
4929         unsigned int shift = huge_page_shift(hstate_vma(vma));
4930
4931         if (writable) {
4932                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4933                                          vma->vm_page_prot)));
4934         } else {
4935                 entry = huge_pte_wrprotect(mk_huge_pte(page,
4936                                            vma->vm_page_prot));
4937         }
4938         entry = pte_mkyoung(entry);
4939         entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4940
4941         return entry;
4942 }
4943
4944 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4945                                    unsigned long address, pte_t *ptep)
4946 {
4947         pte_t entry;
4948
4949         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
4950         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4951                 update_mmu_cache(vma, address, ptep);
4952 }
4953
4954 bool is_hugetlb_entry_migration(pte_t pte)
4955 {
4956         swp_entry_t swp;
4957
4958         if (huge_pte_none(pte) || pte_present(pte))
4959                 return false;
4960         swp = pte_to_swp_entry(pte);
4961         if (is_migration_entry(swp))
4962                 return true;
4963         else
4964                 return false;
4965 }
4966
4967 static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4968 {
4969         swp_entry_t swp;
4970
4971         if (huge_pte_none(pte) || pte_present(pte))
4972                 return false;
4973         swp = pte_to_swp_entry(pte);
4974         if (is_hwpoison_entry(swp))
4975                 return true;
4976         else
4977                 return false;
4978 }
4979
4980 static void
4981 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4982                       struct folio *new_folio, pte_t old, unsigned long sz)
4983 {
4984         pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
4985
4986         __folio_mark_uptodate(new_folio);
4987         hugepage_add_new_anon_rmap(new_folio, vma, addr);
4988         if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
4989                 newpte = huge_pte_mkuffd_wp(newpte);
4990         set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4991         hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4992         folio_set_hugetlb_migratable(new_folio);
4993 }
4994
4995 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4996                             struct vm_area_struct *dst_vma,
4997                             struct vm_area_struct *src_vma)
4998 {
4999         pte_t *src_pte, *dst_pte, entry;
5000         struct folio *pte_folio;
5001         unsigned long addr;
5002         bool cow = is_cow_mapping(src_vma->vm_flags);
5003         struct hstate *h = hstate_vma(src_vma);
5004         unsigned long sz = huge_page_size(h);
5005         unsigned long npages = pages_per_huge_page(h);
5006         struct mmu_notifier_range range;
5007         unsigned long last_addr_mask;
5008         int ret = 0;
5009
5010         if (cow) {
5011                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5012                                         src_vma->vm_start,
5013                                         src_vma->vm_end);
5014                 mmu_notifier_invalidate_range_start(&range);
5015                 vma_assert_write_locked(src_vma);
5016                 raw_write_seqcount_begin(&src->write_protect_seq);
5017         } else {
5018                 /*
5019                  * For shared mappings the vma lock must be held before
5020                  * calling hugetlb_walk() in the src vma. Otherwise, the
5021                  * returned ptep could go away if part of a shared pmd and
5022                  * another thread calls huge_pmd_unshare.
5023                  */
5024                 hugetlb_vma_lock_read(src_vma);
5025         }
5026
5027         last_addr_mask = hugetlb_mask_last_page(h);
5028         for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5029                 spinlock_t *src_ptl, *dst_ptl;
5030                 src_pte = hugetlb_walk(src_vma, addr, sz);
5031                 if (!src_pte) {
5032                         addr |= last_addr_mask;
5033                         continue;
5034                 }
5035                 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5036                 if (!dst_pte) {
5037                         ret = -ENOMEM;
5038                         break;
5039                 }
5040
5041                 /*
5042                  * If the pagetables are shared don't copy or take references.
5043                  *
5044                  * dst_pte == src_pte is the common case of src/dest sharing.
5045                  * However, src could have 'unshared' and dst shares with
5046                  * another vma. So page_count of ptep page is checked instead
5047                  * to reliably determine whether pte is shared.
5048                  */
5049                 if (page_count(virt_to_page(dst_pte)) > 1) {
5050                         addr |= last_addr_mask;
5051                         continue;
5052                 }
5053
5054                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5055                 src_ptl = huge_pte_lockptr(h, src, src_pte);
5056                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5057                 entry = huge_ptep_get(src_pte);
5058 again:
5059                 if (huge_pte_none(entry)) {
5060                         /*
5061                          * Skip if src entry none.
5062                          */
5063                         ;
5064                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5065                         if (!userfaultfd_wp(dst_vma))
5066                                 entry = huge_pte_clear_uffd_wp(entry);
5067                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5068                 } else if (unlikely(is_hugetlb_entry_migration(entry))) {
5069                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
5070                         bool uffd_wp = pte_swp_uffd_wp(entry);
5071
5072                         if (!is_readable_migration_entry(swp_entry) && cow) {
5073                                 /*
5074                                  * COW mappings require pages in both
5075                                  * parent and child to be marked read-only.
5076                                  */
5077                                 swp_entry = make_readable_migration_entry(
5078                                                         swp_offset(swp_entry));
5079                                 entry = swp_entry_to_pte(swp_entry);
5080                                 if (userfaultfd_wp(src_vma) && uffd_wp)
5081                                         entry = pte_swp_mkuffd_wp(entry);
5082                                 set_huge_pte_at(src, addr, src_pte, entry, sz);
5083                         }
5084                         if (!userfaultfd_wp(dst_vma))
5085                                 entry = huge_pte_clear_uffd_wp(entry);
5086                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5087                 } else if (unlikely(is_pte_marker(entry))) {
5088                         pte_marker marker = copy_pte_marker(
5089                                 pte_to_swp_entry(entry), dst_vma);
5090
5091                         if (marker)
5092                                 set_huge_pte_at(dst, addr, dst_pte,
5093                                                 make_pte_marker(marker), sz);
5094                 } else {
5095                         entry = huge_ptep_get(src_pte);
5096                         pte_folio = page_folio(pte_page(entry));
5097                         folio_get(pte_folio);
5098
5099                         /*
5100                          * Failing to duplicate the anon rmap is a rare case
5101                          * where we see pinned hugetlb pages while they're
5102                          * prone to COW. We need to do the COW earlier during
5103                          * fork.
5104                          *
5105                          * When pre-allocating the page or copying data, we
5106                          * need to be without the pgtable locks since we could
5107                          * sleep during the process.
5108                          */
5109                         if (!folio_test_anon(pte_folio)) {
5110                                 page_dup_file_rmap(&pte_folio->page, true);
5111                         } else if (page_try_dup_anon_rmap(&pte_folio->page,
5112                                                           true, src_vma)) {
5113                                 pte_t src_pte_old = entry;
5114                                 struct folio *new_folio;
5115
5116                                 spin_unlock(src_ptl);
5117                                 spin_unlock(dst_ptl);
5118                                 /* Do not use reserve as it's privately owned */
5119                                 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5120                                 if (IS_ERR(new_folio)) {
5121                                         folio_put(pte_folio);
5122                                         ret = PTR_ERR(new_folio);
5123                                         break;
5124                                 }
5125                                 ret = copy_user_large_folio(new_folio,
5126                                                             pte_folio,
5127                                                             addr, dst_vma);
5128                                 folio_put(pte_folio);
5129                                 if (ret) {
5130                                         folio_put(new_folio);
5131                                         break;
5132                                 }
5133
5134                                 /* Install the new hugetlb folio if src pte stable */
5135                                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5136                                 src_ptl = huge_pte_lockptr(h, src, src_pte);
5137                                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5138                                 entry = huge_ptep_get(src_pte);
5139                                 if (!pte_same(src_pte_old, entry)) {
5140                                         restore_reserve_on_error(h, dst_vma, addr,
5141                                                                 new_folio);
5142                                         folio_put(new_folio);
5143                                         /* huge_ptep of dst_pte won't change as in child */
5144                                         goto again;
5145                                 }
5146                                 hugetlb_install_folio(dst_vma, dst_pte, addr,
5147                                                       new_folio, src_pte_old, sz);
5148                                 spin_unlock(src_ptl);
5149                                 spin_unlock(dst_ptl);
5150                                 continue;
5151                         }
5152
5153                         if (cow) {
5154                                 /*
5155                                  * No need to notify as we are downgrading page
5156                                  * table protection not changing it to point
5157                                  * to a new page.
5158                                  *
5159                                  * See Documentation/mm/mmu_notifier.rst
5160                                  */
5161                                 huge_ptep_set_wrprotect(src, addr, src_pte);
5162                                 entry = huge_pte_wrprotect(entry);
5163                         }
5164
5165                         if (!userfaultfd_wp(dst_vma))
5166                                 entry = huge_pte_clear_uffd_wp(entry);
5167
5168                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5169                         hugetlb_count_add(npages, dst);
5170                 }
5171                 spin_unlock(src_ptl);
5172                 spin_unlock(dst_ptl);
5173         }
5174
5175         if (cow) {
5176                 raw_write_seqcount_end(&src->write_protect_seq);
5177                 mmu_notifier_invalidate_range_end(&range);
5178         } else {
5179                 hugetlb_vma_unlock_read(src_vma);
5180         }
5181
5182         return ret;
5183 }
5184
5185 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5186                           unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5187                           unsigned long sz)
5188 {
5189         struct hstate *h = hstate_vma(vma);
5190         struct mm_struct *mm = vma->vm_mm;
5191         spinlock_t *src_ptl, *dst_ptl;
5192         pte_t pte;
5193
5194         dst_ptl = huge_pte_lock(h, mm, dst_pte);
5195         src_ptl = huge_pte_lockptr(h, mm, src_pte);
5196
5197         /*
5198          * We don't have to worry about the ordering of src and dst ptlocks
5199          * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5200          */
5201         if (src_ptl != dst_ptl)
5202                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5203
5204         pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
5205         set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5206
5207         if (src_ptl != dst_ptl)
5208                 spin_unlock(src_ptl);
5209         spin_unlock(dst_ptl);
5210 }
5211
5212 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5213                              struct vm_area_struct *new_vma,
5214                              unsigned long old_addr, unsigned long new_addr,
5215                              unsigned long len)
5216 {
5217         struct hstate *h = hstate_vma(vma);
5218         struct address_space *mapping = vma->vm_file->f_mapping;
5219         unsigned long sz = huge_page_size(h);
5220         struct mm_struct *mm = vma->vm_mm;
5221         unsigned long old_end = old_addr + len;
5222         unsigned long last_addr_mask;
5223         pte_t *src_pte, *dst_pte;
5224         struct mmu_notifier_range range;
5225         bool shared_pmd = false;
5226
5227         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5228                                 old_end);
5229         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5230         /*
5231          * In case of shared PMDs, we should cover the maximum possible
5232          * range.
5233          */
5234         flush_cache_range(vma, range.start, range.end);
5235
5236         mmu_notifier_invalidate_range_start(&range);
5237         last_addr_mask = hugetlb_mask_last_page(h);
5238         /* Prevent race with file truncation */
5239         hugetlb_vma_lock_write(vma);
5240         i_mmap_lock_write(mapping);
5241         for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5242                 src_pte = hugetlb_walk(vma, old_addr, sz);
5243                 if (!src_pte) {
5244                         old_addr |= last_addr_mask;
5245                         new_addr |= last_addr_mask;
5246                         continue;
5247                 }
5248                 if (huge_pte_none(huge_ptep_get(src_pte)))
5249                         continue;
5250
5251                 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5252                         shared_pmd = true;
5253                         old_addr |= last_addr_mask;
5254                         new_addr |= last_addr_mask;
5255                         continue;
5256                 }
5257
5258                 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5259                 if (!dst_pte)
5260                         break;
5261
5262                 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5263         }
5264
5265         if (shared_pmd)
5266                 flush_hugetlb_tlb_range(vma, range.start, range.end);
5267         else
5268                 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5269         mmu_notifier_invalidate_range_end(&range);
5270         i_mmap_unlock_write(mapping);
5271         hugetlb_vma_unlock_write(vma);
5272
5273         return len + old_addr - old_end;
5274 }
5275
5276 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5277                                    unsigned long start, unsigned long end,
5278                                    struct page *ref_page, zap_flags_t zap_flags)
5279 {
5280         struct mm_struct *mm = vma->vm_mm;
5281         unsigned long address;
5282         pte_t *ptep;
5283         pte_t pte;
5284         spinlock_t *ptl;
5285         struct page *page;
5286         struct hstate *h = hstate_vma(vma);
5287         unsigned long sz = huge_page_size(h);
5288         unsigned long last_addr_mask;
5289         bool force_flush = false;
5290
5291         WARN_ON(!is_vm_hugetlb_page(vma));
5292         BUG_ON(start & ~huge_page_mask(h));
5293         BUG_ON(end & ~huge_page_mask(h));
5294
5295         /*
5296          * This is a hugetlb vma; all of its pte entries should point
5297          * to huge pages.
5298          */
5299         tlb_change_page_size(tlb, sz);
5300         tlb_start_vma(tlb, vma);
5301
5302         last_addr_mask = hugetlb_mask_last_page(h);
5303         address = start;
5304         for (; address < end; address += sz) {
5305                 ptep = hugetlb_walk(vma, address, sz);
5306                 if (!ptep) {
5307                         address |= last_addr_mask;
5308                         continue;
5309                 }
5310
5311                 ptl = huge_pte_lock(h, mm, ptep);
5312                 if (huge_pmd_unshare(mm, vma, address, ptep)) {
5313                         spin_unlock(ptl);
5314                         tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5315                         force_flush = true;
5316                         address |= last_addr_mask;
5317                         continue;
5318                 }
5319
5320                 pte = huge_ptep_get(ptep);
5321                 if (huge_pte_none(pte)) {
5322                         spin_unlock(ptl);
5323                         continue;
5324                 }
5325
5326                 /*
5327                  * Migrating hugepage or HWPoisoned hugepage is already
5328                  * unmapped and its refcount is dropped, so just clear pte here.
5329                  */
5330                 if (unlikely(!pte_present(pte))) {
5331                         /*
5332                          * If the pte was wr-protected by uffd-wp in any of the
5333                          * swap forms, and the caller does not want to
5334                          * drop the uffd-wp bit in this zap, then replace the
5335                          * pte with a marker.
5336                          */
5337                         if (pte_swp_uffd_wp_any(pte) &&
5338                             !(zap_flags & ZAP_FLAG_DROP_MARKER))
5339                                 set_huge_pte_at(mm, address, ptep,
5340                                                 make_pte_marker(PTE_MARKER_UFFD_WP),
5341                                                 sz);
5342                         else
5343                                 huge_pte_clear(mm, address, ptep, sz);
5344                         spin_unlock(ptl);
5345                         continue;
5346                 }
5347
5348                 page = pte_page(pte);
5349                 /*
5350                  * If a reference page is supplied, it is because a specific
5351                  * page is being unmapped, not a range. Ensure the page we
5352                  * are about to unmap is the actual page of interest.
5353                  */
5354                 if (ref_page) {
5355                         if (page != ref_page) {
5356                                 spin_unlock(ptl);
5357                                 continue;
5358                         }
5359                         /*
5360                          * Mark the VMA as having unmapped its page so that
5361                          * future faults in this VMA will fail rather than
5362                          * looking like data was lost
5363                          */
5364                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5365                 }
5366
5367                 pte = huge_ptep_get_and_clear(mm, address, ptep);
5368                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5369                 if (huge_pte_dirty(pte))
5370                         set_page_dirty(page);
5371                 /* Leave a uffd-wp pte marker if needed */
5372                 if (huge_pte_uffd_wp(pte) &&
5373                     !(zap_flags & ZAP_FLAG_DROP_MARKER))
5374                         set_huge_pte_at(mm, address, ptep,
5375                                         make_pte_marker(PTE_MARKER_UFFD_WP),
5376                                         sz);
5377                 hugetlb_count_sub(pages_per_huge_page(h), mm);
5378                 page_remove_rmap(page, vma, true);
5379
5380                 spin_unlock(ptl);
5381                 tlb_remove_page_size(tlb, page, huge_page_size(h));
5382                 /*
5383                  * Bail out after unmapping reference page if supplied
5384                  */
5385                 if (ref_page)
5386                         break;
5387         }
5388         tlb_end_vma(tlb, vma);
5389
5390         /*
5391          * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5392          * could defer the flush until now, since by holding i_mmap_rwsem we
5393          * guaranteed that the last reference would not be dropped. But we must
5394          * do the flushing before we return, as otherwise i_mmap_rwsem will be
5395          * dropped and the last reference to the shared PMDs page might be
5396          * dropped as well.
5397          *
5398          * In theory we could defer the freeing of the PMD pages as well, but
5399          * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5400          * detect sharing, so we cannot defer the release of the page either.
5401          * Instead, do flush now.
5402          */
5403         if (force_flush)
5404                 tlb_flush_mmu_tlbonly(tlb);
5405 }
5406
5407 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5408                           struct vm_area_struct *vma, unsigned long start,
5409                           unsigned long end, struct page *ref_page,
5410                           zap_flags_t zap_flags)
5411 {
5412         hugetlb_vma_lock_write(vma);
5413         i_mmap_lock_write(vma->vm_file->f_mapping);
5414
5415         /* mmu notification performed in caller */
5416         __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5417
5418         if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
5419                 /*
5420                  * Unlock and free the vma lock before releasing i_mmap_rwsem.
5421                  * When the vma_lock is freed, this makes the vma ineligible
5422                  * for pmd sharing.  And, i_mmap_rwsem is required to set up
5423                  * pmd sharing.  This is important as page tables for this
5424                  * unmapped range will be asynchronously deleted.  If the page
5425                  * tables are shared, there will be issues when accessed by
5426                  * someone else.
5427                  */
5428                 __hugetlb_vma_unlock_write_free(vma);
5429                 i_mmap_unlock_write(vma->vm_file->f_mapping);
5430         } else {
5431                 i_mmap_unlock_write(vma->vm_file->f_mapping);
5432                 hugetlb_vma_unlock_write(vma);
5433         }
5434 }
5435
5436 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5437                           unsigned long end, struct page *ref_page,
5438                           zap_flags_t zap_flags)
5439 {
5440         struct mmu_notifier_range range;
5441         struct mmu_gather tlb;
5442
5443         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5444                                 start, end);
5445         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5446         mmu_notifier_invalidate_range_start(&range);
5447         tlb_gather_mmu(&tlb, vma->vm_mm);
5448
5449         __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5450
5451         mmu_notifier_invalidate_range_end(&range);
5452         tlb_finish_mmu(&tlb);
5453 }
5454
5455 /*
5456  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5457  * mapping it owns the reserve page for. The intention is to unmap the page
5458  * from other VMAs and let the children be SIGKILLed if they are faulting the
5459  * same region.
5460  */
5461 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5462                               struct page *page, unsigned long address)
5463 {
5464         struct hstate *h = hstate_vma(vma);
5465         struct vm_area_struct *iter_vma;
5466         struct address_space *mapping;
5467         pgoff_t pgoff;
5468
5469         /*
5470          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5471          * from page cache lookup which is in HPAGE_SIZE units.
5472          */
5473         address = address & huge_page_mask(h);
5474         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5475                         vma->vm_pgoff;
5476         mapping = vma->vm_file->f_mapping;
5477
5478         /*
5479          * Take the mapping lock for the duration of the table walk. As
5480          * this mapping should be shared between all the VMAs,
5481          * __unmap_hugepage_range() is called with the lock already held.
5482          */
5483         i_mmap_lock_write(mapping);
5484         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5485                 /* Do not unmap the current VMA */
5486                 if (iter_vma == vma)
5487                         continue;
5488
5489                 /*
5490                  * Shared VMAs have their own reserves and do not affect
5491                  * MAP_PRIVATE accounting but it is possible that a shared
5492                  * VMA is using the same page so check and skip such VMAs.
5493                  */
5494                 if (iter_vma->vm_flags & VM_MAYSHARE)
5495                         continue;
5496
5497                 /*
5498                  * Unmap the page from other VMAs without their own reserves.
5499                  * They get marked to be SIGKILLed if they fault in these
5500                  * areas. This is because a future no-page fault on this VMA
5501                  * could insert a zeroed page instead of the data existing
5502                  * from the time of fork. This would look like data corruption
5503                  */
5504                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5505                         unmap_hugepage_range(iter_vma, address,
5506                                              address + huge_page_size(h), page, 0);
5507         }
5508         i_mmap_unlock_write(mapping);
5509 }
5510
5511 /*
5512  * hugetlb_wp() should be called with page lock of the original hugepage held.
5513  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5514  * cannot race with other handlers or page migration.
5515  * Keep the pte_same checks anyway to make transition from the mutex easier.
5516  */
5517 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5518                        unsigned long address, pte_t *ptep, unsigned int flags,
5519                        struct folio *pagecache_folio, spinlock_t *ptl)
5520 {
5521         const bool unshare = flags & FAULT_FLAG_UNSHARE;
5522         pte_t pte = huge_ptep_get(ptep);
5523         struct hstate *h = hstate_vma(vma);
5524         struct folio *old_folio;
5525         struct folio *new_folio;
5526         int outside_reserve = 0;
5527         vm_fault_t ret = 0;
5528         unsigned long haddr = address & huge_page_mask(h);
5529         struct mmu_notifier_range range;
5530
5531         /*
5532          * Never handle CoW for uffd-wp protected pages.  It should be only
5533          * handled when the uffd-wp protection is removed.
5534          *
5535          * Note that only the CoW optimization path (in hugetlb_no_page())
5536          * can trigger this, because hugetlb_fault() will always resolve
5537          * uffd-wp bit first.
5538          */
5539         if (!unshare && huge_pte_uffd_wp(pte))
5540                 return 0;
5541
5542         /*
5543          * hugetlb does not support FOLL_FORCE-style write faults that keep the
5544          * PTE mapped R/O such as maybe_mkwrite() would do.
5545          */
5546         if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5547                 return VM_FAULT_SIGSEGV;
5548
5549         /* Let's take out MAP_SHARED mappings first. */
5550         if (vma->vm_flags & VM_MAYSHARE) {
5551                 set_huge_ptep_writable(vma, haddr, ptep);
5552                 return 0;
5553         }
5554
5555         old_folio = page_folio(pte_page(pte));
5556
5557         delayacct_wpcopy_start();
5558
5559 retry_avoidcopy:
5560         /*
5561          * If no-one else is actually using this page, we're the exclusive
5562          * owner and can reuse this page.
5563          */
5564         if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5565                 if (!PageAnonExclusive(&old_folio->page))
5566                         page_move_anon_rmap(&old_folio->page, vma);
5567                 if (likely(!unshare))
5568                         set_huge_ptep_writable(vma, haddr, ptep);
5569
5570                 delayacct_wpcopy_end();
5571                 return 0;
5572         }
5573         VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5574                        PageAnonExclusive(&old_folio->page), &old_folio->page);
5575
5576         /*
5577          * If the process that created a MAP_PRIVATE mapping is about to
5578          * perform a COW due to a shared page count, attempt to satisfy
5579          * the allocation without using the existing reserves. The pagecache
5580          * page is used to determine if the reserve at this address was
5581          * consumed or not. If reserves were used, a partial faulted mapping
5582          * at the time of fork() could consume its reserves on COW instead
5583          * of the full address range.
5584          */
5585         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5586                         old_folio != pagecache_folio)
5587                 outside_reserve = 1;
5588
5589         folio_get(old_folio);
5590
5591         /*
5592          * Drop page table lock as buddy allocator may be called. It will
5593          * be acquired again before returning to the caller, as expected.
5594          */
5595         spin_unlock(ptl);
5596         new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
5597
5598         if (IS_ERR(new_folio)) {
5599                 /*
5600                  * If a process owning a MAP_PRIVATE mapping fails to COW,
5601                  * it is due to references held by a child and an insufficient
5602                  * huge page pool. To guarantee the original mapper's
5603                  * reliability, unmap the page from child processes. The child
5604                  * may get SIGKILLed if it later faults.
5605                  */
5606                 if (outside_reserve) {
5607                         struct address_space *mapping = vma->vm_file->f_mapping;
5608                         pgoff_t idx;
5609                         u32 hash;
5610
5611                         folio_put(old_folio);
5612                         /*
5613                          * Drop hugetlb_fault_mutex and vma_lock before
5614                          * unmapping.  Unmapping needs to hold vma_lock
5615                          * in write mode.  Dropping vma_lock in read mode
5616                          * here is OK as COW mappings do not interact with
5617                          * PMD sharing.
5618                          *
5619                          * Reacquire both after unmap operation.
5620                          */
5621                         idx = vma_hugecache_offset(h, vma, haddr);
5622                         hash = hugetlb_fault_mutex_hash(mapping, idx);
5623                         hugetlb_vma_unlock_read(vma);
5624                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5625
5626                         unmap_ref_private(mm, vma, &old_folio->page, haddr);
5627
5628                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
5629                         hugetlb_vma_lock_read(vma);
5630                         spin_lock(ptl);
5631                         ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5632                         if (likely(ptep &&
5633                                    pte_same(huge_ptep_get(ptep), pte)))
5634                                 goto retry_avoidcopy;
5635                         /*
5636                          * A race occurred while re-acquiring the page
5637                          * table lock, and our job is done.
5638                          */
5639                         delayacct_wpcopy_end();
5640                         return 0;
5641                 }
5642
5643                 ret = vmf_error(PTR_ERR(new_folio));
5644                 goto out_release_old;
5645         }
5646
5647         /*
5648          * When the original hugepage is a shared one, it does not have
5649          * an anon_vma prepared.
5650          */
5651         if (unlikely(anon_vma_prepare(vma))) {
5652                 ret = VM_FAULT_OOM;
5653                 goto out_release_all;
5654         }
5655
5656         if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
5657                 ret = VM_FAULT_HWPOISON_LARGE;
5658                 goto out_release_all;
5659         }
5660         __folio_mark_uptodate(new_folio);
5661
5662         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
5663                                 haddr + huge_page_size(h));
5664         mmu_notifier_invalidate_range_start(&range);
5665
5666         /*
5667          * Retake the page table lock to check for racing updates
5668          * before the page tables are altered
5669          */
5670         spin_lock(ptl);
5671         ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5672         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5673                 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
5674
5675                 /* Break COW or unshare */
5676                 huge_ptep_clear_flush(vma, haddr, ptep);
5677                 page_remove_rmap(&old_folio->page, vma, true);
5678                 hugepage_add_new_anon_rmap(new_folio, vma, haddr);
5679                 if (huge_pte_uffd_wp(pte))
5680                         newpte = huge_pte_mkuffd_wp(newpte);
5681                 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
5682                 folio_set_hugetlb_migratable(new_folio);
5683                 /* Make the old page be freed below */
5684                 new_folio = old_folio;
5685         }
5686         spin_unlock(ptl);
5687         mmu_notifier_invalidate_range_end(&range);
5688 out_release_all:
5689         /*
5690          * No restore in case of successful pagetable update (Break COW or
5691          * unshare)
5692          */
5693         if (new_folio != old_folio)
5694                 restore_reserve_on_error(h, vma, haddr, new_folio);
5695         folio_put(new_folio);
5696 out_release_old:
5697         folio_put(old_folio);
5698
5699         spin_lock(ptl); /* Caller expects lock to be held */
5700
5701         delayacct_wpcopy_end();
5702         return ret;
5703 }
5704
5705 /*
5706  * Return whether there is a pagecache page to back the given address within the VMA.
5707  */
5708 static bool hugetlbfs_pagecache_present(struct hstate *h,
5709                         struct vm_area_struct *vma, unsigned long address)
5710 {
5711         struct address_space *mapping = vma->vm_file->f_mapping;
5712         pgoff_t idx = vma_hugecache_offset(h, vma, address);
5713         struct folio *folio;
5714
5715         folio = filemap_get_folio(mapping, idx);
5716         if (IS_ERR(folio))
5717                 return false;
5718         folio_put(folio);
5719         return true;
5720 }
5721
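/*
 * Insert a hugetlb folio into the inode's page cache at index @idx and
 * account the huge page in inode->i_blocks.  On success the folio is left
 * locked and marked dirty.
 */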
5722 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5723                            pgoff_t idx)
5724 {
5725         struct inode *inode = mapping->host;
5726         struct hstate *h = hstate_inode(inode);
5727         int err;
5728
5729         __folio_set_locked(folio);
5730         err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5731
5732         if (unlikely(err)) {
5733                 __folio_clear_locked(folio);
5734                 return err;
5735         }
5736         folio_clear_hugetlb_restore_reserve(folio);
5737
5738         /*
5739          * mark folio dirty so that it will not be removed from cache/file
5740          * by non-hugetlbfs specific code paths.
5741          */
5742         folio_mark_dirty(folio);
5743
5744         spin_lock(&inode->i_lock);
5745         inode->i_blocks += blocks_per_huge_page(h);
5746         spin_unlock(&inode->i_lock);
5747         return 0;
5748 }
5749
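/*
 * Forward a fault to userfaultfd (VM_UFFD_MISSING or VM_UFFD_MINOR).
 * The vma lock and hugetlb fault mutex are dropped before calling
 * handle_userfault(), as required.
 */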
5750 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
5751                                                   struct address_space *mapping,
5752                                                   pgoff_t idx,
5753                                                   unsigned int flags,
5754                                                   unsigned long haddr,
5755                                                   unsigned long addr,
5756                                                   unsigned long reason)
5757 {
5758         u32 hash;
5759         struct vm_fault vmf = {
5760                 .vma = vma,
5761                 .address = haddr,
5762                 .real_address = addr,
5763                 .flags = flags,
5764
5765                 /*
5766                  * Hard to debug if it ends up being
5767                  * used by a callee that assumes
5768                  * something about the other
5769                  * uninitialized fields... same as in
5770                  * memory.c
5771                  */
5772         };
5773
5774         /*
5775          * vma_lock and hugetlb_fault_mutex must be dropped before handling
5776          * userfault. Also mmap_lock could be dropped due to handling
5777          * userfault, any vma operation should be careful from here.
5778          */
5779         hugetlb_vma_unlock_read(vma);
5780         hash = hugetlb_fault_mutex_hash(mapping, idx);
5781         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5782         return handle_userfault(&vmf, reason);
5783 }
5784
5785 /*
5786  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5787  * false if pte changed or is changing.
5788  */
5789 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
5790                                pte_t *ptep, pte_t old_pte)
5791 {
5792         spinlock_t *ptl;
5793         bool same;
5794
5795         ptl = huge_pte_lock(h, mm, ptep);
5796         same = pte_same(huge_ptep_get(ptep), old_pte);
5797         spin_unlock(ptl);
5798
5799         return same;
5800 }
5801
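/*
 * Handle a fault on a pte with no present entry: find the page in the
 * page cache or allocate a new hugetlb folio, map it, and optionally do
 * the COW without a second fault.  Called with the hugetlb fault mutex
 * and the vma lock held in read mode; both are dropped before returning.
 */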
5802 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
5803                         struct vm_area_struct *vma,
5804                         struct address_space *mapping, pgoff_t idx,
5805                         unsigned long address, pte_t *ptep,
5806                         pte_t old_pte, unsigned int flags)
5807 {
5808         struct hstate *h = hstate_vma(vma);
5809         vm_fault_t ret = VM_FAULT_SIGBUS;
5810         int anon_rmap = 0;
5811         unsigned long size;
5812         struct folio *folio;
5813         pte_t new_pte;
5814         spinlock_t *ptl;
5815         unsigned long haddr = address & huge_page_mask(h);
5816         bool new_folio, new_pagecache_folio = false;
5817         u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
5818
5819         /*
5820          * Currently, we are forced to kill the process in the event the
5821          * original mapper has unmapped pages from the child due to a failed
5822          * COW/unsharing. Warn that such a situation has occurred as it may not
5823          * be obvious.
5824          */
5825         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5826                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5827                            current->pid);
5828                 goto out;
5829         }
5830
5831         /*
5832          * Use page lock to guard against racing truncation
5833          * before we get page_table_lock.
5834          */
5835         new_folio = false;
5836         folio = filemap_lock_folio(mapping, idx);
5837         if (IS_ERR(folio)) {
5838                 size = i_size_read(mapping->host) >> huge_page_shift(h);
5839                 if (idx >= size)
5840                         goto out;
5841                 /* Check for page in userfault range */
5842                 if (userfaultfd_missing(vma)) {
5843                         /*
5844                          * Since hugetlb_no_page() was examining pte
5845                          * without pgtable lock, we need to re-test under
5846                          * lock because the pte may not be stable and could
5847                          * have changed from under us.  Try to detect
5848                          * ptes that changed, or are changing, and retry
5849                          * properly when needed.
5850                          *
5851                          * Note that userfaultfd is actually fine with
5852                          * false positives (e.g. caused by a changed pte),
5853                          * but not with wrong logical events (e.g. caused
5854                          * by reading a pte while it changes).  The latter
5855                          * can confuse userspace, so the strictness is very
5856                          * much preferred.  E.g., MISSING event should
5857                          * never happen on the page after UFFDIO_COPY has
5858                          * correctly installed the page and returned.
5859                          */
5860                         if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5861                                 ret = 0;
5862                                 goto out;
5863                         }
5864
5865                         return hugetlb_handle_userfault(vma, mapping, idx, flags,
5866                                                         haddr, address,
5867                                                         VM_UFFD_MISSING);
5868                 }
5869
5870                 folio = alloc_hugetlb_folio(vma, haddr, 0);
5871                 if (IS_ERR(folio)) {
5872                         /*
5873                          * Returning error will result in faulting task being
5874                          * sent SIGBUS.  The hugetlb fault mutex prevents two
5875                          * tasks from racing to fault in the same page which
5876                          * could result in false unable to allocate errors.
5877                          * Page migration does not take the fault mutex, but
5878                          * does a clear then write of pte's under page table
5879                          * lock.  Page fault code could race with migration,
5880                          * notice the clear pte and try to allocate a page
5881                          * here.  Before returning error, get ptl and make
5882                          * sure there really is no pte entry.
5883                          */
5884                         if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5885                                 ret = vmf_error(PTR_ERR(folio));
5886                         else
5887                                 ret = 0;
5888                         goto out;
5889                 }
5890                 clear_huge_page(&folio->page, address, pages_per_huge_page(h));
5891                 __folio_mark_uptodate(folio);
5892                 new_folio = true;
5893
5894                 if (vma->vm_flags & VM_MAYSHARE) {
5895                         int err = hugetlb_add_to_page_cache(folio, mapping, idx);
5896                         if (err) {
5897                                 /*
5898                                  * err can't be -EEXIST, which would imply someone
5899                                  * else consumed the reservation, since the hugetlb
5900                                  * fault mutex is held when adding a hugetlb page
5901                                  * to the page cache. So it's safe to call
5902                                  * restore_reserve_on_error() here.
5903                                  */
5904                                 restore_reserve_on_error(h, vma, haddr, folio);
5905                                 folio_put(folio);
5906                                 goto out;
5907                         }
5908                         new_pagecache_folio = true;
5909                 } else {
5910                         folio_lock(folio);
5911                         if (unlikely(anon_vma_prepare(vma))) {
5912                                 ret = VM_FAULT_OOM;
5913                                 goto backout_unlocked;
5914                         }
5915                         anon_rmap = 1;
5916                 }
5917         } else {
5918                 /*
5919                  * If a memory error occurs between mmap() and fault, some processes
5920                  * don't have a hwpoisoned swap entry for the errored virtual address.
5921                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
5922                  */
5923                 if (unlikely(folio_test_hwpoison(folio))) {
5924                         ret = VM_FAULT_HWPOISON_LARGE |
5925                                 VM_FAULT_SET_HINDEX(hstate_index(h));
5926                         goto backout_unlocked;
5927                 }
5928
5929                 /* Check for page in userfault range. */
5930                 if (userfaultfd_minor(vma)) {
5931                         folio_unlock(folio);
5932                         folio_put(folio);
5933                         /* See comment in userfaultfd_missing() block above */
5934                         if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5935                                 ret = 0;
5936                                 goto out;
5937                         }
5938                         return hugetlb_handle_userfault(vma, mapping, idx, flags,
5939                                                         haddr, address,
5940                                                         VM_UFFD_MINOR);
5941                 }
5942         }
5943
5944         /*
5945          * If we are going to COW a private mapping later, we examine the
5946          * pending reservations for this page now. This will ensure that
5947          * any allocations necessary to record that reservation occur outside
5948          * the spinlock.
5949          */
5950         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5951                 if (vma_needs_reservation(h, vma, haddr) < 0) {
5952                         ret = VM_FAULT_OOM;
5953                         goto backout_unlocked;
5954                 }
5955                 /* Just decrements count, does not deallocate */
5956                 vma_end_reservation(h, vma, haddr);
5957         }
5958
5959         ptl = huge_pte_lock(h, mm, ptep);
5960         ret = 0;
5961         /* If pte changed from under us, retry */
5962         if (!pte_same(huge_ptep_get(ptep), old_pte))
5963                 goto backout;
5964
5965         if (anon_rmap)
5966                 hugepage_add_new_anon_rmap(folio, vma, haddr);
5967         else
5968                 page_dup_file_rmap(&folio->page, true);
5969         new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
5970                                 && (vma->vm_flags & VM_SHARED)));
5971         /*
5972          * If this pte was previously wr-protected, keep it wr-protected even
5973          * if populated.
5974          */
5975         if (unlikely(pte_marker_uffd_wp(old_pte)))
5976                 new_pte = huge_pte_mkuffd_wp(new_pte);
5977         set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
5978
5979         hugetlb_count_add(pages_per_huge_page(h), mm);
5980         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5981                 /* Optimization, do the COW without a second fault */
5982                 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
5983         }
5984
5985         spin_unlock(ptl);
5986
5987         /*
5988          * Only set hugetlb_migratable in newly allocated pages.  Existing pages
5989          * found in the pagecache may not have hugetlb_migratable if they have
5990          * been isolated for migration.
5991          */
5992         if (new_folio)
5993                 folio_set_hugetlb_migratable(folio);
5994
5995         folio_unlock(folio);
5996 out:
5997         hugetlb_vma_unlock_read(vma);
5998         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5999         return ret;
6000
6001 backout:
6002         spin_unlock(ptl);
6003 backout_unlocked:
6004         if (new_folio && !new_pagecache_folio)
6005                 restore_reserve_on_error(h, vma, haddr, folio);
6006
6007         folio_unlock(folio);
6008         folio_put(folio);
6009         goto out;
6010 }
6011
6012 #ifdef CONFIG_SMP
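/*
 * Hash the (mapping, index) pair identifying a page into an index into
 * hugetlb_fault_mutex_table.
 */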
6013 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6014 {
6015         unsigned long key[2];
6016         u32 hash;
6017
6018         key[0] = (unsigned long) mapping;
6019         key[1] = idx;
6020
6021         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
6022
6023         return hash & (num_fault_mutexes - 1);
6024 }
6025 #else
6026 /*
6027  * For uniprocessor systems we always use a single mutex, so just
6028  * return 0 and avoid the hashing overhead.
6029  */
6030 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6031 {
6032         return 0;
6033 }
6034 #endif
6035
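/*
 * Top-level hugetlb fault handler.  Takes the fault mutex to serialize
 * instantiation of the same page, and the vma lock to keep the looked-up
 * ptep valid against huge_pmd_unshare().  Not-present ptes are handled by
 * hugetlb_no_page(); write/unshare faults go through hugetlb_wp().
 */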
6036 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
6037                         unsigned long address, unsigned int flags)
6038 {
6039         pte_t *ptep, entry;
6040         spinlock_t *ptl;
6041         vm_fault_t ret;
6042         u32 hash;
6043         pgoff_t idx;
6044         struct folio *folio = NULL;
6045         struct folio *pagecache_folio = NULL;
6046         struct hstate *h = hstate_vma(vma);
6047         struct address_space *mapping;
6048         int need_wait_lock = 0;
6049         unsigned long haddr = address & huge_page_mask(h);
6050
6051         /* TODO: Handle faults under the VMA lock */
6052         if (flags & FAULT_FLAG_VMA_LOCK) {
6053                 vma_end_read(vma);
6054                 return VM_FAULT_RETRY;
6055         }
6056
6057         /*
6058          * Serialize hugepage allocation and instantiation, so that we don't
6059          * get spurious allocation failures if two CPUs race to instantiate
6060          * the same page in the page cache.
6061          */
6062         mapping = vma->vm_file->f_mapping;
6063         idx = vma_hugecache_offset(h, vma, haddr);
6064         hash = hugetlb_fault_mutex_hash(mapping, idx);
6065         mutex_lock(&hugetlb_fault_mutex_table[hash]);
6066
6067         /*
6068          * Acquire vma lock before calling huge_pte_alloc and hold
6069          * until finished with ptep.  This prevents huge_pmd_unshare from
6070          * being called elsewhere and making the ptep no longer valid.
6071          */
6072         hugetlb_vma_lock_read(vma);
6073         ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
6074         if (!ptep) {
6075                 hugetlb_vma_unlock_read(vma);
6076                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6077                 return VM_FAULT_OOM;
6078         }
6079
6080         entry = huge_ptep_get(ptep);
6081         if (huge_pte_none_mostly(entry)) {
6082                 if (is_pte_marker(entry)) {
6083                         pte_marker marker =
6084                                 pte_marker_get(pte_to_swp_entry(entry));
6085
6086                         if (marker & PTE_MARKER_POISONED) {
6087                                 ret = VM_FAULT_HWPOISON_LARGE;
6088                                 goto out_mutex;
6089                         }
6090                 }
6091
6092                 /*
6093                  * Other PTE markers should be handled the same way as a none PTE.
6094                  *
6095                  * hugetlb_no_page will drop the vma lock and hugetlb fault
6096                  * mutex internally, which makes us return immediately.
6097                  */
6098                 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
6099                                       entry, flags);
6100         }
6101
6102         ret = 0;
6103
6104         /*
6105          * entry could be a migration/hwpoison entry at this point, so this
6106          * check prevents the kernel from going below assuming that we have
6107          * an active hugepage in pagecache. This goto expects the 2nd page
6108          * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
6109          * properly handle it.
6110          */
6111         if (!pte_present(entry)) {
6112                 if (unlikely(is_hugetlb_entry_migration(entry))) {
6113                         /*
6114                          * Release the hugetlb fault lock now, but retain
6115                          * the vma lock, because it is needed to guard the
6116                          * huge_pte_lockptr() later in
6117                          * migration_entry_wait_huge(). The vma lock will
6118                          * be released there.
6119                          */
6120                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6121                         migration_entry_wait_huge(vma, ptep);
6122                         return 0;
6123                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
6124                         ret = VM_FAULT_HWPOISON_LARGE |
6125                             VM_FAULT_SET_HINDEX(hstate_index(h));
6126                 goto out_mutex;
6127         }
6128
6129         /*
6130          * If we are going to COW/unshare the mapping later, we examine the
6131          * pending reservations for this page now. This will ensure that any
6132          * allocations necessary to record that reservation occur outside the
6133          * spinlock. Also lookup the pagecache page now as it is used to
6134          * determine if a reservation has been consumed.
6135          */
6136         if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6137             !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
6138                 if (vma_needs_reservation(h, vma, haddr) < 0) {
6139                         ret = VM_FAULT_OOM;
6140                         goto out_mutex;
6141                 }
6142                 /* Just decrements count, does not deallocate */
6143                 vma_end_reservation(h, vma, haddr);
6144
6145                 pagecache_folio = filemap_lock_folio(mapping, idx);
6146                 if (IS_ERR(pagecache_folio))
6147                         pagecache_folio = NULL;
6148         }
6149
6150         ptl = huge_pte_lock(h, mm, ptep);
6151
6152         /* Check for a racing update before calling hugetlb_wp() */
6153         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
6154                 goto out_ptl;
6155
6156         /* Handle userfault-wp first, before trying to lock more pages */
6157         if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
6158             (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
6159                 struct vm_fault vmf = {
6160                         .vma = vma,
6161                         .address = haddr,
6162                         .real_address = address,
6163                         .flags = flags,
6164                 };
6165
6166                 spin_unlock(ptl);
6167                 if (pagecache_folio) {
6168                         folio_unlock(pagecache_folio);
6169                         folio_put(pagecache_folio);
6170                 }
6171                 hugetlb_vma_unlock_read(vma);
6172                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6173                 return handle_userfault(&vmf, VM_UFFD_WP);
6174         }
6175
6176         /*
6177          * hugetlb_wp() requires page locks of pte_page(entry) and
6178          * pagecache_folio, so here we need to take the former one
6179          * when folio != pagecache_folio or there is no pagecache_folio.
6180          */
6181         folio = page_folio(pte_page(entry));
6182         if (folio != pagecache_folio)
6183                 if (!folio_trylock(folio)) {
6184                         need_wait_lock = 1;
6185                         goto out_ptl;
6186                 }
6187
6188         folio_get(folio);
6189
6190         if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6191                 if (!huge_pte_write(entry)) {
6192                         ret = hugetlb_wp(mm, vma, address, ptep, flags,
6193                                          pagecache_folio, ptl);
6194                         goto out_put_page;
6195                 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6196                         entry = huge_pte_mkdirty(entry);
6197                 }
6198         }
6199         entry = pte_mkyoung(entry);
6200         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
6201                                                 flags & FAULT_FLAG_WRITE))
6202                 update_mmu_cache(vma, haddr, ptep);
6203 out_put_page:
6204         if (folio != pagecache_folio)
6205                 folio_unlock(folio);
6206         folio_put(folio);
6207 out_ptl:
6208         spin_unlock(ptl);
6209
6210         if (pagecache_folio) {
6211                 folio_unlock(pagecache_folio);
6212                 folio_put(pagecache_folio);
6213         }
6214 out_mutex:
6215         hugetlb_vma_unlock_read(vma);
6216         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6217         /*
6218          * Generally it's safe to hold a refcount while waiting for a page lock.
6219          * But here we only wait to defer the next page fault and avoid a busy
6220          * loop, and the page is not used after being unlocked before returning
6221          * from the current page fault. So we are safe from accessing a freed
6222          * page, even if we wait here without taking a refcount.
6223          */
6224         if (need_wait_lock)
6225                 folio_wait_locked(folio);
6226         return ret;
6227 }
6228
6229 #ifdef CONFIG_USERFAULTFD
6230 /*
6231  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6232  * with modifications for hugetlb pages.
6233  */
6234 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6235                              struct vm_area_struct *dst_vma,
6236                              unsigned long dst_addr,
6237                              unsigned long src_addr,
6238                              uffd_flags_t flags,
6239                              struct folio **foliop)
6240 {
6241         struct mm_struct *dst_mm = dst_vma->vm_mm;
6242         bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6243         bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6244         struct hstate *h = hstate_vma(dst_vma);
6245         struct address_space *mapping = dst_vma->vm_file->f_mapping;
6246         pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6247         unsigned long size;
6248         int vm_shared = dst_vma->vm_flags & VM_SHARED;
6249         pte_t _dst_pte;
6250         spinlock_t *ptl;
6251         int ret = -ENOMEM;
6252         struct folio *folio;
6253         int writable;
6254         bool folio_in_pagecache = false;
6255
6256         if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6257                 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6258
6259                 /* Don't overwrite any existing PTEs (even markers) */
6260                 if (!huge_pte_none(huge_ptep_get(dst_pte))) {
6261                         spin_unlock(ptl);
6262                         return -EEXIST;
6263                 }
6264
6265                 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6266                 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
6267                                 huge_page_size(h));
6268
6269                 /* No need to invalidate - it was non-present before */
6270                 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6271
6272                 spin_unlock(ptl);
6273                 return 0;
6274         }
6275
6276         if (is_continue) {
6277                 ret = -EFAULT;
6278                 folio = filemap_lock_folio(mapping, idx);
6279                 if (IS_ERR(folio))
6280                         goto out;
6281                 folio_in_pagecache = true;
6282         } else if (!*foliop) {
6283                 /* If a folio already exists in the page cache, then it's
6284                  * UFFDIO_COPY for a non-missing case. Return -EEXIST.
6285                  */
6286                 if (vm_shared &&
6287                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6288                         ret = -EEXIST;
6289                         goto out;
6290                 }
6291
6292                 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6293                 if (IS_ERR(folio)) {
6294                         ret = -ENOMEM;
6295                         goto out;
6296                 }
6297
6298                 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6299                                            false);
6300
6301                 /* fallback to copy_from_user outside mmap_lock */
6302                 if (unlikely(ret)) {
6303                         ret = -ENOENT;
6304                         /* Free the allocated folio which may have
6305                          * consumed a reservation.
6306                          */
6307                         restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6308                         folio_put(folio);
6309
6310                         /* Allocate a temporary folio to hold the copied
6311                          * contents.
6312                          */
6313                         folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6314                         if (!folio) {
6315                                 ret = -ENOMEM;
6316                                 goto out;
6317                         }
6318                         *foliop = folio;
6319                         /* Set the outparam foliop and return to the caller to
6320                          * copy the contents outside the lock. Don't free the
6321                          * folio.
6322                          */
6323                         goto out;
6324                 }
6325         } else {
6326                 if (vm_shared &&
6327                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6328                         folio_put(*foliop);
6329                         ret = -EEXIST;
6330                         *foliop = NULL;
6331                         goto out;
6332                 }
6333
6334                 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6335                 if (IS_ERR(folio)) {
6336                         folio_put(*foliop);
6337                         ret = -ENOMEM;
6338                         *foliop = NULL;
6339                         goto out;
6340                 }
6341                 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6342                 folio_put(*foliop);
6343                 *foliop = NULL;
6344                 if (ret) {
6345                         folio_put(folio);
6346                         goto out;
6347                 }
6348         }
6349
6350         /*
6351          * The memory barrier inside __folio_mark_uptodate makes sure that
6352          * preceding stores to the page contents become visible before
6353          * the set_pte_at() write.
6354          */
6355         __folio_mark_uptodate(folio);
6356
6357         /* Add shared, newly allocated pages to the page cache. */
6358         if (vm_shared && !is_continue) {
6359                 size = i_size_read(mapping->host) >> huge_page_shift(h);
6360                 ret = -EFAULT;
6361                 if (idx >= size)
6362                         goto out_release_nounlock;
6363
6364                 /*
6365                  * Serialization between remove_inode_hugepages() and
6366                  * hugetlb_add_to_page_cache() below happens through the
6367                  * hugetlb_fault_mutex_table, which here must be held by
6368                  * the caller.
6369                  */
6370                 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6371                 if (ret)
6372                         goto out_release_nounlock;
6373                 folio_in_pagecache = true;
6374         }
6375
6376         ptl = huge_pte_lock(h, dst_mm, dst_pte);
6377
6378         ret = -EIO;
6379         if (folio_test_hwpoison(folio))
6380                 goto out_release_unlock;
6381
6382         /*
6383          * We allow overwriting a pte marker: consider the case where both
6384          * MISSING|WP are registered; we first wr-protect a none pte which has
6385          * no page cache page backing it, then access the page.
6386          */
6387         ret = -EEXIST;
6388         if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
6389                 goto out_release_unlock;
6390
6391         if (folio_in_pagecache)
6392                 page_dup_file_rmap(&folio->page, true);
6393         else
6394                 hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
6395
6396         /*
6397          * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6398          * with wp flag set, don't set pte write bit.
6399          */
6400         if (wp_enabled || (is_continue && !vm_shared))
6401                 writable = 0;
6402         else
6403                 writable = dst_vma->vm_flags & VM_WRITE;
6404
6405         _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6406         /*
6407          * Always mark UFFDIO_COPY page dirty; note that this may not be
6408          * extremely important for hugetlbfs for now since swapping is not
6409          * supported, but we should still be clear that this page cannot be
6410          * thrown away at will, even if the write bit is not set.
6411          */
6412         _dst_pte = huge_pte_mkdirty(_dst_pte);
6413         _dst_pte = pte_mkyoung(_dst_pte);
6414
6415         if (wp_enabled)
6416                 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6417
6418         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
6419
6420         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6421
6422         /* No need to invalidate - it was non-present before */
6423         update_mmu_cache(dst_vma, dst_addr, dst_pte);
6424
6425         spin_unlock(ptl);
6426         if (!is_continue)
6427                 folio_set_hugetlb_migratable(folio);
6428         if (vm_shared || is_continue)
6429                 folio_unlock(folio);
6430         ret = 0;
6431 out:
6432         return ret;
6433 out_release_unlock:
6434         spin_unlock(ptl);
6435         if (vm_shared || is_continue)
6436                 folio_unlock(folio);
6437 out_release_nounlock:
6438         if (!folio_in_pagecache)
6439                 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6440         folio_put(folio);
6441         goto out;
6442 }
6443 #endif /* CONFIG_USERFAULTFD */
6444
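/*
 * Look up the page mapped at @address for GUP.  Runs under the vma lock
 * and page table lock, grabs a reference with try_grab_page(), and
 * reports the follow mask for the huge page size.  May return
 * ERR_PTR(-EMLINK) to request unsharing, or ERR_PTR(-EFAULT) for
 * FOLL_DUMP on a hole.
 */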
6445 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
6446                                       unsigned long address, unsigned int flags,
6447                                       unsigned int *page_mask)
6448 {
6449         struct hstate *h = hstate_vma(vma);
6450         struct mm_struct *mm = vma->vm_mm;
6451         unsigned long haddr = address & huge_page_mask(h);
6452         struct page *page = NULL;
6453         spinlock_t *ptl;
6454         pte_t *pte, entry;
6455         int ret;
6456
6457         hugetlb_vma_lock_read(vma);
6458         pte = hugetlb_walk(vma, haddr, huge_page_size(h));
6459         if (!pte)
6460                 goto out_unlock;
6461
6462         ptl = huge_pte_lock(h, mm, pte);
6463         entry = huge_ptep_get(pte);
6464         if (pte_present(entry)) {
6465                 page = pte_page(entry);
6466
6467                 if (!huge_pte_write(entry)) {
6468                         if (flags & FOLL_WRITE) {
6469                                 page = NULL;
6470                                 goto out;
6471                         }
6472
6473                         if (gup_must_unshare(vma, flags, page)) {
6474                                 /* Tell the caller to do unsharing */
6475                                 page = ERR_PTR(-EMLINK);
6476                                 goto out;
6477                         }
6478                 }
6479
6480                 page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
6481
6482                 /*
6483                  * Note that page may be a sub-page, and with vmemmap
6484                  * optimizations the page struct may be read only.
6485                  * try_grab_page() will increase the ref count on the
6486                  * head page, so this will be OK.
6487                  *
6488                  * try_grab_page() should always be able to get the page here,
6489                  * because we hold the ptl lock and have verified pte_present().
6490                  */
6491                 ret = try_grab_page(page, flags);
6492
6493                 if (WARN_ON_ONCE(ret)) {
6494                         page = ERR_PTR(ret);
6495                         goto out;
6496                 }
6497
6498                 *page_mask = (1U << huge_page_order(h)) - 1;
6499         }
6500 out:
6501         spin_unlock(ptl);
6502 out_unlock:
6503         hugetlb_vma_unlock_read(vma);
6504
6505         /*
6506          * Fixup retval for dump requests: if pagecache doesn't exist,
6507          * don't try to allocate a new page but just skip it.
6508          */
6509         if (!page && (flags & FOLL_DUMP) &&
6510             !hugetlbfs_pagecache_present(h, vma, address))
6511                 page = ERR_PTR(-EFAULT);
6512
6513         return page;
6514 }
6515
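/*
 * Apply @newprot (and uffd-wp changes) to every hugetlb pte in
 * [address, end), dealing with shared PMDs, migration entries and pte
 * markers along the way.  Returns the number of base pages affected, or
 * a negative errno.
 */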
6516 long hugetlb_change_protection(struct vm_area_struct *vma,
6517                 unsigned long address, unsigned long end,
6518                 pgprot_t newprot, unsigned long cp_flags)
6519 {
6520         struct mm_struct *mm = vma->vm_mm;
6521         unsigned long start = address;
6522         pte_t *ptep;
6523         pte_t pte;
6524         struct hstate *h = hstate_vma(vma);
6525         long pages = 0, psize = huge_page_size(h);
6526         bool shared_pmd = false;
6527         struct mmu_notifier_range range;
6528         unsigned long last_addr_mask;
6529         bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6530         bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6531
6532         /*
6533          * In the case of shared PMDs, the area to flush could be beyond
6534          * start/end.  Set range.start/range.end to cover the maximum possible
6535          * range if PMD sharing is possible.
6536          */
6537         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6538                                 0, mm, start, end);
6539         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6540
6541         BUG_ON(address >= end);
6542         flush_cache_range(vma, range.start, range.end);
6543
6544         mmu_notifier_invalidate_range_start(&range);
6545         hugetlb_vma_lock_write(vma);
6546         i_mmap_lock_write(vma->vm_file->f_mapping);
6547         last_addr_mask = hugetlb_mask_last_page(h);
6548         for (; address < end; address += psize) {
6549                 spinlock_t *ptl;
6550                 ptep = hugetlb_walk(vma, address, psize);
6551                 if (!ptep) {
6552                         if (!uffd_wp) {
6553                                 address |= last_addr_mask;
6554                                 continue;
6555                         }
6556                         /*
6557                          * Userfaultfd wr-protect requires pgtable
6558                          * pre-allocations to install pte markers.
6559                          */
6560                         ptep = huge_pte_alloc(mm, vma, address, psize);
6561                         if (!ptep) {
6562                                 pages = -ENOMEM;
6563                                 break;
6564                         }
6565                 }
6566                 ptl = huge_pte_lock(h, mm, ptep);
6567                 if (huge_pmd_unshare(mm, vma, address, ptep)) {
6568                         /*
6569                          * When uffd-wp is enabled on the vma, unshare
6570                          * shouldn't happen at all.  Warn if it happens
6571                          * for some reason.
6572                          */
6573                         WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6574                         pages++;
6575                         spin_unlock(ptl);
6576                         shared_pmd = true;
6577                         address |= last_addr_mask;
6578                         continue;
6579                 }
6580                 pte = huge_ptep_get(ptep);
6581                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6582                         /* Nothing to do. */
6583                 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
6584                         swp_entry_t entry = pte_to_swp_entry(pte);
6585                         struct page *page = pfn_swap_entry_to_page(entry);
6586                         pte_t newpte = pte;
6587
6588                         if (is_writable_migration_entry(entry)) {
6589                                 if (PageAnon(page))
6590                                         entry = make_readable_exclusive_migration_entry(
6591                                                                 swp_offset(entry));
6592                                 else
6593                                         entry = make_readable_migration_entry(
6594                                                                 swp_offset(entry));
6595                                 newpte = swp_entry_to_pte(entry);
6596                                 pages++;
6597                         }
6598
6599                         if (uffd_wp)
6600                                 newpte = pte_swp_mkuffd_wp(newpte);
6601                         else if (uffd_wp_resolve)
6602                                 newpte = pte_swp_clear_uffd_wp(newpte);
6603                         if (!pte_same(pte, newpte))
6604                                 set_huge_pte_at(mm, address, ptep, newpte, psize);
6605                 } else if (unlikely(is_pte_marker(pte))) {
6606                         /* No other markers apply for now. */
6607                         WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
6608                         if (uffd_wp_resolve)
6609                                 /* Safe to modify directly (non-present->none). */
6610                                 huge_pte_clear(mm, address, ptep, psize);
6611                 } else if (!huge_pte_none(pte)) {
6612                         pte_t old_pte;
6613                         unsigned int shift = huge_page_shift(hstate_vma(vma));
6614
6615                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6616                         pte = huge_pte_modify(old_pte, newprot);
6617                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6618                         if (uffd_wp)
6619                                 pte = huge_pte_mkuffd_wp(pte);
6620                         else if (uffd_wp_resolve)
6621                                 pte = huge_pte_clear_uffd_wp(pte);
6622                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6623                         pages++;
6624                 } else {
6625                         /* None pte */
6626                         if (unlikely(uffd_wp))
6627                                 /* Safe to modify directly (none->non-present). */
6628                                 set_huge_pte_at(mm, address, ptep,
6629                                                 make_pte_marker(PTE_MARKER_UFFD_WP),
6630                                                 psize);
6631                 }
6632                 spin_unlock(ptl);
6633         }
6634         /*
6635          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6636          * may have cleared our pud entry and done put_page on the page table:
6637          * once we release i_mmap_rwsem, another task can do the final put_page
6638          * and that page table can be reused and filled with junk.  If we actually
6639          * did unshare a page of pmds, flush the range corresponding to the pud.
6640          */
6641         if (shared_pmd)
6642                 flush_hugetlb_tlb_range(vma, range.start, range.end);
6643         else
6644                 flush_hugetlb_tlb_range(vma, start, end);
6645         /*
6646          * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() as we
6647          * are downgrading page table protection, not changing it to point to a
6648          * new page.
6649          *
6650          * See Documentation/mm/mmu_notifier.rst
6651          */
6652         i_mmap_unlock_write(vma->vm_file->f_mapping);
6653         hugetlb_vma_unlock_write(vma);
6654         mmu_notifier_invalidate_range_end(&range);
6655
6656         return pages > 0 ? (pages << h->order) : pages;
6657 }
6658
6659 /* Return true if reservation was successful, false otherwise.  */
6660 bool hugetlb_reserve_pages(struct inode *inode,
6661                                         long from, long to,
6662                                         struct vm_area_struct *vma,
6663                                         vm_flags_t vm_flags)
6664 {
6665         long chg = -1, add = -1;
6666         struct hstate *h = hstate_inode(inode);
6667         struct hugepage_subpool *spool = subpool_inode(inode);
6668         struct resv_map *resv_map;
6669         struct hugetlb_cgroup *h_cg = NULL;
6670         long gbl_reserve, regions_needed = 0;
6671
6672         /* This should never happen */
6673         if (from > to) {
6674                 VM_WARN(1, "%s called with a negative range\n", __func__);
6675                 return false;
6676         }
6677
6678         /*
6679          * vma specific semaphore used for pmd sharing and fault/truncation
6680          * synchronization
6681          */
6682         hugetlb_vma_lock_alloc(vma);
6683
6684         /*
6685          * Only apply hugepage reservation if asked. At fault time, an
6686          * attempt will be made for VM_NORESERVE to allocate a page
6687          * without using reserves
6688          */
6689         if (vm_flags & VM_NORESERVE)
6690                 return true;
6691
6692         /*
6693          * Shared mappings base their reservation on the number of pages that
6694          * are already allocated on behalf of the file. Private mappings need
6695          * to reserve the full area even if read-only as mprotect() may be
6696          * called to make the mapping read-write. Assume !vma is a shm mapping
6697          */
6698         if (!vma || vma->vm_flags & VM_MAYSHARE) {
6699                 /*
6700                  * resv_map can not be NULL as hugetlb_reserve_pages is only
6701                  * called for inodes for which resv_maps were created (see
6702                  * hugetlbfs_get_inode).
6703                  */
6704                 resv_map = inode_resv_map(inode);
6705
6706                 chg = region_chg(resv_map, from, to, &regions_needed);
6707         } else {
6708                 /* Private mapping. */
6709                 resv_map = resv_map_alloc();
6710                 if (!resv_map)
6711                         goto out_err;
6712
6713                 chg = to - from;
6714
6715                 set_vma_resv_map(vma, resv_map);
6716                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
6717         }
6718
6719         if (chg < 0)
6720                 goto out_err;
6721
6722         if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6723                                 chg * pages_per_huge_page(h), &h_cg) < 0)
6724                 goto out_err;
6725
6726         if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6727                 /* For private mappings, the hugetlb_cgroup uncharge info hangs
6728                  * off the resv_map.
6729                  */
6730                 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6731         }
6732
6733         /*
6734          * There must be enough pages in the subpool for the mapping. If
6735          * the subpool has a minimum size, there may be some global
6736          * reservations already in place (gbl_reserve).
6737          */
6738         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6739         if (gbl_reserve < 0)
6740                 goto out_uncharge_cgroup;
6741
6742         /*
6743          * Check that enough hugepages are available for the reservation.
6744          * Hand the pages back to the subpool if there are not.
6745          */
6746         if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6747                 goto out_put_pages;
6748
6749         /*
6750          * Account for the reservations made. Shared mappings record regions
6751          * that have reservations as they are shared by multiple VMAs.
6752          * When the last VMA disappears, the region map says how much
6753          * the reservation was and the page cache tells how much of
6754          * the reservation was consumed. Private mappings are per-VMA and
6755          * only the consumed reservations are tracked. When the VMA
6756          * disappears, the original reservation is the VMA size and the
6757          * consumed reservations are stored in the map. Hence, nothing
6758          * else has to be done for private mappings here
6759          */
6760         if (!vma || vma->vm_flags & VM_MAYSHARE) {
6761                 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6762
6763                 if (unlikely(add < 0)) {
6764                         hugetlb_acct_memory(h, -gbl_reserve);
6765                         goto out_put_pages;
6766                 } else if (unlikely(chg > add)) {
6767                         /*
6768                          * pages in this range were added to the reserve
6769                          * map between region_chg and region_add.  This
6770                          * indicates a race with alloc_hugetlb_folio.  Adjust
6771                          * the subpool and reserve counts modified above
6772                          * based on the difference.
6773                          */
6774                         long rsv_adjust;
6775
6776                         /*
6777                          * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6778                          * reference to h_cg->css. See comment below for detail.
6779                          */
6780                         hugetlb_cgroup_uncharge_cgroup_rsvd(
6781                                 hstate_index(h),
6782                                 (chg - add) * pages_per_huge_page(h), h_cg);
6783
6784                         rsv_adjust = hugepage_subpool_put_pages(spool,
6785                                                                 chg - add);
6786                         hugetlb_acct_memory(h, -rsv_adjust);
6787                 } else if (h_cg) {
6788                         /*
6789                          * The file_regions will hold their own reference to
6790                          * h_cg->css. So we should release the reference held
6791                          * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6792                          * done.
6793                          */
6794                         hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6795                 }
6796         }
6797         return true;
6798
6799 out_put_pages:
6800         /* put back original number of pages, chg */
6801         (void)hugepage_subpool_put_pages(spool, chg);
6802 out_uncharge_cgroup:
6803         hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6804                                             chg * pages_per_huge_page(h), h_cg);
6805 out_err:
6806         hugetlb_vma_lock_free(vma);
6807         if (!vma || vma->vm_flags & VM_MAYSHARE)
6808                 /* Only call region_abort if the region_chg succeeded but the
6809                  * region_add failed or didn't run.
6810                  */
6811                 if (chg >= 0 && add < 0)
6812                         region_abort(resv_map, from, to, regions_needed);
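        /*
         * For a private mapping that owns the reservation, drop the reference
         * taken by resv_map_alloc() and clear the VMA's resv_map pointer so
         * that tearing down the failed mapping (hugetlb_vm_op_close()) does
         * not drop the reference a second time.
         */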
6813         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
6814                 kref_put(&resv_map->refs, resv_map_release);
6815                 set_vma_resv_map(vma, NULL);
6816         }
6817         return false;
6818 }
6819
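/*
 * hugetlb_unreserve_pages() - give back reservations in [start, end) when
 * pages are removed from a hugetlbfs file (or the inode is evicted).
 * @freed is the number of pages actually freed; the reserve map delta minus
 * @freed is returned to the subpool and, adjusted for any subpool minimum,
 * to the global reservation count.
 */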
6820 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6821                                                                 long freed)
6822 {
6823         struct hstate *h = hstate_inode(inode);
6824         struct resv_map *resv_map = inode_resv_map(inode);
6825         long chg = 0;
6826         struct hugepage_subpool *spool = subpool_inode(inode);
6827         long gbl_reserve;
6828
6829         /*
6830          * Since this routine can be called in the evict inode path for all
6831          * hugetlbfs inodes, resv_map could be NULL.
6832          */
6833         if (resv_map) {
6834                 chg = region_del(resv_map, start, end);
6835                 /*
6836                  * region_del() can fail in the rare case where a region
6837                  * must be split and another region descriptor can not be
6838                  * allocated.  If end == LONG_MAX, it will not fail.
6839                  */
6840                 if (chg < 0)
6841                         return chg;
6842         }
6843
6844         spin_lock(&inode->i_lock);
6845         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6846         spin_unlock(&inode->i_lock);
6847
6848         /*
6849          * If the subpool has a minimum size, the number of global
6850          * reservations to be released may be adjusted.
6851          *
6852          * Note that !resv_map implies freed == 0. So (chg - freed)
6853          * won't go negative.
6854          */
6855         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6856         hugetlb_acct_memory(h, -gbl_reserve);
6857
6858         return 0;
6859 }
6860
6861 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
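/*
 * page_table_shareable() decides whether @vma may share the PMD page that
 * @svma already uses for the PUD-sized region covering @addr.
 *
 * Illustrative example (assuming x86-64 with 4 KiB base pages, so
 * PUD_SIZE = 1 GiB): with both VMAs mapping file offset 0, @svma starting
 * at 0x40000000 and covering the whole gigabyte, and @vma starting at
 * 0x80000000, an access at addr 0x80200000 yields saddr 0x40200000.  Both
 * addresses use the same PMD slot and [sbase, s_end) lies inside @svma, so
 * sharing is allowed provided the flags match and @svma has a vma_lock.
 */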
6862 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6863                                 struct vm_area_struct *vma,
6864                                 unsigned long addr, pgoff_t idx)
6865 {
6866         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6867                                 svma->vm_start;
6868         unsigned long sbase = saddr & PUD_MASK;
6869         unsigned long s_end = sbase + PUD_SIZE;
6870
6871         /* Allow segments to share even if their VM_LOCKED status differs */
6872         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6873         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
6874
6875         /*
6876          * match the virtual addresses, permission and the alignment of the
6877          * page table page.
6878          *
6879          * Also, vma_lock (vm_private_data) is required for sharing.
6880          */
6881         if (pmd_index(addr) != pmd_index(saddr) ||
6882             vm_flags != svm_flags ||
6883             !range_in_vma(svma, sbase, s_end) ||
6884             !svma->vm_private_data)
6885                 return 0;
6886
6887         return saddr;
6888 }
6889
6890 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6891 {
6892         unsigned long start = addr & PUD_MASK;
6893         unsigned long end = start + PUD_SIZE;
6894
6895 #ifdef CONFIG_USERFAULTFD
6896         if (uffd_disable_huge_pmd_share(vma))
6897                 return false;
6898 #endif
6899         /*
6900          * check on proper vm_flags and page table alignment
6901          */
6902         if (!(vma->vm_flags & VM_MAYSHARE))
6903                 return false;
6904         if (!vma->vm_private_data)      /* vma lock required for sharing */
6905                 return false;
6906         if (!range_in_vma(vma, start, end))
6907                 return false;
6908         return true;
6909 }
6910
6911 /*
6912  * Determine if start,end range within vma could be mapped by shared pmd.
6913  * If yes, adjust start and end to cover range associated with possible
6914  * shared pmd mappings.
6915  */
6916 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6917                                 unsigned long *start, unsigned long *end)
6918 {
6919         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6920                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6921
6922         /*
6923          * vma needs to span at least one aligned PUD size, and the range
6924          * must be at least partially within it.
6925          */
6926         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6927                 (*end <= v_start) || (*start >= v_end))
6928                 return;
6929
6930         /* Extend the range to be PUD aligned for a worst case scenario */
6931         if (*start > v_start)
6932                 *start = ALIGN_DOWN(*start, PUD_SIZE);
6933
6934         if (*end < v_end)
6935                 *end = ALIGN(*end, PUD_SIZE);
6936 }
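/*
 * Example (illustrative, assuming PUD_SIZE = 1 GiB): for a VM_MAYSHARE vma
 * spanning [0x40000000, 0x80000000), a request covering only
 * [0x40200000, 0x40400000) is widened to the full [0x40000000, 0x80000000)
 * so that a PMD page possibly shared with other mappings is handled as a
 * whole.
 */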
6937
6938 /*
6939  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
6940  * and returns the corresponding pte. While this is not necessary for the
6941  * !shared pmd case because we can allocate the pmd later as well, it makes the
6942  * code much cleaner. pmd allocation is essential for the shared case because
6943  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6944  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6945  * bad pmd for sharing.
6946  */
6947 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6948                       unsigned long addr, pud_t *pud)
6949 {
6950         struct address_space *mapping = vma->vm_file->f_mapping;
6951         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6952                         vma->vm_pgoff;
6953         struct vm_area_struct *svma;
6954         unsigned long saddr;
6955         pte_t *spte = NULL;
6956         pte_t *pte;
6957
6958         i_mmap_lock_read(mapping);
6959         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6960                 if (svma == vma)
6961                         continue;
6962
6963                 saddr = page_table_shareable(svma, vma, addr, idx);
6964                 if (saddr) {
6965                         spte = hugetlb_walk(svma, saddr,
6966                                             vma_mmu_pagesize(svma));
6967                         if (spte) {
6968                                 get_page(virt_to_page(spte));
6969                                 break;
6970                         }
6971                 }
6972         }
6973
6974         if (!spte)
6975                 goto out;
6976
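        /*
         * Commit the shared PMD page under page_table_lock: if our PUD is
         * still empty, install it and the get_page() above becomes this
         * mm's reference; if someone else populated the PUD first, drop the
         * extra reference we took.
         */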
6977         spin_lock(&mm->page_table_lock);
6978         if (pud_none(*pud)) {
6979                 pud_populate(mm, pud,
6980                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
6981                 mm_inc_nr_pmds(mm);
6982         } else {
6983                 put_page(virt_to_page(spte));
6984         }
6985         spin_unlock(&mm->page_table_lock);
6986 out:
6987         pte = (pte_t *)pmd_alloc(mm, pud, addr);
6988         i_mmap_unlock_read(mapping);
6989         return pte;
6990 }
6991
6992 /*
6993  * unmap huge page backed by shared pte.
6994  *
6995  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
6996  * indicated by page_count > 1, unmap is achieved by clearing pud and
6997  * decrementing the ref count. If count == 1, the pte page is not shared.
6998  *
6999  * Called with page table lock held; i_mmap_rwsem must be held in write
7000  * mode and the hugetlb vma lock must be held (see the asserts below).
7001  * returns: 1 successfully unmapped a shared pte page
7002  *          0 the underlying pte page is not shared, or it is the last user
7003  */
7004 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7005                                         unsigned long addr, pte_t *ptep)
7006 {
7007         pgd_t *pgd = pgd_offset(mm, addr);
7008         p4d_t *p4d = p4d_offset(pgd, addr);
7009         pud_t *pud = pud_offset(p4d, addr);
7010
7011         i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7012         hugetlb_vma_assert_locked(vma);
7013         BUG_ON(page_count(virt_to_page(ptep)) == 0);
7014         if (page_count(virt_to_page(ptep)) == 1)
7015                 return 0;
7016
7017         pud_clear(pud);
7018         put_page(virt_to_page(ptep));
7019         mm_dec_nr_pmds(mm);
7020         return 1;
7021 }
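/*
 * Note that huge_pmd_unshare() only clears the PUD entry; callers are
 * responsible for flushing the TLB for the affected range afterwards (see
 * for example hugetlb_unshare_pmds() below).
 */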
7022
7023 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7024
7025 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7026                       unsigned long addr, pud_t *pud)
7027 {
7028         return NULL;
7029 }
7030
7031 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7032                                 unsigned long addr, pte_t *ptep)
7033 {
7034         return 0;
7035 }
7036
7037 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7038                                 unsigned long *start, unsigned long *end)
7039 {
7040 }
7041
7042 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7043 {
7044         return false;
7045 }
7046 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7047
7048 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
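/*
 * huge_pte_alloc() - allocate (or look up) the page table entry that will
 * map the huge page at @addr.  For PUD_SIZE pages the pud entry itself is
 * returned; for PMD_SIZE pages a pmd is allocated, shared via
 * huge_pmd_share() when want_pmd_share() allows it and the pud is still
 * empty.
 */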
7049 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7050                         unsigned long addr, unsigned long sz)
7051 {
7052         pgd_t *pgd;
7053         p4d_t *p4d;
7054         pud_t *pud;
7055         pte_t *pte = NULL;
7056
7057         pgd = pgd_offset(mm, addr);
7058         p4d = p4d_alloc(mm, pgd, addr);
7059         if (!p4d)
7060                 return NULL;
7061         pud = pud_alloc(mm, p4d, addr);
7062         if (pud) {
7063                 if (sz == PUD_SIZE) {
7064                         pte = (pte_t *)pud;
7065                 } else {
7066                         BUG_ON(sz != PMD_SIZE);
7067                         if (want_pmd_share(vma, addr) && pud_none(*pud))
7068                                 pte = huge_pmd_share(mm, vma, addr, pud);
7069                         else
7070                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
7071                 }
7072         }
7073
7074         if (pte) {
7075                 pte_t pteval = ptep_get_lockless(pte);
7076
7077                 BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7078         }
7079
7080         return pte;
7081 }
7082
7083 /*
7084  * huge_pte_offset() - Walk the page table to resolve the hugepage
7085  * entry at address @addr
7086  *
7087  * Return: Pointer to page table entry (PUD or PMD) for
7088  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7089  * size @sz doesn't match the hugepage size at this level of the page
7090  * table.
7091  */
7092 pte_t *huge_pte_offset(struct mm_struct *mm,
7093                        unsigned long addr, unsigned long sz)
7094 {
7095         pgd_t *pgd;
7096         p4d_t *p4d;
7097         pud_t *pud;
7098         pmd_t *pmd;
7099
7100         pgd = pgd_offset(mm, addr);
7101         if (!pgd_present(*pgd))
7102                 return NULL;
7103         p4d = p4d_offset(pgd, addr);
7104         if (!p4d_present(*p4d))
7105                 return NULL;
7106
7107         pud = pud_offset(p4d, addr);
7108         if (sz == PUD_SIZE)
7109                 /* must be pud huge, non-present or none */
7110                 return (pte_t *)pud;
7111         if (!pud_present(*pud))
7112                 return NULL;
7113         /* must have a valid entry and size to go further */
7114
7115         pmd = pmd_offset(pud, addr);
7116         /* must be pmd huge, non-present or none */
7117         return (pte_t *)pmd;
7118 }
7119
7120 /*
7121  * Return a mask that can be used to update an address to the last huge
7122  * page in a page table page mapping size.  Used to skip non-present
7123  * page table entries when linearly scanning address ranges.  Architectures
7124  * with unique huge page to page table relationships can define their own
7125  * version of this routine.
7126  */
7127 unsigned long hugetlb_mask_last_page(struct hstate *h)
7128 {
7129         unsigned long hp_size = huge_page_size(h);
7130
7131         if (hp_size == PUD_SIZE)
7132                 return P4D_SIZE - PUD_SIZE;
7133         else if (hp_size == PMD_SIZE)
7134                 return PUD_SIZE - PMD_SIZE;
7135         else
7136                 return 0UL;
7137 }
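/*
 * Example (illustrative, assuming the usual x86-64 geometry): for 2 MiB
 * pages the mask is PUD_SIZE - PMD_SIZE = 0x3fe00000.  OR-ing it into an
 * address moves the address to the last PMD slot of its PUD region, so a
 * scan that steps by the huge page size continues in the next page table
 * page.
 */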
7138
7139 #else
7140
7141 /* See description above.  Architectures can provide their own version. */
7142 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7143 {
7144 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7145         if (huge_page_size(h) == PMD_SIZE)
7146                 return PUD_SIZE - PMD_SIZE;
7147 #endif
7148         return 0UL;
7149 }
7150
7151 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7152
7153 /*
7154  * These functions can be overridden if your architecture needs its own
7155  * behavior.
7156  */
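/*
 * isolate_hugetlb() - take an in-use hugetlb folio off its hstate's active
 * list so it can be migrated.  Returns false if the folio is not a
 * migratable hugetlb folio or if a reference could not be taken.
 */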
7157 bool isolate_hugetlb(struct folio *folio, struct list_head *list)
7158 {
7159         bool ret = true;
7160
7161         spin_lock_irq(&hugetlb_lock);
7162         if (!folio_test_hugetlb(folio) ||
7163             !folio_test_hugetlb_migratable(folio) ||
7164             !folio_try_get(folio)) {
7165                 ret = false;
7166                 goto unlock;
7167         }
7168         folio_clear_hugetlb_migratable(folio);
7169         list_move_tail(&folio->lru, list);
7170 unlock:
7171         spin_unlock_irq(&hugetlb_lock);
7172         return ret;
7173 }
7174
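/*
 * get_hwpoison_hugetlb_folio() - grab a reference on a hugetlb folio for
 * memory-failure handling.  *hugetlb reports whether the folio is hugetlb
 * at all.  Returns 1 if a reference was taken, 0 if none was taken (not
 * hugetlb, freed, or the reference attempt failed) and -EBUSY if the folio
 * is in use but not migratable and not being unpoisoned.
 */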
7175 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7176 {
7177         int ret = 0;
7178
7179         *hugetlb = false;
7180         spin_lock_irq(&hugetlb_lock);
7181         if (folio_test_hugetlb(folio)) {
7182                 *hugetlb = true;
7183                 if (folio_test_hugetlb_freed(folio))
7184                         ret = 0;
7185                 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7186                         ret = folio_try_get(folio);
7187                 else
7188                         ret = -EBUSY;
7189         }
7190         spin_unlock_irq(&hugetlb_lock);
7191         return ret;
7192 }
7193
7194 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7195                                 bool *migratable_cleared)
7196 {
7197         int ret;
7198
7199         spin_lock_irq(&hugetlb_lock);
7200         ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7201         spin_unlock_irq(&hugetlb_lock);
7202         return ret;
7203 }
7204
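/*
 * folio_putback_active_hugetlb() - undo isolate_hugetlb(): mark the folio
 * migratable again, move it back to its hstate's active list and drop the
 * reference taken at isolation time.
 */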
7205 void folio_putback_active_hugetlb(struct folio *folio)
7206 {
7207         spin_lock_irq(&hugetlb_lock);
7208         folio_set_hugetlb_migratable(folio);
7209         list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7210         spin_unlock_irq(&hugetlb_lock);
7211         folio_put(folio);
7212 }
7213
7214 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7215 {
7216         struct hstate *h = folio_hstate(old_folio);
7217
7218         hugetlb_cgroup_migrate(old_folio, new_folio);
7219         set_page_owner_migrate_reason(&new_folio->page, reason);
7220
7221         /*
7222          * Transfer the temporary status from the new hugetlb folio to the
7223          * old one. This is the reverse of other transitions because the
7224          * new folio is going to be final while the old one will be freed,
7225          * so the old folio takes over the temporary status.
7226          *
7227          * Also note that we have to transfer the per-node surplus state
7228          * here as well otherwise the global surplus count will not match
7229          * the per-node's.
7230          */
7231         if (folio_test_hugetlb_temporary(new_folio)) {
7232                 int old_nid = folio_nid(old_folio);
7233                 int new_nid = folio_nid(new_folio);
7234
7235                 folio_set_hugetlb_temporary(old_folio);
7236                 folio_clear_hugetlb_temporary(new_folio);
7237
7238
7239                 /*
7240                  * There is no need to transfer the per-node surplus state
7241                  * when we do not cross the node.
7242                  */
7243                 if (new_nid == old_nid)
7244                         return;
7245                 spin_lock_irq(&hugetlb_lock);
7246                 if (h->surplus_huge_pages_node[old_nid]) {
7247                         h->surplus_huge_pages_node[old_nid]--;
7248                         h->surplus_huge_pages_node[new_nid]++;
7249                 }
7250                 spin_unlock_irq(&hugetlb_lock);
7251         }
7252 }
7253
7254 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7255                                    unsigned long start,
7256                                    unsigned long end)
7257 {
7258         struct hstate *h = hstate_vma(vma);
7259         unsigned long sz = huge_page_size(h);
7260         struct mm_struct *mm = vma->vm_mm;
7261         struct mmu_notifier_range range;
7262         unsigned long address;
7263         spinlock_t *ptl;
7264         pte_t *ptep;
7265
7266         if (!(vma->vm_flags & VM_MAYSHARE))
7267                 return;
7268
7269         if (start >= end)
7270                 return;
7271
7272         flush_cache_range(vma, start, end);
7273         /*
7274          * No need to call adjust_range_if_pmd_sharing_possible(), because
7275          * we have already done the PUD_SIZE alignment.
7276          */
7277         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7278                                 start, end);
7279         mmu_notifier_invalidate_range_start(&range);
7280         hugetlb_vma_lock_write(vma);
7281         i_mmap_lock_write(vma->vm_file->f_mapping);
7282         for (address = start; address < end; address += PUD_SIZE) {
7283                 ptep = hugetlb_walk(vma, address, sz);
7284                 if (!ptep)
7285                         continue;
7286                 ptl = huge_pte_lock(h, mm, ptep);
7287                 huge_pmd_unshare(mm, vma, address, ptep);
7288                 spin_unlock(ptl);
7289         }
7290         flush_hugetlb_tlb_range(vma, start, end);
7291         i_mmap_unlock_write(vma->vm_file->f_mapping);
7292         hugetlb_vma_unlock_write(vma);
7293         /*
7294          * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7295          * Documentation/mm/mmu_notifier.rst.
7296          */
7297         mmu_notifier_invalidate_range_end(&range);
7298 }
7299
7300 /*
7301  * This function will unconditionally remove all the shared pmd pgtable entries
7302  * within the specific vma for a hugetlbfs memory range.
7303  */
7304 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7305 {
7306         hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7307                         ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7308 }
7309
7310 #ifdef CONFIG_CMA
7311 static bool cma_reserve_called __initdata;
7312
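/*
 * Parse the hugetlb_cma= early parameter.  The parser below accepts either
 * a single size spread across online nodes (e.g. "hugetlb_cma=2G") or a
 * comma-separated list of per-node sizes (e.g. "hugetlb_cma=0:1G,1:1G").
 */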
7313 static int __init cmdline_parse_hugetlb_cma(char *p)
7314 {
7315         int nid, count = 0;
7316         unsigned long tmp;
7317         char *s = p;
7318
7319         while (*s) {
7320                 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7321                         break;
7322
7323                 if (s[count] == ':') {
7324                         if (tmp >= MAX_NUMNODES)
7325                                 break;
7326                         nid = array_index_nospec(tmp, MAX_NUMNODES);
7327
7328                         s += count + 1;
7329                         tmp = memparse(s, &s);
7330                         hugetlb_cma_size_in_node[nid] = tmp;
7331                         hugetlb_cma_size += tmp;
7332
7333                         /*
7334                          * Skip the separator if we have one; otherwise
7335                          * stop parsing.
7336                          */
7337                         if (*s == ',')
7338                                 s++;
7339                         else
7340                                 break;
7341                 } else {
7342                         hugetlb_cma_size = memparse(p, &p);
7343                         break;
7344                 }
7345         }
7346
7347         return 0;
7348 }
7349
7350 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7351
7352 void __init hugetlb_cma_reserve(int order)
7353 {
7354         unsigned long size, reserved, per_node;
7355         bool node_specific_cma_alloc = false;
7356         int nid;
7357
7358         cma_reserve_called = true;
7359
7360         if (!hugetlb_cma_size)
7361                 return;
7362
7363         for (nid = 0; nid < MAX_NUMNODES; nid++) {
7364                 if (hugetlb_cma_size_in_node[nid] == 0)
7365                         continue;
7366
7367                 if (!node_online(nid)) {
7368                         pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7369                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7370                         hugetlb_cma_size_in_node[nid] = 0;
7371                         continue;
7372                 }
7373
7374                 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7375                         pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7376                                 nid, (PAGE_SIZE << order) / SZ_1M);
7377                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7378                         hugetlb_cma_size_in_node[nid] = 0;
7379                 } else {
7380                         node_specific_cma_alloc = true;
7381                 }
7382         }
7383
7384         /* Validate the CMA size again in case invalid nodes were specified. */
7385         if (!hugetlb_cma_size)
7386                 return;
7387
7388         if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7389                 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7390                         (PAGE_SIZE << order) / SZ_1M);
7391                 hugetlb_cma_size = 0;
7392                 return;
7393         }
7394
7395         if (!node_specific_cma_alloc) {
7396                 /*
7397                  * If 3 GB area is requested on a machine with 4 numa nodes,
7398                  * let's allocate 1 GB on first three nodes and ignore the last one.
7399                  */
7400                 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7401                 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7402                         hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7403         }
7404
7405         reserved = 0;
7406         for_each_online_node(nid) {
7407                 int res;
7408                 char name[CMA_MAX_NAME];
7409
7410                 if (node_specific_cma_alloc) {
7411                         if (hugetlb_cma_size_in_node[nid] == 0)
7412                                 continue;
7413
7414                         size = hugetlb_cma_size_in_node[nid];
7415                 } else {
7416                         size = min(per_node, hugetlb_cma_size - reserved);
7417                 }
7418
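                /*
                 * Round the per-node request up to the gigantic page size so
                 * the CMA area can hold a whole number of huge pages of this
                 * order.
                 */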
7419                 size = round_up(size, PAGE_SIZE << order);
7420
7421                 snprintf(name, sizeof(name), "hugetlb%d", nid);
7422                 /*
7423                  * Note that 'order per bit' is based on the smallest size
7424                  * that may be returned to the CMA allocator in the case of
7425                  * huge page demotion.
7426                  */
7427                 res = cma_declare_contiguous_nid(0, size, 0,
7428                                                 PAGE_SIZE << HUGETLB_PAGE_ORDER,
7429                                                  0, false, name,
7430                                                  &hugetlb_cma[nid], nid);
7431                 if (res) {
7432                         pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
7433                                 res, nid);
7434                         continue;
7435                 }
7436
7437                 reserved += size;
7438                 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7439                         size / SZ_1M, nid);
7440
7441                 if (reserved >= hugetlb_cma_size)
7442                         break;
7443         }
7444
7445         if (!reserved)
7446                 /*
7447                  * hugetlb_cma_size is used to determine if allocations from
7448                  * cma are possible.  Set to zero if no cma regions are set up.
7449                  */
7450                 hugetlb_cma_size = 0;
7451 }
7452
7453 static void __init hugetlb_cma_check(void)
7454 {
7455         if (!hugetlb_cma_size || cma_reserve_called)
7456                 return;
7457
7458         pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7459 }
7460
7461 #endif /* CONFIG_CMA */