// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
};

enum {
        HMM_NEED_FAULT = 1 << 0,
        HMM_NEED_WRITE_FAULT = 1 << 1,
        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                         struct hmm_range *range, unsigned long cpu_flags)
{
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

        for (; addr < end; addr += PAGE_SIZE, i++)
                range->hmm_pfns[i] = cpu_flags;
        return 0;
}
/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                         unsigned int required_fault, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!required_fault);
        hmm_vma_walk->last = addr;

        if (required_fault & HMM_NEED_WRITE_FAULT) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE)
                if (handle_mm_fault(vma, addr, fault_flags, NULL) &
                    VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                       unsigned long pfn_req_flags,
                                       unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;

        /*
         * Consider not only the individual per-page request but also the
         * default flags requested for the whole range. The API can be used
         * two ways: the first one where the HMM user coalesces multiple page
         * faults into one request and sets flags per pfn for those faults,
         * and the second one where the HMM user wants to pre-fault a range
         * with specific flags. For the latter it is a waste to have the user
         * pre-fill the pfn arrays with a default flags value. (See the usage
         * sketch after this function.)
         */
        pfn_req_flags &= range->pfn_flags_mask;
        pfn_req_flags |= range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                return 0;

        /* Need to write fault? */
        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
            !(cpu_flags & HMM_PFN_WRITE))
                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

        /* If CPU page table is not valid then we need to fault */
        if (!(cpu_flags & HMM_PFN_VALID))
                return HMM_NEED_FAULT;
        return 0;
}
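/*
 * Usage sketch for the flag combination above (illustrative only; the
 * values below are assumptions for the example, not taken from this file).
 *
 * 1) Per-pfn faulting: the caller pre-fills each hmm_pfns[] entry with the
 *    request flags for that page and leaves the range-wide defaults alone:
 *
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = ~0UL;
 *	range.hmm_pfns[i] = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *
 * 2) Pre-faulting a whole range with one policy: set default_flags and
 *    clear pfn_flags_mask so the per-pfn input values are ignored:
 *
 *	range.default_flags = HMM_PFN_REQ_FAULT;
 *	range.pfn_flags_mask = 0;
 */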
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                     const unsigned long hmm_pfns[], unsigned long npages,
                     unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault = 0;
        unsigned long i;

        /*
         * If the default flags do not request to fault pages, and the mask does
         * not allow for individual pages to be faulted, then
         * hmm_pte_need_fault() will always return 0.
         */
        if (!((range->default_flags | range->pfn_flags_mask) &
              HMM_PFN_REQ_FAULT))
                return 0;

        for (i = 0; i < npages; ++i) {
                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                     cpu_flags);
                if (required_fault == HMM_NEED_ALL_BITS)
                        return required_fault;
        }
        return required_fault;
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long i, npages;
        unsigned long *hmm_pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        hmm_pfns = &range->hmm_pfns[i];
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
        if (!walk->vma) {
                if (required_fault)
                        return -EFAULT;
                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
        }
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);
        return hmm_pfns_fill(addr, end, range, 0);
}
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
        return order << HMM_PFN_ORDER_SHIFT;
}
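/*
 * Worked example (assuming a 4KiB base page and a 2MiB PMD, as on x86-64):
 * PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, so a huge PMD mapping is reported
 * with order 9 encoded in the pfn value, i.e. (9 << HMM_PFN_ORDER_SHIFT),
 * and callers can recover it with hmm_pfn_to_map_order().
 */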
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, unsigned long hmm_pfns[],
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        unsigned int required_fault;
        unsigned long cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                hmm_pfns[i] = pfn | cpu_flags;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
                                               swp_entry_t entry)
{
        return is_device_private_entry(entry) &&
                pfn_swap_entry_to_page(entry)->pgmap->owner ==
                range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              unsigned long *hmm_pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long cpu_flags;
        pte_t pte = *ptep;
        uint64_t pfn_req_flags = *hmm_pfn;

        if (pte_none(pte)) {
                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (required_fault)
                        goto fault;
                *hmm_pfn = 0;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Never fault in device private pages, but just report
                 * the PFN even if not present.
                 */
                if (hmm_is_device_private_entry(range, entry)) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
                        *hmm_pfn = swp_offset(entry) | cpu_flags;
                        return 0;
                }

                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (!required_fault) {
                        *hmm_pfn = 0;
                        return 0;
                }

                if (!non_swap_entry(entry))
                        goto fault;
                if (is_device_exclusive_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault)
                goto fault;

        /*
         * Bypass devmap pte such as DAX page when all pfn requested
         * flags (pfn_req_flags) are fulfilled.
         * Since each architecture defines a struct page for the zero page,
         * just fall through and treat it like a normal page.
         */
        if (!vm_normal_page(walk->vma, addr, pte) &&
            !pte_devmap(pte) &&
            !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                *hmm_pfn = HMM_PFN_ERROR;
                return 0;
        }

        *hmm_pfn = pte_pfn(pte) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, required_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long *hmm_pfns =
                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, 0);
        }

        if (!pmd_present(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
                 * the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping one and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;
                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point either it is a
         * valid pmd entry pointing to a pte directory or it is a bad pmd that
         * will not recover.
         */
        if (pmd_bad(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        return r;
                }
        }
        pte_unmap(ptep - 1);
        return 0;
}
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}
static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        int ret = 0;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (pud_none(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                unsigned int required_fault;
                unsigned long *hmm_pfns;
                unsigned long cpu_flags;

                if (!pud_present(pud)) {
                        spin_unlock(ptl);
                        return hmm_vma_walk_hole(start, end, -1, walk);
                }

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                hmm_pfns = &range->hmm_pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                      npages, cpu_flags);
                if (required_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, required_fault, walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn)
                        hmm_pfns[i] = pfn | cpu_flags;
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        unsigned int required_fault;
        unsigned long pfn_req_flags;
        unsigned long cpu_flags;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault) {
                spin_unlock(ptl);
                return hmm_vma_fault(addr, end, required_fault, walk);
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->hmm_pfns[i] = pfn | cpu_flags;

        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;

        /*
         * vma ranges that don't have struct page backing them or map I/O
         * devices directly cannot be handled by hmm_range_fault().
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         *
         * If a fault is requested for an unsupported range then it is a hard
         * failure.
         */
        if (hmm_range_need_fault(hmm_vma_walk,
                                 range->hmm_pfns +
                                         ((start - range->start) >> PAGE_SHIFT),
                                 (end - start) >> PAGE_SHIFT, 0))
                return -EFAULT;

        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

        /* Skip this vma and continue processing the next vma. */
        return 1;
}
static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry      = hmm_vma_walk_pud,
        .pmd_entry      = hmm_vma_walk_pmd,
        .pte_hole       = hmm_vma_walk_hole,
        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
        .test_walk      = hmm_vma_walk_test,
};
/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 *
 * A sketch of a typical caller follows the function body below.
 */
int hmm_range_fault(struct hmm_range *range)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        mmap_assert_locked(mm);

        do {
                /* If range is no longer valid force retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
                /*
                 * When -EBUSY is returned the loop restarts with
                 * hmm_vma_walk.last set to an address that has not been stored
                 * in pfns. All entries < last in the pfn array are set to their
                 * output, and all >= are still at their input values.
                 */
        } while (ret == -EBUSY);
        return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
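/*
 * Minimal caller sketch (an illustration, not part of this file): a driver
 * typically pairs hmm_range_fault() with an mmu_interval_notifier and
 * retries if the range was invalidated before the device page tables could
 * be updated. interval_sub, mm, start, NPAGES, pfns[], driver_lock and
 * driver_update_ptes() are hypothetical, caller-provided names used only
 * for this example.
 *
 *	unsigned long pfns[NPAGES];
 *	struct hmm_range range = {
 *		.notifier	= &interval_sub,
 *		.start		= start,
 *		.end		= start + NPAGES * PAGE_SIZE,
 *		.hmm_pfns	= pfns,
 *		.default_flags	= HMM_PFN_REQ_FAULT,
 *		.pfn_flags_mask	= 0,
 *	};
 *	int ret;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	driver_update_ptes(&range);	// consume pfns[] under driver_lock
 *	mutex_unlock(&driver_lock);
 */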