// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

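/*
 * Illustrative userspace sketch (not part of this file's build): it shows
 * how the "in core" definition above can be observed from a program.  The
 * helper name probe_residency() and the O_RDWR open (which satisfies the
 * can_do_mincore() check below) are assumptions for the example; error
 * handling is omitted for brevity.  It would print 0 then 1, assuming the
 * page was not already in the page cache.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	static void probe_residency(const char *path)
 *	{
 *		int fd = open(path, O_RDWR);
 *		unsigned char status;
 *		char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *
 *		mincore(p, 4096, &status);
 *		printf("before touch: %d\n", status & 1);
 *		(void)*(volatile char *)p;		// fault the page in
 *		mincore(p, 4096, &status);		// now reports resident
 *		printf("after touch:  %d\n", status & 1);
 *		munmap(p, 4096);
 *		close(fd);
 *	}
 */
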
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

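/*
 * Illustrative note (not kernel code): when can_do_mincore() fails,
 * do_mincore() below fills the vector with 1s rather than real residency
 * state, so the caller learns nothing about the page cache.  A hedged
 * sketch of what such a caller would observe:
 *
 *	int fd = open("/file/owned/by/someone/else", O_RDONLY);
 *	char *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	mincore(p, len, vec);	// succeeds, but vec is all 1s
 */
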
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
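
/*
 * Illustrative usage sketch (not part of this file's build): a minimal
 * caller of mincore(2).  One status byte is produced per page, so the
 * vector needs (len + page_size - 1) / page_size entries; the PAGE_SIZE
 * chunking above is internal and invisible to the caller.  The helper
 * name count_resident() is an assumption for the example.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	static long count_resident(void *addr, size_t len)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		size_t pages = (len + psz - 1) / psz;
 *		unsigned char *vec = malloc(pages);
 *		long resident = 0;
 *
 *		if (!vec)
 *			return -1;
 *		if (mincore(addr, len, vec) != 0) {
 *			free(vec);
 *			return -1;
 *		}
 *		for (size_t i = 0; i < pages; i++)
 *			resident += vec[i] & 1;	// only the LSB is defined
 *		free(vec);
 *		return resident;
 *	}
 */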