// SPDX-License-Identifier: GPL-2.0
/*
 * mm/fadvise.c
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 11Jan2003	Andrew Morton
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/fadvise.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/swap.h>

#include <asm/unistd.h>
/*
 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
 * deactivate the pages and clear PG_Referenced.
 */
31 int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
34 struct address_space *mapping;
35 struct backing_dev_info *bdi;
36 loff_t endbyte; /* inclusive */
39 unsigned long nrpages;
41 inode = file_inode(file);
42 if (S_ISFIFO(inode->i_mode))
45 mapping = file->f_mapping;
46 if (!mapping || len < 0)
49 bdi = inode_to_bdi(mapping->host);
51 if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
53 case POSIX_FADV_NORMAL:
54 case POSIX_FADV_RANDOM:
55 case POSIX_FADV_SEQUENTIAL:
56 case POSIX_FADV_WILLNEED:
57 case POSIX_FADV_NOREUSE:
58 case POSIX_FADV_DONTNEED:
59 /* no bad return value, but ignore advice */
68 * Careful about overflows. Len == 0 means "as much as possible". Use
69 * unsigned math because signed overflows are undefined and UBSan
72 endbyte = (u64)offset + (u64)len;
73 if (!len || endbyte < len)
76 endbyte--; /* inclusive */
79 case POSIX_FADV_NORMAL:
80 file->f_ra.ra_pages = bdi->ra_pages;
81 spin_lock(&file->f_lock);
82 file->f_mode &= ~(FMODE_RANDOM | FMODE_NOREUSE);
83 spin_unlock(&file->f_lock);
85 case POSIX_FADV_RANDOM:
86 spin_lock(&file->f_lock);
87 file->f_mode |= FMODE_RANDOM;
88 spin_unlock(&file->f_lock);
90 case POSIX_FADV_SEQUENTIAL:
91 file->f_ra.ra_pages = bdi->ra_pages * 2;
92 spin_lock(&file->f_lock);
93 file->f_mode &= ~FMODE_RANDOM;
94 spin_unlock(&file->f_lock);
96 case POSIX_FADV_WILLNEED:
97 /* First and last PARTIAL page! */
98 start_index = offset >> PAGE_SHIFT;
99 end_index = endbyte >> PAGE_SHIFT;
101 /* Careful about overflow on the "+1" */
102 nrpages = end_index - start_index + 1;
106 force_page_cache_readahead(mapping, file, start_index, nrpages);
108 case POSIX_FADV_NOREUSE:
109 spin_lock(&file->f_lock);
110 file->f_mode |= FMODE_NOREUSE;
111 spin_unlock(&file->f_lock);
113 case POSIX_FADV_DONTNEED:
114 __filemap_fdatawrite_range(mapping, offset, endbyte,
118 * First and last FULL page! Partial pages are deliberately
119 * preserved on the expectation that it is better to preserve
120 * needed memory than to discard unneeded memory.
122 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
123 end_index = (endbyte >> PAGE_SHIFT);
125 * The page at end_index will be inclusively discarded according
126 * by invalidate_mapping_pages(), so subtracting 1 from
127 * end_index means we will skip the last page. But if endbyte
128 * is page aligned or is at the end of file, we should not skip
129 * that page - discarding the last page is safe enough.
131 if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
132 endbyte != inode->i_size - 1) {
133 /* First page is tricky as 0 - 1 = -1, but pgoff_t
134 * is unsigned, so the end_index >= start_index
135 * check below would be true and we'll discard the whole
136 * file cache which is not what was asked.
144 if (end_index >= start_index) {
145 unsigned long nr_failed = 0;
148 * It's common to FADV_DONTNEED right after
149 * the read or write that instantiates the
150 * pages, in which case there will be some
151 * sitting on the local LRU cache. Try to
152 * avoid the expensive remote drain and the
153 * second cache tree walk below by flushing
154 * them out right away.
158 mapping_try_invalidate(mapping, start_index, end_index,
162 * The failures may be due to the folio being
163 * in the LRU cache of a remote CPU. Drain all
164 * caches and try again.
168 invalidate_mapping_pages(mapping, start_index,
178 EXPORT_SYMBOL(generic_fadvise);
180 int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
182 if (file->f_op->fadvise)
183 return file->f_op->fadvise(file, offset, len, advice);
185 return generic_fadvise(file, offset, len, advice);
187 EXPORT_SYMBOL(vfs_fadvise);
189 #ifdef CONFIG_ADVISE_SYSCALLS
191 int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
193 struct fd f = fdget(fd);
199 ret = vfs_fadvise(f.file, offset, len, advice);
205 SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
207 return ksys_fadvise64_64(fd, offset, len, advice);
#ifdef __ARCH_WANT_SYS_FADVISE64

/*
 * fadvise64(2): legacy variant taking a size_t length; it is widened to
 * loff_t before reaching the common handler.
 */
SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}

#endif
219 #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FADVISE64_64)
221 COMPAT_SYSCALL_DEFINE6(fadvise64_64, int, fd, compat_arg_u64_dual(offset),
222 compat_arg_u64_dual(len), int, advice)
224 return ksys_fadvise64_64(fd, compat_arg_u64_glue(offset),
225 compat_arg_u64_glue(len), advice);