1 // SPDX-License-Identifier: GPL-2.0
5 * Basic functional testing of madvise MADV_DONTNEED and MADV_REMOVE
8 * Before running this test, make sure the administrator has pre-allocated
9 * at least MIN_FREE_PAGES hugetlb pages and they are free. In addition,
10 * the test takes an argument that is the path to a file in a hugetlbfs
11 * filesystem. Therefore, a hugetlbfs filesystem must be mounted on some
/* Usage message printed when the hugetlbfs file path argument is missing. */
#define USAGE "USAGE: %s <hugepagefile_name>\n"
/* Minimum free hugetlb pages the admin must pre-allocate before running. */
#define MIN_FREE_PAGES 20
#define NR_HUGE_PAGES 10 /* common number of pages to map/allocate */
/*
 * Verify the current HugePages_Free count equals exp_free; on mismatch,
 * report the failing source line and exit the test.
 *
 * Wrapped in do { } while (0) so the local declaration and the if are
 * safe when the macro is used in any single-statement context (e.g. an
 * unbraced if/else arm) — the bare form is not.
 */
#define validate_free_pages(exp_free)					\
	do {								\
		int fhp = get_free_hugepages();				\
		if (fhp != (exp_free)) {				\
			printf("Unexpected number of free huge "	\
				"pages line %d\n", __LINE__);		\
			exit(1);					\
		}							\
	} while (0)
/* Huge page size in bytes, read from /proc/meminfo "Hugepagesize" in main(). */
unsigned long huge_page_size;
/* Base (regular) page size in bytes, from sysconf(_SC_PAGE_SIZE) in main(). */
unsigned long base_page_size;
40 * default_huge_page_size copied from mlock2-tests.c
/*
 * default_huge_page_size copied from mlock2-tests.c
 *
 * Parse /proc/meminfo for the default huge page size.
 *
 * Returns the size in bytes, or 0 if it cannot be determined (file not
 * readable or no "Hugepagesize:" line found).
 */
unsigned long default_huge_page_size(void)
{
	unsigned long hps = 0;
	char line[256];	/* /proc/meminfo lines are short; no need for getline */
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
			hps <<= 10;	/* convert kB to bytes */
			break;
		}
	}

	fclose(f);	/* original leaked the stream (and the getline buffer) */
	return hps;
}
63 unsigned long get_free_hugepages(void)
65 unsigned long fhp = 0;
68 FILE *f = fopen("/proc/meminfo", "r");
72 while (getline(&line, &linelen, f) > 0) {
73 if (sscanf(line, "HugePages_Free: %lu", &fhp) == 1)
82 void write_fault_pages(void *addr, unsigned long nr_pages)
86 for (i = 0; i < nr_pages; i++)
87 *((unsigned long *)(addr + (i * huge_page_size))) = i;
90 void read_fault_pages(void *addr, unsigned long nr_pages)
92 unsigned long dummy = 0;
95 for (i = 0; i < nr_pages; i++)
96 dummy += *((unsigned long *)(addr + (i * huge_page_size)));
99 int main(int argc, char **argv)
101 unsigned long free_hugepages;
107 printf(USAGE, argv[0]);
111 huge_page_size = default_huge_page_size();
112 if (!huge_page_size) {
113 printf("Unable to determine huge page size, exiting!\n");
116 base_page_size = sysconf(_SC_PAGE_SIZE);
117 if (!huge_page_size) {
118 printf("Unable to determine base page size, exiting!\n");
122 free_hugepages = get_free_hugepages();
123 if (free_hugepages < MIN_FREE_PAGES) {
124 printf("Not enough free huge pages to test, exiting!\n");
128 fd = open(argv[1], O_CREAT | O_RDWR, 0755);
130 perror("Open failed");
135 * Test validity of MADV_DONTNEED addr and length arguments. mmap
136 * size is NR_HUGE_PAGES + 2. One page at the beginning and end of
137 * the mapping will be unmapped so we KNOW there is nothing mapped
140 addr = mmap(NULL, (NR_HUGE_PAGES + 2) * huge_page_size,
141 PROT_READ | PROT_WRITE,
142 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
144 if (addr == MAP_FAILED) {
148 if (munmap(addr, huge_page_size) ||
149 munmap(addr + (NR_HUGE_PAGES + 1) * huge_page_size,
154 addr = addr + huge_page_size;
156 write_fault_pages(addr, NR_HUGE_PAGES);
157 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
159 /* addr before mapping should fail */
160 ret = madvise(addr - base_page_size, NR_HUGE_PAGES * huge_page_size,
163 printf("Unexpected success of madvise call with invalid addr line %d\n",
168 /* addr + length after mapping should fail */
169 ret = madvise(addr, (NR_HUGE_PAGES * huge_page_size) + base_page_size,
172 printf("Unexpected success of madvise call with invalid length line %d\n",
177 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
180 * Test alignment of MADV_DONTNEED addr and length arguments
182 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
183 PROT_READ | PROT_WRITE,
184 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
186 if (addr == MAP_FAILED) {
190 write_fault_pages(addr, NR_HUGE_PAGES);
191 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
193 /* addr is not huge page size aligned and should fail */
194 ret = madvise(addr + base_page_size,
195 NR_HUGE_PAGES * huge_page_size - base_page_size,
198 printf("Unexpected success of madvise call with unaligned start address %d\n",
203 /* addr + length should be aligned up to huge page size */
205 ((NR_HUGE_PAGES - 1) * huge_page_size) + base_page_size,
211 /* should free all pages in mapping */
212 validate_free_pages(free_hugepages);
214 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
217 * Test MADV_DONTNEED on anonymous private mapping
219 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
220 PROT_READ | PROT_WRITE,
221 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
223 if (addr == MAP_FAILED) {
227 write_fault_pages(addr, NR_HUGE_PAGES);
228 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
230 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
235 /* should free all pages in mapping */
236 validate_free_pages(free_hugepages);
238 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
241 * Test MADV_DONTNEED on private mapping of hugetlb file
243 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
247 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
249 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
250 PROT_READ | PROT_WRITE,
252 if (addr == MAP_FAILED) {
257 /* read should not consume any pages */
258 read_fault_pages(addr, NR_HUGE_PAGES);
259 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
261 /* madvise should not free any pages */
262 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
266 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
268 /* writes should allocate private pages */
269 write_fault_pages(addr, NR_HUGE_PAGES);
270 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
272 /* madvise should free private pages */
273 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
277 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
279 /* writes should allocate private pages */
280 write_fault_pages(addr, NR_HUGE_PAGES);
281 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
284 * The fallocate below certainly should free the pages associated
285 * with the file. However, pages in the private mapping are also
286 * freed. This is not the 'correct' behavior, but is expected
287 * because this is how it has worked since the initial hugetlb
290 if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
291 0, NR_HUGE_PAGES * huge_page_size)) {
295 validate_free_pages(free_hugepages);
297 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
300 * Test MADV_DONTNEED on shared mapping of hugetlb file
302 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
306 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
308 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
309 PROT_READ | PROT_WRITE,
311 if (addr == MAP_FAILED) {
316 /* write should not consume any pages */
317 write_fault_pages(addr, NR_HUGE_PAGES);
318 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
320 /* madvise should not free any pages */
321 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
325 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
328 * Test MADV_REMOVE on shared mapping of hugetlb file
330 * madvise is same as hole punch and should free all pages.
332 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
336 validate_free_pages(free_hugepages);
337 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
340 * Test MADV_REMOVE on shared and private mapping of hugetlb file
342 if (fallocate(fd, 0, 0, NR_HUGE_PAGES * huge_page_size)) {
346 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
348 addr = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
349 PROT_READ | PROT_WRITE,
351 if (addr == MAP_FAILED) {
356 /* shared write should not consume any additional pages */
357 write_fault_pages(addr, NR_HUGE_PAGES);
358 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
360 addr2 = mmap(NULL, NR_HUGE_PAGES * huge_page_size,
361 PROT_READ | PROT_WRITE,
363 if (addr2 == MAP_FAILED) {
368 /* private read should not consume any pages */
369 read_fault_pages(addr2, NR_HUGE_PAGES);
370 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
372 /* private write should consume additional pages */
373 write_fault_pages(addr2, NR_HUGE_PAGES);
374 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
376 /* madvise of shared mapping should not free any pages */
377 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
381 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
383 /* madvise of private mapping should free private pages */
384 if (madvise(addr2, NR_HUGE_PAGES * huge_page_size, MADV_DONTNEED)) {
388 validate_free_pages(free_hugepages - NR_HUGE_PAGES);
390 /* private write should consume additional pages again */
391 write_fault_pages(addr2, NR_HUGE_PAGES);
392 validate_free_pages(free_hugepages - (2 * NR_HUGE_PAGES));
395 * madvise should free both file and private pages although this is
396 * not correct. private pages should not be freed, but this is
397 * expected. See comment associated with FALLOC_FL_PUNCH_HOLE call.
399 if (madvise(addr, NR_HUGE_PAGES * huge_page_size, MADV_REMOVE)) {
403 validate_free_pages(free_hugepages);
405 (void)munmap(addr, NR_HUGE_PAGES * huge_page_size);
406 (void)munmap(addr2, NR_HUGE_PAGES * huge_page_size);