xas_set(&xas, start);
for (index = start; index < end; index++) {
struct page *page = xas_next(&xas);
+ struct folio *folio;
VM_BUG_ON(index != xas.xa_index);
if (is_shmem) {
}
if (xa_is_value(page) || !PageUptodate(page)) {
- struct folio *folio;
-
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
if (shmem_get_folio(mapping->host, index,
goto out_unlock;
}
- if (page_mapping(page) != mapping) {
+ folio = page_folio(page);
+
+ if (folio_mapping(folio) != mapping) {
result = SCAN_TRUNCATED;
goto out_unlock;
}
- if (!is_shmem && (PageDirty(page) ||
- PageWriteback(page))) {
+ if (!is_shmem && (folio_test_dirty(folio) ||
+ folio_test_writeback(folio))) {
/*
* khugepaged only works on read-only fd, so this
* page is dirty because it hasn't been flushed
* since first write.
*/
result = SCAN_FAIL;
goto out_unlock;
}
- if (isolate_lru_page(page)) {
+ if (!folio_isolate_lru(folio)) {
result = SCAN_DEL_PAGE_LRU;
goto out_unlock;
}
- if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL)) {
+ if (folio_has_private(folio) &&
+ !filemap_release_folio(folio, GFP_KERNEL)) {
result = SCAN_PAGE_HAS_PRIVATE;
- putback_lru_page(page);
+ folio_putback_lru(folio);
goto out_unlock;
}
- if (page_mapped(page))
- try_to_unmap(page_folio(page),
+ if (folio_mapped(folio))
+ try_to_unmap(folio,
TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
xas_lock_irq(&xas);