PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
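For reference, the added TESTCLEARFLAG(LRU, lru, PF_HEAD) line makes the page-flags machinery emit a TestClearPageLRU() helper. Roughly, as a sketch from my reading of the macro (the real expansion goes through the PF_HEAD policy and its poison check rather than calling compound_head() directly):

static __always_inline int TestClearPageLRU(struct page *page)
{
	/* PF_HEAD policy: PG_lru always lives in the compound head's flags */
	return test_and_clear_bit(PG_lru, &compound_head(page)->flags);
}

The atomic test_and_clear_bit() is what lets one task claim exclusive ownership of a page's LRU state without holding lru_lock; the mm/vmscan.c hunks below all build on that.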
/*
 * Attempt to remove the specified page from its LRU.
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
-	int ret = -EINVAL;
+	int ret = -EBUSY;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

-	ret = -EBUSY;
-
	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 */
[...]
	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're sure the
		 * page is not being freed elsewhere -- the page release code
		 * relies on it.
		 */
-		ClearPageLRU(page);
-		ret = 0;
+		if (TestClearPageLRU(page))
+			ret = 0;
+		else
+			put_page(page);
	}

	return ret;
}
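The losing side's put_page() balances the speculative reference taken by get_page_unless_zero() just above, so a racer that sees PG_lru already cleared backs out cleanly. A hypothetical caller-side sketch of the same claim-or-back-off pattern (illustration only; my_isolate is an invented name, not part of the patch):

static bool my_isolate(struct page *page)
{
	/* The page may already be headed back to the allocator. */
	if (unlikely(!get_page_unless_zero(page)))
		return false;

	/* Atomically claim the LRU state; exactly one racer can win. */
	if (!TestClearPageLRU(page)) {
		put_page(page);		/* lost the race, drop our ref */
		return false;
	}

	return true;	/* we hold a reference and own PG_lru */
}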
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

-		VM_BUG_ON_PAGE(!PageLRU(page), page);
-
		nr_pages = compound_nr(page);
		total_scan += nr_pages;
[...]
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON_PAGE(!page_count(page), page);
	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
-	if (PageLRU(page)) {
+	if (TestClearPageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

-		spin_lock_irq(&pgdat->lru_lock);
+		get_page(page);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-		if (PageLRU(page)) {
-			int lru = page_lru(page);
-			get_page(page);
-			ClearPageLRU(page);
-			del_page_from_lru_list(page, lruvec, lru);
-			ret = 0;
-		}
+		spin_lock_irq(&pgdat->lru_lock);
+		del_page_from_lru_list(page, lruvec, page_lru(page));
		spin_unlock_irq(&pgdat->lru_lock);
+		ret = 0;
	}
+
	return ret;
}
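Worth spelling out the new ordering in isolate_lru_page(): PG_lru is claimed atomically before lru_lock is taken, so the lock now protects only the list surgery, and the plain get_page() is safe because the function's contract already requires the caller to hold a reference. A hedged usage sketch (my_take_page is an invented name for illustration; isolate_lru_page() and putback_lru_page() are the real helpers):

static int my_take_page(struct page *page)
{
	int ret;

	get_page(page);			/* caller's own reference, per the contract */
	ret = isolate_lru_page(page);	/* takes a second reference on success */
	if (ret) {
		put_page(page);
		return ret;
	}

	/* ... operate on the page while it is off the LRU ... */

	putback_lru_page(page);		/* re-links the page, drops isolate's ref */
	put_page(page);
	return 0;
}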
		nr_pages = thp_nr_pages(page);
		pgscanned += nr_pages;

+		/* block memcg migration during page moving between lru */
+		if (!TestClearPageLRU(page))
+			continue;
+
		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irq(&pgdat->lru_lock);
			pgdat = pagepgdat;
			spin_lock_irq(&pgdat->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
-		if (!PageLRU(page) || !PageUnevictable(page))
-			continue;
-
-		if (page_evictable(page)) {
+		if (page_evictable(page) && PageUnevictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued += nr_pages;
		}
+		SetPageLRU(page);
	}
	if (pgdat) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&pgdat->lru_lock);
+	} else if (pgscanned) {
+		count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
	}
}
EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
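One note on the fallback branch: when every page in the pagevec loses the TestClearPageLRU() race, pgdat stays NULL and lru_lock is never taken, yet the scan still has to be accounted; the new else-branch does that. The switch from __count_vm_events() to count_vm_events() matters (a sketch of my reading of the counter API, not new code in the patch):

/*
 * The double-underscore variant assumes the caller already excludes
 * preemption/interrupts, which spin_lock_irq() provides in the branch
 * above; the plain variant protects itself and so is the right call
 * on the lock-less path.
 */
__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);	/* under spin_lock_irq() */
count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);	/* no lock held */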