if (scan >= nr_to_scan)
break;
- page = pc->page;
if (unlikely(!PageCgroupUsed(pc)))
continue;
+
+ page = pc->page;
+
if (unlikely(!PageLRU(page)))
continue;
}
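
(On the hunk above: the load of pc->page is deferred until after the
PageCgroupUsed() check, so the page pointer is only read for page_cgroups
that are actually in use. A condensed sketch of the resulting loop shape;
the iteration macro and the src/tmp names are assumptions for illustration,
not verbatim kernel code:)

	list_for_each_entry_safe(pc, tmp, src, lru) {	/* src, tmp: assumed names */
		if (scan >= nr_to_scan)
			break;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		/* only read the page once the pc is known to be in use */
		page = pc->page;
		if (unlikely(!PageLRU(page)))
			continue;
		/* ... isolate and account the page ... */
	}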
static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page *page,
struct page_cgroup *pc,
enum charge_type ctype,
int page_size)
* Insert ancestor (and ancestor's ancestors) into the softlimit RB-tree
* if they exceed the soft limit.
*/
- memcg_check_events(mem, pc->page);
+ memcg_check_events(mem, page);
}
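
(The signature change above is the pattern repeated throughout this patch: a
caller that already knows the page hands it down, instead of the callee
re-deriving it via pc->page. A minimal before/after sketch; "helper" is a
made-up name standing in for the functions touched here:)

	/* before: the callee re-derives the page from the page_cgroup */
	static void helper(struct mem_cgroup *mem, struct page_cgroup *pc)
	{
		memcg_check_events(mem, pc->page);
	}

	/* after: the page travels alongside the pc that describes it */
	static void helper(struct mem_cgroup *mem, struct page *page,
			   struct page_cgroup *pc)
	{
		memcg_check_events(mem, page);
	}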
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
* mem_cgroup_move_account - move account of the page
+ * @page: the page
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
true, this function does "uncharge" from the old cgroup, but it doesn't if
@uncharge is false, so the caller must do the "uncharge" itself.
*/
-static int mem_cgroup_move_account(struct page_cgroup *pc,
+static int mem_cgroup_move_account(struct page *page, struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to,
bool uncharge, int charge_size)
{
int ret;
VM_BUG_ON(from == to);
- VM_BUG_ON(PageLRU(pc->page));
+ VM_BUG_ON(PageLRU(page));
/*
* The page is isolated from the LRU, so the collapse function
* will not handle this page. But page splitting can happen;
* the caller takes the compound lock to
* hold it.
*/
ret = -EBUSY;
- if (charge_size > PAGE_SIZE && !PageTransHuge(pc->page))
+ if (charge_size > PAGE_SIZE && !PageTransHuge(page))
goto out;
lock_page_cgroup(pc);
/*
* check events
*/
- memcg_check_events(to, pc->page);
- memcg_check_events(from, pc->page);
+ memcg_check_events(to, page);
+ memcg_check_events(from, page);
out:
return ret;
}
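
(A sketch of the calling contract the comment above describes, modelled on
mem_cgroup_move_parent() below and on the move_charge path at the end of this
section; from, to, page_size and the error handling are illustrative
assumptions, not verbatim kernel code:)

	unsigned long flags;
	int ret;

	/* the page must be off the LRU before the account moves */
	if (isolate_lru_page(page))
		return -EBUSY;
	/* for huge pages, hold the compound lock so a split cannot race */
	if (page_size > PAGE_SIZE)
		flags = compound_lock_irqsave(page);
	ret = mem_cgroup_move_account(page, pc, from, to, true, page_size);
	if (page_size > PAGE_SIZE)
		compound_lock_irqrestore(page, flags);
	putback_lru_page(page);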
* move charges to its parent.
*/
-static int mem_cgroup_move_parent(struct page_cgroup *pc,
+static int mem_cgroup_move_parent(struct page *page,
+ struct page_cgroup *pc,
struct mem_cgroup *child,
gfp_t gfp_mask)
{
- struct page *page = pc->page;
struct cgroup *cg = child->css.cgroup;
struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
if (page_size > PAGE_SIZE)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
+ ret = mem_cgroup_move_account(page, pc, child, parent, true, page_size);
if (ret)
mem_cgroup_cancel_charge(parent, page_size);
if (ret || !mem)
return ret;
- __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
+ __mem_cgroup_commit_charge(mem, page, pc, ctype, page_size);
return 0;
}
cgroup_exclude_rmdir(&ptr->css);
pc = lookup_page_cgroup(page);
mem_cgroup_lru_del_before_commit_swapcache(page);
- __mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
+ __mem_cgroup_commit_charge(ptr, page, pc, ctype, PAGE_SIZE);
mem_cgroup_lru_add_after_commit_swapcache(page);
/*
* Now the swap content is in memory. This means this page may be
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- __mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
+ __mem_cgroup_commit_charge(mem, page, pc, ctype, PAGE_SIZE);
return ret;
}
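
(In each converted call site, @page and @pc must describe the same page,
since the callee no longer re-reads pc->page. The expected idiom, as in the
swapin hunk above:)

	pc = lookup_page_cgroup(page);
	__mem_cgroup_commit_charge(mem, page, pc, ctype, PAGE_SIZE);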
loop += 256;
busy = NULL;
while (loop--) {
+ struct page *page;
+
ret = 0;
spin_lock_irqsave(&zone->lru_lock, flags);
if (list_empty(list)) {
}
spin_unlock_irqrestore(&zone->lru_lock, flags);
- ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
+ page = pc->page;
+
+ ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
if (ret == -ENOMEM)
break;
if (isolate_lru_page(page))
goto put;
pc = lookup_page_cgroup(page);
- if (!mem_cgroup_move_account(pc,
+ if (!mem_cgroup_move_account(page, pc,
mc.from, mc.to, false, PAGE_SIZE)) {
mc.precharge--;
/* we uncharge from mc.from later. */