 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
-				       struct page_cgroup *pc,
 				       enum charge_type ctype,
 				       bool lrucare)
 {
+	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
 	bool anon;
 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask, enum charge_type ctype)
 {
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
-	struct page_cgroup *pc;
 	bool oom = true;
 	int ret;

 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
 		/* never OOM-kill a process for a huge page */
 		oom = false;
 	}

-	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 	return 0;
 }
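
Both hunks above are the same mechanical change: every caller of
__mem_cgroup_commit_charge() derived pc with lookup_page_cgroup(page)
immediately before the call, so the lookup moves into the callee and the
parameter disappears. Below is a minimal userspace sketch of that
refactoring pattern; every name in it (page_desc, lookup_desc,
commit_charge) is a hypothetical stand-in, not kernel API.

    #include <stdio.h>

    struct page { int id; };
    struct page_desc { int charged; };      /* stand-in for page_cgroup */

    static struct page_desc table[16];

    /* stand-in for lookup_page_cgroup() */
    static struct page_desc *lookup_desc(struct page *page)
    {
            return &table[page->id];
    }

    /*
     * After the refactor the callee does the lookup itself; callers no
     * longer pass a descriptor they all derived in the same way.
     */
    static void commit_charge(struct page *page, unsigned int nr_pages)
    {
            struct page_desc *desc = lookup_desc(page);  /* was a parameter */

            desc->charged += nr_pages;
    }

    int main(void)
    {
            struct page page = { .id = 3 };

            commit_charge(&page, 1);        /* no lookup at the call site */
            printf("charged: %d\n", table[3].charged);
            return 0;
    }
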
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
-	struct page_cgroup *pc;
-
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
-	pc = lookup_page_cgroup(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
 	 */

 	/*
 	 * ... page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	pc = lookup_page_cgroup(newpage);
 	if (PageAnon(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	else if (page_is_file_cache(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 	return ret;
 }
 	/*
 	 * ... the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	pc = lookup_page_cgroup(newpage);
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }

 #ifdef CONFIG_DEBUG_VM
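
mem_cgroup_replace_page_cache() commits the charge with lrucare=true: as the
quoted comment says, the new page may already sit on the LRU (or in a pagevec
headed there), so pc->mem_cgroup may only be rewritten under the LRU lock. The
userspace sketch below is a rough analogy of that "lock only when the object
may already be shared" idea; it deliberately omits the unlink/relink the
kernel really does, and all names in it are made up.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct item {
            bool on_list;   /* analogous to PageLRU() */
            int owner;      /* analogous to pc->mem_cgroup */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Rewriting ->owner races with list walkers only if the item may be
     * visible on the shared list; callers that know it is not pass
     * listcare=false and skip the lock, mirroring the lrucare flag.
     */
    static void set_owner(struct item *item, int owner, bool listcare)
    {
            if (listcare)
                    pthread_mutex_lock(&list_lock);
            item->owner = owner;
            if (listcare)
                    pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            struct item it = { .on_list = true, .owner = 0 };

            set_owner(&it, 42, it.on_list);
            printf("owner: %d\n", it.owner);
            return 0;
    }
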
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
+
 	rcu_assign_pointer(thresholds->primary, new);

 	/* To be sure that nobody uses thresholds */
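
Context for this hunk: the threshold code keeps two arrays, primary and
spare. An update builds the new array in the spare slot and publishes it
with rcu_assign_pointer(), retiring the old primary into ->spare for the
next update to reuse. When the last event is unregistered, new is NULL,
and before this fix the retired primary would linger in ->spare with no
later update left to reuse or free it. A compressed userspace sketch of
the swap plus the fix, with plain pointers standing in for RCU and all
names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct thresholds {
            int *primary;   /* published array (RCU-protected in the kernel) */
            int *spare;     /* scratch buffer reused by the next update */
    };

    /* Publish 'new_arr' (NULL when the last entry was removed). */
    static void publish(struct thresholds *t, int *new_arr)
    {
            t->spare = t->primary;

            /* If all events are unregistered, free the spare array. */
            if (!new_arr) {
                    free(t->spare);
                    t->spare = NULL;
            }

            t->primary = new_arr;   /* rcu_assign_pointer() in the kernel */
    }

    int main(void)
    {
            struct thresholds t = { .primary = malloc(4 * sizeof(int)) };

            publish(&t, NULL);      /* unregister the last event: no leak now */
            printf("primary=%p spare=%p\n", (void *)t.primary, (void *)t.spare);
            return 0;
    }
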
 	/*
 	 * ... part of thp split is not executed yet.
 	 */
 	if (pmd_trans_huge_lock(pmd, vma) == 1) {
-		if (!mc.precharge) {
+		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(&vma->vm_mm->page_table_lock);
 			return 0;
 		}
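
The old test here only bailed out when no precharge was left at all. With
pmd_trans_huge_lock() taken, though, the huge page is moved as a single
unit of HPAGE_PMD_NR base pages (512 with 4K pages and 2M THP on x86-64),
so a positive but insufficient precharge would pass the !mc.precharge check
and underflow the counter during the move. The corrected test requires a
full huge page's worth up front. A tiny sketch of that accounting, with
illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    #define HPAGE_PMD_NR 512        /* 2M huge page / 4K base page */

    /*
     * Consume one huge page's worth of precharge or refuse outright; a
     * bare '!precharge' test would accept precharge == 1 and underflow
     * on the subtraction below.
     */
    static bool consume_huge_precharge(unsigned long *precharge)
    {
            if (*precharge < HPAGE_PMD_NR)
                    return false;
            *precharge -= HPAGE_PMD_NR;
            return true;
    }

    int main(void)
    {
            unsigned long precharge = 1;

            printf("move allowed: %d\n", consume_huge_precharge(&precharge));
            return 0;
    }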