gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
-static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
-{
-	return rcu_dereference(mm->mem_cgroup);
-}
+#define vm_match_cgroup(mm, cgroup)	\
+	((cgroup) == rcu_dereference((mm)->mem_cgroup))
 
 extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
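An aside, not part of the patch: a minimal sketch of what the new check amounts to for a caller. The wrapper name mm_belongs_to_cgroup() is hypothetical; the two behaviours noted in its comment correspond to the macro above and the stub definition in the next hunk.

/* Hypothetical caller, for illustration only -- not part of the patch. */
static int mm_belongs_to_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	/*
	 * With the memory controller configured in, vm_match_cgroup()
	 * expands to ((mem) == rcu_dereference((mm)->mem_cgroup)),
	 * i.e. a pointer comparison against the mm's current cgroup.
	 * With it configured out, the stub in the next hunk always
	 * returns 1, so every mm is treated as a match.
	 */
	return mm && vm_match_cgroup(mm, mem);
}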
 	return 0;
 }
 
-static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
+static inline int vm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
-	return NULL;
+	return 1;
 }
 
 static inline int task_in_mem_cgroup(struct task_struct *task,
 	int ret;
 
 	task_lock(task);
-	ret = task->mm && mm_cgroup(task->mm) == mem;
+	ret = task->mm && vm_match_cgroup(task->mm, mem);
 	task_unlock(task);
 	return ret;
 }
 		 * counting on behalf of references from different
 		 * cgroups
 		 */
-		if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+		if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
 		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 		 * counting on behalf of references from different
 		 * cgroups
 		 */
-		if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+		if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
 			continue;
 		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
 				== (VM_LOCKED|VM_MAYSHARE)) {