1 #ifndef __LINUX_PAGE_CGROUP_H
2 #define __LINUX_PAGE_CGROUP_H
/*
 * NOTE(review): these are the enumerators of the page_cgroup flag enum
 * (bit indices into pc->flags).  The enclosing "enum { ... };" lines are
 * not visible in this chunk of the dump — only the members and their
 * grouping comments appear.  Bit ordering matters: per the comment on
 * PCG_LOCK, that lock protects pc->mem_cgroup and the bits listed after
 * it, while PCG_ACCT_LRU is explicitly serialized by lru_lock instead.
 */
5 /* flags for mem_cgroup */
6 PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
7 PCG_CACHE, /* charged as cache */
8 PCG_USED, /* this object is in use. */
9 PCG_MIGRATION, /* under page migration */
10 /* flags for mem_cgroup and file and I/O status */
11 PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
12 PCG_FILE_MAPPED, /* page is accounted as "mapped" */
13 /* No lock in page_cgroup */
14 PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
18 #ifndef __GENERATING_BOUNDS_H
19 #include <generated/bounds.h>
21 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
22 #include <linux/bit_spinlock.h>
/*
 * NOTE(review): fragment of the struct page_cgroup description and
 * definition.  The "struct page_cgroup {" opener, its closing "};" and
 * the flags word member are not visible in this chunk (the accessors
 * below reference pc->flags, so such a member must exist — confirm
 * against the full header).  Only the owning-cgroup back-pointer member
 * appears here.  pgdat_page_cgroup_init() is the per-node boot /
 * memory-hotplug initializer declaration.
 */
25 * Page Cgroup can be considered as an extended mem_map.
26 * A page_cgroup page is associated with every page descriptor. The
27 * page_cgroup helps us identify information about the cgroup
28 * All page cgroups are allocated at boot or memory hotplug event,
29 * then the page cgroup for pfn always exists.
33 struct mem_cgroup *mem_cgroup;
36 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
/*
 * NOTE(review): init entry points, selected by memory model.  With
 * SPARSEMEM the flatmem initializer is an empty inline stub and
 * page_cgroup_init() does the real work; without SPARSEMEM the roles
 * are swapped.  The "#else", "#endif" and the empty "{ }" stub bodies
 * of this conditional are missing from this chunk — code left untouched.
 */
38 #ifdef CONFIG_SPARSEMEM
39 static inline void __init page_cgroup_init_flatmem(void)
42 extern void __init page_cgroup_init(void);
44 void __init page_cgroup_init_flatmem(void);
45 static inline void __init page_cgroup_init(void)
/*
 * Mapping between a page descriptor and its page_cgroup, and the
 * reverse mapping from a page_cgroup back to its struct page.
 */
50 struct page_cgroup *lookup_page_cgroup(struct page *page);
51 struct page *lookup_cgroup_page(struct page_cgroup *pc);
/*
 * TESTPCGFLAG(uname, lname) emits
 *   static inline int PageCgroup<uname>(struct page_cgroup *pc)
 * returning the state of bit PCG_<lname> in pc->flags.
 */
53 #define TESTPCGFLAG(uname, lname) \
54 static inline int PageCgroup##uname(struct page_cgroup *pc) \
55 { return test_bit(PCG_##lname, &pc->flags); }
/*
 * SETPCGFLAG(uname, lname) emits
 *   static inline void SetPageCgroup<uname>(struct page_cgroup *pc)
 * setting bit PCG_<lname> in pc->flags via set_bit().
 */
57 #define SETPCGFLAG(uname, lname) \
58 static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
59 { set_bit(PCG_##lname, &pc->flags); }
/*
 * CLEARPCGFLAG(uname, lname) emits
 *   static inline void ClearPageCgroup<uname>(struct page_cgroup *pc)
 * clearing bit PCG_<lname> in pc->flags via clear_bit().
 */
61 #define CLEARPCGFLAG(uname, lname) \
62 static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
63 { clear_bit(PCG_##lname, &pc->flags); }
/*
 * TESTCLEARPCGFLAG(uname, lname) emits
 *   static inline int TestClearPageCgroup<uname>(struct page_cgroup *pc)
 * which clears bit PCG_<lname> and returns its previous state
 * (test_and_clear_bit).
 */
65 #define TESTCLEARPCGFLAG(uname, lname) \
66 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
67 { return test_and_clear_bit(PCG_##lname, &pc->flags); }
/*
 * Instantiate the accessors for each PCG_* bit:
 * PageCgroup{Cache,Used,AcctLRU,FileMapped,Migration} plus the matching
 * Set/Clear variants, and a TestClear variant for AcctLRU only.
 */
69 /* Cache flag is set only once (at allocation) */
70 TESTPCGFLAG(Cache, CACHE)
71 CLEARPCGFLAG(Cache, CACHE)
72 SETPCGFLAG(Cache, CACHE)
74 TESTPCGFLAG(Used, USED)
75 CLEARPCGFLAG(Used, USED)
76 SETPCGFLAG(Used, USED)
/* LRU accounting bit — serialized by lru_lock, not PCG_LOCK (see enum). */
78 SETPCGFLAG(AcctLRU, ACCT_LRU)
79 CLEARPCGFLAG(AcctLRU, ACCT_LRU)
80 TESTPCGFLAG(AcctLRU, ACCT_LRU)
81 TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)
84 SETPCGFLAG(FileMapped, FILE_MAPPED)
85 CLEARPCGFLAG(FileMapped, FILE_MAPPED)
86 TESTPCGFLAG(FileMapped, FILE_MAPPED)
88 SETPCGFLAG(Migration, MIGRATION)
89 CLEARPCGFLAG(Migration, MIGRATION)
90 TESTPCGFLAG(Migration, MIGRATION)
/*
 * NOTE(review): lock_page_cgroup()/unlock_page_cgroup() acquire and
 * release the per-page_cgroup bit spinlock (bit PCG_LOCK of pc->flags)
 * guarding pc->mem_cgroup and the USED/CACHE/MIGRATION bits.  The brace
 * lines and the '/*'..'*'/' delimiters of the embedded comment are
 * missing from this chunk of the dump, so the bodies appear as bare
 * statements below — code left byte-identical.
 */
92 static inline void lock_page_cgroup(struct page_cgroup *pc)
95 * Don't take this lock in IRQ context.
96 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
98 bit_spin_lock(PCG_LOCK, &pc->flags);
101 static inline void unlock_page_cgroup(struct page_cgroup *pc)
103 bit_spin_unlock(PCG_LOCK, &pc->flags);
/*
 * NOTE(review): move_lock_page_cgroup()/move_unlock_page_cgroup()
 * bracket stat updates against mem_cgroup move_account: IRQs are
 * disabled (state saved into the caller-provided *flags word) before
 * taking the PCG_MOVE_LOCK bit spinlock, and restored after releasing
 * it — per the embedded comment, flag updates can come from both normal
 * and IRQ context, so this avoids deadlock.  Brace lines and comment
 * delimiters are missing from this chunk; code left byte-identical.
 */
106 static inline void move_lock_page_cgroup(struct page_cgroup *pc,
107 unsigned long *flags)
110 * We know updates to pc->flags of page cache's stats are from both of
111 * usual context or IRQ context. Disable IRQ to avoid deadlock.
113 local_irq_save(*flags);
114 bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
117 static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
118 unsigned long *flags)
120 bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
121 local_irq_restore(*flags);
/*
 * NOTE(review): the array-ID field packed into the high bits of
 * pc->flags — layout "ARRAY-ID | FLAGS".  The width comes from
 * SECTIONS_SHIFT (SPARSEMEM) or NODES_SHIFT (flat/discontig); the
 * "#else" between the two width definitions and the "#endif" closing
 * the #error check are missing from this chunk, as are the delimiters
 * of the shift-count comment.  The #error guards against the ID field
 * colliding with the NR_PCG_FLAGS low bits (NR_PCG_FLAGS presumably
 * comes from the generated bounds.h included above — confirm).
 */
124 #ifdef CONFIG_SPARSEMEM
125 #define PCG_ARRAYID_WIDTH SECTIONS_SHIFT
127 #define PCG_ARRAYID_WIDTH NODES_SHIFT
130 #if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
131 #error Not enough space left in pc->flags to store page_cgroup array IDs
134 /* pc->flags: ARRAY-ID | FLAGS */
136 #define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1)
138 #define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH)
140 * Zero the shift count for non-existent fields, to prevent compiler
141 * warnings and ensure references are optimized away.
143 #define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
/*
 * NOTE(review): setter/getter for the array ID stored in the top
 * PCG_ARRAYID_WIDTH bits of pc->flags.  The setter clears the field
 * then ORs in the masked id; the getter shifts and masks it back out.
 * The setter's second parameter line (presumably "unsigned long id)")
 * and both functions' brace lines are missing from this chunk — code
 * left byte-identical.
 */
145 static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
148 pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
149 pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
152 static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
154 return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
/*
 * NOTE(review): !CONFIG_CGROUP_MEM_RES_CTLR branch — inline stubs so
 * callers compile when the memory controller is disabled.  The stub
 * bodies (empty "{ }" or "return NULL;"-style — confirm against the
 * full header) are missing from this chunk; only the signatures remain.
 */
157 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
160 static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
164 static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
169 static inline void page_cgroup_init(void)
173 static inline void __init page_cgroup_init_flatmem(void)
177 #endif /* CONFIG_CGROUP_MEM_RES_CTLR */
/*
 * Swap-cgroup bookkeeping: maps a swp_entry_t to a 16-bit cgroup id.
 * With CONFIG_CGROUP_MEM_RES_CTLR_SWAP the real implementations are
 * declared extern (cmpxchg/record return the previous id for the
 * entry; swapon/swapoff manage the per-swap-type map — presumably
 * allocating/freeing it, confirm in mm/page_cgroup.c).
 *
 * NOTE(review): the "#else" line and the inline stub bodies of the
 * disabled-config variants are missing from this chunk, so the static
 * inline signatures below appear without bodies — left byte-identical.
 */
179 #include <linux/swap.h>
181 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
182 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
183 unsigned short old, unsigned short new);
184 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
185 extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
186 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
187 extern void swap_cgroup_swapoff(int type);
191 unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
197 unsigned short lookup_swap_cgroup(swp_entry_t ent)
203 swap_cgroup_swapon(int type, unsigned long max_pages)
208 static inline void swap_cgroup_swapoff(int type)
215 #endif /* !__GENERATING_BOUNDS_H */
217 #endif /* __LINUX_PAGE_CGROUP_H */