/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release().
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&pgdat->lru_lock);
	/* the vmstat helpers below take an int, so catch truncation early */
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}
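/*
 * update_lru_size() additionally keeps the memcg's per-zone LRU size in sync
 * when CONFIG_MEMCG is enabled; callers that do their own size accounting
 * (e.g., the lru_gen helpers below) use __update_lru_size() directly.
 */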
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}
/**
 * page_off_lru - which LRU list was the page on? Clears its LRU flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}
/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
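/*
 * Multi-generational LRU (CONFIG_LRU_GEN)
 *
 * With lru_gen enabled, each lruvec keeps MAX_NR_GENS generations per type
 * (anon/file) and zone rather than the classic active/inactive split; a
 * page's generation is encoded in the LRU_GEN_MASK bits of page->flags.
 * The helpers below translate between sequence numbers, generations and the
 * classic LRU accounting.
 */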
#ifdef CONFIG_LRU_GEN

static inline bool lru_gen_enabled(void)
{
	/* the multi-gen LRU is unconditionally on when built in */
	return true;
}
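/*
 * true if the current task is faulting pages in; the flag is set and cleared
 * around the fault handler (lru_gen_enter_fault()/lru_gen_exit_fault() in
 * mainline) so that freshly faulted pages can be routed to the youngest
 * generation.
 */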
static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}
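/*
 * Generations are stored as a ring: the ever-increasing sequence number maps
 * onto one of MAX_NR_GENS slots.
 */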
static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}
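/*
 * The generation is stored in page->flags offset by one, so a zero field (a
 * page not on a multi-gen LRU list) decodes to -1.
 */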
static inline int page_lru_gen(struct page *page)
{
	unsigned long flags = READ_ONCE(page->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
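/*
 * The two youngest generations count as active, mirroring the classic active
 * list (see the comment on MIN_NR_GENS in mmzone.h).
 */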
static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
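/*
 * Keep the per-generation page counts and the classic node/zone LRU sizes in
 * sync when a page is added to (old_gen == -1) or removed from
 * (new_gen == -1) a generation. When a page moves between two live
 * generations, only the generation counters change here.
 */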
static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page,
				       int old_gen, int new_gen)
{
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	int delta = hpage_nr_pages(page);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}
}
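/*
 * Add @page to the generation matching its state and return true; return
 * false if @page is unevictable so the caller can fall back to the classic
 * LRU path. @reclaiming selects tail insertion for pages being rotated by
 * reclaim.
 */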
static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = page_lru_gen(page);
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_PAGE(gen != -1, page);

	if (PageUnevictable(page))
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (PageActive(page))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !PageSwapCache(page)) ||
		 (PageReclaim(page) &&
		  (PageDirty(page) || PageWriteback(page))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&page->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, page, -1, gen);
	/* for rotate_reclaimable_page() */
	if (reclaiming)
		list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
	else
		list_add(&page->lru, &lrugen->lists[gen][type][zone]);

	return true;
}
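/*
 * Remove @page from its generation and return true; return false if @page is
 * not on a multi-gen LRU list. Unless the page is being reclaimed, PG_active
 * is restored for pages leaving an active generation, e.g., for
 * migrate_page_states().
 */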
static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long flags;
	int gen = page_lru_gen(page);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
	VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);

	/* for migrate_page_states() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&page->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, page, gen, -1);
	list_del(&page->lru);

	return true;
}
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */
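/*
 * The wrappers below try the multi-gen LRU first and fall back to the
 * classic active/inactive lists when CONFIG_LRU_GEN is off, the page is
 * unevictable, or (for deletion) the page is not on a multi-gen list.
 *
 * A minimal usage sketch (assuming the caller holds the node's lru_lock and
 * the page is on an LRU list), modeled on the classic activation path in
 * mm/swap.c:
 *
 *	enum lru_list lru = page_lru_base_type(page);
 *
 *	del_page_from_lru_list(page, lruvec, lru);
 *	SetPageActive(page);
 *	add_page_to_lru_list(page, lruvec, lru + LRU_ACTIVE);
 */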
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, false))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, true))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_del_page(lruvec, page, false))
		return;

	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}

#endif