/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&pgdat->lru_lock);
	/* @nr_pages must fit in an int for the counter updates below */
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was page on? clearing its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
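
/*
 * A minimal sketch, not part of the kernel API: a hypothetical helper
 * showing how the index returned by page_lru() selects the per-lruvec
 * list, the same arithmetic add_page_to_lru_list() below relies on.
 */
static __always_inline struct list_head *page_lru_list(struct lruvec *lruvec,
							struct page *page)
{
	return &lruvec->lists[page_lru(page)];
}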

#ifdef CONFIG_LRU_GEN

static inline bool lru_gen_enabled(void)
{
	return true;
}

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}
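
/*
 * Worked example (assuming MAX_NR_GENS == 4, its upstream value): seqs
 * 4, 5, 6 and 7 map to gens 0, 1, 2 and 3; seqs grow monotonically while
 * gens wrap around the ring of per-generation lists.
 */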

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in page_lru_refs() */
	return order_base_2(refs + 1);
}
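
/*
 * Worked example: order_base_2(refs + 1) buckets refs logarithmically,
 * so refs 0 -> tier 0, refs 1 -> tier 1, refs 2-3 -> tier 2 and
 * refs 4-7 -> tier 3.
 */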

static inline int page_lru_refs(struct page *page)
{
	unsigned long flags = READ_ONCE(page->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}
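
/*
 * Worked example: a page whose LRU_REFS field holds 2 and which has
 * PG_workingset set has refs == 3 here, which lru_tier_from_refs() puts
 * in tier order_base_2(4) == 2.
 */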

static inline int page_lru_gen(struct page *page)
{
	unsigned long flags = READ_ONCE(page->flags);

	/* the gen field is stored off by one; -1 means not on a lru_gen list */
	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
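
/*
 * Worked example (assuming MAX_NR_GENS == 4): with max_seq == 7, gens 3
 * and 2 are considered active; the two older gens count toward the
 * classic inactive LRU sizes.
 */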

static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page,
				       int old_gen, int new_gen)
{
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	int delta = hpage_nr_pages(page);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
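
/*
 * For example, promoting a THP (512 base pages with 4KiB pages and 2MiB
 * THPs, an illustrative configuration) from a non-active to an active gen
 * moves its delta from NR_INACTIVE_* to NR_ACTIVE_*, keeping the classic
 * counters consistent with the per-gen nr_pages[] above.
 */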

static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = page_lru_gen(page);
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_PAGE(gen != -1, page);

	if (PageUnevictable(page))
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (PageActive(page))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !PageSwapCache(page)) ||
		 (PageReclaim(page) &&
		  (PageDirty(page) || PageWriteback(page))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&page->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, page, -1, gen);
	/* for rotate_reclaimable_page() */
	if (reclaiming)
		list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
	else
		list_add(&page->lru, &lrugen->lists[gen][type][zone]);

	return true;
}
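
/*
 * Illustrative walk-through (hypothetical values): with max_seq == 7 and
 * min_seq[type] == 4, a dirty page with PG_reclaim set lands at seq 5,
 * the second oldest generation, so reclaim revisits it only after the
 * oldest generation has been drained.
 */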

static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long flags;
	int gen = page_lru_gen(page);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
	VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);

	/* for migrate_page_states() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&page->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, page, gen, -1);
	list_del(&page->lru);

	return true;
}
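
/*
 * For example, a page in an active gen that is isolated for migration
 * (!reclaiming) leaves with PG_active set, so that after
 * migrate_page_states() copies the flag, lru_gen_add_page() re-inserts
 * the new page into the youngest generation via its PageActive() case.
 */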

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, false))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, true))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_del_page(lruvec, page, false))
		return;

	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}

#endif /* LINUX_MM_INLINE_H */