/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 if @page is a page cache page backed by a regular filesystem,
 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
 * Used by functions that manipulate the LRU lists, to sort a page
 * onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the page is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}
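
/*
 * Illustrative example (not part of the original header): anonymous and
 * shmem/tmpfs pages have PG_swapbacked set, so they sort onto the anon
 * LRU; ordinary page cache pages do not, so they sort onto the file LRU.
 * With hypothetical pages of each kind:
 *
 *	page_is_file_cache(ext4_page)  == 1	// regular page cache
 *	page_is_file_cache(anon_page)  == 0	// PG_swapbacked set
 *	page_is_file_cache(shmem_page) == 0	// PG_swapbacked set
 */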

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * page_lru_base_type - which LRU list type should a page be on?
 * @page: the page to test
 *
 * Used for LRU list index arithmetic.
 *
 * Returns the base LRU type - file or anon - @page should be on.
 */
static inline enum lru_list page_lru_base_type(struct page *page)
{
	if (page_is_file_cache(page))
		return LRU_INACTIVE_FILE;
	return LRU_INACTIVE_ANON;
}

/**
 * page_off_lru - which LRU list was page on? clearing its lru flags.
 * @page: the page to test
 *
 * Returns the LRU list a page was on, as an index into the array of LRU
 * lists; and clears its Unevictable or Active flags, ready for freeing.
 */
static __always_inline enum lru_list page_off_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page)) {
		__ClearPageUnevictable(page);
		lru = LRU_UNEVICTABLE;
	} else {
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			__ClearPageActive(page);
			lru += LRU_ACTIVE;
		}
	}
	return lru;
}

/**
 * page_lru - which LRU list should a page be on?
 * @page: the page to test
 *
 * Returns the LRU list a page should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list page_lru(struct page *page)
{
	enum lru_list lru;

	if (PageUnevictable(page))
		lru = LRU_UNEVICTABLE;
	else {
		lru = page_lru_base_type(page);
		if (PageActive(page))
			lru += LRU_ACTIVE;
	}
	return lru;
}
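
/*
 * Worked example (illustrative, based on the enum lru_list layout where
 * LRU_ACTIVE == 1 and LRU_FILE == 2): adding LRU_ACTIVE to a base type
 * indexes the corresponding active list:
 *
 *	LRU_INACTIVE_ANON (0) + LRU_ACTIVE -> LRU_ACTIVE_ANON (1)
 *	LRU_INACTIVE_FILE (2) + LRU_ACTIVE -> LRU_ACTIVE_FILE (3)
 *
 * so an active file page gets page_lru() == 3, while an unevictable page
 * short-circuits to LRU_UNEVICTABLE (4) regardless of type.
 */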

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}
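
/*
 * Worked example (illustrative): generations form a sliding window over
 * seq, so assuming MAX_NR_GENS == 4 the mapping wraps:
 *
 *	seq: 0 1 2 3 4 5 6 7
 *	gen: 0 1 2 3 0 1 2 3
 *
 * hence a gen number is only meaningful relative to min_seq/max_seq.
 */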

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in page_lru_refs() */
	return order_base_2(refs + 1);
}
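
/*
 * Worked example (illustrative): order_base_2(refs + 1) packs refs into
 * exponentially wider tiers:
 *
 *	refs: 0 1 2 3 4 5 6 7
 *	tier: 0 1 2 2 3 3 3 3
 *
 * i.e., tier N>0 holds pages with roughly 2^(N-1) to 2^N - 1 extra
 * accesses, so doubling the access count costs one more tier.
 */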

static inline int page_lru_refs(struct page *page)
{
	unsigned long flags = READ_ONCE(page->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}
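
/*
 * Worked example (illustrative): for a page whose LRU_REFS field encodes
 * 2 and whose PG_workingset is set, page_lru_refs() returns 2 + 1 == 3,
 * and lru_tier_from_refs(3) == order_base_2(4) == 2 places it in tier 2.
 */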

static inline int page_lru_gen(struct page *page)
{
	unsigned long flags = READ_ONCE(page->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
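
/*
 * Illustrative note: the gen field in page->flags stores gen + 1, so a
 * stored value of 0 means "not on a multi-gen LRU list" and decodes to
 * -1 here, while a stored value of 3 decodes to gen 2. lru_gen_add_page()
 * below applies the matching +1 when it encodes the field.
 */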

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}
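
/*
 * Worked example (illustrative, assuming MAX_NR_GENS == 4): with
 * max_seq == 5, the two youngest generations, lru_gen_from_seq(5) == 1
 * and lru_gen_from_seq(4) == 0, count as active; gens 2 and 3 map to the
 * older sequence numbers and count as inactive.
 */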

static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page,
				       int old_gen, int new_gen)
{
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	int delta = hpage_nr_pages(page);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
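
/*
 * Illustrative summary (not upstream text): with old_gen == -1 and an
 * active new_gen, the addition branch above resolves to
 *
 *	__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
 *
 * so adding a page to a young generation is accounted as growth of the
 * conventional active LRU, keeping the classic counters meaningful while
 * MGLRU tracks generations separately in lrugen->nr_pages[].
 */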

static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = page_lru_gen(page);
	int type = page_is_file_cache(page);
	int zone = page_zonenum(page);
	struct lru_gen_struct *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_PAGE(gen != -1, page);

	if (PageUnevictable(page) || !lrugen->enabled)
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (PageActive(page))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !PageSwapCache(page)) ||
		 (PageReclaim(page) &&
		  (PageDirty(page) || PageWriteback(page))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&page->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, page, -1, gen);
	/* for rotate_reclaimable_page() */
	if (reclaiming)
		list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
	else
		list_add(&page->lru, &lrugen->lists[gen][type][zone]);

	return true;
}
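
/*
 * Illustrative example of case 2 above: a dirty file page tagged
 * PG_reclaim and under writeback is added at min_seq[type] + 1, so the
 * eviction path does not rescan it immediately, yet it stays near the
 * oldest end and becomes eligible once writeback completes.
 */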

static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	unsigned long flags;
	int gen = page_lru_gen(page);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
	VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);

	/* for migrate_page_states() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&page->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, page, gen, -1);
	list_del(&page->lru);

	return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, false))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_add_page(lruvec, page, true))
		return;

	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	if (lru_gen_del_page(lruvec, page, false))
		return;

	list_del(&page->lru);
	update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
}
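
/*
 * Usage sketch (illustrative only, assuming the pre-folio per-node
 * lru_lock and mem_cgroup_page_lruvec() signature this header's page
 * APIs imply): a typical caller isolates a page and later puts it back,
 * holding the node's lru_lock across each step:
 *
 *	spin_lock_irq(&pgdat->lru_lock);
 *	lruvec = mem_cgroup_page_lruvec(page, pgdat);
 *	del_page_from_lru_list(page, lruvec, page_lru(page));
 *	spin_unlock_irq(&pgdat->lru_lock);
 *	...
 *	spin_lock_irq(&pgdat->lru_lock);
 *	add_page_to_lru_list(page, lruvec, page_lru(page));
 *	spin_unlock_irq(&pgdat->lru_lock);
 *
 * When CONFIG_LRU_GEN is enabled, both helpers divert to the lru_gen_*
 * variants above and the lru argument is effectively ignored.
 */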