// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory, there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *   a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *      NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * If we have swap, we should also consider NR_inactive_anon and
 * NR_active_anon; for page cache and anonymous pages respectively:
 *
 *      NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file
 *                                  + NR_inactive_anon + NR_active_anon
 *
 *      NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon
 *                                  + NR_inactive_file + NR_active_file
 *
 * Which can be further simplified to:
 *
 *      (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon
 *
 *      (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
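 *
 * A worked example (hypothetical numbers, for illustration only):
 * with NR_inactive = 100 and NR_active = 200 slots, a page refaulting
 * with (R - E) = 150 has a minimum access distance of 100 + 150 = 250,
 * which is <= 300 = NR_inactive + NR_active, so it could have stayed
 * resident had the whole cache been available to it - it is a
 * candidate for optimistic activation. With (R - E) = 400, the
 * distance exceeds the cache size and the refault is not actionable.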
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) pages in the userspace workingset, the refaulting page
 * is activated optimistically in the hope that (R - E) pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current workingset.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
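
/*
 * A shadow entry packs, from least to most significant bits, the
 * workingset flag, the node id, the memcg id and the eviction
 * counter snapshot (or, with CONFIG_LRU_GEN, the generation token),
 * tagged as an xarray value. unpack_shadow() reverses this layout.
 */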
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry;
	*workingsetp = workingset;
}

#ifdef CONFIG_LRU_GEN

static void *lru_gen_eviction(struct folio *folio)
{
	int hist;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);
	int refs = folio_lru_refs(folio);
	int tier = lru_tier_from_refs(refs);
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = folio_pgdat(folio);

	BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;
	min_seq = READ_ONCE(lrugen->min_seq[type]);
	token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);

	hist = lru_hist_from_seq(min_seq);
	atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);

	return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
}
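
/*
 * On refault, recover the token from the shadow entry; if it still
 * matches a live generation of the lruvec, credit the refault to the
 * matching tier and decide whether the folio counts as workingset.
 */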
static void lru_gen_refault(struct folio *folio, void *shadow)
{
	int hist, tier, refs;
	int memcg_id;
	bool workingset;
	unsigned long token;
	unsigned long min_seq;
	struct lruvec *lruvec;
	struct lru_gen_folio *lrugen;
	struct mem_cgroup *memcg;
	struct pglist_data *pgdat;
	int type = folio_is_file_lru(folio);
	int delta = folio_nr_pages(folio);

	unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset);

	if (pgdat != folio_pgdat(folio))
		return;

	rcu_read_lock();

	memcg = folio_memcg_rcu(folio);
	if (memcg_id != mem_cgroup_id(memcg))
		goto unlock;

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	lrugen = &lruvec->lrugen;

	min_seq = READ_ONCE(lrugen->min_seq[type]);
	if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)))
		goto unlock;

	hist = lru_hist_from_seq(min_seq);
	/* see the comment in folio_lru_refs() */
	refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
	tier = lru_tier_from_refs(refs);

	atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

	/*
	 * Count the following two cases as stalls:
	 * 1. For pages accessed through page tables, hotter pages pushed out
	 *    hot pages which refaulted immediately.
	 * 2. For pages accessed multiple times through file descriptors,
	 *    the number of accesses might have been out of the range.
	 */
	if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
		folio_set_workingset(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
	}
unlock:
	rcu_read_unlock();
}

#else /* !CONFIG_LRU_GEN */

static void *lru_gen_eviction(struct folio *folio)
{
	return NULL;
}

static void lru_gen_refault(struct folio *folio, void *shadow)
{
}

#endif /* CONFIG_LRU_GEN */

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (lru_gen_enabled())
		return lru_gen_eviction(folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	eviction >>= bucket_order;
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;
	long nr;

	if (lru_gen_enabled()) {
		lru_gen_refault(folio, shadow);
		return;
	}

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
	eviction <<= bucket_order;

	/* Flush stats (and potentially sleep) before holding RCU read lock */
	mem_cgroup_flush_stats_ratelimited();

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	pgdat = folio_pgdat(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having free swap space.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}
	if (refault_distance > workingset_size)
		goto out;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/*
		 * XXX: Move to folio_add_lru() when it supports new vs
		 * putback
		 */
		lru_note_cost_refault(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = folio_memcg_rcu(folio);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);
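
	/*
	 * Illustrative arithmetic (assuming XA_CHUNK_SHIFT == 6, i.e.
	 * 64 slots per node): the limit below allows one xa_node per
	 * 8 eligible shadow entries, i.e. pages / 8 nodes. At roughly
	 * 7 nodes per page, that caps shadow nodes at about pages / 56
	 * pages worth of memory - the ~1.8% mentioned above.
	 */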
	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	/* For page cache we need to hold i_lock */
	if (mapping->host != NULL) {
		if (!spin_trylock(&mapping->host->i_lock)) {
			xa_unlock(&mapping->i_pages);
			spin_unlock_irq(lru_lock);
			ret = LRU_RETRY;
			goto out;
		}
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping->host != NULL) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
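	/*
	 * Worked example for the calculation above, under a
	 * hypothetical 64-bit configuration: with one xarray value
	 * tag bit, WORKINGSET_SHIFT = 1, NODES_SHIFT = 10 and
	 * MEM_CGROUP_ID_SHIFT = 16, EVICTION_SHIFT is 28 and
	 * timestamp_bits is 36. Any machine with fewer than 2^36
	 * pages of RAM then keeps bucket_order at 0, i.e. eviction
	 * timestamps retain full granularity.
	 */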
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow");
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}

module_init(workingset_init);