1 // SPDX-License-Identifier: GPL-2.0-only
* This code provides a lightweight version of KSM.
7 * Copyright (C) 2020 Samsung Electronics Co., Ltd.
8 * Author: Sung-hun Kim (sfoon.kim@samsung.com)
12 * Memory merging support.
14 * This code enables dynamic sharing of identical pages found in different
15 * memory areas, even if they are not shared by fork()
17 * Copyright (C) 2008-2009 Red Hat, Inc.
25 #include <linux/errno.h>
28 #include <linux/mman.h>
29 #include <linux/sched.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/coredump.h>
32 #include <linux/rwsem.h>
33 #include <linux/pagemap.h>
34 #include <linux/rmap.h>
35 #include <linux/spinlock.h>
36 #include <linux/xxhash.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/wait.h>
40 #include <linux/slab.h>
41 #include <linux/rbtree.h>
42 #include <linux/memory.h>
43 #include <linux/mmu_notifier.h>
44 #include <linux/swap.h>
45 #include <linux/ksm.h>
46 #include <linux/hashtable.h>
47 #include <linux/freezer.h>
48 #include <linux/oom.h>
49 #include <linux/numa.h>
51 #include <asm/tlbflush.h>
56 #define DO_NUMA(x) do { (x); } while (0)
59 #define DO_NUMA(x) do { } while (0)
62 #define ksm_debug(fmt, ...) \
63 printk(KERN_DEBUG "[ksm:%s:%d] " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
64 #define ksm_err(fmt, ...) \
65 printk(KERN_ERR "[ksm:%s:%d] " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
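/*
 * Example (illustrative): ksm_debug("slot %p state %x", slot, slot->state)
 * expands to a printk(KERN_DEBUG ...) line tagged with the enclosing
 * function name and line number.
 */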
70 * A few notes about the KSM scanning process,
71 * to make it easier to understand the data structures below:
73 * In order to reduce excessive scanning, KSM sorts the memory pages by their
74 * contents into a data structure that holds pointers to the pages' locations.
76 * Since the contents of the pages may change at any moment, KSM cannot just
77 * insert the pages into a normal sorted tree and expect it to find anything.
78 * Therefore KSM uses two data structures - the stable and the unstable tree.
80 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
81 * by their contents. Because each such page is write-protected, searching on
82 * this tree is fully assured to be working (except when pages are unmapped),
83 * and therefore this tree is called the stable tree.
85 * The stable tree node includes information required for reverse
86 * mapping from a KSM page to virtual addresses that map this page.
88 * In order to avoid large latencies of the rmap walks on KSM pages,
89 * KSM maintains two types of nodes in the stable tree:
91 * * the regular nodes that keep the reverse mapping structures in a
93 * * the "chains" that link nodes ("dups") that represent the same
94 * write protected memory content, but each "dup" corresponds to a
95 * different KSM page copy of that content
97 * Internally, the regular nodes, "dups" and "chains" are represented
98 * using the same :c:type:`struct stable_node` structure.
100 * In addition to the stable tree, KSM uses a second data structure called the
101 * unstable tree: this tree holds pointers to pages which have been found to
102 * be "unchanged for a period of time". The unstable tree sorts these pages
103 * by their contents, but since they are not write-protected, KSM cannot rely
104 * upon the unstable tree to work correctly - the unstable tree is liable to
105 * be corrupted as its contents are modified, and so it is called unstable.
107 * KSM solves this problem by several techniques:
109 * 1) The unstable tree is flushed every time KSM completes scanning all
110 * memory areas, and then the tree is rebuilt again from the beginning.
111 * 2) KSM will only insert into the unstable tree, pages whose hash value
112 * has not changed since the previous scan of all memory areas.
* 3) The unstable tree is a red-black tree - so its balancing is based on the
114 * colors of the nodes and not on their contents, assuring that even when
115 * the tree gets "corrupted" it won't get out of balance, so scanning time
116 * remains the same (also, searching and inserting nodes in an rbtree uses
117 * the same algorithm, so we have no overhead when we flush and rebuild).
118 * 4) KSM never flushes the stable tree, which means that even if it were to
119 * take 10 attempts to find a page in the unstable tree, once it is found,
120 * it is secured in the stable tree. (When we scan a new page, we first
121 * compare it against the stable tree, and then against the unstable tree.)
123 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
124 * stable trees and multiple unstable trees: one of each for each NUMA node.
* A few notes about lightweight KSM.
*
* A smart crawler leverages the task semantics of Tizen.
* When an application goes to the background, it is attached to the
* freezer task group. The LKSM crawler hooks this event and adds the
* "frozen task" to the candidate list for scanning.
137 /* merge window size */
141 * struct mm_slot - ksm information per mm that is being scanned
142 * @link: link to the mm_slots hash list
143 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
144 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
145 * @mm: the mm that this information is valid for
147 * extension - added for LKSM
148 * @state: state of mm_slot (frozen, listed, scanned, newcomer)
* @merge_idx: current index into the merge window
* @nr_merged_win: merge window keeping the three most recent merge counts
* @nr_merged: sum of nr_merged_win, used to maintain vips_list (ordered list)
152 * @ordered_list: list ordered by nr_merged
153 * @scanning_size: number of anonymous pages in mm_struct
154 * @fault_cnt: last read count of page fault (minor + major)
155 * @elapsed: elapsed scanning time
* @nr_scans: number of scanned pages (can differ from scanning_size)
159 struct hlist_node link;
160 struct list_head mm_list;
161 struct list_head scan_list;
162 struct rmap_item *rmap_list;
163 struct mm_struct *mm;
168 int nr_merged_win[MERGE_WIN];
170 struct rb_node ordered_list;
172 unsigned long scanning_size; /* in number of pages */
173 unsigned long fault_cnt;
174 unsigned long elapsed;
177 #ifdef CONFIG_LKSM_FILTER
178 /* used for releasing lksm_region */
179 struct list_head ref_list;
* scanning mode of LKSM:
* LKSM_SCAN_PARTIAL: perform deduplication on a subset of processes
* LKSM_SCAN_FULL: perform deduplication on the full set of processes
190 enum lksm_scan_mode {
197 * struct ksm_scan - cursor for scanning
198 * @address: the next address inside that to be scanned
199 * @rmap_list: link to the next rmap to be scanned in the rmap_list
200 * @mm_slot: the current mm_slot we are scanning
201 * @remove_mm_list: temporary list for batching flush of removed slots
202 * @nr_scannable: the number of remaining unscanned scannable slots
203 * @nr_frozen: the number of remaining unscanned frozen slots
204 * @scan_round: scanning round (partial + full)
* @nr_full_scan: the number of full scans performed
206 * @scan_mode: coverage of current scanning
208 * There is only the one ksm_scan instance of this cursor structure.
211 unsigned long address;
212 struct rmap_item **rmap_list;
214 struct mm_slot *mm_slot;
215 struct list_head remove_mm_list;
217 /* statistics of scanning targets */
218 atomic_t nr_scannable;
221 unsigned long scan_round;
222 unsigned long nr_full_scan;
224 enum lksm_scan_mode scan_mode;
226 #ifdef CONFIG_LKSM_FILTER
227 struct lksm_region *region;
228 unsigned long vma_base_addr;
229 struct vm_area_struct *cached_vma;
230 #endif /* CONFIG_LKSM_FILTER */
234 * struct stable_node - node of the stable rbtree
235 * @node: rb node of this ksm page in the stable tree
236 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
237 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
238 * @list: linked into migrate_nodes, pending placement in the proper node tree
239 * @hlist: hlist head of rmap_items using this ksm page
240 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
241 * @chain_prune_time: time of the last full garbage collection
242 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
243 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
247 struct rb_node node; /* when node of stable tree */
248 struct { /* when listed for migration */
249 struct list_head *head;
251 struct hlist_node hlist_dup;
252 struct list_head list;
256 struct hlist_head hlist;
259 unsigned long chain_prune_time;
262 * STABLE_NODE_CHAIN can be any negative number in
263 * rmap_hlist_len negative range, but better not -1 to be able
264 * to reliably detect underflows.
266 #define STABLE_NODE_CHAIN -1024
274 * struct rmap_item - reverse mapping item for virtual addresses
275 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
276 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
277 * @nid: NUMA node id of unstable tree in which linked (may not match page)
278 * @region: pointer to the mapped region (LKSM feature)
279 * @mm: the memory structure this rmap_item is pointing into
280 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
281 * @oldchecksum: previous checksum of the page at that virtual address
282 * @node: rb node of this rmap_item in the unstable tree
283 * @head: pointer to stable_node heading this list in the stable tree
284 * @base_addr: used for calculating offset of the address (LKSM feature)
285 * @hlist: link into hlist of rmap_items hanging off that stable_node
288 struct rmap_item *rmap_list;
290 struct anon_vma *anon_vma; /* when stable */
292 int nid; /* when node of unstable tree */
294 #ifdef CONFIG_LKSM_FILTER
295 struct lksm_region *region; /* when unstable */
298 struct mm_struct *mm;
299 unsigned long address; /* + low bits used for flags below */
300 unsigned int oldchecksum; /* when unstable (LSB is a frozen bit) */
302 struct rb_node node; /* when node of unstable tree */
303 struct { /* when listed from stable tree */
304 #ifdef CONFIG_LKSM_FILTER
306 struct stable_node *head;
unsigned long base_addr; /* temporary storage for merging */
310 struct stable_node *head;
311 #endif /* CONFIG_LKSM_FILTER */
312 struct hlist_node hlist;
317 #define SEQNR_MASK 0x0ff /* low bits of unstable tree scan_round */
318 #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
319 #define STABLE_FLAG 0x200 /* is listed from the stable tree */
320 #define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
321 /* to mask all the flags */
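/*
 * Illustrative helpers (a sketch, not used elsewhere in this file):
 * rmap_item->address packs the page-aligned virtual address together
 * with the flag and seqnr bits above, so the two halves can be
 * recovered as follows.
 */
static inline unsigned long rmap_item_addr(const struct rmap_item *item)
{
	return item->address & PAGE_MASK;	/* drop the low flag bits */
}

static inline unsigned char rmap_item_seqnr(const struct rmap_item *item)
{
	return item->address & SEQNR_MASK;	/* scan round of insertion */
}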
323 /* The stable and unstable tree heads */
324 static struct rb_root one_stable_tree[1] = { RB_ROOT };
325 static struct rb_root one_unstable_tree[1] = { RB_ROOT };
326 static struct rb_root *root_stable_tree = one_stable_tree;
327 static struct rb_root *root_unstable_tree = one_unstable_tree;
329 #define LKSM_NODE_ID 0
331 /* Recently migrated nodes of stable tree, pending proper placement */
332 static LIST_HEAD(migrate_nodes);
333 #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
/* an ordered tree (rb tree) of VIP processes */
336 static struct rb_root vips_list = RB_ROOT;
337 static int lksm_max_vips = 20;
339 #define MM_SLOTS_HASH_BITS 10
340 static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
341 static DEFINE_HASHTABLE(task_slots_hash, MM_SLOTS_HASH_BITS);
* two list heads in LKSM:
* - ksm_mm_head: a head for traversing the whole list of processes,
*   not used for scanning itself
* - ksm_scan_head: a head for the list of currently scanning processes
349 static struct mm_slot ksm_mm_head = {
350 .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
353 static struct mm_slot ksm_scan_head = {
354 .scan_list = LIST_HEAD_INIT(ksm_scan_head.scan_list),
357 static struct ksm_scan ksm_scan = {
358 .mm_slot = &ksm_scan_head,
361 static struct kmem_cache *rmap_item_cache;
362 static struct kmem_cache *stable_node_cache;
363 static struct kmem_cache *mm_slot_cache;
364 static struct kmem_cache *task_slot_cache;
366 /* The number of nodes in the stable tree */
367 static unsigned long ksm_pages_shared;
369 /* The number of page slots additionally sharing those nodes */
370 static unsigned long ksm_pages_sharing;
372 /* The number of nodes in the unstable tree */
373 static unsigned long ksm_pages_unshared;
375 /* The number of rmap_items in use: to calculate pages_volatile */
376 static unsigned long ksm_rmap_items;
378 /* The number of stable_node chains */
379 static unsigned long ksm_stable_node_chains;
381 /* The number of stable_node dups linked to the stable_node chains */
382 static unsigned long ksm_stable_node_dups;
384 /* Delay in pruning stale stable_node_dups in the stable_node_chains */
385 static int ksm_stable_node_chains_prune_millisecs = 2000;
387 /* Maximum number of page slots sharing a stable node */
388 static int ksm_max_page_sharing = 256;
390 /* Number of pages ksmd should scan in one batch */
391 static unsigned int ksm_thread_pages_to_scan = 100;
393 /* Milliseconds ksmd should sleep between batches */
394 static unsigned int ksm_thread_sleep_millisecs = 20;
396 /* Checksum of an empty (zeroed) page */
397 static unsigned int zero_checksum __read_mostly;
399 /* Processes tracked by KSM thread */
400 static unsigned int ksm_nr_added_process;
402 /* Whether to merge empty (zeroed) pages with actual zero pages */
403 static bool ksm_use_zero_pages __read_mostly;
405 /* An indicator for KSM scanning */
406 static atomic_t ksm_one_shot_scanning;
/* Boost pages_to_scan when the scanner performs a partial scan */
409 static unsigned int lksm_boosted_pages_to_scan = 100;
410 static unsigned int lksm_default_pages_to_scan = 100;
413 /* Zeroed when merging across nodes is not allowed */
414 static unsigned int ksm_merge_across_nodes = 1;
415 static int ksm_nr_node_ids = 1;
417 #define ksm_merge_across_nodes 1U
418 #define ksm_nr_node_ids 1
* Default policy for KSM_RUN_ONESHOT:
* KSM scans only when the user requests it.
* When scanning ends, both the crawler and scanner threads are blocked
* until the next request arrives.
427 #define KSM_RUN_STOP 0
428 #define KSM_RUN_MERGE 1
429 #define KSM_RUN_UNMERGE 2
430 #define KSM_RUN_OFFLINE 4
431 #define KSM_RUN_ONESHOT 8
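/*
 * Example (a sketch of the intended semantics): ksm_run is a bitmask,
 * so a one-shot merge request can be expressed as
 * (KSM_RUN_ONESHOT | KSM_RUN_MERGE) and tested with
 * (ksm_run & KSM_RUN_ONESHOT) without disturbing the other bits.
 */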
433 static unsigned long ksm_run = KSM_RUN_STOP;
static atomic_t ksm_state; /* 0: in crawling, 1: in scanning */
436 #define lksm_check_scan_state(ksm_state) (atomic_read(&ksm_state) == 1)
437 #define lksm_set_scan_state(ksm_state) (atomic_set(&ksm_state, 1))
438 #define lksm_clear_scan_state(ksm_state) (atomic_set(&ksm_state, 0))
441 struct task_struct *task;
443 unsigned long inserted;
444 struct list_head list;
445 struct hlist_node hlist;
* When a process stops running in the foreground (e.g., goes to the
* background), the system daemon (e.g., resourced) puts it into the
* freezer cgroup.
* Once a process joins the freezer cgroup, the kernel no longer counts
* it as a runnable process, and thus it cannot be scheduled on a CPU.
* We therefore regard processes in the freezer cgroup as frozen, and
* they can be good candidates for memory deduplication.
*
* LKSM provides a hook to catch the moment a process is being frozen.
* With the hook, the ksm crawler can build the candidate list for
* memory deduplication. (see kernel/cgroup_freezer.c)
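/*
 * Sketch of the hook site (hypothetical call shape; the real hook lives
 * in kernel/cgroup_freezer.c as noted above): when a task enters the
 * frozen state, the crawler would be notified roughly as
 *
 *	lksm_hint(task);	/* hypothetical name: queue as a candidate */
 *
 * after which the task is wrapped in a task_slot and put on the
 * candidate list.
 */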
461 #define FROZEN_BIT 0x01
462 #define LISTED_BIT 0x02
#define lksm_test_rmap_frozen(rmap_item) ((rmap_item)->oldchecksum & FROZEN_BIT)
#define lksm_set_rmap_frozen(rmap_item) ((rmap_item)->oldchecksum |= FROZEN_BIT)
#define lksm_clear_rmap_frozen(rmap_item) ((rmap_item)->oldchecksum &= ~FROZEN_BIT)
#define lksm_clear_checksum_frozen(checksum) ((checksum) &= ~FROZEN_BIT)
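/*
 * A minimal sketch (not used elsewhere in this file): because the LSB
 * of oldchecksum is borrowed as the frozen marker, two checksums must
 * be compared with that bit masked out on both sides.
 */
static inline bool lksm_checksum_equal(unsigned int old, unsigned int cur)
{
	return (old & ~FROZEN_BIT) == (cur & ~FROZEN_BIT);
}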
469 #define KSM_MM_FROZEN 0x01
470 #define KSM_MM_LISTED 0x02
471 #define KSM_MM_NEWCOMER 0x04
472 #define KSM_MM_SCANNED 0x08
473 #ifdef CONFIG_LKSM_FILTER
474 #define KSM_MM_PREPARED 0x10
#define lksm_test_mm_state(mm_slot, bit) ((mm_slot)->state & (bit))
#define lksm_set_mm_state(mm_slot, bit) ((mm_slot)->state |= (bit))
#define lksm_clear_mm_state(mm_slot, bit) ((mm_slot)->state &= ~(bit))
481 #ifdef CONFIG_LKSM_FILTER
482 #define LKSM_REGION_HASH_BITS 10
483 static DEFINE_HASHTABLE(lksm_region_hash, LKSM_REGION_HASH_BITS);
484 spinlock_t lksm_region_lock;
* LKSM uses the filter once the region has been scanned for more than
* LKSM_REGION_MATURE rounds
490 #define LKSM_REGION_MATURE 5
491 #define lksm_region_mature(round, region) \
492 ((round - region->scan_round) > LKSM_REGION_MATURE)
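/*
 * Worked example: with LKSM_REGION_MATURE == 5, a region whose
 * scan_round is 10 starts being used as a filter from round 16 onwards,
 * since (16 - 10) > 5.
 */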
494 enum lksm_region_type {
497 LKSM_REGION_FILE1, /* file mapped region: data section */
498 LKSM_REGION_FILE2, /* file mapped region: bss section */
LKSM_REGION_CONFLICT, /* conflicted regions: no filtering applied */
503 static const char * const region_type_str[] = {
512 /* sharing statistics for each region type */
513 static int region_share[LKSM_REGION_UNKNOWN + 1];
* lksm_region: A region represents a physically mapped area.
* Each process can have its own instance of a region, namely a vma.
* Regions for areas that are not file-mapped, such as the heap and the
* stack, just have abstract representations as symbols.
*
* LKSM leverages the region for offset-based filtering.
* Each region has a filter which records the offsets of the addresses
* of shared pages in the region.
* Once a region has matured, LKSM uses the filter to skip scanning of
527 * @type: type of region, refer above enumeration
528 * @len: length of filter (in the number of 64-bit variables)
529 * @ino: inode number if the region is mapped to file
530 * @merge_cnt: the number of merged pages in the region
531 * @filter_cnt: the number of set bits in filter
532 * @scan_round: the birth scan round of this region
* @conflict: the number of size changes, a clue for conflicts
534 * @refcount: if it reaches zero, the region will be freed
535 * @hnode: hash node for finding region by ino
536 * @next: data region can have a next (bss) region
537 * @prev: reverse pointer to data region
* A few notes about the bitmap filter variables:
* LKSM uses a bitmap filter to skip scanning of unsharable pages.
* If a region is no larger than 256KB (<= 64 pages),
* it can be covered by a bitmap stored in a single 64-bit variable.
* LKSM only allocates a bitmap array as a filter when the region is
* larger than 256KB; otherwise it uses a 64-bit variable as the filter.
*
* @filter: used when the region is bigger than 64 pages
* @single_filter: used when the region is at most 64 pages
#define SINGLE_FILTER_LEN 1 /* a region can be covered by a single variable */
552 enum lksm_region_type type;
560 struct hlist_node hnode;
561 struct lksm_region *next;
562 struct lksm_region *prev;
564 unsigned long *filter;
565 unsigned long single_filter;
571 * Contains references from processes to regions
574 struct lksm_region_ref {
575 struct list_head list; /* listed by mm_slot */
576 struct lksm_region *region;
579 /* the number of registered lksm_regions */
580 static unsigned int lksm_nr_regions;
582 /* the upper limit for region lookup */
583 #define LKSM_REGION_ITER_MAX 8
#define lksm_region_size(start, end) (((end) - (start)) >> PAGE_SHIFT)
#define lksm_bitmap_size(size) (((size) >> 6) + (((size) % BITS_PER_LONG) ? 1 : 0))
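/*
 * Worked example (assuming 64-bit longs, as the ">> 6" above does):
 * a 1MB region spans 256 pages, so lksm_bitmap_size(256) == 4 longs of
 * filter; a 128KB region spans 32 pages, needs only one long, and is
 * served by the inline single_filter word (SINGLE_FILTER_LEN == 1).
 */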
588 /* all processes share one lksm_region for their heaps */
589 static struct lksm_region heap_region, unknown_region;
591 static void lksm_register_file_anon_region(struct mm_slot *slot,
592 struct vm_area_struct *vma);
593 static struct lksm_region *lksm_find_region(struct vm_area_struct *vma);
594 #endif /* CONFIG_LKSM_FILTER */
596 static int initial_round = 3;
597 static unsigned long ksm_crawl_round;
598 static unsigned long crawler_sleep;
600 /* statistical information */
601 static int lksm_nr_merged; /* global merge count */
602 static int lksm_nr_broken; /* global broken count */
603 static int lksm_nr_scanned_slot; /* global scanned slot count */
604 static int lksm_slot_nr_merged; /* per-slot merge count */
605 static int lksm_slot_nr_broken; /* per-slot broken count */
/* initially, KSM uses a short full scan interval */
608 #define DEFAULT_FULL_SCAN_INTERVAL 60000 /* 60 seconds */
609 static unsigned long full_scan_interval = 100;
611 /* statistical information about scanning time */
612 static unsigned long lksm_last_scan_time;
613 static unsigned long lksm_proc_scan_time;
/* helpers for pruning short-lived tasks */
616 #define KSM_SHORT_TASK_TIME 100
617 static unsigned long short_lived_thresh = KSM_SHORT_TASK_TIME;
#define get_task_runtime(task) ((task)->se.sum_exec_runtime)
#define ms_to_ns(ms) ((ms) * 1000UL * 1000)
#define check_short_task(task) \
	(get_task_runtime(task) < ms_to_ns(short_lived_thresh))
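/*
 * A minimal sketch (hypothetical helper, not used elsewhere in this
 * file) of how the threshold is meant to be applied when the crawler
 * considers a candidate task.
 */
static inline bool lksm_task_worth_scanning(struct task_struct *task)
{
	/* tasks with < short_lived_thresh ms of CPU time may exit soon */
	return !check_short_task(task);
}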
624 static void wait_while_offlining(void);
625 static struct mm_slot *__ksm_enter_alloc_slot(struct mm_struct *mm, int frozen);
627 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
628 static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
629 static DEFINE_MUTEX(ksm_thread_mutex);
630 static DEFINE_SPINLOCK(ksm_mmlist_lock);
631 static DECLARE_WAIT_QUEUE_HEAD(ksm_crawl_wait);
633 #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
634 sizeof(struct __struct), __alignof__(struct __struct),\
637 static int __init ksm_slab_init(void)
639 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
640 if (!rmap_item_cache)
643 stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
644 if (!stable_node_cache)
647 mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
650 task_slot_cache = KSM_KMEM_CACHE(task_slot, 0);
651 if (!task_slot_cache)
657 kmem_cache_destroy(mm_slot_cache);
659 kmem_cache_destroy(stable_node_cache);
661 kmem_cache_destroy(rmap_item_cache);
666 static void __init ksm_slab_free(void)
668 kmem_cache_destroy(mm_slot_cache);
669 kmem_cache_destroy(stable_node_cache);
670 kmem_cache_destroy(rmap_item_cache);
671 mm_slot_cache = NULL;
674 static __always_inline bool is_stable_node_chain(struct stable_node *chain)
676 return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
679 static __always_inline bool is_stable_node_dup(struct stable_node *dup)
681 return dup->head == STABLE_NODE_DUP_HEAD;
684 static inline void stable_node_chain_add_dup(struct stable_node *dup,
685 struct stable_node *chain)
687 VM_BUG_ON(is_stable_node_dup(dup));
688 dup->head = STABLE_NODE_DUP_HEAD;
689 VM_BUG_ON(!is_stable_node_chain(chain));
690 hlist_add_head(&dup->hlist_dup, &chain->hlist);
691 ksm_stable_node_dups++;
694 static inline void __stable_node_dup_del(struct stable_node *dup)
696 VM_BUG_ON(!is_stable_node_dup(dup));
697 hlist_del(&dup->hlist_dup);
698 ksm_stable_node_dups--;
701 static inline void stable_node_dup_del(struct stable_node *dup)
703 VM_BUG_ON(is_stable_node_chain(dup));
704 if (is_stable_node_dup(dup))
705 __stable_node_dup_del(dup);
707 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
708 #ifdef CONFIG_DEBUG_VM
713 static inline struct rmap_item *alloc_rmap_item(void)
715 struct rmap_item *rmap_item;
717 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
718 __GFP_NORETRY | __GFP_NOWARN);
724 static inline void free_rmap_item(struct rmap_item *rmap_item)
727 rmap_item->mm = NULL; /* debug safety */
728 kmem_cache_free(rmap_item_cache, rmap_item);
731 static inline struct stable_node *alloc_stable_node(void)
734 * The allocation can take too long with GFP_KERNEL when memory is under
735 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
736 * grants access to memory reserves, helping to avoid this problem.
738 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
741 static inline void free_stable_node(struct stable_node *stable_node)
743 VM_BUG_ON(stable_node->rmap_hlist_len &&
744 !is_stable_node_chain(stable_node));
745 kmem_cache_free(stable_node_cache, stable_node);
748 static inline struct mm_slot *alloc_mm_slot(void)
750 if (!mm_slot_cache) /* initialization failed */
752 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
755 static inline void free_mm_slot(struct mm_slot *mm_slot)
757 kmem_cache_free(mm_slot_cache, mm_slot);
760 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
762 struct mm_slot *slot;
764 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
771 static void insert_to_mm_slots_hash(struct mm_struct *mm,
772 struct mm_slot *mm_slot)
775 hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
778 static inline struct task_slot *alloc_task_slot(void)
780 if (!task_slot_cache)
782 return kmem_cache_zalloc(task_slot_cache, GFP_NOWAIT);
785 static inline void free_task_slot(struct task_slot *task_slot)
787 kmem_cache_free(task_slot_cache, task_slot);
790 static struct task_slot *get_task_slot(struct task_struct *task)
792 struct task_slot *slot;
794 hash_for_each_possible(task_slots_hash, slot, hlist,
796 if (slot->task == task)
801 static inline void insert_to_task_slots_hash(struct task_slot *slot)
803 hash_add(task_slots_hash, &slot->hlist, (unsigned long)slot->task);
807 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
808 * page tables after it has passed through ksm_exit() - which, if necessary,
809 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
810 * a special flag: they can just back out as soon as mm_users goes to zero.
811 * ksm_test_exit() is used throughout to make this test for exit: in some
812 * places for correctness, in some places just to avoid unnecessary work.
814 static inline bool ksm_test_exit(struct mm_struct *mm)
816 return atomic_read(&mm->mm_users) == 0;
820 * We use break_ksm to break COW on a ksm page: it's a stripped down
822 * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
825 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
826 * in case the application has unmapped and remapped mm,addr meanwhile.
827 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
828 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
830 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
831 * of the process that owns 'vma'. We also do not want to enforce
832 * protection keys here anyway.
834 static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
841 page = follow_page(vma, addr,
842 FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
843 if (IS_ERR_OR_NULL(page))
846 ret = handle_mm_fault(vma, addr,
847 FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, NULL);
849 ret = VM_FAULT_WRITE;
851 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
853 * We must loop because handle_mm_fault() may back out if there's
854 * any difficulty e.g. if pte accessed bit gets updated concurrently.
856 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
857 * COW has been broken, even if the vma does not permit VM_WRITE;
858 * but note that a concurrent fault might break PageKsm for us.
860 * VM_FAULT_SIGBUS could occur if we race with truncation of the
861 * backing file, which also invalidates anonymous pages: that's
862 * okay, that truncation will have unmapped the PageKsm for us.
864 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
865 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
866 * current task has TIF_MEMDIE set, and will be OOM killed on return
867 * to user; and ksmd, having no mm, would never be chosen for that.
869 * But if the mm is in a limited mem_cgroup, then the fault may fail
870 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
871 * even ksmd can fail in this way - though it's usually breaking ksm
872 * just to undo a merge it made a moment before, so unlikely to oom.
874 * That's a pity: we might therefore have more kernel pages allocated
875 * than we're counting as nodes in the stable tree; but ksm_do_scan
876 * will retry to break_cow on each pass, so should recover the page
877 * in due course. The important thing is to not let VM_MERGEABLE
878 * be cleared while any such pages might remain in the area.
880 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
883 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
886 struct vm_area_struct *vma;
887 if (ksm_test_exit(mm))
889 vma = find_vma(mm, addr);
890 if (!vma || vma->vm_start > addr)
892 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
897 static void break_cow(struct rmap_item *rmap_item)
899 struct mm_struct *mm = rmap_item->mm;
900 unsigned long addr = rmap_item->address;
901 struct vm_area_struct *vma;
904 * It is not an accident that whenever we want to break COW
905 * to undo, we also need to drop a reference to the anon_vma.
907 put_anon_vma(rmap_item->anon_vma);
909 down_read(&mm->mmap_lock);
910 vma = find_mergeable_vma(mm, addr);
912 break_ksm(vma, addr);
913 up_read(&mm->mmap_lock);
916 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
918 struct mm_struct *mm = rmap_item->mm;
919 unsigned long addr = rmap_item->address;
920 struct vm_area_struct *vma;
923 down_read(&mm->mmap_lock);
924 vma = find_mergeable_vma(mm, addr);
928 page = follow_page(vma, addr, FOLL_GET);
929 if (IS_ERR_OR_NULL(page))
931 if (PageAnon(page)) {
932 flush_anon_page(vma, page, addr);
933 flush_dcache_page(page);
939 up_read(&mm->mmap_lock);
943 #ifdef CONFIG_LKSM_FILTER
944 static inline int is_heap(struct vm_area_struct *vma)
946 return vma->vm_start <= vma->vm_mm->brk &&
947 vma->vm_end >= vma->vm_mm->start_brk;
/* the code below is copied from fs/proc/task_mmu.c */
952 static int is_stack(struct vm_area_struct *vma)
954 return vma->vm_start <= vma->vm_mm->start_stack &&
955 vma->vm_end >= vma->vm_mm->start_stack;
958 static int is_exec(struct vm_area_struct *vma)
960 return (vma->vm_flags & VM_EXEC);
962 #endif /* CONFIG_LKSM_FILTER */
* ksm_join: a wrapper function around ksm_enter.
* It sets the VM_MERGEABLE flag on the vmas of the given mm_struct.
968 static int ksm_join(struct mm_struct *mm, int frozen)
970 struct vm_area_struct *vma;
971 struct mm_slot *slot;
972 int newly_allocated = 0;
974 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
975 slot = __ksm_enter_alloc_slot(mm, frozen);
980 slot = get_mm_slot(mm);
982 ksm_err("there is no mm_slot for %p", mm);
987 for (vma = mm->mmap; vma; vma = vma->vm_next) {
988 if (vma->vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
989 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
990 VM_HUGETLB | VM_MIXEDMAP))
992 vma->vm_flags |= VM_MERGEABLE;
993 #ifdef CONFIG_LKSM_FILTER
* Many page sharings come from library pages because processes
* share the runtime framework of the OS.
* Thus, anonymous pages related to file-mapped areas can show
* sharing patterns which can be exploited by LKSM, while other
* anonymous regions (e.g., the heap) don't.
* LKSM only tracks file-related regions to build filters.
1002 if (!is_heap(vma) && !is_stack(vma) &&
1003 !is_exec(vma) && vma->anon_vma)
1004 lksm_register_file_anon_region(slot, vma);
1008 return newly_allocated;
1011 #define ksm_join_write_lock(mm, frozen, ret) do {\
1012 down_write(&mm->mmap_lock); \
1013 ret = ksm_join(mm, frozen); \
1014 up_write(&mm->mmap_lock); \
1017 #ifdef CONFIG_LKSM_FILTER
1018 static void lksm_region_ref_append
1019 (struct mm_slot *slot, struct lksm_region *region)
1021 struct lksm_region_ref *ref;
1024 ref = kzalloc(sizeof(struct lksm_region_ref), GFP_KERNEL);
1027 ref->region = region;
1028 list_add_tail(&ref->list, &slot->ref_list);
atomic_inc(&region->refcount);
1033 static void lksm_region_free(struct lksm_region *region)
1035 unsigned long flags;
1037 spin_lock_irqsave(&lksm_region_lock, flags);
1038 if (!region->next) {
if (atomic_read(&region->prev->refcount) == 0) {
	hash_del(&region->prev->hnode);
1042 if (region->prev->len > SINGLE_FILTER_LEN)
1043 kfree(region->prev->filter);
1044 kfree(region->prev);
1046 region->prev->next = NULL;
hash_del(&region->hnode);
1049 if (region->len > SINGLE_FILTER_LEN)
1050 kfree(region->filter);
1053 spin_unlock_irqrestore(&lksm_region_lock, flags);
1056 static void lksm_region_ref_remove(struct lksm_region_ref *ref)
1058 list_del_init(&ref->list);
1059 if (atomic_dec_and_test(&ref->region->refcount))
1060 lksm_region_free(ref->region);
1064 static void lksm_region_ref_list_release(struct mm_slot *slot)
1066 struct lksm_region_ref *ref, *next;
1068 list_for_each_entry_safe(ref, next, &slot->ref_list, list) {
1069 lksm_region_ref_remove(ref);
1072 #endif /* CONFIG_LKSM_FILTER */
* This helper is used for getting the right index into the array of tree roots.
1076 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
1077 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
1078 * every node has its own stable and unstable tree.
1080 static inline int get_kpfn_nid(unsigned long kpfn)
1082 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
1085 static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
1086 struct rb_root *root)
1088 struct stable_node *chain = alloc_stable_node();
1089 VM_BUG_ON(is_stable_node_chain(dup));
1090 if (likely(chain)) {
1091 INIT_HLIST_HEAD(&chain->hlist);
1092 chain->chain_prune_time = jiffies;
1093 chain->rmap_hlist_len = STABLE_NODE_CHAIN;
#if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
1095 chain->nid = NUMA_NO_NODE; /* debug */
1097 ksm_stable_node_chains++;
1100 * Put the stable node chain in the first dimension of
1101 * the stable tree and at the same time remove the old
1104 rb_replace_node(&dup->node, &chain->node, root);
1107 * Move the old stable node to the second dimension
1108 * queued in the hlist_dup. The invariant is that all
1109 * dup stable_nodes in the chain->hlist point to pages
1110 * that are wrprotected and have the exact same
1113 stable_node_chain_add_dup(dup, chain);
1118 static inline void free_stable_node_chain(struct stable_node *chain,
1119 struct rb_root *root)
1121 rb_erase(&chain->node, root);
1122 free_stable_node(chain);
1123 ksm_stable_node_chains--;
1126 static void remove_node_from_stable_tree(struct stable_node *stable_node)
1128 struct rmap_item *rmap_item;
1130 /* check it's not STABLE_NODE_CHAIN or negative */
1131 BUG_ON(stable_node->rmap_hlist_len < 0);
1133 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1134 if (rmap_item->hlist.next) {
1135 ksm_pages_sharing--;
1136 lksm_slot_nr_broken++;
1140 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
1141 stable_node->rmap_hlist_len--;
1142 put_anon_vma(rmap_item->anon_vma);
1143 rmap_item->address &= PAGE_MASK;
1148 * We need the second aligned pointer of the migrate_nodes
1149 * list_head to stay clear from the rb_parent_color union
1150 * (aligned and different than any node) and also different
1151 * from &migrate_nodes. This will verify that future list.h changes
1152 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
1154 #if defined(GCC_VERSION) && GCC_VERSION >= 40903
1155 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
1156 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
1159 if (stable_node->head == &migrate_nodes)
1160 list_del(&stable_node->list);
1162 stable_node_dup_del(stable_node);
1163 free_stable_node(stable_node);
1166 enum get_ksm_page_flags {
1167 GET_KSM_PAGE_NOLOCK,
1169 GET_KSM_PAGE_TRYLOCK
1173 * get_ksm_page: checks if the page indicated by the stable node
1174 * is still its ksm page, despite having held no reference to it.
1175 * In which case we can trust the content of the page, and it
1176 * returns the gotten page; but if the page has now been zapped,
1177 * remove the stale node from the stable tree and return NULL.
1178 * But beware, the stable node's page might be being migrated.
1180 * You would expect the stable_node to hold a reference to the ksm page.
1181 * But if it increments the page's count, swapping out has to wait for
1182 * ksmd to come around again before it can free the page, which may take
1183 * seconds or even minutes: much too unresponsive. So instead we use a
1184 * "keyhole reference": access to the ksm page from the stable node peeps
1185 * out through its keyhole to see if that page still holds the right key,
1186 * pointing back to this stable node. This relies on freeing a PageAnon
1187 * page to reset its page->mapping to NULL, and relies on no other use of
* a page to put something that might look like our key in page->mapping.
* A page's mapping may only look stale transiently while that page
* is on its way to being freed; but it is an anomaly to bear in mind.
1191 static struct page *get_ksm_page(struct stable_node *stable_node,
1192 enum get_ksm_page_flags flags)
1195 void *expected_mapping;
1198 expected_mapping = (void *)((unsigned long)stable_node |
1201 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
1202 page = pfn_to_page(kpfn);
1203 if (READ_ONCE(page->mapping) != expected_mapping)
1207 * We cannot do anything with the page while its refcount is 0.
1208 * Usually 0 means free, or tail of a higher-order page: in which
1209 * case this node is no longer referenced, and should be freed;
1210 * however, it might mean that the page is under page_ref_freeze().
1211 * The __remove_mapping() case is easy, again the node is now stale;
1212 * the same is in reuse_ksm_page() case; but if page is swapcache
1213 * in migrate_page_move_mapping(), it might still be our page,
1214 * in which case it's essential to keep the node.
1216 while (!get_page_unless_zero(page)) {
1218 * Another check for page->mapping != expected_mapping would
1219 * work here too. We have chosen the !PageSwapCache test to
1220 * optimize the common case, when the page is or is about to
1221 * be freed: PageSwapCache is cleared (under spin_lock_irq)
1222 * in the ref_freeze section of __remove_mapping(); but Anon
1223 * page->mapping reset to NULL later, in free_pages_prepare().
1225 if (!PageSwapCache(page))
1230 if (READ_ONCE(page->mapping) != expected_mapping) {
1235 if (flags == GET_KSM_PAGE_TRYLOCK) {
1236 if (!trylock_page(page)) {
1238 return ERR_PTR(-EBUSY);
1240 } else if (flags == GET_KSM_PAGE_LOCK)
1243 if (flags != GET_KSM_PAGE_NOLOCK) {
1244 if (READ_ONCE(page->mapping) != expected_mapping) {
1254 * We come here from above when page->mapping or !PageSwapCache
1255 * suggests that the node is stale; but it might be under migration.
1256 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
1257 * before checking whether node->kpfn has been changed.
1260 if (READ_ONCE(stable_node->kpfn) != kpfn)
1262 remove_node_from_stable_tree(stable_node);
1267 * Removing rmap_item from stable or unstable tree.
1268 * This function will clean the information from the stable/unstable tree.
1270 static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
1272 if (rmap_item->address & STABLE_FLAG) {
1273 struct stable_node *stable_node;
1276 stable_node = rmap_item->head;
1277 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
1281 hlist_del(&rmap_item->hlist);
1285 if (!hlist_empty(&stable_node->hlist)) {
1286 ksm_pages_sharing--;
1287 lksm_slot_nr_broken++;
1291 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
1292 stable_node->rmap_hlist_len--;
1294 put_anon_vma(rmap_item->anon_vma);
1295 rmap_item->address &= PAGE_MASK;
1297 } else if (rmap_item->address & UNSTABLE_FLAG) {
1300 * Usually ksmd can and must skip the rb_erase, because
1301 * root_unstable_tree was already reset to RB_ROOT.
1302 * But be careful when an mm is exiting: do the rb_erase
1303 * if this rmap_item was inserted by this scan, rather
1304 * than left over from before.
1306 age = (unsigned char)(ksm_scan.scan_round - rmap_item->address);
1308 rb_erase(&rmap_item->node,
1309 root_unstable_tree + NUMA(rmap_item->nid));
1311 RB_CLEAR_NODE(&rmap_item->node);
1313 ksm_pages_unshared--;
1314 rmap_item->address &= PAGE_MASK;
1317 cond_resched(); /* we're called from many long loops */
1320 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
1321 struct rmap_item **rmap_list)
1323 while (*rmap_list) {
1324 struct rmap_item *rmap_item = *rmap_list;
1325 *rmap_list = rmap_item->rmap_list;
1326 remove_rmap_item_from_tree(rmap_item);
1327 free_rmap_item(rmap_item);
1332 * Though it's very tempting to unmerge rmap_items from stable tree rather
1333 * than check every pte of a given vma, the locking doesn't quite work for
1334 * that - an rmap_item is assigned to the stable tree after inserting ksm
1335 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
1336 * rmap_items from parent to child at fork time (so as not to waste time
1337 * if exit comes before the next scan reaches it).
1339 * Similarly, although we'd like to remove rmap_items (so updating counts
1340 * and freeing memory) when unmerging an area, it's easier to leave that
1341 * to the next pass of ksmd - consider, for example, how ksmd might be
1342 * in cmp_and_merge_page on one of the rmap_items we would be removing.
1344 static int unmerge_ksm_pages(struct vm_area_struct *vma,
1345 unsigned long start, unsigned long end)
1350 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
1351 if (ksm_test_exit(vma->vm_mm))
1353 if (signal_pending(current))
1356 err = break_ksm(vma, addr);
1361 static inline struct stable_node *page_stable_node(struct page *page)
1363 return PageKsm(page) ? page_rmapping(page) : NULL;
1366 static inline void set_page_stable_node(struct page *page,
1367 struct stable_node *stable_node)
1369 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
1374 * Only called through the sysfs control interface:
1376 static int remove_stable_node(struct stable_node *stable_node)
1381 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
1384 * get_ksm_page did remove_node_from_stable_tree itself.
1390 * Page could be still mapped if this races with __mmput() running in
1391 * between ksm_exit() and exit_mmap(). Just refuse to let
1392 * merge_across_nodes/max_page_sharing be switched.
1395 if (!page_mapped(page)) {
1397 * The stable node did not yet appear stale to get_ksm_page(),
1398 * since that allows for an unmapped ksm page to be recognized
1399 * right up until it is freed; but the node is safe to remove.
1400 * This page might be in a pagevec waiting to be freed,
1401 * or it might be PageSwapCache (perhaps under writeback),
1402 * or it might have been removed from swapcache a moment ago.
1404 set_page_stable_node(page, NULL);
1405 remove_node_from_stable_tree(stable_node);
1414 static int remove_stable_node_chain(struct stable_node *stable_node,
1415 struct rb_root *root)
1417 struct stable_node *dup;
1418 struct hlist_node *hlist_safe;
1420 if (!is_stable_node_chain(stable_node)) {
1421 VM_BUG_ON(is_stable_node_dup(stable_node));
1422 if (remove_stable_node(stable_node))
1428 hlist_for_each_entry_safe(dup, hlist_safe,
1429 &stable_node->hlist, hlist_dup) {
1430 VM_BUG_ON(!is_stable_node_dup(dup));
1431 if (remove_stable_node(dup))
1434 BUG_ON(!hlist_empty(&stable_node->hlist));
1435 free_stable_node_chain(stable_node, root);
1439 static int remove_all_stable_nodes(void)
1441 struct stable_node *stable_node, *next;
1445 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
1446 while (root_stable_tree[nid].rb_node) {
1447 stable_node = rb_entry(root_stable_tree[nid].rb_node,
1448 struct stable_node, node);
1449 if (remove_stable_node_chain(stable_node,
1450 root_stable_tree + nid)) {
1452 break; /* proceed to next nid */
1457 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
1458 if (remove_stable_node(stable_node))
1465 static int unmerge_and_remove_all_rmap_items(void)
1467 struct mm_slot *mm_slot;
1468 struct mm_struct *mm;
1469 struct vm_area_struct *vma;
1472 spin_lock(&ksm_mmlist_lock);
1473 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
1474 struct mm_slot, mm_list);
1475 spin_unlock(&ksm_mmlist_lock);
1477 for (mm_slot = ksm_scan.mm_slot;
1478 mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
1480 down_read(&mm->mmap_lock);
1481 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1482 if (ksm_test_exit(mm))
1484 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
1486 err = unmerge_ksm_pages(vma,
1487 vma->vm_start, vma->vm_end);
1492 remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
1493 up_read(&mm->mmap_lock);
1495 spin_lock(&ksm_mmlist_lock);
1496 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
1497 struct mm_slot, mm_list);
1498 if (ksm_test_exit(mm)) {
1499 hash_del(&mm_slot->link);
1500 list_del(&mm_slot->mm_list);
1501 spin_unlock(&ksm_mmlist_lock);
1503 free_mm_slot(mm_slot);
1504 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1507 spin_unlock(&ksm_mmlist_lock);
1510 /* Clean up stable nodes, but don't worry if some are still busy */
1511 remove_all_stable_nodes();
1512 ksm_scan.scan_round = 0;
1516 up_read(&mm->mmap_lock);
1517 spin_lock(&ksm_mmlist_lock);
1518 ksm_scan.mm_slot = &ksm_mm_head;
1519 spin_unlock(&ksm_mmlist_lock);
1522 #endif /* CONFIG_SYSFS */
1524 static u32 calc_checksum(struct page *page)
1527 void *addr = kmap_atomic(page);
1528 checksum = xxhash(addr, PAGE_SIZE, 0);
1529 kunmap_atomic(addr);
1530 return lksm_clear_checksum_frozen(checksum);
1533 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1536 struct mm_struct *mm = vma->vm_mm;
1537 struct page_vma_mapped_walk pvmw = {
1543 struct mmu_notifier_range range;
1545 pvmw.address = page_address_in_vma(page, vma);
1546 if (pvmw.address == -EFAULT)
1549 BUG_ON(PageTransCompound(page));
1551 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
1553 pvmw.address + PAGE_SIZE);
1554 mmu_notifier_invalidate_range_start(&range);
1556 if (!page_vma_mapped_walk(&pvmw))
1558 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1561 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1562 (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1563 mm_tlb_flush_pending(mm)) {
1566 swapped = PageSwapCache(page);
1567 flush_cache_page(vma, pvmw.address, page_to_pfn(page));
* Ok this is tricky: when get_user_pages_fast() runs it doesn't
* take any lock, therefore the check that we are going to make
* of the page count against the map count is racy and
* O_DIRECT can happen right after the check.
* So we clear the pte and flush the tlb before the check;
* this assures us that no O_DIRECT can happen after the check
* or in the middle of the check.
1577 * No need to notify as we are downgrading page table to read
1578 * only not changing it to point to a new page.
1580 * See Documentation/vm/mmu_notifier.rst
1582 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1584 * Check that no O_DIRECT or similar I/O is in progress on the
1587 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1588 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1591 if (pte_dirty(entry))
1592 set_page_dirty(page);
1594 if (pte_protnone(entry))
1595 entry = pte_mkclean(pte_clear_savedwrite(entry));
1597 entry = pte_mkclean(pte_wrprotect(entry));
1598 set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
1600 *orig_pte = *pvmw.pte;
1604 page_vma_mapped_walk_done(&pvmw);
1606 mmu_notifier_invalidate_range_end(&range);
1612 * replace_page - replace page in vma by new ksm page
1613 * @vma: vma that holds the pte pointing to page
1614 * @page: the page we are replacing by kpage
1615 * @kpage: the ksm page we replace page by
1616 * @orig_pte: the original value of the pte
1618 * Returns 0 on success, -EFAULT on failure.
1620 static int replace_page(struct vm_area_struct *vma, struct page *page,
1621 struct page *kpage, pte_t orig_pte)
1623 struct mm_struct *mm = vma->vm_mm;
1630 struct mmu_notifier_range range;
1632 addr = page_address_in_vma(page, vma);
1633 if (addr == -EFAULT)
1636 pmd = mm_find_pmd(mm, addr);
1640 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
1642 mmu_notifier_invalidate_range_start(&range);
1644 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1645 if (!pte_same(*ptep, orig_pte)) {
1646 pte_unmap_unlock(ptep, ptl);
1651 * No need to check ksm_use_zero_pages here: we can only have a
* zero_page here if ksm_use_zero_pages was enabled already.
1654 if (!is_zero_pfn(page_to_pfn(kpage))) {
1656 page_add_anon_rmap(kpage, vma, addr, false);
1657 newpte = mk_pte(kpage, vma->vm_page_prot);
1659 newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
1660 vma->vm_page_prot));
1662 * We're replacing an anonymous page with a zero page, which is
1663 * not anonymous. We need to do proper accounting otherwise we
1664 * will get wrong values in /proc, and a BUG message in dmesg
1665 * when tearing down the mm.
1667 dec_mm_counter(mm, MM_ANONPAGES);
1670 flush_cache_page(vma, addr, pte_pfn(*ptep));
1672 * No need to notify as we are replacing a read only page with another
1673 * read only page with the same content.
1675 * See Documentation/vm/mmu_notifier.rst
1677 ptep_clear_flush(vma, addr, ptep);
1678 set_pte_at_notify(mm, addr, ptep, newpte);
1680 page_remove_rmap(page, false);
1681 if (!page_mapped(page))
1682 try_to_free_swap(page);
1685 pte_unmap_unlock(ptep, ptl);
1688 mmu_notifier_invalidate_range_end(&range);
1694 * try_to_merge_one_page - take two pages and merge them into one
1695 * @vma: the vma that holds the pte pointing to page
1696 * @page: the PageAnon page that we want to replace with kpage
1697 * @kpage: the PageKsm page that we want to map instead of page,
1698 * or NULL the first time when we want to use page as kpage.
1700 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1702 static int try_to_merge_one_page(struct vm_area_struct *vma,
1703 struct page *page, struct page *kpage)
1705 pte_t orig_pte = __pte(0);
1708 if (page == kpage) /* ksm page forked */
1711 if (!PageAnon(page))
1715 * We need the page lock to read a stable PageSwapCache in
1716 * write_protect_page(). We use trylock_page() instead of
1717 * lock_page() because we don't want to wait here - we
1718 * prefer to continue scanning and merging different pages,
1719 * then come back to this page when it is unlocked.
1721 if (!trylock_page(page))
1724 if (PageTransCompound(page)) {
1725 if (split_huge_page(page))
1730 * If this anonymous page is mapped only here, its pte may need
1731 * to be write-protected. If it's mapped elsewhere, all of its
1732 * ptes are necessarily already write-protected. But in either
1733 * case, we need to lock and check page_count is not raised.
1735 if (write_protect_page(vma, page, &orig_pte) == 0) {
1738 * While we hold page lock, upgrade page from
1739 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1740 * stable_tree_insert() will update stable_node.
1742 set_page_stable_node(page, NULL);
1743 mark_page_accessed(page);
1745 * Page reclaim just frees a clean page with no dirty
1746 * ptes: make sure that the ksm page would be swapped.
1748 if (!PageDirty(page))
1751 } else if (pages_identical(page, kpage))
1752 err = replace_page(vma, page, kpage, orig_pte);
1755 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1756 munlock_vma_page(page);
1757 if (!PageMlocked(kpage)) {
1760 mlock_vma_page(kpage);
1761 page = kpage; /* for final unlock */
1772 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1773 * but no new kernel page is allocated: kpage must already be a ksm page.
1775 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1777 static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1778 struct page *page, struct page *kpage)
1780 struct mm_struct *mm = rmap_item->mm;
1781 struct vm_area_struct *vma;
1784 down_read(&mm->mmap_lock);
1785 vma = find_mergeable_vma(mm, rmap_item->address);
1789 err = try_to_merge_one_page(vma, page, kpage);
1793 /* Unstable nid is in union with stable anon_vma: remove first */
1794 remove_rmap_item_from_tree(rmap_item);
1796 #ifdef CONFIG_LKSM_FILTER
1797 /* node is removed from tree, base_addr can be safely used */
1798 rmap_item->base_addr = vma->vm_start;
1800 /* Must get reference to anon_vma while still holding mmap_lock */
1801 rmap_item->anon_vma = vma->anon_vma;
1802 get_anon_vma(vma->anon_vma);
1804 up_read(&mm->mmap_lock);
1809 * try_to_merge_two_pages - take two identical pages and prepare them
1810 * to be merged into one page.
1812 * This function returns the kpage if we successfully merged two identical
1813 * pages into one ksm page, NULL otherwise.
1815 * Note that this function upgrades page to ksm page: if one of the pages
1816 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1818 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1820 struct rmap_item *tree_rmap_item,
1821 struct page *tree_page)
1825 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1827 err = try_to_merge_with_ksm_page(tree_rmap_item,
1830 * If that fails, we have a ksm page with only one pte
1831 * pointing to it: so break it.
1834 break_cow(rmap_item);
1836 return err ? NULL : page;
1839 static __always_inline
1840 bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1842 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1844 * Check that at least one mapping still exists, otherwise
* there's not much point in merging and sharing with this
1846 * stable_node, as the underlying tree_page of the other
1847 * sharer is going to be freed soon.
1849 return stable_node->rmap_hlist_len &&
1850 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1853 static __always_inline
1854 bool is_page_sharing_candidate(struct stable_node *stable_node)
1856 return __is_page_sharing_candidate(stable_node, 0);
1859 static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1860 struct stable_node **_stable_node,
1861 struct rb_root *root,
1862 bool prune_stale_stable_nodes)
1864 struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1865 struct hlist_node *hlist_safe;
1866 struct page *_tree_page, *tree_page = NULL;
1868 int found_rmap_hlist_len;
1870 if (!prune_stale_stable_nodes ||
1871 time_before(jiffies, stable_node->chain_prune_time +
1873 ksm_stable_node_chains_prune_millisecs)))
1874 prune_stale_stable_nodes = false;
1876 stable_node->chain_prune_time = jiffies;
1878 hlist_for_each_entry_safe(dup, hlist_safe,
1879 &stable_node->hlist, hlist_dup) {
1882 * We must walk all stable_node_dup to prune the stale
1883 * stable nodes during lookup.
1885 * get_ksm_page can drop the nodes from the
1886 * stable_node->hlist if they point to freed pages
1887 * (that's why we do a _safe walk). The "dup"
1888 * stable_node parameter itself will be freed from
1889 * under us if it returns NULL.
1891 _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
1895 if (is_page_sharing_candidate(dup)) {
1897 dup->rmap_hlist_len > found_rmap_hlist_len) {
1899 put_page(tree_page);
1901 found_rmap_hlist_len = found->rmap_hlist_len;
1902 tree_page = _tree_page;
1904 /* skip put_page for found dup */
1905 if (!prune_stale_stable_nodes)
1910 put_page(_tree_page);
1915 * nr is counting all dups in the chain only if
1916 * prune_stale_stable_nodes is true, otherwise we may
1917 * break the loop at nr == 1 even if there are
1920 if (prune_stale_stable_nodes && nr == 1) {
1922 * If there's not just one entry it would
1923 * corrupt memory, better BUG_ON. In KSM
1924 * context with no lock held it's not even
1927 BUG_ON(stable_node->hlist.first->next);
1930 * There's just one entry and it is below the
1931 * deduplication limit so drop the chain.
1933 rb_replace_node(&stable_node->node, &found->node,
1935 free_stable_node(stable_node);
1936 ksm_stable_node_chains--;
1937 ksm_stable_node_dups--;
1939 * NOTE: the caller depends on the stable_node
1940 * to be equal to stable_node_dup if the chain
1943 *_stable_node = found;
* Just for robustness as stable_node is
1946 * otherwise left as a stable pointer, the
1947 * compiler shall optimize it away at build
1951 } else if (stable_node->hlist.first != &found->hlist_dup &&
1952 __is_page_sharing_candidate(found, 1)) {
1954 * If the found stable_node dup can accept one
1955 * more future merge (in addition to the one
1956 * that is underway) and is not at the head of
1957 * the chain, put it there so next search will
1958 * be quicker in the !prune_stale_stable_nodes
1961 * NOTE: it would be inaccurate to use nr > 1
1962 * instead of checking the hlist.first pointer
1963 * directly, because in the
1964 * prune_stale_stable_nodes case "nr" isn't
1965 * the position of the found dup in the chain,
1966 * but the total number of dups in the chain.
1968 hlist_del(&found->hlist_dup);
1969 hlist_add_head(&found->hlist_dup,
1970 &stable_node->hlist);
1974 *_stable_node_dup = found;
1978 static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1979 struct rb_root *root)
1981 if (!is_stable_node_chain(stable_node))
1983 if (hlist_empty(&stable_node->hlist)) {
1984 free_stable_node_chain(stable_node, root);
1987 return hlist_entry(stable_node->hlist.first,
1988 typeof(*stable_node), hlist_dup);
1992 * Like for get_ksm_page, this function can free the *_stable_node and
1993 * *_stable_node_dup if the returned tree_page is NULL.
1995 * It can also free and overwrite *_stable_node with the found
1996 * stable_node_dup if the chain is collapsed (in which case
1997 * *_stable_node will be equal to *_stable_node_dup as if the chain
1998 * never existed). It's up to the caller to verify tree_page is not
1999 * NULL before dereferencing *_stable_node or *_stable_node_dup.
2001 * *_stable_node_dup is really a second output parameter of this
2002 * function and will be overwritten in all cases, the caller doesn't
2003 * need to initialize it.
2005 static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
2006 struct stable_node **_stable_node,
2007 struct rb_root *root,
2008 bool prune_stale_stable_nodes)
2010 struct stable_node *stable_node = *_stable_node;
2011 if (!is_stable_node_chain(stable_node)) {
2012 if (is_page_sharing_candidate(stable_node)) {
2013 *_stable_node_dup = stable_node;
2014 return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
2017 * _stable_node_dup set to NULL means the stable_node
2018 * reached the ksm_max_page_sharing limit.
2020 *_stable_node_dup = NULL;
2023 return stable_node_dup(_stable_node_dup, _stable_node, root,
2024 prune_stale_stable_nodes);
2027 static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
2028 struct stable_node **s_n,
2029 struct rb_root *root)
2031 return __stable_node_chain(s_n_d, s_n, root, true);
2034 static __always_inline struct page *chain(struct stable_node **s_n_d,
2035 struct stable_node *s_n,
2036 struct rb_root *root)
2038 struct stable_node *old_stable_node = s_n;
2039 struct page *tree_page;
2041 tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
2042 /* not pruning dups so s_n cannot have changed */
2043 VM_BUG_ON(s_n != old_stable_node);
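/*
 * A hedged caller-side sketch of the contract documented above
 * __stable_node_chain(): the two output pointers may be dereferenced
 * only when the returned tree_page is non-NULL. Illustrative only;
 * stable_tree_search() below is the real caller.
 */
#if 0 /* illustrative sketch, not compiled */
	struct stable_node *stable_node, *stable_node_dup;
	struct page *tree_page;

	tree_page = chain_prune(&stable_node_dup, &stable_node, root);
	if (!tree_page) {
		/* stable_node/stable_node_dup may point to freed memory */
	} else {
		/*
		 * Safe: stable_node_dup is the chosen dup; it equals
		 * stable_node when the chain was collapsed (or when no
		 * chain ever existed).
		 */
		put_page(tree_page);
	}
#endif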
2048 * stable_tree_search - search for page inside the stable tree
2050 * This function checks if there is a page inside the stable tree
2051 * with identical content to the page that we are scanning right now.
2053 * This function returns the stable tree node of identical content if found,
2056 static struct page *stable_tree_search(struct page *page)
2059 struct rb_root *root;
2060 struct rb_node **new;
2061 struct rb_node *parent;
2062 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
2063 struct stable_node *page_node;
2065 page_node = page_stable_node(page);
2066 if (page_node && page_node->head != &migrate_nodes) {
2067 /* ksm page forked */
2072 nid = get_kpfn_nid(page_to_pfn(page));
2073 root = root_stable_tree + nid;
2075 new = &root->rb_node;
2079 struct page *tree_page;
2083 stable_node = rb_entry(*new, struct stable_node, node);
2084 stable_node_any = NULL;
2085 tree_page = chain_prune(&stable_node_dup, &stable_node, root);
2087 * NOTE: stable_node may have been freed by
2088 * chain_prune() if the returned stable_node_dup is
2089 * not NULL. stable_node_dup may have been inserted in
2090 * the rbtree instead as a regular stable_node (in
2091 * order to collapse the stable_node chain if a single
2092 * stable_node dup was found in it). In such case the
2093 * stable_node is overwritten by the callee to point
2094 * to the stable_node_dup that was collapsed in the
2095 * stable rbtree and stable_node will be equal to
2096 * stable_node_dup as if the chain never existed.
2098 if (!stable_node_dup) {
2100 * Either all stable_node dups were full in
2101 * this stable_node chain, or this chain was
2102 * empty and should be rb_erased.
2104 stable_node_any = stable_node_dup_any(stable_node,
2106 if (!stable_node_any) {
2107 /* rb_erase just run */
2111 * Take any of the stable_node dups page of
2112 * this stable_node chain to let the tree walk
2113 * continue. All KSM pages belonging to the
2114 * stable_node dups in a stable_node chain
2115 * have the same content and they're
2116 * wrprotected at all times. Any will work
2117 * fine to continue the walk.
2119 tree_page = get_ksm_page(stable_node_any,
2120 GET_KSM_PAGE_NOLOCK);
2122 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
2125 * If we walked over a stale stable_node,
2126 * get_ksm_page() will call rb_erase() and it
2127 * may rebalance the tree from under us. So
2128 * restart the search from scratch. Returning
2129 * NULL would be safe too, but we'd generate
2130 * false negative insertions just because some
2131 * stable_node was stale.
2136 ret = memcmp_pages(page, tree_page);
2137 put_page(tree_page);
2141 new = &parent->rb_left;
2143 new = &parent->rb_right;
2146 VM_BUG_ON(page_node->head != &migrate_nodes);
2148 * Test if the migrated page should be merged
2149 * into a stable node dup. If the mapcount is
2150 * 1 we can migrate it with another KSM page
2151 * without adding it to the chain.
2153 if (page_mapcount(page) > 1)
2157 if (!stable_node_dup) {
2159 * If the stable_node is a chain and
2160 * we got a payload match in memcmp
2161 * but we cannot merge the scanned
2162 * page in any of the existing
2163 * stable_node dups because they're
2164 * all full, we need to wait for the
2165 * scanned page to find itself a match
2166 * in the unstable tree to create a
2167 * brand new KSM page to add later to
2168 * the dups of this stable_node.
2174 * Lock and unlock the stable_node's page (which
2175 * might already have been migrated) so that page
2176 * migration is sure to notice its raised count.
2177 * It would be more elegant to return stable_node
2178 * than kpage, but that involves more changes.
2180 tree_page = get_ksm_page(stable_node_dup,
2181 GET_KSM_PAGE_TRYLOCK);
2183 if (PTR_ERR(tree_page) == -EBUSY)
2184 return ERR_PTR(-EBUSY);
2186 if (unlikely(!tree_page))
2188 * The tree may have been rebalanced,
2189 * so re-evaluate parent and new.
2192 unlock_page(tree_page);
2194 if (get_kpfn_nid(stable_node_dup->kpfn) !=
2195 NUMA(stable_node_dup->nid)) {
2196 put_page(tree_page);
2206 list_del(&page_node->list);
2207 DO_NUMA(page_node->nid = nid);
2208 rb_link_node(&page_node->node, parent, new);
2209 rb_insert_color(&page_node->node, root);
2211 if (is_page_sharing_candidate(page_node)) {
2219 * If stable_node was a chain and chain_prune collapsed it,
2220 * stable_node has been updated to be the new regular
2221 * stable_node. A collapse of the chain is indistinguishable
2222 * from the case where there was no chain in the stable
2223 * rbtree. Otherwise stable_node is the chain and
2224 * stable_node_dup is the dup to replace.
2226 if (stable_node_dup == stable_node) {
2227 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
2228 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
2229 /* there is no chain */
2231 VM_BUG_ON(page_node->head != &migrate_nodes);
2232 list_del(&page_node->list);
2233 DO_NUMA(page_node->nid = nid);
2234 rb_replace_node(&stable_node_dup->node,
2237 if (is_page_sharing_candidate(page_node))
2242 rb_erase(&stable_node_dup->node, root);
2246 VM_BUG_ON(!is_stable_node_chain(stable_node));
2247 __stable_node_dup_del(stable_node_dup);
2249 VM_BUG_ON(page_node->head != &migrate_nodes);
2250 list_del(&page_node->list);
2251 DO_NUMA(page_node->nid = nid);
2252 stable_node_chain_add_dup(page_node, stable_node);
2253 if (is_page_sharing_candidate(page_node))
2261 stable_node_dup->head = &migrate_nodes;
2262 list_add(&stable_node_dup->list, stable_node_dup->head);
2266 /* stable_node_dup could be null if it reached the limit */
2267 if (!stable_node_dup)
2268 stable_node_dup = stable_node_any;
2270 * If stable_node was a chain and chain_prune collapsed it,
2271 * stable_node has been updated to be the new regular
2272 * stable_node. A collapse of the chain is indistinguishable
2273 * from the case where there was no chain in the stable
2274 * rbtree. Otherwise stable_node is the chain and
2275 * stable_node_dup is the dup to replace.
2277 if (stable_node_dup == stable_node) {
2278 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
2279 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
2280 /* chain is missing so create it */
2281 stable_node = alloc_stable_node_chain(stable_node_dup,
2287 * Add this stable_node dup that was
2288 * migrated to the stable_node chain
2289 * of the current nid for this page
2292 VM_BUG_ON(!is_stable_node_chain(stable_node));
2293 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
2294 VM_BUG_ON(page_node->head != &migrate_nodes);
2295 list_del(&page_node->list);
2296 DO_NUMA(page_node->nid = nid);
2297 stable_node_chain_add_dup(page_node, stable_node);
2302 * stable_tree_insert - insert stable tree node pointing to new ksm page
2303 * into the stable tree.
2305 * This function returns the stable tree node just allocated on success,
2308 static struct stable_node *stable_tree_insert(struct page *kpage)
2312 struct rb_root *root;
2313 struct rb_node **new;
2314 struct rb_node *parent;
2315 struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
2316 bool need_chain = false;
2318 kpfn = page_to_pfn(kpage);
2319 nid = get_kpfn_nid(kpfn);
2320 root = root_stable_tree + nid;
2323 new = &root->rb_node;
2326 struct page *tree_page;
2330 stable_node = rb_entry(*new, struct stable_node, node);
2331 stable_node_any = NULL;
2332 tree_page = chain(&stable_node_dup, stable_node, root);
2333 if (!stable_node_dup) {
2335 * Either all stable_node dups were full in
2336 * this stable_node chain, or this chain was
2337 * empty and should be rb_erased.
2339 stable_node_any = stable_node_dup_any(stable_node,
2341 if (!stable_node_any) {
2342 /* rb_erase just run */
2346 * Take any of the stable_node dups page of
2347 * this stable_node chain to let the tree walk
2348 * continue. All KSM pages belonging to the
2349 * stable_node dups in a stable_node chain
2350 * have the same content and they're
2351 * wrprotected at all times. Any will work
2352 * fine to continue the walk.
2354 tree_page = get_ksm_page(stable_node_any,
2355 GET_KSM_PAGE_NOLOCK);
2357 VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
2360 * If we walked over a stale stable_node,
2361 * get_ksm_page() will call rb_erase() and it
2362 * may rebalance the tree from under us. So
2363 * restart the search from scratch. Returning
2364 * NULL would be safe too, but we'd generate
2365 * false negative insertions just because some
2366 * stable_node was stale.
2371 ret = memcmp_pages(kpage, tree_page);
2372 put_page(tree_page);
2376 new = &parent->rb_left;
2378 new = &parent->rb_right;
2385 stable_node_dup = alloc_stable_node();
2386 if (!stable_node_dup)
2389 INIT_HLIST_HEAD(&stable_node_dup->hlist);
2390 stable_node_dup->kpfn = kpfn;
2391 set_page_stable_node(kpage, stable_node_dup);
2392 stable_node_dup->rmap_hlist_len = 0;
2393 DO_NUMA(stable_node_dup->nid = nid);
2395 rb_link_node(&stable_node_dup->node, parent, new);
2396 rb_insert_color(&stable_node_dup->node, root);
2398 if (!is_stable_node_chain(stable_node)) {
2399 struct stable_node *orig = stable_node;
2400 /* chain is missing so create it */
2401 stable_node = alloc_stable_node_chain(orig, root);
2403 free_stable_node(stable_node_dup);
2407 stable_node_chain_add_dup(stable_node_dup, stable_node);
2410 return stable_node_dup;
2414 * unstable_tree_search_insert - search for identical page,
2415 * else insert rmap_item into the unstable tree.
2417 * This function searches for a page in the unstable tree identical to the
2418 * page currently being scanned; and if no identical page is found in the
2419 * tree, we insert rmap_item as a new object into the unstable tree.
2421 * This function returns pointer to rmap_item found to be identical
2422 * to the currently scanned page, NULL otherwise.
2424 * This function does both searching and inserting, because they share
2425 * the same walking algorithm in an rbtree.
2428 struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
2430 struct page **tree_pagep)
2432 struct rb_node **new;
2433 struct rb_root *root;
2434 struct rb_node *parent = NULL;
2437 nid = get_kpfn_nid(page_to_pfn(page));
2438 root = root_unstable_tree + nid;
2439 new = &root->rb_node;
2442 struct rmap_item *tree_rmap_item;
2443 struct page *tree_page;
2447 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
2448 tree_page = get_mergeable_page(tree_rmap_item);
2453 * Don't substitute a ksm page for a forked page.
2455 if (page == tree_page) {
2456 put_page(tree_page);
2460 ret = memcmp_pages(page, tree_page);
2464 put_page(tree_page);
2465 new = &parent->rb_left;
2466 } else if (ret > 0) {
2467 put_page(tree_page);
2468 new = &parent->rb_right;
2469 } else if (!ksm_merge_across_nodes &&
2470 page_to_nid(tree_page) != nid) {
2472 * If tree_page has been migrated to another NUMA node,
2473 * it will be flushed out and put in the right unstable
2474 * tree next time: only merge with it when across_nodes.
2476 put_page(tree_page);
2479 *tree_pagep = tree_page;
2480 return tree_rmap_item;
2484 rmap_item->address |= UNSTABLE_FLAG;
2485 rmap_item->address |= (ksm_scan.scan_round & SEQNR_MASK);
2486 DO_NUMA(rmap_item->nid = nid);
2487 rb_link_node(&rmap_item->node, parent, new);
2488 rb_insert_color(&rmap_item->node, root);
2490 #ifdef CONFIG_LKSM_FILTER
2491 rmap_item->region = ksm_scan.region;
2493 ksm_pages_unshared++;
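/*
 * The walk above is the classic "search or insert in one rbtree descent"
 * idiom. A minimal sketch of that idiom with an integer key (struct
 * knode and its field names are illustrative, not part of this file):
 */
#if 0 /* illustrative sketch, not compiled */
struct knode { struct rb_node node; int key; };

static struct knode *search_or_insert(struct rb_root *root, struct knode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct knode *cur = rb_entry(*link, struct knode, node);

		parent = *link;
		if (new->key < cur->key)
			link = &parent->rb_left;
		else if (new->key > cur->key)
			link = &parent->rb_right;
		else
			return cur;	/* match found: nothing inserted */
	}
	/* no match: the descent already found the insertion point */
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
	return NULL;
}
#endif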
2498 * stable_tree_append - add another rmap_item to the linked list of
2499 * rmap_items hanging off a given node of the stable tree, all sharing
2500 * the same ksm page.
2502 static void stable_tree_append(struct rmap_item *rmap_item,
2503 struct stable_node *stable_node,
2504 bool max_page_sharing_bypass)
2507 * rmap won't find this mapping if we don't insert the
2508 * rmap_item in the right stable_node
2509 * duplicate. page_migration could break later if rmap breaks,
2510 * so we can as well crash here. We really need to check for
2511 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
2512 * for other negative values: an underflow, if detected here
2513 * for the first time (and not when decreasing rmap_hlist_len),
2514 * would be a sign of memory corruption in the stable_node.
2516 BUG_ON(stable_node->rmap_hlist_len < 0);
2518 stable_node->rmap_hlist_len++;
2519 if (!max_page_sharing_bypass)
2520 /* possibly non fatal but unexpected overflow, only warn */
2521 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2522 ksm_max_page_sharing);
2524 rmap_item->head = stable_node;
2525 rmap_item->address |= STABLE_FLAG;
2526 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2528 if (rmap_item->hlist.next) {
2529 ksm_pages_sharing++;
2530 lksm_slot_nr_merged++;
2536 #ifdef CONFIG_LKSM_FILTER
2537 static inline void stable_tree_append_region(struct rmap_item *rmap_item,
2538 struct stable_node *stable_node,
2539 struct lksm_region *region,
2540 bool max_page_sharing_bypass)
2542 if (region->type == LKSM_REGION_FILE1
2543 || region->type == LKSM_REGION_FILE2) {
2545 unsigned long offset =
2546 (rmap_item->address - rmap_item->base_addr) >> PAGE_SHIFT;
2547 if (unlikely(region->filter_cnt == 0)
2548 && region->len > SINGLE_FILTER_LEN
2549 && !region->filter) {
2550 region->filter = kcalloc(region->len, sizeof(long), GFP_KERNEL);
2551 if (!region->filter) {
2552 ksm_err("failed to allocate memory for filter");
2556 if (region->len > SINGLE_FILTER_LEN)
2557 ret = test_and_set_bit(offset, region->filter);
2559 ret = test_and_set_bit(offset, &region->single_filter);
2561 region->filter_cnt++;
2563 region->merge_cnt++;
2564 region_share[region->type]++;
2566 stable_tree_append(rmap_item, stable_node, max_page_sharing_bypass);
2568 #endif /* CONFIG_LKSM_FILTER */
2571 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2572 * if not, compare checksum to previous and if it's the same, see if page can
2573 * be inserted into the unstable tree, or merged with a page already there and
2574 * both transferred to the stable tree.
2576 * @page: the page that we are searching identical page to.
2577 * @rmap_item: the reverse mapping into the virtual address of this page
2579 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2581 struct mm_struct *mm = rmap_item->mm;
2582 struct rmap_item *tree_rmap_item;
2583 struct page *tree_page = NULL;
2584 struct stable_node *stable_node;
2586 unsigned int checksum;
2588 bool max_page_sharing_bypass = false;
2590 stable_node = page_stable_node(page);
2592 if (stable_node->head != &migrate_nodes &&
2593 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2594 NUMA(stable_node->nid)) {
2595 stable_node_dup_del(stable_node);
2596 stable_node->head = &migrate_nodes;
2597 list_add(&stable_node->list, stable_node->head);
2599 if (stable_node->head != &migrate_nodes &&
2600 rmap_item->head == stable_node)
2603 * If it's a KSM fork, allow it to go over the sharing limit
2606 if (!is_page_sharing_candidate(stable_node))
2607 max_page_sharing_bypass = true;
2610 /* We first start with searching the page inside the stable tree */
2611 kpage = stable_tree_search(page);
2612 if (kpage == page && rmap_item->head == stable_node) {
2617 remove_rmap_item_from_tree(rmap_item);
2620 if (PTR_ERR(kpage) == -EBUSY)
2623 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2626 * The page was successfully merged:
2627 * add its rmap_item to the stable tree.
2630 #ifdef CONFIG_LKSM_FILTER
2631 stable_tree_append_region(rmap_item, page_stable_node(kpage),
2632 ksm_scan.region, max_page_sharing_bypass);
2634 stable_tree_append(rmap_item, page_stable_node(kpage),
2635 max_page_sharing_bypass);
2644 * LKSM: In LKSM, KSM runs in an event-triggered manner,
2645 * so scanning is performed much less frequently.
2646 * We just skip the checksum calculation for LKSM to catch scanning
2649 if (ksm_scan.scan_round < initial_round
2650 && !lksm_test_rmap_frozen(rmap_item)) {
2651 checksum = calc_checksum(page);
2652 if (rmap_item->oldchecksum != checksum) {
2653 rmap_item->oldchecksum = checksum;
2659 * Same checksum as an empty page. We attempt to merge it with the
2660 * appropriate zero page if the user enabled this via sysfs.
2662 if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2663 struct vm_area_struct *vma;
2665 down_read(&mm->mmap_lock);
2666 vma = find_mergeable_vma(mm, rmap_item->address);
2668 err = try_to_merge_one_page(vma, page,
2669 ZERO_PAGE(rmap_item->address));
2672 * If the vma is out of date, we do not need to
2677 up_read(&mm->mmap_lock);
2679 * In case of failure, the page was not really empty, so we
2680 * need to continue. Otherwise we're done.
2686 unstable_tree_search_insert(rmap_item, page, &tree_page);
2687 if (tree_rmap_item) {
2689 #ifdef CONFIG_LKSM_FILTER
2690 struct lksm_region *tree_region = tree_rmap_item->region;
2692 kpage = try_to_merge_two_pages(rmap_item, page,
2693 tree_rmap_item, tree_page);
2695 * If both pages we tried to merge belong to the same compound
2696 * page, then we actually ended up increasing the reference
2697 * count of the same compound page twice, and split_huge_page
2699 * Here we set a flag if that happened, and we use it later to
2700 * try split_huge_page again. Since we call put_page right
2701 * afterwards, the reference count will be correct and
2702 * split_huge_page should succeed.
2704 split = PageTransCompound(page)
2705 && compound_head(page) == compound_head(tree_page);
2706 put_page(tree_page);
2709 * The pages were successfully merged: insert new
2710 * node in the stable tree and add both rmap_items.
2713 stable_node = stable_tree_insert(kpage);
2715 #ifdef CONFIG_LKSM_FILTER
2716 stable_tree_append_region(tree_rmap_item, stable_node,
2717 tree_region, false);
2718 stable_tree_append_region(rmap_item, stable_node,
2719 ksm_scan.region, false);
2721 stable_tree_append(tree_rmap_item, stable_node,
2723 stable_tree_append(rmap_item, stable_node,
2730 * If we fail to insert the page into the stable tree,
2731 * we will have 2 virtual addresses that are pointing
2732 * to a ksm page left outside the stable tree,
2733 * in which case we need to break_cow on both.
2736 break_cow(tree_rmap_item);
2737 break_cow(rmap_item);
2738 #ifdef CONFIG_LKSM_FILTER
2739 tree_rmap_item->region = tree_region;
2740 rmap_item->region = ksm_scan.region;
2745 * We are here if we tried to merge two pages and
2746 * failed because they both belonged to the same
2747 * compound page. We will split the page now, but no
2748 * merging will take place.
2749 * We do not want to add the cost of a full lock; if
2750 * the page is locked, it is better to skip it and
2751 * perhaps try again later.
2753 if (!trylock_page(page))
2755 split_huge_page(page);
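/*
 * A minimal sketch (an illustrative helper, not a function in this file)
 * of the volatility check used above: a page is considered for the
 * unstable tree only once its checksum is unchanged since the previous
 * scan.
 */
#if 0 /* illustrative sketch, not compiled */
static bool page_seems_stable(struct rmap_item *rmap_item, struct page *page)
{
	unsigned int checksum = calc_checksum(page);

	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;	/* retry next scan */
		return false;
	}
	return true;	/* unchanged for one full scan interval */
}
#endif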
2761 static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
2762 struct rmap_item **rmap_list,
2765 struct rmap_item *rmap_item;
2767 while (*rmap_list) {
2768 rmap_item = *rmap_list;
2769 if ((rmap_item->address & PAGE_MASK) == addr) {
2770 if (lksm_test_mm_state(mm_slot, KSM_MM_FROZEN)
2771 && rmap_item->address & UNSTABLE_FLAG)
2772 lksm_set_rmap_frozen(rmap_item);
2774 lksm_clear_rmap_frozen(rmap_item);
2777 if (rmap_item->address > addr)
2779 *rmap_list = rmap_item->rmap_list;
2780 remove_rmap_item_from_tree(rmap_item);
2781 free_rmap_item(rmap_item);
2784 rmap_item = alloc_rmap_item();
2786 /* It has already been zeroed */
2787 rmap_item->mm = mm_slot->mm;
2788 rmap_item->address = addr;
2789 rmap_item->rmap_list = *rmap_list;
2790 #ifdef CONFIG_LKSM_FILTER
2791 rmap_item->region = ksm_scan.region;
2793 *rmap_list = rmap_item;
2794 if (lksm_test_mm_state(mm_slot, FROZEN_BIT))
2795 lksm_set_rmap_frozen(rmap_item);
2797 lksm_clear_rmap_frozen(rmap_item);
2803 * lksm_flush_removed_mm_list:
2804 * flush out, in a batch, the mm_slots removed by lksm_remove_mm_slot
2806 static void lksm_flush_removed_mm_list(void)
2808 struct mm_slot *head, *next, *slot;
2810 spin_lock(&ksm_mmlist_lock);
2811 head = list_first_entry_or_null(&ksm_scan.remove_mm_list,
2812 struct mm_slot, mm_list);
2814 spin_unlock(&ksm_mmlist_lock);
2818 list_del_init(&ksm_scan.remove_mm_list);
2819 spin_unlock(&ksm_mmlist_lock);
2821 if (!list_empty(&head->mm_list)) {
2822 list_for_each_entry_safe(slot, next, &head->mm_list, mm_list) {
2823 list_del(&slot->mm_list);
2827 remove_trailing_rmap_items(slot, &slot->rmap_list);
2828 #ifdef CONFIG_LKSM_FILTER
2829 lksm_region_ref_list_release(slot);
2831 clear_bit(MMF_VM_MERGEABLE, &slot->mm->flags);
2838 remove_trailing_rmap_items(head, &head->rmap_list);
2839 #ifdef CONFIG_LKSM_FILTER
2840 lksm_region_ref_list_release(head);
2842 clear_bit(MMF_VM_MERGEABLE, &head->mm->flags);
2848 * remove mm_slot from lists
2849 * LKSM defers releasing resources until the end of scanning
2851 static inline void lksm_remove_mm_slot(struct mm_slot *slot)
2853 hash_del(&slot->link);
2854 list_del_init(&slot->scan_list);
2855 list_move(&slot->mm_list, &ksm_scan.remove_mm_list);
2856 if (!RB_EMPTY_NODE(&slot->ordered_list)) {
2857 rb_erase(&slot->ordered_list, &vips_list);
2858 RB_CLEAR_NODE(&slot->ordered_list);
2862 /* caller must hold ksm_mmlist_lock */
2863 static struct mm_slot *lksm_get_unscanned_mm_slot(struct mm_slot *slot)
2865 struct mm_slot *next;
2867 list_for_each_entry_safe_continue(slot, next, &ksm_scan_head.scan_list,
2869 if (ksm_test_exit(slot->mm)) {
2870 if (lksm_test_mm_state(slot, KSM_MM_FROZEN))
2871 atomic_dec(&ksm_scan.nr_frozen);
2873 atomic_dec(&ksm_scan.nr_scannable);
2874 lksm_remove_mm_slot(slot);
2878 lksm_nr_scanned_slot++;
2885 /* caller must hold ksm_mmlist_lock */
2886 static void lksm_insert_mm_slot_ordered(struct mm_slot *slot)
2888 struct rb_root *root;
2889 struct rb_node **new;
2890 struct rb_node *parent;
2891 struct mm_slot *temp_slot;
2895 new = &root->rb_node;
2898 temp_slot = rb_entry(*new, struct mm_slot, ordered_list);
2901 if (slot->nr_merged > temp_slot->nr_merged)
2902 new = &parent->rb_left;
2904 new = &parent->rb_right;
2907 rb_link_node(&slot->ordered_list, parent, new);
2908 rb_insert_color(&slot->ordered_list, root);
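/*
 * Because a larger nr_merged descends to the left above, rb_first() on
 * vips_list yields the slot with the most merged pages. A hedged sketch
 * of a consumer walking the tree in that order (iteration only):
 */
#if 0 /* illustrative sketch, not compiled */
static void walk_vips_by_merit(struct rb_root *root)
{
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct mm_slot *slot =
			rb_entry(node, struct mm_slot, ordered_list);
		/* slots arrive in descending nr_merged order */
		(void)slot;
	}
}
#endif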
2911 #ifdef CONFIG_LKSM_FILTER
2913 * most VMAs grow upward, except the stack;
2914 * the given size must be the same as orig's.
2917 static inline void __lksm_copy_filter
2918 (unsigned long *orig, unsigned long *newer, unsigned long size)
2921 *(newer++) = *(orig++);
2924 static inline void lksm_copy_filter
2925 (struct lksm_region *region, unsigned long *filter)
2927 if (region->len > SINGLE_FILTER_LEN) {
2929 __lksm_copy_filter(region->filter, filter, region->len);
2931 __lksm_copy_filter(&region->single_filter, filter, region->len);
2934 static struct vm_area_struct *lksm_find_next_vma
2935 (struct mm_struct *mm, struct mm_slot *slot)
2937 struct vm_area_struct *vma;
2938 struct lksm_region *region;
2940 if (ksm_test_exit(mm))
2943 vma = find_vma(mm, ksm_scan.address);
2944 for (; vma; vma = vma->vm_next) {
2945 if (!(vma->vm_flags & VM_MERGEABLE))
2947 if (ksm_scan.address < vma->vm_start)
2948 ksm_scan.address = vma->vm_start;
2949 if (!vma->anon_vma) {
2950 ksm_scan.address = vma->vm_end;
2954 if (ksm_scan.cached_vma == vma)
2955 region = ksm_scan.region;
2957 region = lksm_find_region(vma);
2958 ksm_scan.cached_vma = vma;
2959 ksm_scan.vma_base_addr = vma->vm_start;
2962 if (!region || region->type == LKSM_REGION_CONFLICT)
2963 region = &unknown_region;
2964 else if (region->type != LKSM_REGION_HEAP
2965 && region->type != LKSM_REGION_CONFLICT
2966 && region->type != LKSM_REGION_UNKNOWN) {
2967 unsigned long size = lksm_region_size(vma->vm_start, vma->vm_end);
2968 unsigned long len = (size > BITS_PER_LONG) ? lksm_bitmap_size(size)
2969 : SINGLE_FILTER_LEN;
2971 if (len > SINGLE_FILTER_LEN && unlikely(region->len != len)) {
2973 if (region->conflict > 1) {
2974 region->type = LKSM_REGION_CONFLICT;
2975 if (region->len > SINGLE_FILTER_LEN)
2976 kfree(region->filter);
2977 region->filter = NULL;
2978 region->len = SINGLE_FILTER_LEN;
2979 /* conflicted regions will be unfiltered */
2980 region = &unknown_region;
2983 if (region->len < len) {
2984 unsigned long *filter;
2985 ksm_debug("size of region(%p) is changed: %lu -> %lu (size: %lu)",
2986 region, region->len, len, size);
2987 filter = kcalloc(len, sizeof(long), GFP_KERNEL);
2989 ksm_err("failed to allocate memory for filter");
2992 if (region->filter_cnt > 0)
2993 lksm_copy_filter(region, filter);
2994 if (region->len > SINGLE_FILTER_LEN)
2995 kfree(region->filter);
2996 region->filter = filter;
3002 if (ksm_scan.region != region)
3003 ksm_scan.region = region;
3009 static inline unsigned long lksm_get_next_filtered_address
3010 (struct lksm_region *region, unsigned long addr, unsigned long base)
3012 unsigned long next_offset, curr_offset, nbits;
3014 curr_offset = (addr - base) >> PAGE_SHIFT;
3015 nbits = region->len * BITS_PER_LONG;
3017 if (region->len > SINGLE_FILTER_LEN)
3018 next_offset = find_next_bit(region->filter, nbits, curr_offset);
3020 next_offset = find_next_bit(&region->single_filter,
3021 nbits, curr_offset);
3023 return (next_offset << PAGE_SHIFT) + base;
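/*
 * A worked example of the offset arithmetic above, assuming 4KiB pages
 * (PAGE_SHIFT == 12); the concrete numbers are illustrative:
 *
 *   base = vma_base_addr, addr = base + 5 pages
 *   curr_offset = (addr - base) >> PAGE_SHIFT = 5
 *   if the next set bit at or after bit 5 is bit 9, then
 *   next_offset = 9 and the returned address is
 *   (9 << PAGE_SHIFT) + base = base + 0x9000.
 */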
3026 #define lksm_region_skipped(region) \
3027 (region->len > 0 && !region->filter)
3029 static struct rmap_item *__scan_next_rmap_item(struct page **page,
3030 struct mm_struct *mm, struct mm_slot *slot)
3032 struct vm_area_struct *vma;
3033 struct rmap_item *rmap_item;
3038 vma = lksm_find_next_vma(mm, slot);
3040 while (vma && ksm_scan.address < vma->vm_end) {
3041 if (ksm_test_exit(mm)) {
3045 if (!lksm_test_mm_state(slot, KSM_MM_NEWCOMER)
3046 && !lksm_test_mm_state(slot, KSM_MM_FROZEN)
3047 && ksm_scan.region->type != LKSM_REGION_HEAP
3048 && ksm_scan.region->type != LKSM_REGION_UNKNOWN
3049 && lksm_region_mature(ksm_scan.scan_round, ksm_scan.region)
3050 && !lksm_region_skipped(ksm_scan.region)) {
3051 if (ksm_scan.region->filter_cnt > 0) {
3052 addr = lksm_get_next_filtered_address(ksm_scan.region,
3053 ksm_scan.address, ksm_scan.vma_base_addr);
3054 ksm_scan.address = addr;
3055 if (ksm_scan.address >= vma->vm_end)
3057 if (ksm_scan.address < vma->vm_start) {
3058 ksm_debug("address(%lu) is less than vm_start(%lu)",
3059 ksm_scan.address, vma->vm_start);
3063 ksm_scan.address = vma->vm_end;
3067 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
3068 if (IS_ERR_OR_NULL(*page)) {
3069 ksm_scan.address += PAGE_SIZE;
3073 if (PageAnon(*page)) {
3074 flush_anon_page(vma, *page, ksm_scan.address);
3075 flush_dcache_page(*page);
3076 rmap_item = get_next_rmap_item(slot,
3077 ksm_scan.rmap_list, ksm_scan.address);
3079 ksm_scan.rmap_list =
3080 &rmap_item->rmap_list;
3081 ksm_scan.address += PAGE_SIZE;
3087 ksm_scan.address += PAGE_SIZE;
3092 /* clean up a scanned region */
3093 ksm_scan.region = NULL;
3094 ksm_scan.cached_vma = NULL;
3095 ksm_scan.vma_base_addr = 0;
3097 return NULL; /* no scannable rmap item */
3100 #else /* CONFIG_LKSM_FILTER */
3102 static struct rmap_item *__scan_next_rmap_item(struct page **page,
3103 struct mm_struct *mm, struct mm_slot *slot)
3105 struct vm_area_struct *vma;
3106 struct rmap_item *rmap_item;
3108 if (ksm_test_exit(mm))
3111 vma = find_vma(mm, ksm_scan.address);
3113 for (; vma; vma = vma->vm_next) {
3114 if (!(vma->vm_flags & VM_MERGEABLE))
3116 if (ksm_scan.address < vma->vm_start)
3117 ksm_scan.address = vma->vm_start;
3119 ksm_scan.address = vma->vm_end;
3121 while (ksm_scan.address < vma->vm_end) {
3122 if (ksm_test_exit(mm))
3124 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
3125 if (IS_ERR_OR_NULL(*page)) {
3126 ksm_scan.address += PAGE_SIZE;
3130 if (PageAnon(*page)) {
3131 flush_anon_page(vma, *page, ksm_scan.address);
3132 flush_dcache_page(*page);
3133 rmap_item = get_next_rmap_item(slot,
3134 ksm_scan.rmap_list, ksm_scan.address);
3136 ksm_scan.rmap_list =
3137 &rmap_item->rmap_list;
3138 ksm_scan.address += PAGE_SIZE;
3144 ksm_scan.address += PAGE_SIZE;
3152 #endif /* CONFIG_LKSM_FILTER */
3154 static inline int sum_merge_win(int merge_win[], int len)
3158 for (i = 0; i < len; i++)
3159 sum += merge_win[i];
3163 static inline int lksm_account_mm_slot_nr_merge(struct mm_slot *slot, int nr_merged)
3165 slot->nr_merged_win[slot->merge_idx++] = nr_merged;
3166 if (slot->merge_idx == MERGE_WIN)
3167 slot->merge_idx = 0;
3168 slot->nr_merged = sum_merge_win(slot->nr_merged_win, MERGE_WIN);
3169 return slot->nr_merged;
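/*
 * The accounting above is a fixed-size ring buffer: each round's merge
 * count overwrites the oldest entry and nr_merged is the sum over the
 * window. A self-contained sketch (WIN stands in for MERGE_WIN, whose
 * actual value is defined elsewhere):
 */
#if 0 /* illustrative sketch, not compiled */
#define WIN 4
static int win[WIN], win_idx;

static int account_merges(int nr_merged_this_round)
{
	int i, sum = 0;

	win[win_idx++] = nr_merged_this_round;
	if (win_idx == WIN)
		win_idx = 0;		/* wrap around: ring buffer */
	for (i = 0; i < WIN; i++)
		sum += win[i];
	return sum;			/* merges over the last WIN rounds */
}
#endif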
3172 static struct rmap_item *scan_get_next_rmap_item(struct page **page)
3174 struct mm_struct *mm;
3175 struct mm_slot *slot;
3176 struct rmap_item *rmap_item;
3178 if (list_empty(&ksm_scan_head.scan_list))
3181 slot = ksm_scan.mm_slot;
3182 if (slot == &ksm_scan_head) {
3184 * A number of pages can hang around indefinitely on per-cpu
3185 * pagevecs, raised page count preventing write_protect_page
3186 * from merging them. Though it doesn't really matter much,
3187 * it is puzzling to see some stuck in pages_volatile until
3188 * other activity jostles them out, and they also prevented
3189 * LTP's KSM test from succeeding deterministically; so drain
3190 * them here (here rather than on entry to ksm_do_scan(),
3191 * so we don't IPI too often when pages_to_scan is set low).
3193 lru_add_drain_all();
3195 if (ksm_scan.scan_round < ksm_crawl_round) {
3196 ksm_scan.scan_round = ksm_crawl_round;
3197 root_unstable_tree[LKSM_NODE_ID] = RB_ROOT;
3200 spin_lock(&ksm_mmlist_lock);
3201 slot = lksm_get_unscanned_mm_slot(slot);
3202 ksm_scan.mm_slot = slot;
3203 spin_unlock(&ksm_mmlist_lock);
3206 * Although we tested list_empty() above, a racing __ksm_exit
3207 * of the last mm on the list may have removed it since then.
3209 if (slot == &ksm_scan_head)
3212 slot->elapsed = get_jiffies_64();
3214 ksm_scan.address = 0;
3215 ksm_scan.rmap_list = &slot->rmap_list;
3218 if (unlikely(!ksm_scan.rmap_list))
3219 ksm_scan.rmap_list = &slot->rmap_list;
3223 down_read(&mm->mmap_lock);
3224 rmap_item = __scan_next_rmap_item(page, mm, slot);
3228 up_read(&mm->mmap_lock);
3232 if (ksm_test_exit(mm)) {
3233 ksm_scan.address = 0;
3234 ksm_scan.rmap_list = &slot->rmap_list;
3237 * Nuke all the rmap_items that are above this current rmap:
3238 * because there were no VM_MERGEABLE vmas with such addresses.
3240 remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
3242 spin_lock(&ksm_mmlist_lock);
3243 ksm_scan.mm_slot = lksm_get_unscanned_mm_slot(slot);
3245 if (ksm_scan.address == 0) {
3247 * We've completed a full scan of all vmas, holding mmap_lock
3248 * throughout, and found no VM_MERGEABLE: so do the same as
3249 * __ksm_exit does to remove this mm from all our lists now.
3250 * This applies either when cleaning up after __ksm_exit
3251 * (but beware: we can reach here even before __ksm_exit),
3252 * or when all VM_MERGEABLE areas have been unmapped (and
3253 * mmap_lock then protects against race with MADV_MERGEABLE).
3255 up_read(&mm->mmap_lock);
3256 if (lksm_test_mm_state(slot, KSM_MM_FROZEN))
3257 atomic_dec(&ksm_scan.nr_frozen);
3259 atomic_dec(&ksm_scan.nr_scannable);
3260 lksm_remove_mm_slot(slot);
3261 spin_unlock(&ksm_mmlist_lock);
3263 lksm_slot_nr_merged = 0;
3264 lksm_slot_nr_broken = 0;
3266 int newcomer = 0, frozen = 0;
3268 up_read(&mm->mmap_lock);
3270 if (lksm_test_mm_state(slot, KSM_MM_NEWCOMER)) {
3271 lksm_clear_mm_state(slot, KSM_MM_NEWCOMER);
3274 if (lksm_test_mm_state(slot, KSM_MM_FROZEN)) {
3275 lksm_clear_mm_state(slot, KSM_MM_FROZEN);
3277 atomic_dec(&ksm_scan.nr_frozen);
3279 atomic_dec(&ksm_scan.nr_scannable);
3280 lksm_set_mm_state(slot, KSM_MM_SCANNED);
3282 list_del_init(&slot->scan_list);
3283 if (!RB_EMPTY_NODE(&slot->ordered_list)) {
3284 rb_erase(&slot->ordered_list, &vips_list);
3285 RB_CLEAR_NODE(&slot->ordered_list);
3287 if (lksm_account_mm_slot_nr_merge(slot, lksm_slot_nr_merged))
3288 lksm_insert_mm_slot_ordered(slot);
3290 slot->elapsed = get_jiffies_64() - slot->elapsed;
3291 spin_unlock(&ksm_mmlist_lock);
3293 if (ksm_test_exit(slot->mm))
3294 ksm_debug("slot(%p:%p) is exited", slot, slot->mm);
3296 ksm_debug("slot-%d(%s) %d merged %d scanned %lu pages "
3297 "(sum: %d) - (%s, %s) takes %u msecs (nr_scannable: %d)",
3298 task_pid_nr(slot->mm->owner), slot->mm->owner->comm,
3299 lksm_slot_nr_merged - lksm_slot_nr_broken, slot->nr_scans,
3300 slot->scanning_size, slot->nr_merged,
3301 newcomer ? "new" : "old",
3302 frozen ? "frozen" : "normal",
3303 jiffies_to_msecs(slot->elapsed),
3304 atomic_read(&ksm_scan.nr_scannable));
3306 lksm_slot_nr_merged = 0;
3307 lksm_slot_nr_broken = 0;
3310 /* Repeat until we've completed scanning the whole list */
3311 slot = ksm_scan.mm_slot;
3312 if (slot != &ksm_scan_head) {
3313 slot->elapsed = get_jiffies_64();
3321 * ksm_do_scan - the ksm scanner main worker function.
3322 * @scan_npages: number of pages we want to scan before we return.
3324 static int ksm_do_scan(unsigned int scan_npages)
3326 struct rmap_item *rmap_item;
3329 while (scan_npages-- && likely(!freezing(current))) {
3331 rmap_item = scan_get_next_rmap_item(&page);
3333 return 1; /* need sleep */
3334 cmp_and_merge_page(page, rmap_item);
3340 static int ksmd_should_run(void)
3342 return (ksm_run & KSM_RUN_MERGE) &&
3343 !list_empty(&ksm_scan_head.scan_list);
3346 static void lksm_scan_wrapup_wait(void)
3348 if (ksm_scan.scan_mode == LKSM_SCAN_PARTIAL) {
3349 if (ksm_thread_pages_to_scan != lksm_default_pages_to_scan)
3350 ksm_thread_pages_to_scan = lksm_default_pages_to_scan;
3351 } else if (ksm_scan.scan_mode == LKSM_SCAN_FULL)
3352 ksm_scan.nr_full_scan++;
3356 lksm_nr_scanned_slot = 0;
3358 ksm_scan.scan_mode = LKSM_SCAN_NONE;
3359 if (ksm_run & KSM_RUN_ONESHOT)
3360 atomic_set(&ksm_one_shot_scanning, LKSM_SCAN_NONE);
3362 lksm_clear_scan_state(ksm_state);
3364 wait_event_freezable(ksm_thread_wait,
3365 (lksm_check_scan_state(ksm_state) && ksmd_should_run())
3366 || kthread_should_stop());
3369 static int lksm_scan_thread(void *nothing)
3371 unsigned long begin, elapsed;
3372 unsigned int sleep_ms;
3373 int need_to_sleep = 0;
3376 set_user_nice(current, 5);
3378 ksm_debug("KSM_SCAND pid: %d", task_pid_nr(current));
3379 while (!kthread_should_stop()) {
3380 mutex_lock(&ksm_thread_mutex);
3381 wait_while_offlining();
3382 if (ksmd_should_run())
3383 need_to_sleep = ksm_do_scan(ksm_thread_pages_to_scan);
3384 mutex_unlock(&ksm_thread_mutex);
3388 if (need_to_sleep) {
3389 if (!ksmd_should_run()) {
3390 /* if no one left in scanning list, go to sleep for a while */
3391 lksm_flush_removed_mm_list();
3393 elapsed = get_jiffies_64() - begin;
3394 lksm_last_scan_time = elapsed;
3395 lksm_proc_scan_time = elapsed / lksm_nr_scanned_slot;
3397 ksm_debug("Scanning(%d) takes %u ms, %d/%d-pages "
3398 "are merged/broken (nr_scannable: %d, nr_frozen: %d)",
3399 lksm_nr_scanned_slot,
3400 jiffies_to_msecs(lksm_last_scan_time),
3401 lksm_nr_merged, lksm_nr_broken,
3402 atomic_read(&ksm_scan.nr_scannable),
3403 atomic_read(&ksm_scan.nr_frozen));
3405 lksm_scan_wrapup_wait();
3407 ksm_debug("Start %lu-th scanning: nr_scannable(%d) "
3409 ksm_scan.scan_round,
3410 atomic_read(&ksm_scan.nr_scannable),
3411 atomic_read(&ksm_scan.nr_frozen));
3413 if (ksm_scan.scan_mode == LKSM_SCAN_PARTIAL) {
3414 if (lksm_boosted_pages_to_scan !=
3415 ksm_thread_pages_to_scan) {
3416 ksm_thread_pages_to_scan = lksm_boosted_pages_to_scan;
3417 ksm_debug("set pages_to_scan to %u",
3418 lksm_boosted_pages_to_scan);
3421 begin = get_jiffies_64();
3423 /* new scanning targets are coming */
3424 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
3425 wait_event_interruptible_timeout(ksm_iter_wait,
3426 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
3427 msecs_to_jiffies(sleep_ms));
3430 } else if (ksmd_should_run()) {
3432 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
3433 wait_event_interruptible_timeout(ksm_iter_wait,
3434 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
3435 msecs_to_jiffies(sleep_ms));
3437 /* wait for ksm to be activated */
3438 if (likely(ksm_scan.scan_round > 0)) {
3439 lksm_flush_removed_mm_list();
3441 elapsed = get_jiffies_64() - begin;
3442 lksm_last_scan_time = elapsed;
3443 lksm_proc_scan_time = elapsed / lksm_nr_scanned_slot;
3445 ksm_debug("Scanning(%d) takes %u ms, %d/%d-pages are merged/broken",
3446 lksm_nr_scanned_slot, jiffies_to_msecs(lksm_last_scan_time),
3447 lksm_nr_merged, lksm_nr_broken);
3449 lksm_scan_wrapup_wait();
3451 wait_event_freezable(ksm_thread_wait,
3452 (lksm_check_scan_state(ksm_state) && ksmd_should_run())
3453 || kthread_should_stop());
3455 ksm_debug("Start %lu-th scanning: nr_scannable(%d) nr_frozen(%d)",
3456 ksm_scan.scan_round,
3457 atomic_read(&ksm_scan.nr_scannable),
3458 atomic_read(&ksm_scan.nr_frozen));
3460 if (ksm_scan.scan_mode == LKSM_SCAN_PARTIAL) {
3461 ksm_thread_pages_to_scan = lksm_boosted_pages_to_scan;
3462 ksm_debug("set pages_to_scan to %u",
3463 lksm_boosted_pages_to_scan);
3465 begin = get_jiffies_64();
3472 * lksm crawler declaration & definition part
3474 static struct task_struct *ksm_crawld;
3476 LIST_HEAD(frozen_task_list);
3477 DEFINE_SPINLOCK(frozen_task_lock);
3483 static atomic_t crawl_state;
3486 LKSM_TASK_SLOT_NONE = 0,
3487 LKSM_TASK_SLOT_REMOVED,
3490 static inline int lksm_count_and_clear_mm_slots
3491 (struct mm_slot *head, unsigned long *delay)
3494 struct mm_slot *slot;
3496 spin_lock(&ksm_mmlist_lock);
3497 list_for_each_entry(slot, &head->mm_list, mm_list) {
3498 if (list_empty(&slot->scan_list)) {
3499 lksm_clear_mm_state(slot, KSM_MM_SCANNED);
3501 slot->scanning_size = get_mm_counter(slot->mm, MM_ANONPAGES);
3502 list_add_tail(&slot->scan_list, &ksm_scan_head.scan_list);
3503 *delay += slot->elapsed;
3507 spin_unlock(&ksm_mmlist_lock);
3511 static int lksm_prepare_frozen_scan(void)
3513 int nr_frozen = 0, nr_added = 0, err;
3514 struct task_struct *task;
3515 struct task_slot *task_slot;
3516 struct mm_struct *mm;
3518 spin_lock(&frozen_task_lock);
3519 nr_frozen = atomic_read(&ksm_scan.nr_frozen);
3520 if (list_empty(&frozen_task_list)) {
3521 spin_unlock(&frozen_task_lock);
3525 ksm_debug("prepare frozen scan: round(%lu)", ksm_crawl_round);
3526 task_slot = list_first_entry_or_null(&frozen_task_list,
3527 struct task_slot, list);
3529 list_del(&task_slot->list);
3530 hash_del(&task_slot->hlist);
3531 spin_unlock(&frozen_task_lock);
3533 task = task_slot->task;
3534 if (ksm_run & KSM_RUN_UNMERGE) {
3535 put_task_struct(task);
3536 free_task_slot(task_slot);
3537 goto clean_up_abort;
3540 mm = get_task_mm(task);
3542 if (!mm || ksm_test_exit(mm))
3546 ksm_join_write_lock(mm, task_slot->frozen, err);
3552 free_task_slot(task_slot);
3553 put_task_struct(task);
3559 spin_lock(&frozen_task_lock);
3560 task_slot = list_first_entry_or_null(&frozen_task_list,
3561 struct task_slot, list);
3563 spin_unlock(&frozen_task_lock);
3564 atomic_add(nr_added, &ksm_scan.nr_frozen);
3566 return nr_added + nr_frozen;
3569 spin_lock(&frozen_task_lock);
3570 task_slot = list_first_entry_or_null(&frozen_task_list,
3571 struct task_slot, list);
3573 list_del(&task_slot->list);
3574 hash_del(&task_slot->hlist);
3575 spin_unlock(&frozen_task_lock);
3577 task = task_slot->task;
3578 put_task_struct(task);
3579 free_task_slot(task_slot);
3581 spin_lock(&frozen_task_lock);
3582 task_slot = list_first_entry_or_null(&frozen_task_list,
3583 struct task_slot, list);
3585 spin_unlock(&frozen_task_lock);
3590 /* this function makes a list of new processes and VIP processes */
3591 static int lksm_prepare_partial_scan(void)
3593 int ret, nr_frozen = 0, nr_added = 0, nr_scannable = 0;
3594 unsigned long delay = 0;
3595 unsigned long fault_cnt = 0;
3596 struct task_struct *task;
3597 struct mm_struct *mm;
3598 struct mm_slot *mm_slot;
3599 struct list_head recheck_list;
3600 struct rb_node *node;
3602 ksm_debug("prepare partial scan: round(%lu)", ksm_crawl_round);
3603 INIT_LIST_HEAD(&recheck_list);
3605 nr_frozen = lksm_prepare_frozen_scan();
3608 for_each_process(task) {
3609 if (task == current || task_pid_nr(task) == 0
3610 || check_short_task(task))
3612 if (ksm_run & KSM_RUN_UNMERGE) {
3617 mm = get_task_mm(task);
3620 ksm_join_write_lock(mm, KSM_TASK_UNFROZEN, ret);
3627 if (nr_added + nr_frozen >= lksm_max_vips) {
3628 ksm_debug("nr_scannable(%d) already fulfilled skip vips",
3629 nr_added + nr_frozen);
3633 spin_lock(&ksm_mmlist_lock);
3634 node = rb_first(&vips_list);
3636 ksm_debug("empty vip list");
3637 spin_unlock(&ksm_mmlist_lock);
3640 mm_slot = rb_entry(node, struct mm_slot, ordered_list);
3641 while (nr_scannable + nr_added + nr_frozen < lksm_max_vips) {
3642 if (ksm_run & KSM_RUN_UNMERGE) {
3643 spin_unlock(&ksm_mmlist_lock);
3649 if (ksm_test_exit(mm_slot->mm)) {
3650 if (!lksm_test_mm_state(mm_slot, KSM_MM_SCANNED))
3651 atomic_dec(&ksm_scan.nr_scannable);
3652 lksm_remove_mm_slot(mm_slot);
3655 if (!lksm_test_mm_state(mm_slot, KSM_MM_LISTED))
3658 /* pruning by fault count */
3659 fault_cnt = mm_slot->mm->owner->maj_flt + mm_slot->mm->owner->min_flt;
3660 if (mm_slot->fault_cnt == fault_cnt)
3663 mm_slot->fault_cnt = fault_cnt;
3664 mm_slot->scanning_size = get_mm_counter(mm_slot->mm, MM_ANONPAGES);
3665 mm_slot->nr_scans = 0;
3666 delay += mm_slot->elapsed;
3667 ksm_debug("slot(nr_merged: %d, scanning_size: %lu) task(%s)",
3668 mm_slot->nr_merged, mm_slot->scanning_size,
3669 mm_slot->mm->owner->comm);
3670 list_move_tail(&mm_slot->scan_list, &recheck_list);
3671 lksm_clear_mm_state(mm_slot, KSM_MM_SCANNED);
3672 #ifdef CONFIG_LKSM_FILTER
3673 /* to prevent mm_slot termination on __ksm_exit */
3674 lksm_set_mm_state(mm_slot, KSM_MM_PREPARED);
3679 node = rb_next(node);
3682 mm_slot = rb_entry(node, struct mm_slot, ordered_list);
3684 spin_unlock(&ksm_mmlist_lock);
3685 #ifdef CONFIG_LKSM_FILTER
3686 list_for_each_entry(mm_slot, &recheck_list, scan_list) {
3687 if (ksm_test_exit(mm_slot->mm))
3689 mm_slot->nr_scans = 0;
3690 /* check new maps */
3691 down_read(&mm_slot->mm->mmap_lock);
3692 ksm_join(mm_slot->mm, KSM_TASK_UNFROZEN);
3693 up_read(&mm_slot->mm->mmap_lock);
3697 spin_lock(&ksm_mmlist_lock);
3698 if (!list_empty(&recheck_list)) {
3699 #ifdef CONFIG_LKSM_FILTER
3700 list_for_each_entry(mm_slot, &recheck_list, scan_list)
3701 lksm_clear_mm_state(mm_slot, KSM_MM_PREPARED);
3703 list_splice(&recheck_list, &ksm_scan_head.scan_list);
3705 spin_unlock(&ksm_mmlist_lock);
3707 ksm_scan.scan_mode = LKSM_SCAN_PARTIAL;
3710 atomic_add(nr_scannable + nr_added, &ksm_scan.nr_scannable);
3711 ksm_debug("nr_frozen: %d nr_added: %d nr_scannable: %d - %d",
3712 nr_frozen, nr_added, nr_scannable, atomic_read(&ksm_scan.nr_scannable));
3714 return nr_frozen + nr_added + nr_scannable;
3717 static int lksm_prepare_full_scan(unsigned long *next_fullscan)
3719 int ret, nr_frozen = 0, nr_added = 0, nr_scannable = 0, nr_target;
3720 unsigned long delay = 0;
3721 struct task_struct *task;
3722 struct mm_struct *mm;
3724 ksm_debug("prepare full scan: round(%lu)", ksm_crawl_round);
3726 nr_frozen = lksm_prepare_frozen_scan();
3728 for_each_process(task) {
3729 if (task == current || task_pid_nr(task) == 0
3730 || check_short_task(task))
3732 if (ksm_run & KSM_RUN_UNMERGE) {
3737 mm = get_task_mm(task);
3740 ksm_join_write_lock(mm, KSM_TASK_UNFROZEN, ret);
3746 nr_scannable = lksm_count_and_clear_mm_slots(&ksm_mm_head, &delay);
3747 nr_target = nr_scannable + nr_added + nr_frozen;
3749 /* calculate crawler's sleep time */
3750 delay += msecs_to_jiffies((nr_frozen + nr_added) * lksm_proc_scan_time);
3751 *next_fullscan = jiffies + delay + msecs_to_jiffies(full_scan_interval);
3753 ksm_scan.scan_mode = LKSM_SCAN_FULL;
3756 atomic_add(nr_scannable + nr_added, &ksm_scan.nr_scannable);
3757 ksm_debug("nr_frozen: %d nr_added: %d nr_scannable: %d - %d",
3758 nr_frozen, nr_added, nr_scannable,
3759 atomic_read(&ksm_scan.nr_scannable));
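/*
 * A worked example of the crawler sleep computation above; the numbers
 * are illustrative: with nr_frozen + nr_added == 3 newly queued tasks,
 * lksm_proc_scan_time == 50 (ms per slot) and
 * full_scan_interval == 60000 (ms),
 *
 *   delay += msecs_to_jiffies(3 * 50);	(150 ms of expected scan time)
 *   *next_fullscan = jiffies + delay + msecs_to_jiffies(60000);
 *
 * i.e. the next full scan is pushed out by the expected time to scan the
 * slots just queued plus the fixed full-scan interval.
 */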
3764 static int lksm_do_wait_userspace_event(unsigned long sleep_time)
3766 wait_event_freezable(ksm_crawl_wait,
3767 kthread_should_stop() ||
3768 (atomic_read(&ksm_one_shot_scanning) > 0));
3769 return atomic_read(&ksm_one_shot_scanning);
3772 static int lksm_do_wait_frozen_event(unsigned long sleep_time)
3776 spin_lock_irq(&frozen_task_lock);
3777 if (list_empty(&frozen_task_list))
3778 /* wait until candidate list is filled */
3779 wait_event_interruptible_lock_irq_timeout(
3781 kthread_should_stop()
3782 || !list_empty(&frozen_task_list)
3783 || !list_empty(&ksm_scan_head.scan_list),
3784 frozen_task_lock, sleep_time);
3786 if (!list_empty(&frozen_task_list) ||
3787 !list_empty(&ksm_scan_head.scan_list))
3789 spin_unlock_irq(&frozen_task_lock);
3794 static inline void lksm_wake_up_scan_thread(void)
3796 ksm_debug("wake up lksm_scan_thread");
3797 lksm_set_scan_state(ksm_state);
3798 wake_up(&ksm_thread_wait);
3801 #define LKSM_CRAWL_FROZEN_EVENT_WAIT 100 /* 100ms */
3803 static void lksm_do_crawl_once
3804 (unsigned long *next_fscan, unsigned long sleep_time)
3809 /* crawler thread waits for a trigger event from userspace */
3810 scan_mode = lksm_do_wait_userspace_event(sleep_time);
3812 if (scan_mode == LKSM_SCAN_PARTIAL) {
3813 atomic_set(&crawl_state, KSM_CRAWL_RUN);
3814 msleep(LKSM_CRAWL_FROZEN_EVENT_WAIT);
3815 nr_added = lksm_prepare_partial_scan();
3816 } else if (scan_mode == LKSM_SCAN_FULL) {
3817 atomic_set(&crawl_state, KSM_CRAWL_RUN);
3818 nr_added = lksm_prepare_full_scan(next_fscan);
3824 lksm_wake_up_scan_thread();
3826 ksm_debug("No one can be scanned!");
3827 atomic_set(&ksm_one_shot_scanning, LKSM_SCAN_NONE);
3829 atomic_set(&crawl_state, KSM_CRAWL_SLEEP);
3832 static void lksm_do_crawl_periodic
3833 (unsigned long *next_fscan, unsigned long sleep_time)
3837 if (time_is_before_eq_jiffies(*next_fscan)) {
3838 atomic_set(&crawl_state, KSM_CRAWL_RUN);
3839 nr_added = lksm_prepare_full_scan(next_fscan);
3840 } else if (lksm_do_wait_frozen_event(sleep_time)) {
3841 atomic_set(&crawl_state, KSM_CRAWL_RUN);
3842 msleep(LKSM_CRAWL_FROZEN_EVENT_WAIT);
3843 nr_added = lksm_prepare_partial_scan();
3849 lksm_wake_up_scan_thread();
3850 atomic_set(&crawl_state, KSM_CRAWL_SLEEP);
3853 static int lksm_crawl_thread(void *data)
3856 unsigned long next_fscan = jiffies; /* next full scan */
3857 unsigned long sleep_time = crawler_sleep;
3860 set_user_nice(current, 5);
3862 ksm_debug("KSM_CRAWLD pid: %d", task_pid_nr(current));
3863 wait_event_freezable(ksm_crawl_wait,
3864 kthread_should_stop() || ksm_run & KSM_RUN_MERGE);
3866 while (!kthread_should_stop() && ksm_crawl_round < initial_round) {
3870 if ((ksm_run & KSM_RUN_MERGE) &&
3871 !lksm_check_scan_state(ksm_state) &&
3872 time_is_before_eq_jiffies(next_fscan)) {
3873 nr_added = lksm_prepare_full_scan(&next_fscan);
3875 lksm_wake_up_scan_thread();
3878 next_fscan = jiffies + sleep_time;
3881 wait_event_interruptible_timeout(ksm_crawl_wait,
3882 kthread_should_stop() || !lksm_check_scan_state(ksm_state),
3886 /* initialization loop done */
3887 full_scan_interval = DEFAULT_FULL_SCAN_INTERVAL;
3888 next_fscan = jiffies + msecs_to_jiffies(full_scan_interval);
3889 atomic_set(&crawl_state, KSM_CRAWL_SLEEP);
3891 /* normal operation loop */
3892 while (!kthread_should_stop()) {
3893 if (ksm_run & KSM_RUN_ONESHOT) {
3894 if (!lksm_check_scan_state(ksm_state))
3895 lksm_do_crawl_once(&next_fscan, sleep_time);
3897 /* wait until scanning done */
3898 wait_event_freezable(ksm_crawl_wait,
3899 !lksm_check_scan_state(ksm_state)
3900 || kthread_should_stop());
3901 } else if (ksm_run & KSM_RUN_MERGE) {
3902 if (!lksm_check_scan_state(ksm_state))
3903 lksm_do_crawl_periodic(&next_fscan, sleep_time);
3905 /* wait until scanning done */
3906 wait_event_interruptible_timeout(ksm_crawl_wait,
3907 !lksm_check_scan_state(ksm_state)
3908 || kthread_should_stop(),
3912 ksm_debug("ksm is not activated");
3913 wait_event_freezable(ksm_crawl_wait,
3914 kthread_should_stop() || (ksm_run & KSM_RUN_MERGE));
3921 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
3922 unsigned long end, int advice, unsigned long *vm_flags)
3924 struct mm_struct *mm = vma->vm_mm;
3928 case MADV_MERGEABLE:
3930 * Be somewhat over-protective for now!
3932 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
3933 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
3934 VM_HUGETLB | VM_MIXEDMAP))
3935 return 0; /* just ignore the advice */
3937 if (vma_is_dax(vma))
3941 if (*vm_flags & VM_SAO)
3945 if (*vm_flags & VM_SPARC_ADI)
3949 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
3950 err = __ksm_enter(mm, KSM_TASK_UNFROZEN);
3955 *vm_flags |= VM_MERGEABLE;
3958 case MADV_UNMERGEABLE:
3959 if (!(*vm_flags & VM_MERGEABLE))
3960 return 0; /* just ignore the advice */
3962 if (vma->anon_vma) {
3963 err = unmerge_ksm_pages(vma, start, end);
3968 *vm_flags &= ~VM_MERGEABLE;
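/*
 * ksm_madvise() is reached via the madvise(2) syscall. A minimal
 * userspace sketch (not kernel code, shown for illustration only) that
 * opts an anonymous mapping into KSM merging:
 */
#if 0 /* illustrative userspace sketch, not compiled with this file */
#include <sys/mman.h>
#include <stddef.h>

static void *alloc_mergeable(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return NULL;
	/* sets VM_MERGEABLE on the vma; ksmd may now merge these pages */
	if (madvise(p, len, MADV_MERGEABLE)) {
		munmap(p, len);
		return NULL;
	}
	return p;
}
#endif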
3975 static struct mm_slot *__ksm_enter_alloc_slot(struct mm_struct *mm, int frozen)
3977 struct mm_slot *mm_slot;
3979 mm_slot = alloc_mm_slot();
3983 if (frozen == KSM_TASK_FROZEN)
3984 lksm_set_mm_state(mm_slot, KSM_MM_FROZEN | KSM_MM_NEWCOMER);
3986 lksm_set_mm_state(mm_slot, KSM_MM_LISTED | KSM_MM_NEWCOMER);
3988 lksm_clear_mm_state(mm_slot, KSM_MM_SCANNED);
3989 RB_CLEAR_NODE(&mm_slot->ordered_list);
3990 mm_slot->fault_cnt = mm->owner->maj_flt + mm->owner->min_flt;
3991 mm_slot->scanning_size = get_mm_counter(mm, MM_ANONPAGES);
3993 spin_lock(&ksm_mmlist_lock);
3994 insert_to_mm_slots_hash(mm, mm_slot);
3996 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
3997 * insert just behind the scanning cursor, to let the area settle
3998 * down a little; when fork is followed by immediate exec, we don't
3999 * want ksmd to waste time setting up and tearing down an rmap_list.
4001 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
4002 * scanning cursor, otherwise KSM pages in newly forked mms will be
4003 * missed: then we might as well insert at the end of the list.
4005 if (ksm_run & KSM_RUN_UNMERGE)
4006 list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
4008 list_add_tail(&mm_slot->scan_list, &ksm_scan_head.scan_list);
4009 list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
4011 ksm_nr_added_process++;
4012 spin_unlock(&ksm_mmlist_lock);
4013 #ifdef CONFIG_LKSM_FILTER
4014 INIT_LIST_HEAD(&mm_slot->ref_list);
4016 set_bit(MMF_VM_MERGEABLE, &mm->flags);
4017 atomic_inc(&mm->mm_count);
4022 int __ksm_enter(struct mm_struct *mm, int frozen)
4024 if (!__ksm_enter_alloc_slot(mm, frozen))
4029 void __ksm_exit(struct mm_struct *mm)
4031 struct mm_slot *mm_slot;
4032 int easy_to_free = 0;
4035 * This process is exiting: if it's straightforward (as is the
4036 * case when ksmd was never running), free mm_slot immediately.
4037 * But if it's at the cursor or has rmap_items linked to it, use
4038 * mmap_lock to synchronize with any break_cows before pagetables
4039 * are freed, and leave the mm_slot on the list for ksmd to free.
4040 * Beware: ksm may already have noticed it exiting and freed the slot.
4043 spin_lock(&ksm_mmlist_lock);
4044 mm_slot = get_mm_slot(mm);
4046 spin_unlock(&ksm_mmlist_lock);
4050 if (ksm_scan.mm_slot != mm_slot) {
4051 #ifdef CONFIG_LKSM_FILTER
4052 if (lksm_test_mm_state(mm_slot, KSM_MM_PREPARED))
4053 goto deferring_free;
4055 if (!mm_slot->rmap_list) {
4056 hash_del(&mm_slot->link);
4057 list_del(&mm_slot->mm_list);
4058 list_del(&mm_slot->scan_list);
4059 if (!RB_EMPTY_NODE(&mm_slot->ordered_list)) {
4060 rb_erase(&mm_slot->ordered_list, &vips_list);
4061 RB_CLEAR_NODE(&mm_slot->ordered_list);
4065 lksm_remove_mm_slot(mm_slot);
4066 if (lksm_test_mm_state(mm_slot, KSM_MM_FROZEN))
4067 atomic_dec(&ksm_scan.nr_frozen);
4068 else if (!lksm_test_mm_state(mm_slot, KSM_MM_SCANNED))
4069 atomic_dec(&ksm_scan.nr_scannable);
4071 #ifdef CONFIG_LKSM_FILTER
4074 ksm_nr_added_process--;
4075 spin_unlock(&ksm_mmlist_lock);
4078 #ifdef CONFIG_LKSM_FILTER
4079 lksm_region_ref_list_release(mm_slot);
4081 free_mm_slot(mm_slot);
4082 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
4084 } else if (mm_slot) {
4085 down_write(&mm->mmap_lock);
4086 up_write(&mm->mmap_lock);
4090 struct page *ksm_might_need_to_copy(struct page *page,
4091 struct vm_area_struct *vma, unsigned long address)
4093 struct anon_vma *anon_vma = page_anon_vma(page);
4094 struct page *new_page;
4096 if (PageKsm(page)) {
4097 if (page_stable_node(page) &&
4098 !(ksm_run & KSM_RUN_UNMERGE))
4099 return page; /* no need to copy it */
4100 } else if (!anon_vma) {
4101 return page; /* no need to copy it */
4102 } else if (anon_vma->root == vma->anon_vma->root &&
4103 page->index == linear_page_index(vma, address)) {
4104 return page; /* still no need to copy it */
4106 if (!PageUptodate(page))
4107 return page; /* let do_swap_page report the error */
4109 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
4111 copy_user_highpage(new_page, page, address, vma);
4113 SetPageDirty(new_page);
4114 __SetPageUptodate(new_page);
4115 __SetPageLocked(new_page);
4121 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
4123 struct stable_node *stable_node;
4124 struct rmap_item *rmap_item;
4125 int search_new_forks = 0;
4127 VM_BUG_ON_PAGE(!PageKsm(page), page);
4130 * Rely on the page lock to protect against concurrent modifications
4131 * to that page's node of the stable tree.
4133 VM_BUG_ON_PAGE(!PageLocked(page), page);
4135 stable_node = page_stable_node(page);
4139 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
4140 struct anon_vma *anon_vma = rmap_item->anon_vma;
4141 struct anon_vma_chain *vmac;
4142 struct vm_area_struct *vma;
4145 anon_vma_lock_read(anon_vma);
4146 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
4153 /* Ignore the stable/unstable/sqnr flags */
4154 addr = rmap_item->address & ~KSM_FLAG_MASK;
4156 if (addr < vma->vm_start || addr >= vma->vm_end)
4159 * Initially we examine only the vma which covers this
4160 * rmap_item; but later, if there is still work to do,
4161 * we examine covering vmas in other mms: in case they
4162 * were forked from the original since ksmd passed.
4164 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
4167 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
4170 if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
4171 anon_vma_unlock_read(anon_vma);
4174 if (rwc->done && rwc->done(page)) {
4175 anon_vma_unlock_read(anon_vma);
4179 anon_vma_unlock_read(anon_vma);
4181 if (!search_new_forks++)
4185 bool reuse_ksm_page(struct page *page,
4186 struct vm_area_struct *vma,
4187 unsigned long address)
4189 #ifdef CONFIG_DEBUG_VM
4190 if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
4191 WARN_ON(!page_mapped(page)) ||
4192 WARN_ON(!PageLocked(page))) {
4193 dump_page(page, "reuse_ksm_page");
4198 if (PageSwapCache(page) || !page_stable_node(page))
4200 /* Prohibit parallel get_ksm_page() */
4201 if (!page_ref_freeze(page, 1))
4204 page_move_anon_rmap(page, vma);
4205 page->index = linear_page_index(vma, address);
4206 page_ref_unfreeze(page, 1);
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
		stable_node->kpfn = page_to_pfn(newpage);
		/*
		 * newpage->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to get_ksm_page() before it can see that oldpage->mapping
		 * has gone stale (or that PageSwapCache has been cleared).
		 */
		smp_wmb();
		set_page_stable_node(oldpage, NULL);
	}
}
#endif /* CONFIG_MIGRATION */
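/*
 * Illustration (comment only, a sketch rather than the verbatim code): the
 * smp_wmb() above pairs with a read-side barrier in get_ksm_page(), so a
 * reader that sees the stale oldpage->mapping is guaranteed to re-read a
 * kpfn at least as new as the one published before the mapping was cleared:
 *
 *	// writer (migration):
 *	stable_node->kpfn = page_to_pfn(newpage);
 *	smp_wmb();
 *	set_page_stable_node(oldpage, NULL);	// oldpage->mapping goes stale
 *
 *	// reader (get_ksm_page), against the stale-kpfn race:
 *	kpfn = READ_ONCE(stable_node->kpfn);
 *	page = pfn_to_page(kpfn);
 *	if (READ_ONCE(page->mapping) != expected_mapping) {
 *		smp_rmb();
 *		// if kpfn changed meanwhile, retry with the new value
 *	}
 */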
#ifdef CONFIG_MEMORY_HOTREMOVE
static void wait_while_offlining(void)
{
	while (ksm_run & KSM_RUN_OFFLINE) {
		mutex_unlock(&ksm_thread_mutex);
		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
			    TASK_UNINTERRUPTIBLE);
		mutex_lock(&ksm_thread_mutex);
	}
}

static bool stable_node_dup_remove_range(struct stable_node *stable_node,
					 unsigned long start_pfn,
					 unsigned long end_pfn)
{
	if (stable_node->kpfn >= start_pfn &&
	    stable_node->kpfn < end_pfn) {
		/*
		 * Don't get_ksm_page, page has already gone:
		 * which is why we keep kpfn instead of page*
		 */
		remove_node_from_stable_tree(stable_node);
		return true;
	}
	return false;
}

static bool stable_node_chain_remove_range(struct stable_node *stable_node,
					   unsigned long start_pfn,
					   unsigned long end_pfn,
					   struct rb_root *root)
{
	struct stable_node *dup;
	struct hlist_node *hlist_safe;

	if (!is_stable_node_chain(stable_node)) {
		VM_BUG_ON(is_stable_node_dup(stable_node));
		return stable_node_dup_remove_range(stable_node, start_pfn,
						    end_pfn);
	}

	hlist_for_each_entry_safe(dup, hlist_safe,
				  &stable_node->hlist, hlist_dup) {
		VM_BUG_ON(!is_stable_node_dup(dup));
		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
	}
	if (hlist_empty(&stable_node->hlist)) {
		free_stable_node_chain(stable_node, root);
		return true; /* notify caller that tree was rebalanced */
	} else
		return false;
}

static void ksm_check_stable_tree(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	struct stable_node *stable_node, *next;
	struct rb_node *node;
	int nid;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		node = rb_first(root_stable_tree + nid);
		while (node) {
			stable_node = rb_entry(node, struct stable_node, node);
			if (stable_node_chain_remove_range(stable_node,
							   start_pfn, end_pfn,
							   root_stable_tree +
							   nid))
				node = rb_first(root_stable_tree + nid);
			else
				node = rb_next(node);
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			remove_node_from_stable_tree(stable_node);
		cond_resched();
	}
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
		 * and remove_all_stable_nodes() while memory is going offline:
		 * it is unsafe for them to touch the stable tree at this time.
		 * But unmerge_ksm_pages(), rmap lookups and other entry points
		 * which do not need the ksm_thread_mutex are all safe.
		 */
		mutex_lock(&ksm_thread_mutex);
		ksm_run |= KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree,
		 * otherwise get_ksm_page() might later try to access a
		 * non-existent struct page.
		 */
		ksm_check_stable_tree(mn->start_pfn,
				      mn->start_pfn + mn->nr_pages);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		ksm_run &= ~KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);

		smp_mb();	/* wake_up_bit advises this */
		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
		break;
	}
	return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)
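/*
 * Illustration (comment only, not compiled): what KSM_ATTR(sleep_millisecs)
 * expands to, so the sysfs wiring below is easier to follow:
 *
 *	static struct kobj_attribute sleep_millisecs_attr =
 *		__ATTR(sleep_millisecs, 0644,
 *		       sleep_millisecs_show, sleep_millisecs_store);
 *
 * i.e. a mode-0644 sysfs file named "sleep_millisecs" whose read and write
 * handlers are the _show/_store pair defined next.
 */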
static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;
	wake_up_interruptible(&ksm_iter_wait);

	return count;
}
KSM_ATTR(sleep_millisecs);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = kstrtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);
static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	if (ksm_run & KSM_RUN_ONESHOT)
		return sprintf(buf, "%u\n", KSM_RUN_ONESHOT);
	else
		return sprintf(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = kstrtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_ONESHOT)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		if (flags == KSM_RUN_ONESHOT)
			ksm_run = KSM_RUN_MERGE | KSM_RUN_ONESHOT;
		else
			ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (ksm_run & KSM_RUN_MERGE) {
		ksm_debug("activate KSM");
		wake_up(&ksm_crawl_wait);
	}

	return count;
}
KSM_ATTR(run);
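/*
 * Illustration (comment only, not compiled): driving the knob above from
 * userspace. The path follows from the "ksm" attribute group registered on
 * mm_kobj at the bottom of this file; the snippet itself is a hypothetical
 * example with error handling elided.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/mm/ksm/run", O_WRONLY);
 *		write(fd, "1", 1);	// KSM_RUN_MERGE: start merging
 *		// write(fd, "2", 1);	// KSM_RUN_UNMERGE: break COW, stop
 *		close(fd);
 *		return 0;
 *	}
 */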
#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long knob;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_merge_across_nodes != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else if (root_stable_tree == one_stable_tree) {
			struct rb_root *buf;
			/*
			 * This is the first time that we switch away from the
			 * default of merging across nodes: must now allocate
			 * a buffer to hold as many roots as may be needed.
			 * Allocate stable and unstable together:
			 * MAXSMP NODES_SHIFT 10 will use 16kB.
			 */
			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
				      GFP_KERNEL);
			/* An RB_ROOT is all zeroes, so kcalloc() initialized the trees */
			if (!buf)
				err = -ENOMEM;
			else {
				root_stable_tree = buf;
				root_unstable_tree = buf + nr_node_ids;
				/* Stable tree is empty but not the unstable */
				root_unstable_tree[0] = one_unstable_tree[0];
			}
		}
		if (!err) {
			ksm_merge_across_nodes = knob;
			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif
static ssize_t use_zero_pages_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_use_zero_pages);
}

static ssize_t use_zero_pages_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;
	ksm_use_zero_pages = value;
	return count;
}
KSM_ATTR(use_zero_pages);
static ssize_t max_page_sharing_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_max_page_sharing);
}

static ssize_t max_page_sharing_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	int knob;

	err = kstrtoint(buf, 10, &knob);
	if (err)
		return err;
	/*
	 * When a KSM page is created it is shared by 2 mappings. This
	 * being a signed comparison, it implicitly verifies it's not
	 * negative.
	 */
	if (knob < 2)
		return -EINVAL;

	if (READ_ONCE(ksm_max_page_sharing) == knob)
		return count;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_max_page_sharing != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else
			ksm_max_page_sharing = knob;
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(max_page_sharing);
static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);
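/*
 * Worked example for the statistic above (illustrative numbers): with
 * ksm_rmap_items = 1000, pages_shared = 100, pages_sharing = 600 and
 * pages_unshared = 250, pages_volatile = 1000 - 100 - 600 - 250 = 50,
 * i.e. 50 tracked pages are still changing too often to be merged.
 * Because the counters are sampled without locking, the subtraction can
 * transiently go negative, which the clamp above conceals.
 */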
static ssize_t stable_node_dups_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_stable_node_dups);
}
KSM_ATTR_RO(stable_node_dups);

static ssize_t stable_node_chains_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_stable_node_chains);
}
KSM_ATTR_RO(stable_node_chains);

static ssize_t
stable_node_chains_prune_millisecs_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
}

static ssize_t
stable_node_chains_prune_millisecs_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;
	ksm_stable_node_chains_prune_millisecs = msecs;
	return count;
}
KSM_ATTR(stable_node_chains_prune_millisecs);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.nr_full_scan);
}
KSM_ATTR_RO(full_scans);
static ssize_t scanning_process_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_nr_added_process);
}
KSM_ATTR_RO(scanning_process);

static ssize_t full_scan_interval_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", full_scan_interval);
}

static ssize_t full_scan_interval_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long interval;

	err = kstrtoul(buf, 10, &interval);
	if (err || interval > UINT_MAX)
		return -EINVAL;
	full_scan_interval = interval;
	return count;
}
KSM_ATTR(full_scan_interval);
static ssize_t one_shot_scanning_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", atomic_read(&ksm_one_shot_scanning));
}

static ssize_t one_shot_scanning_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err, val;

	err = kstrtoint(buf, 10, &val);
	if (err || (val != LKSM_SCAN_PARTIAL && val != LKSM_SCAN_FULL)) {
		ksm_err("wrong value: %d", val);
		return -EINVAL;
	}

	if (!atomic_cmpxchg(&ksm_one_shot_scanning, LKSM_SCAN_NONE, val)) {
		wake_up(&ksm_crawl_wait);
		return count;
	}
	ksm_debug("ksm is still scanning");
	return -EINVAL;
}
KSM_ATTR(one_shot_scanning);
static ssize_t scan_boost_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", lksm_boosted_pages_to_scan);
}

static ssize_t scan_boost_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err, val;

	err = kstrtoint(buf, 10, &val);
	/* lksm_boosted_pages_to_scan must be in the range from 100 to 10000 */
	if (err || val < 100 || val > 10000) {
		ksm_err("wrong value: %d", val);
		return -EINVAL;
	}

	lksm_boosted_pages_to_scan = (unsigned int) val;
	return count;
}
KSM_ATTR(scan_boost);
#ifdef CONFIG_LKSM_FILTER
static ssize_t nr_regions_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", lksm_nr_regions);
}
KSM_ATTR_RO(nr_regions);

static ssize_t region_share_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%d %s:%d %s:%d %s:%d %s:%d\n",
		       region_type_str[0], region_share[0],
		       region_type_str[1], region_share[1],
		       region_type_str[2], region_share[2],
		       region_type_str[3], region_share[3],
		       region_type_str[4], region_share[4]);
}
KSM_ATTR_RO(region_share);
#endif /* CONFIG_LKSM_FILTER */
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&scanning_process_attr.attr,
	&full_scan_interval_attr.attr,
	&one_shot_scanning_attr.attr,
	&scan_boost_attr.attr,
#ifdef CONFIG_LKSM_FILTER
	&nr_regions_attr.attr,
	&region_share_attr.attr,
#endif
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_LKSM_FILTER
static inline void init_lksm_region
(struct lksm_region *region, unsigned long ino, int type, unsigned long len)
{
	region->ino = ino;
	region->type = type;
	region->len = len;
}

/* if the region is newly allocated, it is added to the region hash table. */
static void lksm_insert_region
(struct lksm_region **region, unsigned long ino,
struct vm_area_struct *vma, int type)
{
	int need_hash_add = 0;
	unsigned long len, size;
	struct lksm_region *next = NULL;
	unsigned long flags;

	size = lksm_region_size(vma->vm_start, vma->vm_end);
	len = (size > BITS_PER_LONG) ? lksm_bitmap_size(size) : SINGLE_FILTER_LEN;

	if (!(*region)) {
		*region = kzalloc(sizeof(struct lksm_region), GFP_KERNEL);
		if (!(*region)) {
			ksm_err("region allocation failed");
			return;
		}
		init_lksm_region(*region, ino, LKSM_REGION_FILE1, len);
		(*region)->scan_round = ksm_crawl_round;
		atomic_set(&(*region)->refcount, 0);
		lksm_nr_regions++;
		need_hash_add = 1;
	}

	if (!(*region)->next && type == LKSM_REGION_FILE2) {
		next = kzalloc(sizeof(struct lksm_region), GFP_KERNEL);
		if (!next) {
			if (need_hash_add)
				kfree(*region);
			*region = NULL;
			ksm_err("region allocation failed");
			return;
		}
		init_lksm_region(next, ino, LKSM_REGION_FILE2, len);
		atomic_set(&next->refcount, 0);
		next->scan_round = ksm_crawl_round;
		lksm_nr_regions++;
	}

	if (need_hash_add || next) {
		spin_lock_irqsave(&lksm_region_lock, flags);
		if (need_hash_add)
			hash_add(lksm_region_hash, &(*region)->hnode, ino);
		if (next) {
			(*region)->next = next;
			next->prev = *region;
		}
		spin_unlock_irqrestore(&lksm_region_lock, flags);
	}
}
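/*
 * Sizing example for the filter length computed above (assuming that
 * lksm_region_size() returns the VMA length in pages and that
 * lksm_bitmap_size() converts a bit count into a number of longs): a 4MB
 * VMA with 4kB pages spans 1024 pages; since 1024 > BITS_PER_LONG, the
 * region gets a 1024-bit filter, i.e. 16 unsigned longs on a 64-bit
 * kernel, while a VMA of at most BITS_PER_LONG pages falls back to
 * SINGLE_FILTER_LEN.
 */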
static inline struct lksm_region *lksm_hash_find_region(unsigned long ino)
{
	struct lksm_region *region;

	hash_for_each_possible(lksm_region_hash, region, hnode, ino)
		if (region->ino == ino)
			return region;
	return NULL;
}
static void lksm_register_file_anon_region
(struct mm_slot *slot, struct vm_area_struct *vma)
{
	struct lksm_region *region;
	struct file *file = NULL;
	struct inode *inode;
	unsigned long flags;
	int type;

	if (vma->vm_file) {
		file = vma->vm_file;
		type = LKSM_REGION_FILE1;
	} else if (vma->vm_prev) {
		/* LKSM should deal with .NET libraries */
		struct vm_area_struct *prev = vma->vm_prev;

		if (prev->vm_flags & VM_MERGEABLE && prev->vm_file) {
			/* Linux standard map structure */
			file = prev->vm_file;
			type = LKSM_REGION_FILE2;
		} else {
			/* DLL map structure */
			int i = 0;
			bool find = false;

			while (i <= LKSM_REGION_ITER_MAX && prev) {
				if (!file)
					file = prev->vm_file;
				else if (prev->vm_file && file != prev->vm_file)
					break;
				if (prev->vm_flags & VM_MERGEABLE && file) {
					find = true;
					break;
				}
				prev = prev->vm_prev;
				i++;
			}
			if (find)
				type = LKSM_REGION_FILE2;
			else
				file = NULL;
		}
	} else
		return;

	if (!file)
		return;

	inode = file_inode(file);

	spin_lock_irqsave(&lksm_region_lock, flags);
	region = lksm_hash_find_region(inode->i_ino);
	spin_unlock_irqrestore(&lksm_region_lock, flags);

	if (!region || (type == LKSM_REGION_FILE2 && !region->next))
		lksm_insert_region(&region, inode->i_ino, vma, type);
	if (region) {
		if (type == LKSM_REGION_FILE1)
			lksm_region_ref_append(slot, region);
		else if (region->next)
			lksm_region_ref_append(slot, region->next);
	}
}
static struct lksm_region *lksm_find_region(struct vm_area_struct *vma)
{
	struct lksm_region *region = NULL;
	struct file *file = NULL;
	struct inode *inode;
	unsigned long ino = 0, flags;
	int type;

	if (is_heap(vma))
		return &heap_region;
	else if (is_stack(vma))
		return NULL;
	else if (!vma->anon_vma)
		return NULL;
	else if (is_exec(vma))
		return NULL;

	if (vma->vm_file) {
		/* check thread stack */
		file = vma->vm_file;
		type = LKSM_REGION_FILE1;
	} else if (vma->vm_prev) {
		struct vm_area_struct *prev = vma->vm_prev;

		if (prev->vm_flags & VM_MERGEABLE && prev->vm_file) {
			/* Linux standard map structure */
			file = prev->vm_file;
			type = LKSM_REGION_FILE2;
		} else {
			/* DLL map structure */
			int i = 0;
			bool find = false;

			while (i <= LKSM_REGION_ITER_MAX && prev) {
				if (!file)
					file = prev->vm_file;
				else if (prev->vm_file && file != prev->vm_file)
					break;
				if (prev->vm_flags & VM_MERGEABLE && file) {
					find = true;
					break;
				}
				prev = prev->vm_prev;
				i++;
			}
			if (find)
				type = LKSM_REGION_FILE2;
			else
				file = NULL;
		}
	} else
		return &unknown_region;

	if (!file)
		return &unknown_region;

	inode = file_inode(file);
	ino = inode->i_ino;

	if (ksm_scan.region && ksm_scan.region->ino == ino) {
		if (ksm_scan.region->type == type)
			return ksm_scan.region;
		else if (ksm_scan.region->type == LKSM_REGION_FILE1)
			region = ksm_scan.region;
	} else {
		spin_lock_irqsave(&lksm_region_lock, flags);
		region = lksm_hash_find_region(ino);
		spin_unlock_irqrestore(&lksm_region_lock, flags);
	}

	if (region && type == LKSM_REGION_FILE2) {
		if (!region->next) {
			lksm_insert_region(&region, ino, vma, type);
			BUG_ON(!region->next);
		}
		return region->next;
	}
	return region;
}
#endif /* CONFIG_LKSM_FILTER */
static inline int __lksm_remove_candidate(struct task_struct *task)
{
	int ret = LKSM_TASK_SLOT_NONE;
	struct task_slot *slot = get_task_slot(task);

	if (slot) {
		list_del(&slot->list);
		hash_del(&slot->hlist);
		free_task_slot(slot);
		ret = LKSM_TASK_SLOT_REMOVED;
	}
	return ret;
}
/* called by ksm_exit */
void lksm_remove_candidate(struct mm_struct *mm)
{
	int ret;

	if (!mm->owner) {
		struct mm_slot *mm_slot;

		spin_lock(&ksm_mmlist_lock);
		mm_slot = get_mm_slot(mm);
		if (mm_slot && mm_slot != ksm_scan.mm_slot) {
			list_move(&mm_slot->mm_list, &ksm_scan.remove_mm_list);
			if (lksm_test_mm_state(mm_slot, KSM_MM_FROZEN))
				atomic_dec(&ksm_scan.nr_frozen);
			else if (!lksm_test_mm_state(mm_slot, KSM_MM_SCANNED))
				atomic_dec(&ksm_scan.nr_scannable);
		}
		spin_unlock(&ksm_mmlist_lock);
		return;
	}

	spin_lock(&frozen_task_lock);
	ret = __lksm_remove_candidate(mm->owner);
	spin_unlock(&frozen_task_lock);
	if (ret == LKSM_TASK_SLOT_REMOVED)
		put_task_struct(mm->owner);
}
static int lksm_task_frozen(struct task_struct *task)
{
	int need_wakeup = 0;
	struct mm_struct *mm = task->mm;
	struct mm_slot *mm_slot;
	struct task_slot *task_slot;

	if (mm && test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
		/* a mergeable task becoming frozen */
		spin_lock(&ksm_mmlist_lock);
		mm_slot = get_mm_slot(mm);
		BUG_ON(!mm_slot);

		if (mm_slot != ksm_scan.mm_slot
		    && lksm_test_mm_state(mm_slot, KSM_MM_LISTED)) {
			if (list_empty(&mm_slot->scan_list))
				list_add_tail(&mm_slot->scan_list,
					      &ksm_scan_head.scan_list);
			if (!lksm_test_mm_state(mm_slot, KSM_MM_SCANNED))
				atomic_dec(&ksm_scan.nr_scannable);
			lksm_clear_mm_state(mm_slot, KSM_MM_LISTED);
			lksm_set_mm_state(mm_slot, KSM_MM_FROZEN);
			atomic_inc(&ksm_scan.nr_frozen);

			need_wakeup = (ksm_run == KSM_RUN_MERGE);
			ksm_debug("lksm_task_frozen called for task(%s): %p (nr_frozen: %d)",
				  task->comm, task,
				  atomic_read(&ksm_scan.nr_frozen));
		}
		spin_unlock(&ksm_mmlist_lock);
	} else {
		task_slot = alloc_task_slot();
		if (!task_slot) {
			ksm_err("[ksm_tizen] Cannot allocate memory for task_slot");
			return -ENOMEM;
		}

		task_slot->task = task;
		task_slot->frozen = KSM_TASK_FROZEN;
		task_slot->inserted = jiffies;

		get_task_struct(task);

		spin_lock(&frozen_task_lock);
		list_add(&task_slot->list, &frozen_task_list);
		insert_to_task_slots_hash(task_slot);
		spin_unlock(&frozen_task_lock);

		need_wakeup = (ksm_run == KSM_RUN_MERGE);
		ksm_debug("task-%d(%s) is added to frozen task list",
			  task_pid_nr(task), task->comm);
	}

	if (need_wakeup && atomic_read(&crawl_state) == KSM_CRAWL_SLEEP)
		wake_up(&ksm_crawl_wait);

	return 0;
}
static int lksm_task_thawed(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct mm_slot *mm_slot;
	struct task_slot *task_slot;

	if (mm && test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
		/* a frozen task becoming thawed */
		spin_lock(&ksm_mmlist_lock);
		mm_slot = get_mm_slot(mm);
		BUG_ON(!mm_slot);

		if (lksm_test_mm_state(mm_slot, KSM_MM_FROZEN)
		    && ksm_scan.mm_slot != mm_slot) {
			if (!lksm_test_mm_state(mm_slot, KSM_MM_SCANNED))
				atomic_inc(&ksm_scan.nr_scannable);

			list_del_init(&mm_slot->scan_list);
			lksm_clear_mm_state(mm_slot, KSM_MM_FROZEN);
			lksm_set_mm_state(mm_slot, KSM_MM_LISTED);
			atomic_dec(&ksm_scan.nr_frozen);
			ksm_debug("nr_frozen: %d nr_scannable: %d",
				  atomic_read(&ksm_scan.nr_frozen),
				  atomic_read(&ksm_scan.nr_scannable));
		}
		spin_unlock(&ksm_mmlist_lock);
	} else {
		/* just remove the task slot; it will be taken care of by full_scan */
		spin_lock(&frozen_task_lock);
		task_slot = get_task_slot(task);
		if (task_slot) {
			list_del(&task_slot->list);
			hash_del(&task_slot->hlist);
		}
		spin_unlock(&frozen_task_lock);

		if (task_slot) {
			free_task_slot(task_slot);
			put_task_struct(task);
			ksm_debug("task-%d(%s) is removed from frozen task list",
				  task_pid_nr(task), task->comm);
		}
	}

	return 0;
}
/*
 * lksm_hint: a hook for constructing the candidate list.
 * This function cannot sleep.
 */
int lksm_hint(struct task_struct *task, int frozen)
{
	/*
	 * If lksm_hint is called by ksm_fork, the task does not yet have
	 * its own mm_struct, because mm_struct initialization has not
	 * completed. Thus, we skip that check and put the task straight
	 * into the candidate list.
	 */
	if (frozen == KSM_TASK_FROZEN)
		return lksm_task_frozen(task);
	else if (frozen == KSM_TASK_THAWED)
		return lksm_task_thawed(task);
	else
		return 0;
}
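/*
 * Illustration (an assumption, not the actual freezer code): a cgroup
 * freezer-style hook could feed candidates to LKSM like this, where "task"
 * is the task being frozen or thawed:
 *
 *	if (freezing)
 *		lksm_hint(task, KSM_TASK_FROZEN);	// queue for scanning
 *	else
 *		lksm_hint(task, KSM_TASK_THAWED);	// drop from candidates
 *
 * Any other value of the second argument is ignored and returns 0.
 */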
static void __init lksm_init(void)
{
	ksm_crawld = kthread_create(lksm_crawl_thread, NULL, "ksm_crawld");

	if (ksm_crawld == NULL) {
		printk(KERN_ALERT "failed to create ksm crawler daemon\n");
		return;
	}

	atomic_set(&ksm_scan.nr_frozen, 0);
	atomic_set(&ksm_scan.nr_scannable, 0);
	atomic_set(&ksm_state, 0);
	INIT_LIST_HEAD(&ksm_scan.remove_mm_list);

	crawler_sleep = msecs_to_jiffies(1000);
#ifdef CONFIG_LKSM_FILTER
	init_lksm_region(&heap_region, 0, LKSM_REGION_HEAP, 0);
	heap_region.merge_cnt = 0;
	heap_region.filter_cnt = 0;
	heap_region.filter = NULL;

	init_lksm_region(&unknown_region, 0, LKSM_REGION_UNKNOWN, 0);
	unknown_region.merge_cnt = 0;
	unknown_region.filter_cnt = 0;
	unknown_region.filter = NULL;

	spin_lock_init(&lksm_region_lock);
#endif /* CONFIG_LKSM_FILTER */
	wake_up_process(ksm_crawld);
}
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(lksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
#endif /* CONFIG_SYSFS */
	lksm_init();

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);
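/*
 * Usage illustration (comment only, not compiled into the kernel): once
 * this module is initialized, the LKSM-specific knobs appear next to the
 * standard KSM ones under /sys/kernel/mm/ksm/. A hypothetical userspace
 * helper could trigger a one-shot full scan like this:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/ksm/one_shot_scanning", "w");
 *		if (!f)
 *			return 1;
 *		fprintf(f, "%d", 2);	// assumes LKSM_SCAN_FULL == 2
 *		fclose(f);
 *		return 0;
 *	}
 *
 * LKSM_SCAN_FULL is defined earlier in this file; the literal "2" here is
 * an assumption for illustration only.
 */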