#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
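
/*
 * Illustrative user-space sketch (not part of this header): a process opts
 * an anonymous mapping into KSM scanning with madvise(MADV_MERGEABLE),
 * which reaches ksm_madvise() above.  Pages are only actually merged once
 * ksmd is running (e.g. "echo 1 > /sys/kernel/mm/ksm/run").  The helper
 * name demo_make_mergeable() is made up for this example.
 */
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* value from asm-generic/mman-common.h */
#endif

static int demo_make_mergeable(size_t len)
{
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return -1;
	/* Ask KSM to scan this range and merge identical pages within it. */
	return madvise(buf, len, MADV_MERGEABLE);
}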

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
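
/*
 * Illustrative sketch (not kernel code): a stand-alone model of the
 * pointer-tagging trick used by PageKsm()/page_stable_node() above.
 * The names demo_node, DEMO_MAPPING_* and demo_main() are made up; the
 * flag values mirror PAGE_MAPPING_ANON (0x1) and PAGE_MAPPING_KSM (0x2).
 */
#include <assert.h>

#define DEMO_MAPPING_ANON	0x1UL
#define DEMO_MAPPING_KSM	0x2UL
#define DEMO_MAPPING_FLAGS	(DEMO_MAPPING_ANON | DEMO_MAPPING_KSM)

struct demo_node { long payload; };

static void demo_main(void)
{
	static struct demo_node node;	/* word-aligned: low two bits are free */
	unsigned long mapping;

	/* "set_page_stable_node": stash the node pointer plus both flag bits */
	mapping = (unsigned long)&node | DEMO_MAPPING_FLAGS;

	/* "PageKsm": anon + ksm bits both set identifies a KSM page */
	assert((mapping & DEMO_MAPPING_FLAGS) ==
	       (DEMO_MAPPING_ANON | DEMO_MAPPING_KSM));

	/* "page_stable_node": mask the flag bits off to recover the pointer */
	assert((struct demo_node *)(mapping & ~DEMO_MAPPING_FLAGS) == &node);
}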

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
static inline int ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	return anon_vma &&
		(anon_vma->root != vma->anon_vma->root ||
		 page->index != linear_page_index(vma, address));
}
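
/*
 * Sketch of the intended calling pattern on the swap-in fault path
 * (illustrative, loosely modeled on do_swap_page(); the helper name
 * ksm_swapin_copy_sketch() is made up for this example): if the page
 * can no longer be mapped here as a KSM page, take a private copy via
 * ksm_does_need_to_copy() and let ksmd remerge it later.
 */
static inline struct page *ksm_swapin_copy_sketch(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	if (ksm_might_need_to_copy(page, vma, address)) {
		/* may return NULL if allocating the copy fails */
		page = ksm_does_need_to_copy(page, vma, address);
	}
	return page;
}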

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return 0;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */