#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
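
/*
 * ksm_madvise() is reached via the madvise(2) system call, which is how
 * userspace opts a range into or out of merging.  Illustrative sketch,
 * not part of this header:
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *	...
 *	madvise(buf, len, MADV_UNMERGEABLE);
 *
 * MADV_MERGEABLE asks ksm_madvise() to set VM_MERGEABLE on the vma (and
 * make sure the mm is registered with ksmd); MADV_UNMERGEABLE undoes that,
 * first breaking any pages already merged within the range.
 */
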
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}
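
/*
 * ksm_fork() is intended for the fork() path (dup_mmap() in kernels of
 * this vintage): if ksmd was already tracking the parent's mm, register
 * the child's mm too.  Sketch of the expected caller, not part of this
 * header:
 *
 *	retval = ksm_fork(mm, oldmm);
 *	if (retval)
 *		goto out;
 */
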
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
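
/*
 * The test above relies on the bottom bits of page->mapping serving as a
 * type tag (PAGE_MAPPING_ANON, PAGE_MAPPING_KSM and the PAGE_MAPPING_FLAGS
 * mask come from linux/mm.h), so the encodings are roughly:
 *
 *	anonymous page:	mapping = (void *)anon_vma    + PAGE_MAPPING_ANON
 *	KSM page:	mapping = (void *)stable_node + PAGE_MAPPING_ANON
 *						      + PAGE_MAPPING_KSM
 *
 * Only a page carrying both tag bits is reported as a KSM page.
 */
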
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
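
/*
 * Round trip, for illustration: after set_page_stable_node(page, node),
 * PageKsm(page) is true and page_stable_node(page) yields node again,
 * since page_rmapping() masks PAGE_MAPPING_FLAGS back off:
 *
 *	set_page_stable_node(page, node);
 *	VM_BUG_ON(page_stable_node(page) != node);
 */
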
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
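
/*
 * Sketch of the expected do_swap_page() call pattern (error handling and
 * locking elided; the exact caller details are assumptions here):
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *
 * The return is either the page itself, when it can safely be mapped at
 * this address, or a freshly allocated copy for this vma's anon_vma.
 */
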
int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
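
/*
 * rmap_walk_ksm() above applies rmap_one to each vma currently mapping the
 * KSM page, stopping early if the callback returns anything other than
 * SWAP_AGAIN.  A hypothetical caller (my_rmap_one is made up purely for
 * illustration):
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long addr, void *arg)
 *	{
 *		return SWAP_AGAIN;
 *	}
 *	...
 *	rmap_walk_ksm(page, my_rmap_one, NULL);
 */
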
#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */