/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff;
}

/* Last pgoff covered by the vma: the tree stores closed intervals */
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff + vma_pages(v) - 1;
}

INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
                     unsigned long, shared.rb_subtree_last,
                     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

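/*
 * The INTERVAL_TREE_DEFINE() instantiation above generates the
 * vma_interval_tree_insert(), vma_interval_tree_remove(),
 * vma_interval_tree_iter_first() and vma_interval_tree_iter_next()
 * primitives, along with the vma_interval_tree_augment callbacks used
 * below; the empty ITSTATIC argument (note the double comma) leaves
 * them globally visible.
 */
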
/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root_cached *root)
{
        struct rb_node **link;
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);

        VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

        if (!prev->shared.rb.rb_right) {
                /* prev has no right child: node becomes its right child */
                parent = prev;
                link = &prev->shared.rb.rb_right;
        } else {
                /*
                 * Otherwise, descend to the leftmost node of prev's right
                 * subtree (prev's in-order successor position), pushing
                 * node's last pgoff into rb_subtree_last on the way down
                 * so the augmented data stays correct once node is linked.
                 */
                parent = rb_entry(prev->shared.rb.rb_right,
                                  struct vm_area_struct, shared.rb);
                if (parent->shared.rb_subtree_last < last)
                        parent->shared.rb_subtree_last = last;
                while (parent->shared.rb.rb_left) {
                        parent = rb_entry(parent->shared.rb.rb_left,
                                struct vm_area_struct, shared.rb);
                        if (parent->shared.rb_subtree_last < last)
                                parent->shared.rb_subtree_last = last;
                }
                link = &parent->shared.rb.rb_left;
        }

        node->shared.rb_subtree_last = last;
        rb_link_node(&node->shared.rb, &parent->shared.rb, link);
        rb_insert_augmented(&node->shared.rb, &root->rb_root,
                            &vma_interval_tree_augment);
}

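/*
 * Usage sketch (illustrative only, not an in-tree call site; "copy",
 * "orig" and "mapping" are placeholder names): a vma starting at the
 * same pgoff as an existing one can be linked right next to it, with
 * the i_mmap lock held for writing:
 *
 *      i_mmap_lock_write(mapping);
 *      vma_interval_tree_insert_after(copy, orig, &mapping->i_mmap);
 *      i_mmap_unlock_write(mapping);
 */
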
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
        return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
        return vma_last_pgoff(avc->vma);
}

INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
                     avc_start_pgoff, avc_last_pgoff,
                     static inline, __anon_vma_interval_tree)

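/*
 * Under CONFIG_DEBUG_VM_RB, remember the interval a node was inserted
 * with, so that anon_vma_interval_tree_verify() below can catch a vma
 * whose pgoff range changed while it was still linked in the tree
 * (which would invalidate both the tree ordering and the augmented
 * subtree data).
 */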
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
        node->cached_vma_start = avc_start_pgoff(node);
        node->cached_vma_last = avc_last_pgoff(node);
#endif
        __anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
        __anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
                                  unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                                 unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_next(node, first, last);
}

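/*
 * Iteration sketch (illustrative; handle() is a placeholder): visit
 * every anon_vma_chain overlapping the closed range [first, last].
 * include/linux/rmap.h wraps this same pattern in the
 * anon_vma_interval_tree_foreach() macro:
 *
 *      struct anon_vma_chain *avc;
 *
 *      for (avc = anon_vma_interval_tree_iter_first(root, first, last);
 *           avc; avc = anon_vma_interval_tree_iter_next(avc, first, last))
 *              handle(avc->vma);
 */
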
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif