list_lru: per-node API
[platform/adaptation/renesas_rcar/renesas_kernel.git] mm/list_lru.c
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        if (list_empty(item)) {
                list_add_tail(item, &nlru->list);
                if (nlru->nr_items++ == 0)
                        node_set(nid, lru->active_nodes);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                list_del_init(item);
                if (--nlru->nr_items == 0)
                        node_clear(nid, lru->active_nodes);
                WARN_ON_ONCE(nlru->nr_items < 0);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
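
/*
 * Hedged usage sketch, not part of the original file: a cache that embeds
 * a list_head in its objects might park and reclaim them like this. The
 * names "struct my_object", "my_cache_lru", my_object_mark_unused() and
 * my_object_mark_used() are hypothetical, for illustration only. Note that
 * list_lru_add()/list_lru_del() locate the node via virt_to_page(), so the
 * embedding object must live in directly mapped memory (e.g. slab).
 */
#if 0	/* illustration only, not compiled */
struct my_object {
        struct list_head lru;   /* must start out initialised and empty */
        /* ... payload ... */
};

static struct list_lru my_cache_lru;

static void my_object_mark_unused(struct my_object *obj)
{
        /* returns true only if the item was not already on an LRU list */
        if (list_lru_add(&my_cache_lru, &obj->lru))
                pr_debug("%p parked on its node's LRU\n", obj);
}

static void my_object_mark_used(struct my_object *obj)
{
        /* returns true only if the item was actually on an LRU list */
        if (list_lru_del(&my_cache_lru, &obj->lru))
                pr_debug("%p taken back off its node's LRU\n", obj);
}
#endif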

unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
        unsigned long count = 0;
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        count += nlru->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
                   void *cb_arg, unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
restart:
        list_for_each_safe(item, n, &nlru->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (--(*nr_to_walk) == 0)
                        break;

                ret = isolate(item, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED:
                        if (--nlru->nr_items == 0)
                                node_clear(nid, lru->active_nodes);
                        WARN_ON_ONCE(nlru->nr_items < 0);
                        isolated++;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &nlru->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
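
/*
 * Hedged sketch, not part of the original file, of an isolate callback for
 * list_lru_walk_node(). The callback runs with the per-node lru lock held;
 * if it drops that lock it must return LRU_RETRY so the walk restarts, as
 * the comment above explains. On LRU_REMOVED the walker only fixes up the
 * item accounting, so the callback itself takes the item off the list.
 * "my_object", my_object_is_busy() and "freeable" are hypothetical names.
 */
#if 0	/* illustration only, not compiled */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                  void *cb_arg)
{
        struct my_object *obj = container_of(item, struct my_object, lru);
        struct list_head *freeable = cb_arg;

        if (my_object_is_busy(obj))
                return LRU_ROTATE;      /* keep it; walker moves it to the tail */

        /* move the item to a private list for freeing after the walk */
        list_move(item, freeable);
        return LRU_REMOVED;
}

/* scan a bounded number of node 0 entries, collecting victims onto a list */
static unsigned long my_shrink_node_zero(void)
{
        LIST_HEAD(freeable);
        unsigned long nr_to_walk = 128;

        return list_lru_walk_node(&my_cache_lru, 0, my_isolate, &freeable,
                                  &nr_to_walk);
}
#endif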

static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
                                               list_lru_dispose_cb dispose)
{
        struct list_lru_node *nlru = &lru->node[nid];
        LIST_HEAD(dispose_list);
        unsigned long disposed = 0;

        spin_lock(&nlru->lock);
        while (!list_empty(&nlru->list)) {
                list_splice_init(&nlru->list, &dispose_list);
                disposed += nlru->nr_items;
                nlru->nr_items = 0;
                node_clear(nid, lru->active_nodes);
                spin_unlock(&nlru->lock);

                dispose(&dispose_list);

                spin_lock(&nlru->lock);
        }
        spin_unlock(&nlru->lock);
        return disposed;
}

unsigned long list_lru_dispose_all(struct list_lru *lru,
                                   list_lru_dispose_cb dispose)
{
        unsigned long disposed;
        unsigned long total = 0;
        int nid;

        do {
                disposed = 0;
                for_each_node_mask(nid, lru->active_nodes) {
                        disposed += list_lru_dispose_all_node(lru, nid,
                                                              dispose);
                }
                total += disposed;
        } while (disposed != 0);

        return total;
}
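
/*
 * Hedged sketch, not part of the original file, of a dispose callback for
 * list_lru_dispose_all(). The callback is handed a private list that the
 * per-node items were spliced onto and is called with the per-node lock
 * dropped, so it can free the objects directly (a real user would also need
 * <linux/slab.h> for kfree()). "my_object", "my_cache_lru" and
 * my_cache_teardown() are hypothetical names.
 */
#if 0	/* illustration only, not compiled */
static void my_dispose(struct list_head *dispose_list)
{
        struct my_object *obj, *next;

        list_for_each_entry_safe(obj, next, dispose_list, lru) {
                list_del_init(&obj->lru);
                kfree(obj);
        }
}

/* e.g. at cache teardown time */
static void my_cache_teardown(void)
{
        unsigned long freed = list_lru_dispose_all(&my_cache_lru, my_dispose);

        pr_debug("disposed of %lu cached objects\n", freed);
}
#endif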

int list_lru_init(struct list_lru *lru)
{
        int i;

        nodes_clear(lru->active_nodes);
        for (i = 0; i < MAX_NUMNODES; i++) {
                spin_lock_init(&lru->node[i].lock);
                INIT_LIST_HEAD(&lru->node[i].list);
                lru->node[i].nr_items = 0;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);
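
/*
 * Hedged sketch, not part of the original file: a struct list_lru can be
 * declared statically (as in the earlier sketches) or embedded in a larger
 * cache structure, and must be initialised once before any items are added.
 * In this version list_lru_init() always returns 0, but checking the result
 * keeps callers future-proof. "struct my_cache" and my_cache_create() are
 * hypothetical names.
 */
#if 0	/* illustration only, not compiled */
struct my_cache {
        struct list_lru lru;
        /* ... other cache state ... */
};

static int my_cache_create(struct my_cache *cache)
{
        int err = list_lru_init(&cache->lru);

        if (err)
                return err;
        /* every node's list is now empty and its lock initialised */
        return 0;
}
#endif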