/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        WARN_ON_ONCE(nlru->nr_items < 0);
        if (list_empty(item)) {
                list_add_tail(item, &nlru->list);
                if (nlru->nr_items++ == 0)
                        node_set(nid, lru->active_nodes);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                list_del_init(item);
                if (--nlru->nr_items == 0)
                        node_clear(nid, lru->active_nodes);
                WARN_ON_ONCE(nlru->nr_items < 0);
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
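
/*
 * Illustrative usage sketch: how a cache that embeds a list_head in its
 * objects might drive list_lru_add()/list_lru_del().  struct demo_object,
 * demo_lru and the demo_* helpers are hypothetical names used only for
 * illustration here and in the sketches further down.
 */
struct demo_object {
        struct list_head lru_node;      /* links the object onto its node's LRU */
        bool in_use;                    /* hypothetical "object is busy" flag */
};

static struct list_lru demo_lru;

/* Last reference dropped: make the object reclaimable. */
static void demo_object_unused(struct demo_object *obj)
{
        obj->in_use = false;
        /* list_lru_add() returns false if the item is already on an LRU. */
        list_lru_add(&demo_lru, &obj->lru_node);
}

/* Object looked up again: take it off the LRU before reuse. */
static void demo_object_reused(struct demo_object *obj)
{
        obj->in_use = true;
        /* list_lru_del() returns false if the item was not on an LRU. */
        list_lru_del(&demo_lru, &obj->lru_node);
}
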
unsigned long list_lru_count(struct list_lru *lru)
{
        unsigned long count = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes) {
                struct list_lru_node *nlru = &lru->node[nid];

                spin_lock(&nlru->lock);
                WARN_ON_ONCE(nlru->nr_items < 0);
                count += nlru->nr_items;
                spin_unlock(&nlru->lock);
        }

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count);

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
                   void *cb_arg, unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
restart:
        list_for_each_safe(item, n, &nlru->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (--(*nr_to_walk) == 0)
                        break;

                ret = isolate(item, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED:
                        if (--nlru->nr_items == 0)
                                node_clear(nid, lru->active_nodes);
                        WARN_ON_ONCE(nlru->nr_items < 0);
                        isolated++;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &nlru->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
                            void *cb_arg, unsigned long nr_to_walk)
{
        unsigned long isolated = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes) {
                isolated += list_lru_walk_node(lru, nid, isolate,
                                               cb_arg, &nr_to_walk);
                if (nr_to_walk == 0)
                        break;
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk);
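
/*
 * Illustrative callback sketch: a minimal list_lru_walk_cb showing the
 * lru_status contract that list_lru_walk_node() dispatches on.  It reuses the
 * hypothetical struct demo_object from the sketch above and assumes the
 * caller passes a list_head in cb_arg to collect isolated items.
 */
static enum lru_status demo_isolate(struct list_head *item,
                                    spinlock_t *lru_lock, void *cb_arg)
{
        struct demo_object *obj = container_of(item, struct demo_object,
                                               lru_node);
        struct list_head *dispose = cb_arg;

        /* Busy objects stay on the LRU; the walker rotates them to the tail. */
        if (obj->in_use)
                return LRU_ROTATE;

        /*
         * The callback takes the item off the list itself; LRU_REMOVED tells
         * the walker to fix up nr_items and the active_nodes mask.  A callback
         * that had to drop lru_lock would return LRU_RETRY instead, so the
         * walker restarts its (now invalid) traversal.
         */
        list_move(item, dispose);
        return LRU_REMOVED;
}
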
static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
                                               list_lru_dispose_cb dispose)
{
        struct list_lru_node *nlru = &lru->node[nid];
        LIST_HEAD(dispose_list);
        unsigned long disposed = 0;

        spin_lock(&nlru->lock);
        while (!list_empty(&nlru->list)) {
                list_splice_init(&nlru->list, &dispose_list);
                disposed += nlru->nr_items;
                nlru->nr_items = 0;
                node_clear(nid, lru->active_nodes);
                spin_unlock(&nlru->lock);

                dispose(&dispose_list);

                spin_lock(&nlru->lock);
        }
        spin_unlock(&nlru->lock);
        return disposed;
}

unsigned long list_lru_dispose_all(struct list_lru *lru,
                                   list_lru_dispose_cb dispose)
{
        unsigned long disposed;
        unsigned long total = 0;
        int nid;

        do {
                disposed = 0;
                for_each_node_mask(nid, lru->active_nodes) {
                        disposed += list_lru_dispose_all_node(lru, nid,
                                                              dispose);
                }
                total += disposed;
        } while (disposed != 0);

        return total;
}
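
/*
 * Illustrative dispose sketch: a list_lru_dispose_cb to pair with
 * list_lru_dispose_all() above.  It runs with the lru lock dropped, so it may
 * sleep.  demo_dispose() is a hypothetical name, and kfree() assumes the
 * objects came from kmalloc() (<linux/slab.h>).
 */
static void demo_dispose(struct list_head *dispose_list)
{
        struct demo_object *obj, *next;

        list_for_each_entry_safe(obj, next, dispose_list, lru_node) {
                list_del_init(&obj->lru_node);
                kfree(obj);
        }
}
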
int list_lru_init(struct list_lru *lru)
{
        int i;

        nodes_clear(lru->active_nodes);
        for (i = 0; i < MAX_NUMNODES; i++) {
                spin_lock_init(&lru->node[i].lock);
                INIT_LIST_HEAD(&lru->node[i].list);
                lru->node[i].nr_items = 0;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);
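
/*
 * Illustrative end-to-end sketch: a shrinker-style user tying the hypothetical
 * demo_* pieces above together.  demo_cache_init() and demo_shrink() are
 * invented names; a real cache would call the latter from its shrinker's scan
 * callback.
 */
static int demo_cache_init(void)
{
        return list_lru_init(&demo_lru);
}

static unsigned long demo_shrink(unsigned long nr_to_scan)
{
        LIST_HEAD(dispose_list);
        unsigned long freed;

        /* Isolate up to nr_to_scan idle objects onto dispose_list... */
        freed = list_lru_walk(&demo_lru, demo_isolate, &dispose_list,
                              nr_to_scan);

        /* ...then free them outside the per-node lru locks. */
        demo_dispose(&dispose_list);
        return freed;
}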