/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
12 bool list_lru_add(struct list_lru *lru, struct list_head *item)
14 int nid = page_to_nid(virt_to_page(item));
15 struct list_lru_node *nlru = &lru->node[nid];
17 spin_lock(&nlru->lock);
18 WARN_ON_ONCE(nlru->nr_items < 0);
19 if (list_empty(item)) {
20 list_add_tail(item, &nlru->list);
21 if (nlru->nr_items++ == 0)
22 node_set(nid, lru->active_nodes);
23 spin_unlock(&nlru->lock);
26 spin_unlock(&nlru->lock);
29 EXPORT_SYMBOL_GPL(list_lru_add);
31 bool list_lru_del(struct list_lru *lru, struct list_head *item)
33 int nid = page_to_nid(virt_to_page(item));
34 struct list_lru_node *nlru = &lru->node[nid];
36 spin_lock(&nlru->lock);
37 if (!list_empty(item)) {
39 if (--nlru->nr_items == 0)
40 node_clear(nid, lru->active_nodes);
41 WARN_ON_ONCE(nlru->nr_items < 0);
42 spin_unlock(&nlru->lock);
45 spin_unlock(&nlru->lock);
48 EXPORT_SYMBOL_GPL(list_lru_del);
51 list_lru_count_node(struct list_lru *lru, int nid)
53 unsigned long count = 0;
54 struct list_lru_node *nlru = &lru->node[nid];
56 spin_lock(&nlru->lock);
57 WARN_ON_ONCE(nlru->nr_items < 0);
58 count += nlru->nr_items;
59 spin_unlock(&nlru->lock);
63 EXPORT_SYMBOL_GPL(list_lru_count_node);
66 list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
67 void *cb_arg, unsigned long *nr_to_walk)
70 struct list_lru_node *nlru = &lru->node[nid];
71 struct list_head *item, *n;
72 unsigned long isolated = 0;
74 spin_lock(&nlru->lock);
76 list_for_each_safe(item, n, &nlru->list) {
80 * decrement nr_to_walk first so that we don't livelock if we
81 * get stuck on large numbesr of LRU_RETRY items
83 if (--(*nr_to_walk) == 0)
86 ret = isolate(item, &nlru->lock, cb_arg);
89 if (--nlru->nr_items == 0)
90 node_clear(nid, lru->active_nodes);
91 WARN_ON_ONCE(nlru->nr_items < 0);
95 list_move_tail(item, &nlru->list);
101 * The lru lock has been dropped, our list traversal is
102 * now invalid and so we have to restart from scratch.
110 spin_unlock(&nlru->lock);
113 EXPORT_SYMBOL_GPL(list_lru_walk_node);
115 static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
116 list_lru_dispose_cb dispose)
118 struct list_lru_node *nlru = &lru->node[nid];
119 LIST_HEAD(dispose_list);
120 unsigned long disposed = 0;
122 spin_lock(&nlru->lock);
123 while (!list_empty(&nlru->list)) {
124 list_splice_init(&nlru->list, &dispose_list);
125 disposed += nlru->nr_items;
127 node_clear(nid, lru->active_nodes);
128 spin_unlock(&nlru->lock);
130 dispose(&dispose_list);
132 spin_lock(&nlru->lock);
134 spin_unlock(&nlru->lock);
138 unsigned long list_lru_dispose_all(struct list_lru *lru,
139 list_lru_dispose_cb dispose)
141 unsigned long disposed;
142 unsigned long total = 0;
147 for_each_node_mask(nid, lru->active_nodes) {
148 disposed += list_lru_dispose_all_node(lru, nid,
152 } while (disposed != 0);
157 int list_lru_init(struct list_lru *lru)
161 nodes_clear(lru->active_nodes);
162 for (i = 0; i < MAX_NUMNODES; i++) {
163 spin_lock_init(&lru->node[i].lock);
164 INIT_LIST_HEAD(&lru->node[i].list);
165 lru->node[i].nr_items = 0;
169 EXPORT_SYMBOL_GPL(list_lru_init);