/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * on systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;
	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

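/*
 * Illustrative only: a minimal sketch of how a cache might feed objects to
 * a list_lru; "demo_lru" and "demo_object" are hypothetical names. The only
 * requirement is that the object embeds a struct list_head that is empty
 * (list_empty() is true) while the object is off the LRU:
 *
 *	struct demo_object {
 *		struct list_head lru;		// INIT_LIST_HEAD() on creation
 *	};
 *
 *	list_lru_add(&demo_lru, &obj->lru);	// object became unused
 *	list_lru_del(&demo_lru, &obj->lru);	// object referenced again
 *
 * Both helpers return true only if they actually changed the item's state,
 * so callers can rely on the return value for their own accounting.
 */
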
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	long count = 0;
	int memcg_idx;

	count += __list_lru_count_one(lru, nid, -1);
	if (list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

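/*
 * Illustrative only: a typical shrinker reports the size of its list_lru
 * from its ->count_objects() callback via the list_lru_shrink_count()
 * wrapper in <linux/list_lru.h>, which picks the right node and memcg from
 * the shrink_control; "demo_lru" and "demo_count" are hypothetical names.
 *
 *	static unsigned long demo_count(struct shrinker *shrink,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 */
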
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

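/*
 * Illustrative only: the shape of an isolate callback passed to the walk
 * functions above; "demo_isolate" and the dispose list handed in through
 * cb_arg are hypothetical. The callback runs with the per-node lru lock
 * held:
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    struct list_lru_one *lru,
 *					    spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		return LRU_REMOVED;
 *	}
 *
 * LRU_ROTATE keeps the item but moves it to the tail of the list, LRU_SKIP
 * leaves it in place, and the *_RETRY statuses tell the walker that the
 * lock was dropped and the traversal has to be restarted.
 */
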
static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/* do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

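/*
 * memcg_update_all_list_lrus() is called from the memcg side when the
 * per-memcg cache id space (memcg_nr_cache_ids) has to grow: every
 * registered lru gets its per-cgroup arrays resized before the new id is
 * used, and a failure rolls back the lrus that were already updated.
 */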
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}

#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

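/*
 * Illustrative only: callers normally reach __list_lru_init() through the
 * wrappers in <linux/list_lru.h>, e.g. list_lru_init() for a plain lru or
 * list_lru_init_memcg() for a memcg-aware one; "demo_lru" is hypothetical.
 *
 *	static struct list_lru demo_lru;
 *
 *	err = list_lru_init_memcg(&demo_lru);
 *	if (err)
 *		return err;
 *	...
 *	list_lru_destroy(&demo_lru);	// safe even if the init failed
 */
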
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);