mbcache: fix shrinker function return value
author Andreas Gruenbacher <agruen@suse.de>
Wed, 21 Jul 2010 17:44:45 +0000 (19:44 +0200)
committer Al Viro <viro@zeniv.linux.org.uk>
Mon, 9 Aug 2010 20:48:47 +0000 (16:48 -0400)
The shrinker function is supposed to return the number of cache
entries after shrinking, not before shrinking.  Fix that.
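
As a minimal userspace model of the fixed ordering (toy_shrink, CACHE_PRESSURE
and the fixed entry count are illustrative stand-ins, not the kernel code):
free entries first, then count what remains, and scale the result the same way
mb_cache_shrink_fn() does:

	#include <stdio.h>

	#define CACHE_PRESSURE 100	/* stand-in for sysctl_vfs_cache_pressure */

	static int entry_count = 500;	/* toy stand-in for c_entry_count */

	static int toy_shrink(int nr_to_scan)
	{
		/* Shrink first ... */
		while (nr_to_scan-- && entry_count > 0)
			entry_count--;

		/*
		 * ... then count, so the return value reflects the
		 * cache population after shrinking, as the shrinker
		 * API expects.
		 */
		return (entry_count / 100) * CACHE_PRESSURE;
	}

	int main(void)
	{
		printf("scaled count after scan: %d\n", toy_shrink(42));
		return 0;
	}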

Based on a patch from Wang Sheng-Hui <crosslonelyover@gmail.com>.

Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
fs/mbcache.c

index 8a2cbd8..cf4e6cd 100644
@@ -176,22 +176,12 @@ static int
 mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
        LIST_HEAD(free_list);
-       struct list_head *l, *ltmp;
+       struct mb_cache *cache;
+       struct mb_cache_entry *entry, *tmp;
        int count = 0;
 
-       spin_lock(&mb_cache_spinlock);
-       list_for_each(l, &mb_cache_list) {
-               struct mb_cache *cache =
-                       list_entry(l, struct mb_cache, c_cache_list);
-               mb_debug("cache %s (%d)", cache->c_name,
-                         atomic_read(&cache->c_entry_count));
-               count += atomic_read(&cache->c_entry_count);
-       }
        mb_debug("trying to free %d entries", nr_to_scan);
-       if (nr_to_scan == 0) {
-               spin_unlock(&mb_cache_spinlock);
-               goto out;
-       }
+       spin_lock(&mb_cache_spinlock);
        while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                struct mb_cache_entry *ce =
                        list_entry(mb_cache_lru_list.next,
@@ -199,12 +189,15 @@ mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
                list_move_tail(&ce->e_lru_list, &free_list);
                __mb_cache_entry_unhash(ce);
        }
+       list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+               mb_debug("cache %s (%d)", cache->c_name,
+                         atomic_read(&cache->c_entry_count));
+               count += atomic_read(&cache->c_entry_count);
+       }
        spin_unlock(&mb_cache_spinlock);
-       list_for_each_safe(l, ltmp, &free_list) {
-               __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-                                                  e_lru_list), gfp_mask);
+       list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+               __mb_cache_entry_forget(entry, gfp_mask);
        }
-out:
        return (count / 100) * sysctl_vfs_cache_pressure;
 }
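
With the default sysctl_vfs_cache_pressure of 100, (count / 100) *
sysctl_vfs_cache_pressure works out to roughly count itself; raising or
lowering the sysctl biases the VM toward or away from reclaiming these
entries. Counting after the scan loop means that value now reflects the
post-shrink population, which is the point of the fix.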