mm: per-thread vma caching
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 538367e..c30eec5 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
                        return;
                frontswap_ops->invalidate_area(type);
                atomic_set(&sis->frontswap_pages, 0);
-               memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+               bitmap_zero(sis->frontswap_map, sis->max);
        }
        clear_bit(type, need_init);
 }
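
The old memset sized the clear in bytes as sis->max / sizeof(long), but frontswap_map is a bitmap with one bit per swap page: a sis->max-bit map occupies BITS_TO_LONGS(sis->max) * sizeof(long) bytes, which the old expression only happens to (roughly) match on 64-bit and doubles on 32-bit. bitmap_zero() takes the length in bits and works out the byte count itself. A minimal userspace sketch of the size arithmetic (bitmap_zero_sketch() is an illustrative stand-in, not the kernel helper):

#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Illustrative stand-in for the kernel's bitmap_zero(): length is in bits. */
static void bitmap_zero_sketch(unsigned long *map, unsigned long nbits)
{
	memset(map, 0, BITS_TO_LONGS(nbits) * sizeof(long));
}

int main(void)
{
	unsigned long max = 4096;		/* hypothetical number of swap pages */
	unsigned long map[BITS_TO_LONGS(4096)];

	printf("map size:         %zu bytes\n", (size_t)(BITS_TO_LONGS(max) * sizeof(long)));
	printf("old memset count: %zu bytes\n", (size_t)(max / sizeof(long)));

	bitmap_zero_sketch(map, max);		/* clears exactly the map, no more */
	return 0;
}

On 64-bit both lines print 512; on 32-bit the old byte count comes out at 1024 for a 512-byte map, which is the mismatch the bitmap_zero() conversion removes.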
@@ -327,15 +327,12 @@ EXPORT_SYMBOL(__frontswap_invalidate_area);
 
 static unsigned long __frontswap_curr_pages(void)
 {
-       int type;
        unsigned long totalpages = 0;
        struct swap_info_struct *si = NULL;
 
        assert_spin_locked(&swap_lock);
-       for (type = swap_list.head; type >= 0; type = si->next) {
-               si = swap_info[type];
+       plist_for_each_entry(si, &swap_active_head, list)
                totalpages += atomic_read(&si->frontswap_pages);
-       }
        return totalpages;
 }
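
__frontswap_curr_pages() no longer indexes swap_info[] by type and follows si->next; it walks the active devices with plist_for_each_entry() over swap_active_head, which hands back each containing swap_info_struct directly. A rough userspace sketch of that intrusive-list iteration, using a plain singly linked list instead of the kernel's priority list (struct swap_dev, for_each_dev() and the field names are made up for illustration):

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list node, in the spirit of the kernel's list heads. */
struct list_node {
	struct list_node *next;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-in for swap_info_struct. */
struct swap_dev {
	unsigned long frontswap_pages;
	struct list_node list;
};

/* Visit every entry hanging off the head, like plist_for_each_entry(). */
#define for_each_dev(pos, head) \
	for (struct list_node *_n = (head)->next; \
	     _n && ((pos) = container_of(_n, struct swap_dev, list)); \
	     _n = _n->next)

int main(void)
{
	struct swap_dev a = { .frontswap_pages = 10 };
	struct swap_dev b = { .frontswap_pages = 32 };
	struct list_node head = { .next = &a.list };
	struct swap_dev *si;
	unsigned long totalpages = 0;

	a.list.next = &b.list;
	b.list.next = NULL;

	for_each_dev(si, &head)			/* no type index, no swap_info[] lookup */
		totalpages += si->frontswap_pages;

	printf("total frontswap pages: %lu\n", totalpages);
	return 0;
}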
 
@@ -347,11 +344,9 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
        int si_frontswap_pages;
        unsigned long total_pages_to_unuse = total;
        unsigned long pages = 0, pages_to_unuse = 0;
-       int type;
 
        assert_spin_locked(&swap_lock);
-       for (type = swap_list.head; type >= 0; type = si->next) {
-               si = swap_info[type];
+       plist_for_each_entry(si, &swap_active_head, list) {
                si_frontswap_pages = atomic_read(&si->frontswap_pages);
                if (total_pages_to_unuse < si_frontswap_pages) {
                        pages = pages_to_unuse = total_pages_to_unuse;
@@ -366,7 +361,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
                }
                vm_unacct_memory(pages);
                *unused = pages_to_unuse;
-               *swapid = type;
+               *swapid = si->type;
                ret = 0;
                break;
        }
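
With the list walk there is no integer index left, so the chosen device is reported through si->type instead of the old loop variable. The selection itself is unchanged: the first usable device either absorbs the whole remaining target or is drained completely. A hedged userspace sketch of that selection (struct dev and pick_unuse() are placeholders, and the kernel's memory-accounting check is left out):

#include <stdio.h>

/* Hypothetical device record; 'type' mirrors swap_info_struct->type. */
struct dev {
	int type;
	unsigned long frontswap_pages;
};

/*
 * Pick the first device to shrink: if it holds more than the remaining
 * target, request exactly that many pages; otherwise request 0, which the
 * caller treats as "unuse everything on this device".
 */
static int pick_unuse(const struct dev *devs, int ndevs, unsigned long total,
		      unsigned long *unused, int *swapid)
{
	int i;

	for (i = 0; i < ndevs; i++) {
		unsigned long pages_to_unuse;

		if (total < devs[i].frontswap_pages)
			pages_to_unuse = total;	/* this device covers the rest */
		else
			pages_to_unuse = 0;	/* 0 means "unuse all" */

		*unused = pages_to_unuse;
		*swapid = devs[i].type;		/* formerly the loop index 'type' */
		return 0;
	}
	return -1;				/* no active swap device */
}

int main(void)
{
	struct dev devs[] = {
		{ .type = 0, .frontswap_pages = 100 },
		{ .type = 2, .frontswap_pages = 50 },
	};
	unsigned long unused;
	int swapid;

	if (pick_unuse(devs, 2, 30, &unused, &swapid) == 0)
		printf("shrink type %d, pages_to_unuse %lu\n", swapid, unused);
	return 0;
}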
@@ -413,7 +408,7 @@ void frontswap_shrink(unsigned long target_pages)
        /*
         * we don't want to hold swap_lock while doing a very
         * lengthy try_to_unuse, but swap_list may change
-        * so restart scan from swap_list.head each time
+        * so restart scan from swap_active_head each time
         */
        spin_lock(&swap_lock);
        ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
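
The updated comment names swap_active_head as the point the scan starts from. The shape here is: pick a device and a page count under swap_lock, drop the lock before the lengthy try_to_unuse(), and never rely on a saved list position afterwards, since devices can come and go while the lock is released. A hedged sketch of that drop-the-lock-then-do-slow-work shape with a pthread mutex (pick_target(), slow_unuse() and shrink_once() are placeholders, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical state guarded by the lock; stands in for the swap device list. */
static pthread_mutex_t swap_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_devices = 3;

/* Placeholder for __frontswap_shrink(): pick a target while the lock is held. */
static int pick_target(int *out)
{
	if (active_devices == 0)
		return -1;
	*out = active_devices - 1;
	return 0;
}

/* Placeholder for try_to_unuse(): lengthy work, must run without the lock. */
static void slow_unuse(int target)
{
	printf("unusing device %d\n", target);
}

static void shrink_once(void)
{
	int target, ret;

	pthread_mutex_lock(&swap_lock);
	ret = pick_target(&target);		/* scan starts from the list head... */
	pthread_mutex_unlock(&swap_lock);	/* ...and no position outlives the lock */

	if (ret == 0)
		slow_unuse(target);		/* devices may come and go meanwhile */
}

int main(void)
{
	shrink_once();
	return 0;
}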