mm: truncate: handle exceptional entries in page cache radix trees
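
The page cache radix tree is about to carry exceptional (non-page)
entries alongside struct page pointers.  Teach the truncation and
invalidation paths about them: look up slots with
pagevec_lookup_entries(), which also reports each slot's offset in
indices[] (an exceptional entry has no page->index to read back),
clear exceptional entries under the tree lock with
clear_exceptional_entry(), and strip them from the pagevec with
pagevec_remove_exceptionals() before pagevec_release() drops page
references.

This appears to correspond to the upstream "mm + fs: prepare for
non-page entries in page cache radix trees" work; the final hunk,
which drops the i_mutex assertion from pagecache_isize_extended(),
matches the separate upstream fix removing a false WARN_ON for
filesystems (e.g. XFS) that serialize size changes with a lock other
than i_mutex.
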
diff --git a/mm/truncate.c b/mm/truncate.c
index 855c38c..827ad8d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
 #include <linux/rmap.h>
 #include "internal.h"
 
+static void clear_exceptional_entry(struct address_space *mapping,
+                                   pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return;
+
+       spin_lock_irq(&mapping->tree_lock);
+       /*
+        * Regular page slots are stabilized by the page lock even
+        * without the tree itself locked.  These unlocked entries
+        * need verification under the tree lock.
+        */
+       radix_tree_delete_item(&mapping->page_tree, index, entry);
+       spin_unlock_irq(&mapping->tree_lock);
+}
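
The deletion above is safe against a concurrent refault because
radix_tree_delete_item() removes the slot only if it still holds
`entry`; if a real page has been instantiated there in the meantime,
the slot is left alone.  The exceptional-entry test itself is a tag
bit in the pointer's low bits, which are free because struct page
pointers are word-aligned.  A minimal userspace sketch of that
convention (assuming this era's RADIX_TREE_EXCEPTIONAL_ENTRY == 2;
the names here are illustrative, not the kernel's):

	#include <stdio.h>
	#include <stdint.h>

	#define EXCEPTIONAL_ENTRY 2UL	/* bit 1, as in radix-tree.h */

	/* Mirrors radix_tree_exceptional_entry(): test the tag bit. */
	static int is_exceptional(void *slot)
	{
		return (uintptr_t)slot & EXCEPTIONAL_ENTRY;
	}

	/* Pack a small value into a tagged, non-page slot. */
	static void *make_exceptional(unsigned long val)
	{
		return (void *)((val << 2) | EXCEPTIONAL_ENTRY);
	}

	int main(void)
	{
		int page;	/* stand-in for a real struct page */
		void *slots[] = { &page, make_exceptional(42) };

		for (unsigned i = 0; i < 2; i++)
			printf("slot %u: %s\n", i,
			       is_exceptional(slots[i]) ? "exceptional" : "page");
		return 0;
	}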
 
 /**
  * do_invalidatepage - invalidate part or all of a page
@@ -209,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        unsigned int    partial_start;  /* inclusive */
        unsigned int    partial_end;    /* exclusive */
        struct pagevec  pvec;
+       pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;
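
All four loops in this file switch from pagevec_lookup() to
pagevec_lookup_entries(), which this series adds in mm/swap.c.  Its
contract, reconstructed here as a sketch rather than a verbatim quote:

	/*
	 * Fill @pvec with up to @nr_entries slots found in @mapping at
	 * or after @start, *including* exceptional entries, and record
	 * each slot's offset in @indices -- an exceptional entry is not
	 * a struct page, so its index cannot be read back from ->index.
	 */
	unsigned pagevec_lookup_entries(struct pagevec *pvec,
					struct address_space *mapping,
					pgoff_t start, unsigned nr_entries,
					pgoff_t *indices);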
 
@@ -239,17 +256,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
        index = start;
-       while (index < end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+       while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index >= end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page->index != index);
@@ -260,6 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
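
pagevec_release() drops a page cache reference on every slot it still
holds, so exceptional entries, which are not refcounted pages, must be
filtered out first.  A sketch of the helper as this series introduces
it in mm/swap.c (reconstructed; treat as illustrative):

	/* Compact @pvec in place, keeping only real pages. */
	void pagevec_remove_exceptionals(struct pagevec *pvec)
	{
		int i, j;

		for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
			struct page *page = pvec->pages[i];

			if (!radix_tree_exceptional_entry(page))
				pvec->pages[j++] = page;
		}
		pvec->nr = j;
	}
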
@@ -308,14 +332,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        index = start;
        for ( ; ; ) {
                cond_resched();
-               if (!pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+               if (!pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
+                       indices)) {
                        if (index == start)
                                break;
                        index = start;
                        continue;
                }
-               if (index == start && pvec.pages[0]->index >= end) {
+               if (index == start && indices[0] >= end) {
+                       pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
                }
@@ -324,16 +350,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index >= end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        lock_page(page);
                        WARN_ON(page->index != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
@@ -376,6 +408,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
 {
+       pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned long ret;
@@ -391,17 +424,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
         */
 
        pagevec_init(&pvec, 0);
-       while (index <= end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+       while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index > end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page->index != index);
@@ -415,6 +454,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                deactivate_page(page);
                        count += ret;
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
@@ -482,6 +522,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
 int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
 {
+       pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index;
        int i;
@@ -492,17 +533,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        cleancache_invalidate_inode(mapping);
        pagevec_init(&pvec, 0);
        index = start;
-       while (index <= end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+       while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index > end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        lock_page(page);
                        WARN_ON(page->index != index);
                        if (page->mapping != mapping) {
@@ -540,6 +587,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                ret = ret2;
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
@@ -649,7 +697,6 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
        struct page *page;
        pgoff_t index;
 
-       WARN_ON(!mutex_is_locked(&inode->i_mutex));
        WARN_ON(to > inode->i_size);
 
        if (from >= to || bsize == PAGE_CACHE_SIZE)