Merge tag 'backport/v3.14.24-ltsi-rc1/pci-rcar-gen2-to-v3.16' into backport/v3.14...

diff --git a/mm/truncate.c b/mm/truncate.c
index 353b683..827ad8d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
 #include <linux/buffer_head.h> /* grr. try_to_release_page,
                                   do_invalidatepage */
 #include <linux/cleancache.h>
+#include <linux/rmap.h>
 #include "internal.h"
 
+static void clear_exceptional_entry(struct address_space *mapping,
+                                   pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return;
+
+       spin_lock_irq(&mapping->tree_lock);
+       /*
+        * Regular page slots are stabilized by the page lock even
+        * without the tree itself locked.  These unlocked entries
+        * need verification under the tree lock.
+        */
+       radix_tree_delete_item(&mapping->page_tree, index, entry);
+       spin_unlock_irq(&mapping->tree_lock);
+}
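
An exceptional entry is a radix-tree slot whose value is not a page pointer: shmem stores swap entries in the page cache tree this way, and the shadow-entry work built on this series does the same. A minimal sketch of the encoding the test above relies on, with illustrative names (the real macros are RADIX_TREE_EXCEPTIONAL_ENTRY and RADIX_TREE_EXCEPTIONAL_SHIFT in include/linux/radix-tree.h of this kernel generation):

	/* Pointers to struct page are word aligned, so their low two bits
	 * are clear; setting bit 1 marks a slot as carrying a non-page
	 * payload instead.  Names here are illustrative. */
	#define EXCEPTIONAL_ENTRY	2
	#define EXCEPTIONAL_SHIFT	2

	static inline void *make_exceptional(unsigned long payload)
	{
		return (void *)((payload << EXCEPTIONAL_SHIFT) | EXCEPTIONAL_ENTRY);
	}

	static inline int is_exceptional(void *entry)
	{
		return (unsigned long)entry & EXCEPTIONAL_ENTRY;
	}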
 
 /**
  * do_invalidatepage - invalidate part or all of a page
@@ -208,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        unsigned int    partial_start;  /* inclusive */
        unsigned int    partial_end;    /* exclusive */
        struct pagevec  pvec;
+       pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;
 
@@ -238,17 +256,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
        index = start;
-       while (index < end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+       while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index >= end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page->index != index);
@@ -259,6 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
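
This hunk establishes the pattern every loop in this file now follows: pagevec_lookup_entries() fills pvec.pages[] and the parallel indices[] array, and may return exceptional entries alongside real pages. Since an exceptional entry is not a struct page, the offset must be read from indices[i] rather than page->index, and the pagevec must be purged of non-page entries before pagevec_release() drops page references. A condensed sketch of the shape (walk_entries() is an illustrative name; clear_exceptional_entry() is the helper added above):

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/radix-tree.h>

	static void walk_entries(struct address_space *mapping,
				 pgoff_t start, pgoff_t end)
	{
		pgoff_t indices[PAGEVEC_SIZE];
		struct pagevec pvec;
		pgoff_t index = start;
		int i;

		pagevec_init(&pvec, 0);
		while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			for (i = 0; i < pagevec_count(&pvec); i++) {
				struct page *page = pvec.pages[i];

				/* May be an exceptional entry: take the
				 * offset from indices[], never page->index. */
				index = indices[i];
				if (index >= end)
					break;
				if (radix_tree_exceptional_entry(page)) {
					clear_exceptional_entry(mapping, index, page);
					continue;
				}
				/* ... lock and operate on the real page ... */
			}
			/* pagevec_release() drops page refcounts, so strip
			 * the non-page entries first. */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			cond_resched();
			index++;
		}
	}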
@@ -307,14 +332,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        index = start;
        for ( ; ; ) {
                cond_resched();
-               if (!pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
+               if (!pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
+                       indices)) {
                        if (index == start)
                                break;
                        index = start;
                        continue;
                }
-               if (index == start && pvec.pages[0]->index >= end) {
+               if (index == start && indices[0] >= end) {
+                       pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
                }
@@ -323,16 +350,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index >= end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        lock_page(page);
                        WARN_ON(page->index != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
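
The second pass differs from the first in its termination logic: the first pass used trylock_page() and may have skipped busy pages, so this one must keep rescanning until a lookup from the start of the range comes back empty, or finds only entries beyond it. Condensed from the loop above (declarations as in the function), with the reasoning spelled out:

	for (;;) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			if (index == start)
				break;	/* clean scan of the whole range: done */
			index = start;	/* tail was empty; verify from the top */
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* fresh scan found only entries past the range: done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		/* ... lock_page(), wait_on_page_writeback(), truncate ... */
		index++;
	}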
@@ -375,6 +408,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
 {
+       pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned long ret;
@@ -390,17 +424,23 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
         */
 
        pagevec_init(&pvec, 0);
-       while (index <= end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+       while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index > end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page->index != index);
@@ -414,6 +454,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                deactivate_page(page);
                        count += ret;
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
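
invalidate_mapping_pages() is the best-effort side of invalidation: locked, dirty, writeback, or mapped pages are skipped rather than waited for. Its best-known caller is fadvise(POSIX_FADV_DONTNEED); a simplified sketch of such a call (writeback and congestion handling omitted, rounding as in mm/fadvise.c so only whole pages are dropped):

	static void drop_clean_cache(struct address_space *mapping,
				     loff_t offset, loff_t endbyte)
	{
		pgoff_t start = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		pgoff_t end = endbyte >> PAGE_CACHE_SHIFT;

		if (end >= start)
			invalidate_mapping_pages(mapping, start, end);
	}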
@@ -481,6 +522,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
 int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
 {
+       pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index;
        int i;
@@ -491,17 +533,23 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        cleancache_invalidate_inode(mapping);
        pagevec_init(&pvec, 0);
        index = start;
-       while (index <= end && pagevec_lookup(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+       while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                       indices)) {
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
                        /* We rely upon deletion not changing page->index */
-                       index = page->index;
+                       index = indices[i];
                        if (index > end)
                                break;
 
+                       if (radix_tree_exceptional_entry(page)) {
+                               clear_exceptional_entry(mapping, index, page);
+                               continue;
+                       }
+
                        lock_page(page);
                        WARN_ON(page->index != index);
                        if (page->mapping != mapping) {
@@ -539,6 +587,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                ret = ret2;
                        unlock_page(page);
                }
+               pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
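
invalidate_inode_pages2_range(), by contrast, must not miss anything: it launders dirty pages and reports failure if a page cannot be invalidated. Direct I/O depends on that guarantee; a sketch in the style of generic_file_direct_write() (simplified, hypothetical helper name):

	static int invalidate_for_dio(struct address_space *mapping,
				      loff_t pos, size_t count)
	{
		pgoff_t first = pos >> PAGE_CACHE_SHIFT;
		pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

		/* A negative return (e.g. -EBUSY) means a page survived and
		 * the caller must not assume the cache is coherent. */
		return invalidate_inode_pages2_range(mapping, first, last);
	}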
@@ -613,12 +662,67 @@ EXPORT_SYMBOL(truncate_pagecache);
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
+       loff_t oldsize = inode->i_size;
+
        i_size_write(inode, newsize);
+       if (newsize > oldsize)
+               pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
 }
 EXPORT_SYMBOL(truncate_setsize);
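
With this change, a filesystem whose ->setattr delegates size changes to truncate_setsize() gets the write-protection of the EOF page for free on extension. A minimal sketch of such a caller (myfs is hypothetical; on-disk block freeing omitted; the VFS holds i_mutex here, as pagecache_isize_extended() requires):

	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		error = inode_change_ok(inode, attr);
		if (error)
			return error;

		if (attr->ia_valid & ATTR_SIZE)
			truncate_setsize(inode, attr->ia_size);

		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
		return 0;
	}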
 
 /**
+ * pagecache_isize_extended - update pagecache after extension of i_size
+ * @inode:     inode for which i_size was extended
+ * @from:      original inode size
+ * @to:                new inode size
+ *
+ * Handle extension of inode size either caused by extending truncate or by
+ * write starting after current i_size. We mark the page straddling current
+ * i_size RO so that page_mkwrite() is called on the first write access to
+ * the page.  This way filesystem can be sure that page_mkwrite() is called on
+ * the page before user writes to the page via mmap after the i_size has been
+ * changed.
+ *
+ * The function must be called after i_size is updated so that page fault
+ * coming after we unlock the page will already see the new i_size.
+ * The function must be called while we still hold i_mutex - this not only
+ * makes sure i_size is stable but also ensures that userspace cannot
+ * observe the new i_size value before we are prepared to store mmap writes
+ * at the new inode size.
+ */
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
+{
+       int bsize = 1 << inode->i_blkbits;
+       loff_t rounded_from;
+       struct page *page;
+       pgoff_t index;
+
+       WARN_ON(to > inode->i_size);
+
+       if (from >= to || bsize == PAGE_CACHE_SIZE)
+               return;
+       /* Page straddling @from will not have any hole block created? */
+       rounded_from = round_up(from, bsize);
+       if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+               return;
+
+       index = from >> PAGE_CACHE_SHIFT;
+       page = find_lock_page(inode->i_mapping, index);
+       /* Page not cached? Nothing to do */
+       if (!page)
+               return;
+       /*
+        * See clear_page_dirty_for_io() for details why set_page_dirty()
+        * is needed.
+        */
+       if (page_mkclean(page))
+               set_page_dirty(page);
+       unlock_page(page);
+       page_cache_release(page);
+}
+EXPORT_SYMBOL(pagecache_isize_extended);
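
The two early returns are easiest to see with numbers; a worked example assuming 4096-byte pages and a 1024-byte block size:

	/*
	 * from = 1536, to = 1800: rounded_from = 2048 and to <= rounded_from,
	 * so the extension stays inside the block that already backs the old
	 * EOF; nothing to protect.
	 *
	 * from = 3500, to = 8192: rounded_from = 4096 is page aligned, so the
	 * block containing the old EOF already extends to the end of its
	 * page; writing that page creates no new blocks, nothing to protect.
	 *
	 * from = 1536, to = 8192: no early return.  Page 0 straddles the old
	 * EOF, and blocks [2048, 4096) of it are still holes.  If the page
	 * stayed writable, an mmap store there would never trigger
	 * page_mkwrite(), the blocks would never be allocated, and the data
	 * would be lost.  Hence the page_mkclean() above.
	 */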
+
+/**
  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
  * @inode: inode
  * @lstart: offset of beginning of hole