mm: split ->readpages calls to avoid non-contiguous pages lists
author Christoph Hellwig <hch@lst.de>
Fri, 1 Jun 2018 16:03:06 +0000 (09:03 -0700)
committer Darrick J. Wong <darrick.wong@oracle.com>
Sat, 2 Jun 2018 01:37:32 +0000 (18:37 -0700)
That way file systems don't have to scan for non-contiguous pages
and work around them.  It also kicks off I/O earlier, allowing it to
finish earlier and reduce latency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
diff --git a/mm/readahead.c b/mm/readahead.c
index fa4d4b7..e273f0d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -140,8 +140,8 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
- * the pages first, then submits them all for I/O. This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
@@ -177,8 +177,18 @@ unsigned int __do_page_cache_readahead(struct address_space *mapping,
                rcu_read_lock();
                page = radix_tree_lookup(&mapping->i_pages, page_offset);
                rcu_read_unlock();
-               if (page && !radix_tree_exceptional_entry(page))
+               if (page && !radix_tree_exceptional_entry(page)) {
+                       /*
+                        * Page already present?  Kick off the current batch of
+                        * contiguous pages before continuing with the next
+                        * batch.
+                        */
+                       if (nr_pages)
+                               read_pages(mapping, filp, &page_pool, nr_pages,
+                                               gfp_mask);
+                       nr_pages = 0;
                        continue;
+               }
 
                page = __page_cache_alloc(gfp_mask);
                if (!page)
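
For readers outside the kernel tree, the batching pattern the hunk above
introduces can be sketched in isolation.  What follows is a minimal
userspace sketch, not kernel code: page_cached(), submit_batch(), and
readahead() are hypothetical stand-ins for the radix tree lookup,
read_pages()/->readpages(), and __do_page_cache_readahead() respectively.
Each run of contiguous uncached pages is flushed as a batch the moment an
already-cached page (a hole) is found, so the file system never sees a
non-contiguous pages list.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the page cache lookup; pretend offsets 3 and 4 are
 * already cached.  The kernel does a radix tree lookup here. */
static bool page_cached(size_t offset)
{
	return offset == 3 || offset == 4;
}

/* Stand-in for read_pages(): the kernel hands the collected batch to
 * the file system via ->readpages() (or ->readpage() per page). */
static void submit_batch(size_t first, size_t nr_pages)
{
	if (nr_pages)
		printf("submit %zu page(s) at offset %zu\n", nr_pages, first);
}

/* Simplified shape of the __do_page_cache_readahead() loop: collect
 * contiguous uncached pages, and kick off the current batch as soon
 * as an already-cached page is hit, so every batch is contiguous. */
static void readahead(size_t start, size_t count)
{
	size_t batch_start = start;
	size_t nr_pages = 0;

	for (size_t i = 0; i < count; i++) {
		size_t offset = start + i;

		if (page_cached(offset)) {
			submit_batch(batch_start, nr_pages);
			nr_pages = 0;
			batch_start = offset + 1;
			continue;
		}
		nr_pages++;
	}
	submit_batch(batch_start, nr_pages);	/* final batch, if any */
}

int main(void)
{
	readahead(0, 8);	/* expect batches [0..2] and [5..7] */
	return 0;
}

Flushing at the hole, rather than after scanning the whole window, is
also what lets the first batch's I/O start while the remaining pages are
still being examined and allocated.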