Merge tag 'v3.14.25' into backport/v3.14.24-ltsi-rc1+v3.14.25/snapshot-merge.wip
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / swap.c
index d1100b6..c8048d7 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,7 +57,7 @@ static void __page_cache_release(struct page *page)
 
                spin_lock_irqsave(&zone->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, zone);
-               VM_BUG_ON(!PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
        }
 
        /* __split_huge_page_refcount can run under us */
-       page_head = compound_trans_head(page);
+       page_head = compound_head(page);
 
        /*
         * THP can not break up slab pages so avoid taking
@@ -130,8 +130,8 @@ static void put_compound_page(struct page *page)
                         * __split_huge_page_refcount cannot race
                         * here.
                         */
-                       VM_BUG_ON(!PageHead(page_head));
-                       VM_BUG_ON(page_mapcount(page) != 0);
+                       VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+                       VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
                        if (put_page_testzero(page_head)) {
                                /*
                                 * If this is the tail of a slab
@@ -148,7 +148,7 @@ static void put_compound_page(struct page *page)
                                 * the compound page enters the buddy
                                 * allocator.
                                 */
-                               VM_BUG_ON(PageSlab(page_head));
+                               VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
                                __put_compound_page(page_head);
                        }
                        return;
@@ -199,7 +199,7 @@ out_put_single:
                                __put_single_page(page);
                        return;
                }
-               VM_BUG_ON(page_head != page->first_page);
+               VM_BUG_ON_PAGE(page_head != page->first_page, page);
                /*
                 * We can release the refcount taken by
                 * get_page_unless_zero() now that
@@ -207,12 +207,12 @@ out_put_single:
                 * compound_lock.
                 */
                if (put_page_testzero(page_head))
-                       VM_BUG_ON(1);
+                       VM_BUG_ON_PAGE(1, page_head);
                /* __split_huge_page_refcount will wait now */
-               VM_BUG_ON(page_mapcount(page) <= 0);
+               VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
                atomic_dec(&page->_mapcount);
-               VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
-               VM_BUG_ON(atomic_read(&page->_count) != 0);
+               VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
+               VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
                compound_unlock_irqrestore(page_head, flags);
 
                if (put_page_testzero(page_head)) {
@@ -223,7 +223,7 @@ out_put_single:
                }
        } else {
                /* page_head is a dangling pointer */
-               VM_BUG_ON(PageTail(page));
+               VM_BUG_ON_PAGE(PageTail(page), page);
                goto out_put_single;
        }
 }
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
         */
        unsigned long flags;
        bool got;
-       struct page *page_head = compound_trans_head(page);
+       struct page *page_head = compound_head(page);
 
        /* Ref to put_compound_page() comment. */
        if (!__compound_tail_refcounted(page_head)) {
@@ -264,7 +264,7 @@ bool __get_page_tail(struct page *page)
                         * page. __split_huge_page_refcount
                         * cannot race here.
                         */
-                       VM_BUG_ON(!PageHead(page_head));
+                       VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
                        __get_page_tail_foll(page, true);
                        return true;
                } else {
@@ -604,8 +604,8 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-       VM_BUG_ON(PageActive(page) && PageUnevictable(page));
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+       VM_BUG_ON_PAGE(PageLRU(page), page);
        __lru_cache_add(page);
 }
 
@@ -846,7 +846,7 @@ void release_pages(struct page **pages, int nr, int cold)
                        }
 
                        lruvec = mem_cgroup_page_lruvec(page, zone);
-                       VM_BUG_ON(!PageLRU(page));
+                       VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }
@@ -888,9 +888,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 {
        const int file = 0;
 
-       VM_BUG_ON(!PageHead(page));
-       VM_BUG_ON(PageCompound(page_tail));
-       VM_BUG_ON(PageLRU(page_tail));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+       VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
@@ -929,7 +929,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
        int active = PageActive(page);
        enum lru_list lru = page_lru(page);
 
-       VM_BUG_ON(PageLRU(page));
+       VM_BUG_ON_PAGE(PageLRU(page), page);
 
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, lru);
@@ -948,6 +948,57 @@ void __pagevec_lru_add(struct pagevec *pvec)
 EXPORT_SYMBOL(__pagevec_lru_add);
 
 /**
+ * pagevec_lookup_entries - gang pagecache lookup
+ * @pvec:      Where the resulting entries are placed
+ * @mapping:   The address_space to search
+ * @start:     The starting entry index
+ * @nr_pages:  The maximum number of entries
+ * @indices:   The cache indices corresponding to the entries in @pvec
+ *
+ * pagevec_lookup_entries() will search for and return a group of up
+ * to @nr_pages pages and shadow entries in the mapping.  All
+ * entries are placed in @pvec.  pagevec_lookup_entries() takes a
+ * reference against actual pages in @pvec.
+ *
+ * The search returns a group of mapping-contiguous entries with
+ * ascending indexes.  There may be holes in the indices due to
+ * not-present entries.
+ *
+ * pagevec_lookup_entries() returns the number of entries which were
+ * found.
+ */
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
+                               struct address_space *mapping,
+                               pgoff_t start, unsigned nr_pages,
+                               pgoff_t *indices)
+{
+       pvec->nr = find_get_entries(mapping, start, nr_pages,
+                                   pvec->pages, indices);
+       return pagevec_count(pvec);
+}
+
+/**
+ * pagevec_remove_exceptionals - pagevec exceptionals pruning
+ * @pvec:      The pagevec to prune
+ *
+ * pagevec_lookup_entries() fills both pages and exceptional radix
+ * tree entries into the pagevec.  This function prunes all
+ * exceptionals from @pvec without leaving holes, so that it can be
+ * passed on to page-only pagevec operations.
+ */
+void pagevec_remove_exceptionals(struct pagevec *pvec)
+{
+       int i, j;
+
+       for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
+               struct page *page = pvec->pages[i];
+               if (!radix_tree_exceptional_entry(page))
+                       pvec->pages[j++] = page;
+       }
+       pvec->nr = j;
+}
+
+/**
  * pagevec_lookup - gang pagecache lookup
  * @pvec:      Where the resulting pages are placed
  * @mapping:   The address_space to search