mm: make per-memcg LRU lists exclusive
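
Reclaim in mm/vmscan.c is converted to operate on per-memcg LRU lists directly, rather than special-casing the global zone LRU next to them.

The scan_control member mem_cgroup is renamed to target_mem_cgroup: the cgroup that hit its limit and is the primary target of the reclaim invocation. The new struct mem_cgroup_zone bundles the (memcg, zone) pair whose lists a particular scan actually works on, and most reclaim helpers now take such a pair instead of a bare zone. scanning_global_lru() is reinterpreted to test that pair, while the new global_reclaim() tests whether the invocation as a whole is global (kswapd or direct reclaim) rather than limit reclaim.

Page isolation goes through a single isolate_pages() that looks up the lruvec with mem_cgroup_zone_lruvec() and takes pages straight off &lruvec->lists[lru]; the separate isolate_pages_global() and mem_cgroup_isolate_pages() paths go away. shrink_zone() becomes a walk over the memcg hierarchy via mem_cgroup_iter(), calling shrink_mem_cgroup_zone() for each cgroup: limit reclaim keeps its historical behaviour of scanning a single memcg per priority cycle, while global reclaim scans all of them to meet the zone-wide target. kswapd's background anon aging moves into age_active_anon(), which applies the same aging to every memcg's lists in the zone, and the unevictable rescue path now moves pages between lruvec lists via mem_cgroup_lru_move_lists().

---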
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 26f4a8a..813aae8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -103,8 +103,11 @@ struct scan_control {
         */
        reclaim_mode_t reclaim_mode;
 
-       /* Which cgroup do we reclaim from */
-       struct mem_cgroup *mem_cgroup;
+       /*
+        * The memory cgroup that hit its limit and as a result is the
+        * primary target of this reclaim invocation.
+        */
+       struct mem_cgroup *target_mem_cgroup;
 
        /*
         * Nodemask of nodes allowed by the caller. If NULL, all nodes
@@ -113,6 +116,11 @@ struct scan_control {
        nodemask_t      *nodemask;
 };
 
+struct mem_cgroup_zone {
+       struct mem_cgroup *mem_cgroup;
+       struct zone *zone;
+};
+
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
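
For illustration only, a minimal sketch (not part of the patch) of how a caller pairs a cgroup with a zone and hands that pair down the reclaim stack; memcg, zone, sc and priority are assumed to be in scope, mirroring the shrink_zone() loop added further below:

        struct mem_cgroup_zone mz = {
                .mem_cgroup = memcg,    /* NULL means the global per-zone LRU, see scanning_global_lru() */
                .zone = zone,
        };

        shrink_mem_cgroup_zone(priority, &mz, sc);
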
@@ -153,28 +161,45 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scanning_global_lru(sc)        (!(sc)->mem_cgroup)
+static bool global_reclaim(struct scan_control *sc)
+{
+       return !sc->target_mem_cgroup;
+}
+
+static bool scanning_global_lru(struct mem_cgroup_zone *mz)
+{
+       return !mz->mem_cgroup;
+}
 #else
-#define scanning_global_lru(sc)        (1)
+static bool global_reclaim(struct scan_control *sc)
+{
+       return true;
+}
+
+static bool scanning_global_lru(struct mem_cgroup_zone *mz)
+{
+       return true;
+}
 #endif
 
-static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
-                                                 struct scan_control *sc)
+static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
 {
-       if (!scanning_global_lru(sc))
-               return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+       if (!scanning_global_lru(mz))
+               return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
 
-       return &zone->reclaim_stat;
+       return &mz->zone->reclaim_stat;
 }
 
-static unsigned long zone_nr_lru_pages(struct zone *zone,
-                               struct scan_control *sc, enum lru_list lru)
+static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
+                                      enum lru_list lru)
 {
-       if (!scanning_global_lru(sc))
-               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
-                               zone_to_nid(zone), zone_idx(zone), BIT(lru));
+       if (!scanning_global_lru(mz))
+               return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
+                                                   zone_to_nid(mz->zone),
+                                                   zone_idx(mz->zone),
+                                                   BIT(lru));
 
-       return zone_page_state(zone, NR_LRU_BASE + lru);
+       return zone_page_state(mz->zone, NR_LRU_BASE + lru);
 }
 
 
@@ -677,12 +702,13 @@ enum page_references {
 };
 
 static enum page_references page_check_references(struct page *page,
+                                                 struct mem_cgroup_zone *mz,
                                                  struct scan_control *sc)
 {
        int referenced_ptes, referenced_page;
        unsigned long vm_flags;
 
-       referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+       referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
        referenced_page = TestClearPageReferenced(page);
 
        /* Lumpy reclaim - ignore references */
@@ -738,7 +764,7 @@ static enum page_references page_check_references(struct page *page,
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                     struct zone *zone,
+                                     struct mem_cgroup_zone *mz,
                                      struct scan_control *sc,
                                      int priority,
                                      unsigned long *ret_nr_dirty,
@@ -769,7 +795,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
-               VM_BUG_ON(page_zone(page) != zone);
+               VM_BUG_ON(page_zone(page) != mz->zone);
 
                sc->nr_scanned++;
 
@@ -803,7 +829,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        }
                }
 
-               references = page_check_references(page, sc);
+               references = page_check_references(page, mz, sc);
                switch (references) {
                case PAGEREF_ACTIVATE:
                        goto activate_locked;
@@ -994,8 +1020,8 @@ keep_lumpy:
         * back off and wait for congestion to clear because further reclaim
         * will encounter the same problem
         */
-       if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
-               zone_set_flag(zone, ZONE_CONGESTED);
+       if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
+               zone_set_flag(mz->zone, ZONE_CONGESTED);
 
        free_hot_cold_page_list(&free_pages, 1);
 
@@ -1113,15 +1139,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                switch (__isolate_lru_page(page, mode, file)) {
                case 0:
+                       mem_cgroup_lru_del(page);
                        list_move(&page->lru, dst);
-                       mem_cgroup_del_lru(page);
                        nr_taken += hpage_nr_pages(page);
                        break;
 
                case -EBUSY:
                        /* else it is being freed elsewhere */
                        list_move(&page->lru, src);
-                       mem_cgroup_rotate_lru_list(page, page_lru(page));
                        continue;
 
                default:
@@ -1171,8 +1196,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                break;
 
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+                               mem_cgroup_lru_del(cursor_page);
                                list_move(&cursor_page->lru, dst);
-                               mem_cgroup_del_lru(cursor_page);
                                nr_taken += hpage_nr_pages(cursor_page);
                                nr_lumpy_taken++;
                                if (PageDirty(cursor_page))
@@ -1213,19 +1238,21 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        return nr_taken;
 }
 
-static unsigned long isolate_pages_global(unsigned long nr,
-                                       struct list_head *dst,
-                                       unsigned long *scanned, int order,
-                                       isolate_mode_t mode,
-                                       struct zone *z, int active, int file)
+static unsigned long isolate_pages(unsigned long nr, struct mem_cgroup_zone *mz,
+                                  struct list_head *dst,
+                                  unsigned long *scanned, int order,
+                                  isolate_mode_t mode, int active, int file)
 {
+       struct lruvec *lruvec;
        int lru = LRU_BASE;
+
+       lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
        if (active)
                lru += LRU_ACTIVE;
        if (file)
                lru += LRU_FILE;
-       return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-                                                               mode, file);
+       return isolate_lru_pages(nr, &lruvec->lists[lru], dst,
+                                scanned, order, mode, file);
 }
 
 /*
@@ -1313,7 +1340,7 @@ static int too_many_isolated(struct zone *zone, int file,
        if (current_is_kswapd())
                return 0;
 
-       if (!scanning_global_lru(sc))
+       if (!global_reclaim(sc))
                return 0;
 
        if (file) {
@@ -1331,13 +1358,14 @@ static int too_many_isolated(struct zone *zone, int file,
  * TODO: Try merging with migrations version of putback_lru_pages
  */
 static noinline_for_stack void
-putback_lru_pages(struct zone *zone, struct scan_control *sc,
-                               unsigned long nr_anon, unsigned long nr_file,
-                               struct list_head *page_list)
+putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
+                 unsigned long nr_anon, unsigned long nr_file,
+                 struct list_head *page_list)
 {
        struct page *page;
        struct pagevec pvec;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       struct zone *zone = mz->zone;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
        pagevec_init(&pvec, 1);
 
@@ -1377,15 +1405,17 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
        pagevec_release(&pvec);
 }
 
-static noinline_for_stack void update_isolated_counts(struct zone *zone,
-                                       struct scan_control *sc,
-                                       unsigned long *nr_anon,
-                                       unsigned long *nr_file,
-                                       struct list_head *isolated_list)
+static noinline_for_stack void
+update_isolated_counts(struct mem_cgroup_zone *mz,
+                      struct scan_control *sc,
+                      unsigned long *nr_anon,
+                      unsigned long *nr_file,
+                      struct list_head *isolated_list)
 {
        unsigned long nr_active;
+       struct zone *zone = mz->zone;
        unsigned int count[NR_LRU_LISTS] = { 0, };
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
        nr_active = clear_active_flags(isolated_list, count);
        __count_vm_events(PGDEACTIVATE, nr_active);
@@ -1454,8 +1484,8 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
  * of reclaimed pages
  */
 static noinline_for_stack unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
-                       struct scan_control *sc, int priority, int file)
+shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
+                    struct scan_control *sc, int priority, int file)
 {
        LIST_HEAD(page_list);
        unsigned long nr_scanned;
@@ -1466,6 +1496,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
        isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
+       struct zone *zone = mz->zone;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1488,9 +1519,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        spin_lock_irq(&zone->lru_lock);
 
-       if (scanning_global_lru(sc)) {
-               nr_taken = isolate_pages_global(nr_to_scan, &page_list,
-                       &nr_scanned, sc->order, reclaim_mode, zone, 0, file);
+       nr_taken = isolate_pages(nr_to_scan, mz, &page_list,
+                                &nr_scanned, sc->order,
+                                reclaim_mode, 0, file);
+       if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1498,14 +1530,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                else
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
-       } else {
-               nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
-                       &nr_scanned, sc->order, reclaim_mode, zone,
-                       sc->mem_cgroup, 0, file);
-               /*
-                * mem_cgroup_isolate_pages() keeps track of
-                * scanned pages on its own.
-                */
        }
 
        if (nr_taken == 0) {
@@ -1513,17 +1537,17 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                return 0;
        }
 
-       update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
+       update_isolated_counts(mz, sc, &nr_anon, &nr_file, &page_list);
 
        spin_unlock_irq(&zone->lru_lock);
 
-       nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
+       nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
                                                &nr_dirty, &nr_writeback);
 
        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                set_reclaim_mode(priority, sc, true);
-               nr_reclaimed += shrink_page_list(&page_list, zone, sc,
+               nr_reclaimed += shrink_page_list(&page_list, mz, sc,
                                        priority, &nr_dirty, &nr_writeback);
        }
 
@@ -1532,7 +1556,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
-       putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+       putback_lru_pages(mz, sc, nr_anon, nr_file, &page_list);
 
        /*
         * If reclaim is isolating dirty pages under writeback, it implies
@@ -1597,13 +1621,15 @@ static void move_active_pages_to_lru(struct zone *zone,
        pagevec_init(&pvec, 1);
 
        while (!list_empty(list)) {
+               struct lruvec *lruvec;
+
                page = lru_to_page(list);
 
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
 
-               list_move(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_add_lru_list(page, lru);
+               lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+               list_move(&page->lru, &lruvec->lists[lru]);
                pgmoved += hpage_nr_pages(page);
 
                if (!pagevec_add(&pvec, page) || list_empty(list)) {
@@ -1619,8 +1645,10 @@ static void move_active_pages_to_lru(struct zone *zone,
                __count_vm_events(PGDEACTIVATE, pgmoved);
 }
 
-static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                       struct scan_control *sc, int priority, int file)
+static void shrink_active_list(unsigned long nr_pages,
+                              struct mem_cgroup_zone *mz,
+                              struct scan_control *sc,
+                              int priority, int file)
 {
        unsigned long nr_taken;
        unsigned long pgscanned;
@@ -1629,9 +1657,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        unsigned long nr_rotated = 0;
        isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
+       struct zone *zone = mz->zone;
 
        lru_add_drain();
 
@@ -1641,22 +1670,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                reclaim_mode |= ISOLATE_CLEAN;
 
        spin_lock_irq(&zone->lru_lock);
-       if (scanning_global_lru(sc)) {
-               nr_taken = isolate_pages_global(nr_pages, &l_hold,
-                                               &pgscanned, sc->order,
-                                               reclaim_mode, zone,
-                                               1, file);
+
+       nr_taken = isolate_pages(nr_pages, mz, &l_hold,
+                                &pgscanned, sc->order,
+                                reclaim_mode, 1, file);
+
+       if (global_reclaim(sc))
                zone->pages_scanned += pgscanned;
-       } else {
-               nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
-                                               &pgscanned, sc->order,
-                                               reclaim_mode, zone,
-                                               sc->mem_cgroup, 1, file);
-               /*
-                * mem_cgroup_isolate_pages() keeps track of
-                * scanned pages on its own.
-                */
-       }
 
        reclaim_stat->recent_scanned[file] += nr_taken;
 
@@ -1678,7 +1698,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        continue;
                }
 
-               if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+               if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
                        nr_rotated += hpage_nr_pages(page);
                        /*
                         * Identify referenced, file-backed active pages and
@@ -1741,10 +1761,8 @@ static int inactive_anon_is_low_global(struct zone *zone)
  * Returns true if the zone does not have enough inactive anon pages,
  * meaning some active anon pages need to be deactivated.
  */
-static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
 {
-       int low;
-
        /*
         * If we don't have swap space, anonymous page deactivation
         * is pointless.
@@ -1752,15 +1770,14 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
        if (!total_swap_pages)
                return 0;
 
-       if (scanning_global_lru(sc))
-               low = inactive_anon_is_low_global(zone);
-       else
-               low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone);
-       return low;
+       if (!scanning_global_lru(mz))
+               return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
+                                                      mz->zone);
+
+       return inactive_anon_is_low_global(mz->zone);
 }
 #else
-static inline int inactive_anon_is_low(struct zone *zone,
-                                       struct scan_control *sc)
+static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
 {
        return 0;
 }
@@ -1778,8 +1795,7 @@ static int inactive_file_is_low_global(struct zone *zone)
 
 /**
  * inactive_file_is_low - check if file pages need to be deactivated
- * @zone: zone to check
- * @sc:   scan control of this context
+ * @mz: memory cgroup and zone to check
  *
  * When the system is doing streaming IO, memory pressure here
  * ensures that active file pages get deactivated, until more
@@ -1791,45 +1807,44 @@ static int inactive_file_is_low_global(struct zone *zone)
  * This uses a different ratio than the anonymous pages, because
  * the page cache uses a use-once replacement algorithm.
  */
-static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+static int inactive_file_is_low(struct mem_cgroup_zone *mz)
 {
-       int low;
+       if (!scanning_global_lru(mz))
+               return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
+                                                      mz->zone);
 
-       if (scanning_global_lru(sc))
-               low = inactive_file_is_low_global(zone);
-       else
-               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup, zone);
-       return low;
+       return inactive_file_is_low_global(mz->zone);
 }
 
-static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
-                               int file)
+static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
 {
        if (file)
-               return inactive_file_is_low(zone, sc);
+               return inactive_file_is_low(mz);
        else
-               return inactive_anon_is_low(zone, sc);
+               return inactive_anon_is_low(mz);
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-       struct zone *zone, struct scan_control *sc, int priority)
+                                struct mem_cgroup_zone *mz,
+                                struct scan_control *sc, int priority)
 {
        int file = is_file_lru(lru);
 
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(zone, sc, file))
-                   shrink_active_list(nr_to_scan, zone, sc, priority, file);
+               if (inactive_list_is_low(mz, file))
+                       shrink_active_list(nr_to_scan, mz, sc, priority, file);
                return 0;
        }
 
-       return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+       return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
 }
 
-static int vmscan_swappiness(struct scan_control *sc)
+static int vmscan_swappiness(struct mem_cgroup_zone *mz,
+                            struct scan_control *sc)
 {
-       if (scanning_global_lru(sc))
+       if (global_reclaim(sc))
                return vm_swappiness;
-       return mem_cgroup_swappiness(sc->mem_cgroup);
+       return mem_cgroup_swappiness(mz->mem_cgroup);
 }
 
 /*
@@ -1840,13 +1855,13 @@ static int vmscan_swappiness(struct scan_control *sc)
  *
  * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_count(struct zone *zone, struct scan_control *sc,
-                                       unsigned long *nr, int priority)
+static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
+                          unsigned long *nr, int priority)
 {
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
@@ -1862,9 +1877,9 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * latencies, so it's better to scan a minimum amount there as
         * well.
         */
-       if (scanning_global_lru(sc) && current_is_kswapd())
+       if (current_is_kswapd() && mz->zone->all_unreclaimable)
                force_scan = true;
-       if (!scanning_global_lru(sc))
+       if (!global_reclaim(sc))
                force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
@@ -1876,16 +1891,16 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                goto out;
        }
 
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
 
-       if (scanning_global_lru(sc)) {
-               free  = zone_page_state(zone, NR_FREE_PAGES);
+       if (global_reclaim(sc)) {
+               free  = zone_page_state(mz->zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
-               if (unlikely(file + free <= high_wmark_pages(zone))) {
+               if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
@@ -1897,8 +1912,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = vmscan_swappiness(sc);
-       file_prio = 200 - vmscan_swappiness(sc);
+       anon_prio = vmscan_swappiness(mz, sc);
+       file_prio = 200 - vmscan_swappiness(mz, sc);
 
        /*
         * OK, so we have swap space and a fair amount of page cache
@@ -1911,7 +1926,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         *
         * anon in [0], file in [1]
         */
-       spin_lock_irq(&zone->lru_lock);
+       spin_lock_irq(&mz->zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
@@ -1932,7 +1947,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 
        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
-       spin_unlock_irq(&zone->lru_lock);
+       spin_unlock_irq(&mz->zone->lru_lock);
 
        fraction[0] = ap;
        fraction[1] = fp;
@@ -1942,7 +1957,7 @@ out:
                int file = is_file_lru(l);
                unsigned long scan;
 
-               scan = zone_nr_lru_pages(zone, sc, l);
+               scan = zone_nr_lru_pages(mz, l);
                if (priority || noswap) {
                        scan >>= priority;
                        if (!scan && force_scan)
@@ -1960,7 +1975,7 @@ out:
  * back to the allocator and call try_to_compact_zone(), we ensure that
  * there are enough free pages for it to be likely successful
  */
-static inline bool should_continue_reclaim(struct zone *zone,
+static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
                                        unsigned long nr_reclaimed,
                                        unsigned long nr_scanned,
                                        struct scan_control *sc)
@@ -2000,15 +2015,15 @@ static inline bool should_continue_reclaim(struct zone *zone,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
-               inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+               inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
 
        /* If compaction would go ahead or the allocation would succeed, stop */
-       switch (compaction_suitable(zone, sc->order)) {
+       switch (compaction_suitable(mz->zone, sc->order)) {
        case COMPACT_PARTIAL:
        case COMPACT_CONTINUE:
                return false;
@@ -2020,8 +2035,8 @@ static inline bool should_continue_reclaim(struct zone *zone,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_zone(int priority, struct zone *zone,
-                               struct scan_control *sc)
+static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
+                                  struct scan_control *sc)
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
@@ -2033,7 +2048,7 @@ static void shrink_zone(int priority, struct zone *zone,
 restart:
        nr_reclaimed = 0;
        nr_scanned = sc->nr_scanned;
-       get_scan_count(zone, sc, nr, priority);
+       get_scan_count(mz, sc, nr, priority);
 
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2045,7 +2060,7 @@ restart:
                                nr[l] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(l, nr_to_scan,
-                                                           zone, sc, priority);
+                                                           mz, sc, priority);
                        }
                }
                /*
@@ -2066,17 +2081,53 @@ restart:
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc))
-               shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+       if (inactive_anon_is_low(mz))
+               shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
 
        /* reclaim/compaction might need reclaim to continue */
-       if (should_continue_reclaim(zone, nr_reclaimed,
+       if (should_continue_reclaim(mz, nr_reclaimed,
                                        sc->nr_scanned - nr_scanned, sc))
                goto restart;
 
        throttle_vm_writeout(sc->gfp_mask);
 }
 
+static void shrink_zone(int priority, struct zone *zone,
+                       struct scan_control *sc)
+{
+       struct mem_cgroup *root = sc->target_mem_cgroup;
+       struct mem_cgroup_reclaim_cookie reclaim = {
+               .zone = zone,
+               .priority = priority,
+       };
+       struct mem_cgroup *memcg;
+
+       memcg = mem_cgroup_iter(root, NULL, &reclaim);
+       do {
+               struct mem_cgroup_zone mz = {
+                       .mem_cgroup = memcg,
+                       .zone = zone,
+               };
+
+               shrink_mem_cgroup_zone(priority, &mz, sc);
+               /*
+                * Limit reclaim has historically picked one memcg and
+                * scanned it with decreasing priority levels until
+                * nr_to_reclaim had been reclaimed.  This priority
+                * cycle is thus over after a single memcg.
+                *
+                * Direct reclaim and kswapd, on the other hand, have
+                * to scan all memory cgroups to fulfill the overall
+                * scan target for the zone.
+                */
+               if (!global_reclaim(sc)) {
+                       mem_cgroup_iter_break(root, memcg);
+                       break;
+               }
+               memcg = mem_cgroup_iter(root, memcg, &reclaim);
+       } while (memcg);
+}
+
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -2114,7 +2165,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                 * Take care memory controller reclaiming has small influence
                 * to global LRU.
                 */
-               if (scanning_global_lru(sc)) {
+               if (global_reclaim(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -2212,13 +2263,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        get_mems_allowed();
        delayacct_freepages_start();
 
-       if (scanning_global_lru(sc))
+       if (global_reclaim(sc))
                count_vm_event(ALLOCSTALL);
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
                if (!priority)
-                       disable_swap_token(sc->mem_cgroup);
+                       disable_swap_token(sc->target_mem_cgroup);
                if (shrink_zones(priority, zonelist, sc))
                        break;
 
@@ -2226,7 +2277,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
-               if (scanning_global_lru(sc)) {
+               if (global_reclaim(sc)) {
                        unsigned long lru_pages = 0;
                        for_each_zone_zonelist(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask)) {
@@ -2288,7 +2339,7 @@ out:
                return 0;
 
        /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+       if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
 
        return 0;
@@ -2305,7 +2356,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .may_unmap = 1,
                .may_swap = 1,
                .order = order,
-               .mem_cgroup = NULL,
+               .target_mem_cgroup = NULL,
                .nodemask = nodemask,
        };
        struct shrink_control shrink = {
@@ -2337,7 +2388,11 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_unmap = 1,
                .may_swap = !noswap,
                .order = 0,
+               .target_mem_cgroup = mem,
+       };
+       struct mem_cgroup_zone mz = {
                .mem_cgroup = mem,
+               .zone = zone,
        };
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2354,7 +2409,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
         * will pick up pages from other mem cgroup's as well. We hack
         * the priority and make it zero.
         */
-       shrink_zone(0, zone, &sc);
+       shrink_mem_cgroup_zone(0, &mz, &sc);
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -2375,7 +2430,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .order = 0,
-               .mem_cgroup = mem_cont,
+               .target_mem_cgroup = mem_cont,
                .nodemask = NULL, /* we don't care the placement */
                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
@@ -2405,6 +2460,29 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 }
 #endif
 
+static void age_active_anon(struct zone *zone, struct scan_control *sc,
+                           int priority)
+{
+       struct mem_cgroup *memcg;
+
+       if (!total_swap_pages)
+               return;
+
+       memcg = mem_cgroup_iter(NULL, NULL, NULL);
+       do {
+               struct mem_cgroup_zone mz = {
+                       .mem_cgroup = memcg,
+                       .zone = zone,
+               };
+
+               if (inactive_anon_is_low(&mz))
+                       shrink_active_list(SWAP_CLUSTER_MAX, &mz,
+                                          sc, priority, 0);
+
+               memcg = mem_cgroup_iter(NULL, memcg, NULL);
+       } while (memcg);
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2525,7 +2603,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 */
                .nr_to_reclaim = ULONG_MAX,
                .order = order,
-               .mem_cgroup = NULL,
+               .target_mem_cgroup = NULL,
        };
        struct shrink_control shrink = {
                .gfp_mask = sc.gfp_mask,
@@ -2564,9 +2642,7 @@ loop_again:
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
-                       if (inactive_anon_is_low(zone, &sc))
-                               shrink_active_list(SWAP_CLUSTER_MAX, zone,
-                                                       &sc, priority, 0);
+                       age_active_anon(zone, &sc, priority);
 
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
@@ -3355,16 +3431,18 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
  */
 static void check_move_unevictable_page(struct page *page, struct zone *zone)
 {
-       VM_BUG_ON(PageActive(page));
+       struct lruvec *lruvec;
 
+       VM_BUG_ON(PageActive(page));
 retry:
        ClearPageUnevictable(page);
        if (page_evictable(page, NULL)) {
                enum lru_list l = page_lru_base_type(page);
 
                __dec_zone_state(zone, NR_UNEVICTABLE);
-               list_move(&page->lru, &zone->lru[l].list);
-               mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+               lruvec = mem_cgroup_lru_move_lists(zone, page,
+                                                  LRU_UNEVICTABLE, l);
+               list_move(&page->lru, &lruvec->lists[l]);
                __inc_zone_state(zone, NR_INACTIVE_ANON + l);
                __count_vm_event(UNEVICTABLE_PGRESCUED);
        } else {
@@ -3372,8 +3450,9 @@ retry:
                 * rotate unevictable list
                 */
                SetPageUnevictable(page);
-               list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
-               mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+               lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
+                                                  LRU_UNEVICTABLE);
+               list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
                if (page_evictable(page, NULL))
                        goto retry;
        }