mm: fix calculation of dirtyable memory
author	Sonny Rao <sonnyrao@chromium.org>	Thu, 20 Dec 2012 23:05:07 +0000 (15:05 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Fri, 21 Dec 2012 01:40:18 +0000 (17:40 -0800)
The system uses global_dirtyable_memory() to calculate the number of
dirtyable pages, i.e. the pages that can be allocated to the page
cache.  A bug causes this calculation to underflow, making the page
count look like a huge unsigned number.  This in turn confuses the
dirty writeback throttling into aggressively writing back pages as
they become dirty (usually one page at a time).  This generally only
affects systems with highmem, because the underflowed count gets
subtracted from the global count of dirtyable memory.
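
For illustration, a minimal user-space sketch of the failure mode (the
variable names are placeholders, not the kernel's): subtracting a
reserve that is larger than the running total of an unsigned long wraps
around to a huge value instead of going negative.

	#include <stdio.h>

	int main(void)
	{
		unsigned long reclaimable = 100;  /* pages in a small highmem zone */
		unsigned long reserve     = 150;  /* its dirty_balance_reserve share */

		/* wraps around: unsigned arithmetic cannot go negative */
		unsigned long dirtyable = reclaimable - reserve;

		printf("%lu\n", dirtyable);  /* 18446744073709551566 on 64-bit */
		return 0;
	}

The throttling limits computed from such a wrapped value no longer
reflect reality, which is what drives the aggressive writeback
described above.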

The problem was introduced with v3.2-4896-gab8fabd.

The fix is to ensure that neither the highmem total nor the global
total of dirtyable memory can underflow.
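
The clamp used by the patch below can be sketched in plain C; min_ul()
and the function and parameter names here are illustrative stand-ins
(the kernel itself uses its min() macro and the existing page
counters):

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	static unsigned long dirtyable(unsigned long free,
				       unsigned long reclaimable,
				       unsigned long reserve)
	{
		unsigned long x = free + reclaimable;

		/* subtract at most x so the result can never wrap below zero */
		x -= min_ul(x, reserve);
		return x;
	}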

Signed-off-by: Sonny Rao <sonnyrao@chromium.org>
Signed-off-by: Puneet Kumar <puneetster@chromium.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Damien Wyart <damien.wyart@free.fr>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f42712..0713bfb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
                     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
        }
        /*
+        * Unreclaimable memory (kernel memory or anonymous memory
+        * without swap) can bring down the dirtyable pages below
+        * the zone's dirty balance reserve and the above calculation
+        * will underflow.  However we still want to add in nodes
+        * which are below threshold (negative values) to get a more
+        * accurate calculation but make sure that the total never
+        * underflows.
+        */
+       if ((long)x < 0)
+               x = 0;
+
+       /*
         * Make sure that the number of highmem pages is never larger
         * than the number of the total dirtyable memory. This can only
         * occur in very strange VM situations but we want to make sure
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
-           dirty_balance_reserve;
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+       x -= min(x, dirty_balance_reserve);
 
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
         * highmem zone can hold its share of dirty pages, so we don't
         * care about vm_highmem_is_dirtyable here.
         */
-       return zone_page_state(zone, NR_FREE_PAGES) +
-              zone_reclaimable_pages(zone) -
-              zone->dirty_balance_reserve;
+       unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+               zone_reclaimable_pages(zone);
+
+       /* don't allow this to underflow */
+       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+       return nr_pages;
 }
 
 /**