Revert "percpu: free percpu allocation info for uniprocessor system"
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6380758..9f45f87 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+       unsigned long nr_pages;
+
+       nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+       nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+       nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+       return nr_pages;
+}
+
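
The rewritten helper subtracts dirty_balance_reserve from the free page count
before adding the file LRU pages, clamping with min() so the unsigned
subtraction cannot wrap when the reserve exceeds the zone's free pages. A
minimal userspace sketch of the hazard (the values are made up for
illustration, not taken from a real zone):

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long free_pages = 100;   /* stand-in for NR_FREE_PAGES */
            unsigned long reserve    = 150;   /* stand-in for dirty_balance_reserve */
            unsigned long file_lru   = 4000;  /* inactive + active file pages */

            /* Unclamped subtraction wraps to a huge bogus value... */
            unsigned long bad  = free_pages - reserve + file_lru;
            /* ...the min() clamp used above pins it at zero instead. */
            unsigned long good = free_pages - min(free_pages, reserve) + file_lru;

            printf("unclamped: %lu\nclamped:   %lu\n", bad, good);
            return 0;
    }
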
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
@@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        unsigned long x = 0;
 
        for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+               struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+               x += zone_dirtyable_memory(z);
        }
        /*
         * Unreclaimable memory (kernel memory or anonymous memory
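
Routing the highmem loop through zone_dirtyable_memory() does more than
deduplicate: the open-coded expression it replaces subtracted
dirty_balance_reserve without any clamp, so a zone whose reserve exceeded its
free plus reclaimable pages would underflow and inflate the highmem total.
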
@@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+       x = global_page_state(NR_FREE_PAGES);
        x -= min(x, dirty_balance_reserve);
 
+       x += global_page_state(NR_INACTIVE_FILE);
+       x += global_page_state(NR_ACTIVE_FILE);
+
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
 
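
Replacing global_reclaimable_pages() with the explicit NR_INACTIVE_FILE +
NR_ACTIVE_FILE sum drops anonymous pages from the dirtyable total: anon pages
can only be made free by swapping, not by writing back dirty page cache, so
counting them let the dirty limits overshoot what writeback could actually
clean. As in zone_dirtyable_memory() above, the reserve is clamped with min()
before the file pages are added.
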
@@ -289,32 +310,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 }
 
 /**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache.  This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-       /*
-        * The effective global number of dirtyable pages may exclude
-        * highmem as a big-picture measure to keep the ratio between
-        * dirty memory and lowmem reasonable.
-        *
-        * But this function is purely about the individual zone and a
-        * highmem zone can hold its share of dirty pages, so we don't
-        * care about vm_highmem_is_dirtyable here.
-        */
-       unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-               zone_reclaimable_pages(zone);
-
-       /* don't allow this to underflow */
-       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-       return nr_pages;
-}
-
-/**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
  *
@@ -598,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  *     => fast response on large errors; small oscillation near setpoint
  */
-static inline long long pos_ratio_polynom(unsigned long setpoint,
+static long long pos_ratio_polynom(unsigned long setpoint,
                                          unsigned long dirty,
                                          unsigned long limit)
 {
        long long pos_ratio;
        long x;
 
-       x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+       x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
                    limit - setpoint + 1);
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
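
div_s64() takes only a 32-bit divisor (s32), while div64_s64() takes the full
64 bits (see include/linux/math64.h). On a machine with enough memory,
limit - setpoint + 1 no longer fits in 32 bits, and the truncation can hand
the division a zero divisor. A small standalone demonstration of the
truncation, using a hypothetical divisor value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical "limit - setpoint + 1" on a very large machine. */
            int64_t divisor = 0x100000000LL;        /* 2^32 */
            int32_t truncated = (int32_t)divisor;   /* low 32 bits == 0 */

            printf("64-bit divisor: %lld, as s32: %d\n",
                   (long long)divisor, truncated);  /* prints ... 0 */
            /* div_s64() would divide by the truncated value -> divide by
             * zero; div64_s64() keeps the divisor intact. */
            return 0;
    }
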
@@ -847,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
        x_intercept = bdi_setpoint + span;
 
        if (bdi_dirty < x_intercept - span / 4) {
-               pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+               pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
                                    x_intercept - bdi_setpoint + 1);
        } else
                pos_ratio /= 4;
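
Same fix, unsigned flavor: div_u64() truncates its divisor to u32, and
x_intercept - bdi_setpoint + 1 is subject to the identical overflow, so this
call moves to div64_u64() as well.
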
@@ -1329,9 +1324,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
        *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
        if (bdi_bg_thresh)
-               *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-                                        background_thresh,
-                                        dirty_thresh);
+               *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+                                                       background_thresh,
+                                                       dirty_thresh) : 0;
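
The per-BDI background threshold scales the global background/dirty ratio onto
the per-BDI dirty threshold; the new ternary guards the case where
dirty_thresh is 0 (the global thresholds can legitimately compute to zero),
which previously hit a divide-by-zero. The scaling as a userspace sketch, with
illustrative names and values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t bdi_bg_thresh_sketch(uint64_t bdi_thresh,
                                         uint64_t background_thresh,
                                         uint64_t dirty_thresh)
    {
            /* Keep bdi_bg_thresh : bdi_thresh in the same proportion as
             * background_thresh : dirty_thresh, without dividing by zero. */
            return dirty_thresh ?
                    bdi_thresh * background_thresh / dirty_thresh : 0;
    }

    int main(void)
    {
            /* 5:10 global ratio applied to a per-BDI threshold of 800. */
            printf("%llu\n", (unsigned long long)bdi_bg_thresh_sketch(800, 5, 10));
            /* Zero global dirty threshold: guarded, returns 0. */
            printf("%llu\n", (unsigned long long)bdi_bg_thresh_sketch(800, 5, 0));
            return 0;
    }
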
 
        /*
         * In order to avoid the stacked BDI deadlock we need
@@ -2178,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;
+               unsigned long flags;
 
                if (!mapping)
                        return 1;
 
-               spin_lock_irq(&mapping->tree_lock);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
@@ -2191,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
-               spin_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
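
spin_unlock_irq() unconditionally re-enables interrupts, so the old locking
corrupted the interrupt state of any caller that held interrupts disabled; the
irqsave/irqrestore pair records the caller's state in flags and restores
exactly that, making __set_page_dirty_nobuffers() callable from any context.
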
@@ -2402,7 +2398,7 @@ int test_clear_page_writeback(struct page *page)
        return ret;
 }
 
-int test_set_page_writeback(struct page *page)
+int __test_set_page_writeback(struct page *page, bool keep_write)
 {
        struct address_space *mapping = page_mapping(page);
        int ret;
@@ -2427,9 +2423,10 @@ int test_set_page_writeback(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-               radix_tree_tag_clear(&mapping->page_tree,
-                                    page_index(page),
-                                    PAGECACHE_TAG_TOWRITE);
+               if (!keep_write)
+                       radix_tree_tag_clear(&mapping->page_tree,
+                                               page_index(page),
+                                               PAGECACHE_TAG_TOWRITE);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
@@ -2440,7 +2437,7 @@ int test_set_page_writeback(struct page *page)
        return ret;
 
 }
-EXPORT_SYMBOL(test_set_page_writeback);
+EXPORT_SYMBOL(__test_set_page_writeback);
 
 /*
  * Return true if any of the pages in the mapping are marked with the
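
With the symbol renamed, callers of the old test_set_page_writeback()
presumably keep working through static inline wrappers in a header (a hedged
reconstruction along the lines of include/linux/page-flags.h, not part of this
diff), with the keep_write = true variant leaving PAGECACHE_TAG_TOWRITE set so
a later tagged writeback pass can still find the page:

    static inline int test_set_page_writeback(struct page *page)
    {
            return __test_set_page_writeback(page, false);
    }

    static inline int test_set_page_writeback_keepwrite(struct page *page)
    {
            return __test_set_page_writeback(page, true);
    }
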