Revert "percpu: free percpu allocation info for uniprocessor system"
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm/vmscan.c
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6ef876c..0c0b36e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -224,15 +224,15 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	unsigned long freed = 0;
 	unsigned long long delta;
 	long total_scan;
-	long max_pass;
+	long freeable;
 	long nr;
 	long new_nr;
 	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
 
-	max_pass = shrinker->count_objects(shrinker, shrinkctl);
-	if (max_pass == 0)
+	freeable = shrinker->count_objects(shrinker, shrinkctl);
+	if (freeable == 0)
 		return 0;
 
 	/*
@@ -244,14 +244,14 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 
 	total_scan = nr;
 	delta = (4 * nr_pages_scanned) / shrinker->seeks;
-	delta *= max_pass;
+	delta *= freeable;
 	do_div(delta, lru_pages + 1);
 	total_scan += delta;
 	if (total_scan < 0) {
 		printk(KERN_ERR
 		"shrink_slab: %pF negative objects to delete nr=%ld\n",
 		       shrinker->scan_objects, total_scan);
-		total_scan = max_pass;
+		total_scan = freeable;
 	}
 
 	/*
@@ -260,26 +260,26 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	 * shrinkers to return -1 all the time. This results in a large
 	 * nr being built up so when a shrink that can do some work
 	 * comes along it empties the entire cache due to nr >>>
-	 * max_pass. This is bad for sustaining a working set in
+	 * freeable. This is bad for sustaining a working set in
 	 * memory.
 	 *
 	 * Hence only allow the shrinker to scan the entire cache when
 	 * a large delta change is calculated directly.
 	 */
-	if (delta < max_pass / 4)
-		total_scan = min(total_scan, max_pass / 2);
+	if (delta < freeable / 4)
+		total_scan = min(total_scan, freeable / 2);
 
 	/*
 	 * Avoid risking looping forever due to too large nr value:
 	 * never try to free more than twice the estimate number of
 	 * freeable entries.
 	 */
-	if (total_scan > max_pass * 2)
-		total_scan = max_pass * 2;
+	if (total_scan > freeable * 2)
+		total_scan = freeable * 2;
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
 				nr_pages_scanned, lru_pages,
-				max_pass, delta, total_scan);
+				freeable, delta, total_scan);
 
 	/*
 	 * Normally, we should not scan less than batch_size objects in one
@@ -292,12 +292,12 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
 	 *
 	 * We detect the "tight on memory" situations by looking at the total
 	 * number of objects we want to scan (total_scan). If it is greater
-	 * than the total number of objects on slab (max_pass), we must be
+	 * than the total number of objects on slab (freeable), we must be
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
 	while (total_scan >= batch_size ||
-	       total_scan >= max_pass) {
+	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
 
@@ -1144,7 +1144,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 			TTU_UNMAP|TTU_IGNORE_ACCESS,
 			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
 	list_splice(&clean_pages, page_list);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
+	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
 	return ret;
 }
 
@@ -1540,19 +1540,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * If dirty pages are scanned that are not queued for IO, it
 		 * implies that flushers are not keeping up. In this case, flag
 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-		 * pages from reclaim context. It will forcibly stall in the
-		 * next check.
+		 * pages from reclaim context.
 		 */
 		if (nr_unqueued_dirty == nr_taken)
 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
 		/*
-		 * In addition, if kswapd scans pages marked marked for
-		 * immediate reclaim and under writeback (nr_immediate), it
-		 * implies that pages are cycling through the LRU faster than
+		 * If kswapd scans pages marked marked for immediate
+		 * reclaim and under writeback (nr_immediate), it implies
+		 * that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if (nr_unqueued_dirty == nr_taken || nr_immediate)
+		if (nr_immediate)
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
@@ -2425,8 +2424,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		unsigned long lru_pages = 0;
 
 		nodes_clear(shrink->nodes_to_scan);
-		for_each_zone_zonelist(zone, z, zonelist,
-				gfp_zone(sc->gfp_mask)) {
+		for_each_zone_zonelist_nodemask(zone, z, zonelist,
+					gfp_zone(sc->gfp_mask), sc->nodemask) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
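
The shrink_slab_node() hunks above only rename max_pass to freeable, but the clamping arithmetic they touch is easy to lose in the noise. Below is a minimal userspace sketch of that arithmetic; the helper name compute_total_scan() and the sample inputs in main() are illustrative assumptions, not kernel code.

/*
 * Standalone sketch (not kernel code) of how total_scan is derived from
 * the deferred count and then clamped against "freeable".
 */
#include <stdio.h>

static long compute_total_scan(long nr_deferred, unsigned long nr_pages_scanned,
			       int seeks, unsigned long lru_pages, long freeable)
{
	unsigned long long delta;
	long total_scan = nr_deferred;

	/* Scale the scan target by reclaim pressure, as in the diff. */
	delta = (4ULL * nr_pages_scanned) / seeks;
	delta *= freeable;
	delta /= lru_pages + 1;		/* stand-in for do_div() */
	total_scan += delta;
	if (total_scan < 0)
		total_scan = freeable;

	/* Only let the shrinker scan the whole cache on a large delta. */
	if (delta < (unsigned long long)freeable / 4)
		total_scan = total_scan < freeable / 2 ? total_scan : freeable / 2;

	/* Never try to free more than twice the freeable estimate. */
	if (total_scan > freeable * 2)
		total_scan = freeable * 2;

	return total_scan;
}

int main(void)
{
	/* Hypothetical shrinker state, chosen only to exercise the math. */
	long freeable = 10000;
	long total_scan = compute_total_scan(512, 4096, 2, 100000, freeable);

	printf("freeable=%ld total_scan=%ld\n", freeable, total_scan);
	return 0;
}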