* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
-static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc,
-			 struct shrink_control *shrink)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
unsigned long lru_pages = 0;
bool aborted_reclaim = false;
struct reclaim_state *reclaim_state = current->reclaim_state;
+	struct shrink_control shrink = {
+		.gfp_mask = sc->gfp_mask,
+	};
/*
* If the number of buffer_heads in the machine exceeds the maximum
if (buffer_heads_over_limit)
sc->gfp_mask |= __GFP_HIGHMEM;
- nodes_clear(shrink->nodes_to_scan);
+ nodes_clear(shrink.nodes_to_scan);
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
continue;
lru_pages += zone_reclaimable_pages(zone);
- node_set(zone_to_nid(zone), shrink->nodes_to_scan);
+ node_set(zone_to_nid(zone), shrink.nodes_to_scan);
if (sc->priority != DEF_PRIORITY &&
!zone_reclaimable(zone))
* pages.
*/
if (global_reclaim(sc)) {
- shrink_slab(shrink, sc->nr_scanned, lru_pages);
+ shrink_slab(&shrink, sc->nr_scanned, lru_pages);
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
* else, the number of pages reclaimed
*/
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					  struct scan_control *sc,
-					  struct shrink_control *shrink)
+					  struct scan_control *sc)
{
unsigned long total_scanned = 0;
unsigned long writeback_threshold;
vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
sc->priority);
sc->nr_scanned = 0;
- aborted_reclaim = shrink_zones(zonelist, sc, shrink);
+ aborted_reclaim = shrink_zones(zonelist, sc);
total_scanned += sc->nr_scanned;
if (sc->nr_reclaimed >= sc->nr_to_reclaim)
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
/*
* Do not enter reclaim if fatal signal was delivered while throttled.
sc.may_writepage,
gfp_mask);
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
/*
* Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
sc.may_writepage,
sc.gfp_mask);
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
.order = 0,
.priority = DEF_PRIORITY,
};
-	struct shrink_control shrink = {
-		.gfp_mask = sc.gfp_mask,
-	};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
struct task_struct *p = current;
unsigned long nr_reclaimed;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
p->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();