diff --git a/mm/vmscan.c b/mm/vmscan.c
index c26986c..ba4e87d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -579,6 +579,40 @@ redo:
 	put_page(page);		/* drop ref from isolate */
 }
 
+enum page_references {
+	PAGEREF_RECLAIM,
+	PAGEREF_RECLAIM_CLEAN,
+	PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+						  struct scan_control *sc)
+{
+	unsigned long vm_flags;
+	int referenced;
+
+	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+	if (!referenced)
+		return PAGEREF_RECLAIM;
+
+	/* Lumpy reclaim - ignore references */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		return PAGEREF_RECLAIM;
+
+	/*
+	 * Mlock lost the isolation race with us. Let try_to_unmap()
+	 * move the page to the unevictable list.
+	 */
+	if (vm_flags & VM_LOCKED)
+		return PAGEREF_RECLAIM;
+
+	if (page_mapping_inuse(page))
+		return PAGEREF_ACTIVATE;
+
+	/* Reclaim if clean, defer dirty pages to writeback */
+	return PAGEREF_RECLAIM_CLEAN;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -590,16 +624,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
 	unsigned long nr_reclaimed = 0;
-	unsigned long vm_flags;
 
 	cond_resched();
 
 	pagevec_init(&freed_pvec, 1);
 	while (!list_empty(page_list)) {
+		enum page_references references;
 		struct address_space *mapping;
 		struct page *page;
 		int may_enter_fs;
-		int referenced;
 
 		cond_resched();
 
@@ -641,17 +674,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep_locked;
 		}
 
-		referenced = page_referenced(page, 1,
-						sc->mem_cgroup, &vm_flags);
-		/*
-		 * In active use or really unfreeable? Activate it.
-		 * If page which have PG_mlocked lost isoltation race,
-		 * try_to_unmap moves it to unevictable list
-		 */
-		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-					referenced && page_mapping_inuse(page)
-					&& !(vm_flags & VM_LOCKED))
+		references = page_check_references(page, sc);
+		switch (references) {
+		case PAGEREF_ACTIVATE:
 			goto activate_locked;
+		case PAGEREF_RECLAIM:
+		case PAGEREF_RECLAIM_CLEAN:
+			; /* try to reclaim the page below */
+		}
 
 		/*
 		 * Anonymous process memory has backing store?
@@ -685,7 +715,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		}
 
 		if (PageDirty(page)) {
-			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
+			if (references == PAGEREF_RECLAIM_CLEAN)
 				goto keep_locked;
 			if (!may_enter_fs)
 				goto keep_locked;
@@ -1501,6 +1531,13 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	unsigned long ap, fp;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
+	/* If we have no swap space, do not bother scanning anon pages. */
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
+		percent[0] = 0;
+		percent[1] = 100;
+		return;
+	}
+
 	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
 	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
@@ -1598,22 +1635,20 @@ static void shrink_zone(int priority, struct zone *zone,
 	unsigned long nr_reclaimed = sc->nr_reclaimed;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int noswap = 0;
 
-	/* If we have no swap space, do not bother scanning anon pages. */
-	if (!sc->may_swap || (nr_swap_pages <= 0)) {
-		noswap = 1;
-		percent[0] = 0;
-		percent[1] = 100;
-	} else
-		get_scan_ratio(zone, sc, percent);
+	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
 		unsigned long scan;
 
+		if (percent[file] == 0) {
+			nr[l] = 0;
+			continue;
+		}
+
 		scan = zone_nr_lru_pages(zone, sc, l);
-		if (priority || noswap) {
+		if (priority) {
 			scan >>= priority;
 			scan = (scan * percent[file]) / 100;
 		}
@@ -1694,8 +1729,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 				continue;
 			note_zone_scanning_priority(zone, priority);
 
-			if (zone_is_all_unreclaimable(zone) &&
-			    priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
 			sc->all_unreclaimable = 0;
 		} else {
@@ -1922,7 +1956,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone_is_all_unreclaimable(zone))
+		if (zone->all_unreclaimable)
 			continue;
 
 		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
@@ -2012,8 +2046,7 @@ loop_again:
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone_is_all_unreclaimable(zone) &&
-			    priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;
 
 			/*
@@ -2056,13 +2089,9 @@ loop_again:
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone_is_all_unreclaimable(zone) &&
-			    priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;
 
-			if (!zone_watermark_ok(zone, order,
-					high_wmark_pages(zone), end_zone, 0))
-				all_zones_ok = 0;
 			temp_priority[i] = priority;
 			sc.nr_scanned = 0;
 			note_zone_scanning_priority(zone, priority);
@@ -2087,12 +2116,11 @@ loop_again:
 						lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
-			if (zone_is_all_unreclaimable(zone))
+			if (zone->all_unreclaimable)
 				continue;
-			if (nr_slab == 0 && zone->pages_scanned >=
-					(zone_reclaimable_pages(zone) * 6))
-					zone_set_flag(zone,
-						      ZONE_ALL_UNRECLAIMABLE);
+			if (nr_slab == 0 &&
+			    zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+				zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -2102,13 +2130,18 @@ loop_again:
 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 
-			/*
-			 * We are still under min water mark. it mean we have
-			 * GFP_ATOMIC allocation failure risk. Hurry up!
-			 */
-			if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
-					      end_zone, 0))
-				has_under_min_watermark_zone = 1;
+			if (!zone_watermark_ok(zone, order,
+					high_wmark_pages(zone), end_zone, 0)) {
+				all_zones_ok = 0;
+				/*
+				 * We are still under min water mark. This
+				 * means that we have a GFP_ATOMIC allocation
+				 * failure risk. Hurry up!
+				 */
+				if (!zone_watermark_ok(zone, order,
+						min_wmark_pages(zone), end_zone, 0))
+					has_under_min_watermark_zone = 1;
+			}
 
 		}
 		if (all_zones_ok)
@@ -2550,6 +2583,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * and RECLAIM_SWAP.
 	 */
 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
@@ -2593,6 +2627,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
+	lockdep_clear_current_reclaim_state();
 	return sc.nr_reclaimed >= nr_pages;
 }
 
@@ -2615,7 +2650,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
 		return ZONE_RECLAIM_FULL;
 
-	if (zone_is_all_unreclaimable(zone))
+	if (zone->all_unreclaimable)
 		return ZONE_RECLAIM_FULL;
 
 	/*
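The page_check_references() hunk above is the heart of this diff: it folds the old
inline reference test in shrink_page_list() into a three-way decision. The decision
table can be modeled in isolation; below is a minimal user-space sketch of that logic,
for illustration only. struct page_state, check_references() and the field names are
hypothetical stand-ins for kernel state, not the kernel API; the values assumed for
PAGE_ALLOC_COSTLY_ORDER and VM_LOCKED match the kernel's definitions.

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3		/* matches the kernel constant */
#define VM_LOCKED		0x00002000UL	/* matches the kernel VMA flag */

enum page_references {
	PAGEREF_RECLAIM,	/* reclaim; write back first if dirty */
	PAGEREF_RECLAIM_CLEAN,	/* reclaim only if the page is clean */
	PAGEREF_ACTIVATE,	/* promote back to the active list */
};

/* Hypothetical stand-in for the state page_check_references() consults. */
struct page_state {
	int referenced;		/* page_referenced() found young ptes */
	unsigned long vm_flags;	/* VMA flags gathered during the rmap walk */
	int mapping_inuse;	/* page_mapping_inuse() result */
};

static enum page_references check_references(const struct page_state *page,
					     int order)
{
	if (!page->referenced)
		return PAGEREF_RECLAIM;

	/* Lumpy reclaim - ignore references */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return PAGEREF_RECLAIM;

	/* mlocked pages are routed to the unevictable list by try_to_unmap() */
	if (page->vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (page->mapping_inuse)
		return PAGEREF_ACTIVATE;

	/* Referenced but mapping unused: reclaim if clean, defer if dirty */
	return PAGEREF_RECLAIM_CLEAN;
}

int main(void)
{
	struct page_state active  = { .referenced = 1, .mapping_inuse = 1 };
	struct page_state drained = { .referenced = 1, .mapping_inuse = 0 };
	struct page_state cold    = { .referenced = 0 };

	printf("active:  %d (expect PAGEREF_ACTIVATE = 2)\n",
	       check_references(&active, 0));
	printf("drained: %d (expect PAGEREF_RECLAIM_CLEAN = 1)\n",
	       check_references(&drained, 0));
	printf("cold:    %d (expect PAGEREF_RECLAIM = 0)\n",
	       check_references(&cold, 0));
	return 0;
}

Note the ordering: the VM_LOCKED test comes before the mapping-in-use test, so a page
that lost the mlock isolation race is still handed to try_to_unmap() for the
unevictable list rather than being activated, as the patch's own comment explains.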