From: Rik van Riel
Date: Sun, 19 Oct 2008 03:26:30 +0000 (-0700)
Subject: define page_file_cache() function
X-Git-Tag: v3.12-rc1~17750
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b2e185384f534781fd22f5ce170b2ad26f97df70;p=kernel%2Fkernel-generic.git

define page_file_cache() function

Define page_file_cache() function to answer the question:
	is page backed by a file?

Originally part of Rik van Riel's split-lru patch.  Extracted to make
available for other, independent reclaim patches.

Moved inline function to linux/mm_inline.h where it will be needed by
subsequent "split LRU" and "noreclaim" patches.

Unfortunately this needs to use a page flag, since the PG_swapbacked
state needs to be preserved all the way to the point where the page is
last removed from the LRU.  Trying to derive the status from other info
in the page resulted in wrong VM statistics in earlier split VM
patchsets.

The total number of page flags in use on a 32 bit machine after this
patch is 19.

[akpm@linux-foundation.org: fix up out-of-order merge fallout]
[hugh@veritas.com: splitlru: shmem_getpage SetPageSwapBacked sooner]
Signed-off-by: Rik van Riel
Signed-off-by: Lee Schermerhorn
Signed-off-by: MinChan Kim
Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2704729..96e9704 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,3 +1,28 @@
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
+
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns !0 if @page is page cache page backed by a regular filesystem,
+ * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
+{
+	if (PageSwapBacked(page))
+		return 0;
+
+	/* The page is page cache backed by a normal filesystem. */
+	return 1;
+}
+
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
@@ -65,3 +90,5 @@ static inline enum lru_list page_lru(struct page *page)
 
 	return lru;
 }
+
+#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e8..57b688c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,7 @@
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
+	PG_swapbacked,		/* Page is backed by RAM/swap */
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 	PG_uncached,		/* Page has been mapped as uncached */
 #endif
@@ -176,6 +177,7 @@ PAGEFLAG(SavePinned, savepinned);			/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
 	__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
@@ -334,7 +336,8 @@ static inline void __ClearPageTail(struct page *page)
  * Flags checked in bad_page().  Pages on the free list should not have
  * these flags set.  It they are, there is a problem.
  */
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
+		1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
 
 /*
  * Flags checked when a page is freed.  Pages being freed should not have
@@ -347,7 +350,8 @@ static inline void __ClearPageTail(struct page *page)
 * Pages being prepped should not have these flags set.  It they are, there
 * is a problem.
 */
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
+		1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
 
 #endif /* !__GENERATING_BOUNDS_H */
 #endif /* PAGE_FLAGS_H */
diff --git a/mm/memory.c b/mm/memory.c
index 1002f47..7512933 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1888,6 +1888,7 @@ gotten:
 		ptep_clear_flush_notify(vma, address, page_table);
 		set_pte_at(mm, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
+		SetPageSwapBacked(new_page);
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
@@ -2382,6 +2383,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_none(*page_table))
 		goto release;
 	inc_mm_counter(mm, anon_rss);
+	SetPageSwapBacked(page);
 	lru_cache_add_active(page);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
@@ -2523,6 +2525,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
 			inc_mm_counter(mm, anon_rss);
+			SetPageSwapBacked(page);
 			lru_cache_add_active(page);
 			page_add_new_anon_rmap(page, vma, address);
 		} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index ad15b5e..c073274 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -572,6 +572,8 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
 
 	mapping = page_mapping(page);
 	if (!mapping)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ee7a96e..2099904 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -462,6 +462,8 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	if (PageSwapBacked(page))
+		__ClearPageSwapBacked(page);
 	/*
 	 * For now, we report if PG_reserved was found set, but do not
 	 * clear it, and do not free the page.  But we shall soon need
diff --git a/mm/shmem.c b/mm/shmem.c
index d87958a..fd421ed 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1367,6 +1367,7 @@ repeat:
 			error = -ENOMEM;
 			goto failed;
 		}
+		SetPageSwapBacked(filepage);
 
 		/* Precharge page while we can wait, compensate after */
 		error = mem_cgroup_cache_charge(filepage, current->mm,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 797c383..7a3ece0 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -75,6 +75,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
+	BUG_ON(!PageSwapBacked(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 		page_cache_get(page);
@@ -303,6 +304,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
 		set_page_locked(new_page);
+		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
 			/*
@@ -312,6 +314,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
+		ClearPageSwapBacked(new_page);
 		clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
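
For context, a minimal sketch of how LRU-management code could use the new
helper once the LRU lists are split by page type, as the later "split LRU"
patches do.  This is not part of the patch above; the enum and helper names
(hypothetical_lru, hypo_lru_for_page) are made up for illustration and are
not introduced by this commit.

/*
 * Illustrative sketch only -- not part of this commit.  Shows how code
 * that manipulates the LRU lists could use page_is_file_cache() to sort
 * a page onto a file-backed or anon list.  The enum values and helper
 * name below are hypothetical.
 */
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/page-flags.h>

enum hypothetical_lru {
	HYPO_LRU_ANON,	/* anonymous, tmpfs and other swap-backed pages */
	HYPO_LRU_FILE,	/* page cache backed by a regular filesystem */
};

static inline enum hypothetical_lru hypo_lru_for_page(struct page *page)
{
	/*
	 * PG_swapbacked is set on anon/tmpfs pages before they are added
	 * to the LRU (see the mm/memory.c and mm/shmem.c hunks above), so
	 * this classification stays valid until the page is finally
	 * removed from the LRU.
	 */
	if (page_is_file_cache(page))
		return HYPO_LRU_FILE;
	return HYPO_LRU_ANON;
}

Keeping the state in a page flag rather than deriving it from other fields
is what lets a check like this remain correct all the way down to the last
LRU removal, which is why the fault and shmem paths above set
PG_swapbacked before calling lru_cache_add_active().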