From 0c35bbadc59f5ed105c34471143eceb4c0dd9c95 Mon Sep 17 00:00:00 2001
From: Martin Hicks
Date: Tue, 21 Jun 2005 17:14:42 -0700
Subject: [PATCH] [PATCH] VM: add __GFP_NORECLAIM

When using early zone reclaim, it was noticed that allocating new pages
that should be spread across the whole system caused eviction of local
pages.

This adds a new GFP flag to prevent early reclaim from happening during
certain allocation attempts.  The example implemented here is for page
cache pages.  We want page cache pages to be spread across the whole
system, and we don't want page cache pages to evict other pages to get
local memory.

Signed-off-by: Martin Hicks
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/gfp.h     | 3 ++-
 include/linux/pagemap.h | 4 ++--
 mm/page_alloc.c         | 2 ++
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index af7407e..208535f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,6 +39,7 @@ struct vm_area_struct;
 #define __GFP_COMP	0x4000u	/* Add compound page metadata */
 #define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
+#define __GFP_NORECLAIM 0x20000u /* No early zone reclaim during allocation */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -47,7 +48,7 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-			__GFP_NOMEMALLOC)
+			__GFP_NOMEMALLOC|__GFP_NORECLAIM)
 
 #define GFP_ATOMIC	(__GFP_HIGH)
 #define GFP_NOIO	(__GFP_WAIT)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0422031..d9a2564 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -52,12 +52,12 @@ void release_pages(struct page **pages, int nr, int cold);
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0);
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0);
 }
 
 typedef int filler_t(void *, struct page *);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3c0f69d..a9da20b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -729,6 +729,8 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
 {
 	if (!z->reclaim_pages)
 		return 0;
+	if (gfp_mask & __GFP_NORECLAIM)
+		return 0;
 	return 1;
 }
 
-- 
2.7.4
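
For reference, a minimal sketch of how a caller other than the page cache
might opt out of early zone reclaim, assuming a tree with this patch
applied.  The helper name example_alloc_spread_page() is hypothetical and
not part of the patch; it simply mirrors what page_cache_alloc() does
above by OR-ing __GFP_NORECLAIM into the mask passed to alloc_pages():

/*
 * Hypothetical example (not part of this patch): allocate a page whose
 * placement should be spread across the system rather than forcing
 * eviction of local pages to satisfy the request.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_alloc_spread_page(unsigned int gfp_mask)
{
	/*
	 * With __GFP_NORECLAIM set, should_reclaim_zone() returns 0, so
	 * the allocator skips early zone reclaim and falls back to other
	 * zones instead of evicting local pages.
	 */
	return alloc_pages(gfp_mask | __GFP_NORECLAIM, 0);
}

The same pattern applies to any allocation whose pages should be spread
across nodes; callers that do want local memory simply leave the flag
clear and get the existing reclaim behaviour.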