Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
[platform/kernel/linux-rpi.git] mm/page_alloc.c
index 0422058..e7af86e 100644
@@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
 /*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Call kasan_poison_pages() only after deferred memory initialization
  * has completed. Poisoning pages during deferred memory init will greatly
  * lengthen the process and cause problems on large-memory systems, as
  * deferred page initialization is done with interrupts disabled.
@@ -394,15 +394,12 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * on-demand allocation and then freed again before deferred page
  * initialization is done, but this is not likely to happen.
  */
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                               bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-       if (static_branch_unlikely(&deferred_pages))
-               return;
-       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                       (fpi_flags & FPI_SKIP_KASAN_POISON))
-               return;
-       kasan_free_pages(page, order, init);
+       return static_branch_unlikely(&deferred_pages) ||
+              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+               (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+              PageSkipKASanPoison(page);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
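
The hunk above replaces a helper that conditionally performed the poisoning itself with a pure predicate: the caller now asks "should poisoning be skipped?" and acts on the answer. Three conditions skip it: deferred memory init is still in progress (the static key), a tag-based KASAN build was passed FPI_SKIP_KASAN_POISON, or the page itself carries the skip flag. A minimal userspace model of that predicate, with the kernel's types, static key, and page flag all stubbed out as plain booleans (none of these definitions are the kernel's):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int fpi_t;
#define FPI_SKIP_KASAN_POISON   (1u << 0)

struct page { bool skip_kasan_poison; };  /* models PageSkipKASanPoison() */

static bool deferred_pages = true;  /* models the deferred_pages static key */
static bool kasan_generic;          /* models IS_ENABLED(CONFIG_KASAN_GENERIC) */

static bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
        return deferred_pages ||
               (!kasan_generic && (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
               page->skip_kasan_poison;
}

int main(void)
{
        struct page p = { .skip_kasan_poison = false };

        /* Skipped while deferred init runs, regardless of flags. */
        printf("%d\n", should_skip_kasan_poison(&p, 0));                     /* 1 */
        deferred_pages = false;
        printf("%d\n", should_skip_kasan_poison(&p, 0));                     /* 0 */
        printf("%d\n", should_skip_kasan_poison(&p, FPI_SKIP_KASAN_POISON)); /* 1 */
        return 0;
}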
@@ -453,13 +450,11 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        return false;
 }
 #else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                               bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-       if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                       (fpi_flags & FPI_SKIP_KASAN_POISON))
-               return;
-       kasan_free_pages(page, order, init);
+       return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+               (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+              PageSkipKASanPoison(page);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
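
This #else variant (CONFIG_DEFERRED_STRUCT_PAGE_INIT disabled) is identical minus the static-key test. In both variants the IS_ENABLED(CONFIG_KASAN_GENERIC) check is a compile-time constant, so on generic-KASAN builds the FPI clause folds to false and every free is poisoned; only the tag-based modes can honor FPI_SKIP_KASAN_POISON. A tiny standalone illustration of that folding, with GENERIC_BUILD as a stand-in macro (not a kernel symbol):

#include <assert.h>
#include <stdbool.h>

#define GENERIC_BUILD           1  /* stand-in for IS_ENABLED(CONFIG_KASAN_GENERIC) */
#define FPI_SKIP_KASAN_POISON   (1u << 0)

static bool skip_from_fpi(unsigned int fpi_flags)
{
        /* With GENERIC_BUILD == 1 the compiler folds this to "false". */
        return !GENERIC_BUILD && (fpi_flags & FPI_SKIP_KASAN_POISON);
}

int main(void)
{
        /* Even with the flag set, a generic build never skips poisoning. */
        assert(!skip_from_fpi(FPI_SKIP_KASAN_POISON));
        return 0;
}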
@@ -1226,10 +1221,16 @@ out:
        return ret;
 }
 
-static void kernel_init_free_pages(struct page *page, int numpages)
+static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
 {
        int i;
 
+       if (zero_tags) {
+               for (i = 0; i < numpages; i++)
+                       tag_clear_highpage(page + i);
+               return;
+       }
+
        /* s390's use of memset() could override KASAN redzones. */
        kasan_disable_current();
        for (i = 0; i < numpages; i++) {
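
The new zero_tags path exists for __GFP_ZEROTAGS allocations: on arm64 MTE, tag_clear_highpage() zeroes a page and its memory tags in one pass, so the KASAN-bracketed clear_highpage() loop (cut off above) is bypassed entirely. A userspace model of the split, using a byte buffer plus a tag field in place of struct page (all names here are invented for illustration):

#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE 4096

struct fake_page {
        unsigned char data[PAGE_SIZE];
        unsigned char tag;  /* models a page's MTE allocation tag */
};

/* Models tag_clear_highpage(): data and tag cleared together. */
static void clear_data_and_tag(struct fake_page *p)
{
        memset(p->data, 0, PAGE_SIZE);
        p->tag = 0;
}

static void init_free_pages(struct fake_page *pages, int numpages, bool zero_tags)
{
        int i;

        if (zero_tags) {
                for (i = 0; i < numpages; i++)
                        clear_data_and_tag(&pages[i]);
                return;
        }
        /* Plain path: only the data is cleared, tags are untouched. */
        for (i = 0; i < numpages; i++)
                memset(pages[i].data, 0, PAGE_SIZE);
}

int main(void)
{
        static struct fake_page p = { .tag = 0x3 };

        init_free_pages(&p, 1, true);
        return p.tag;  /* 0: tag was cleared along with the data */
}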
@@ -1245,7 +1246,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                        unsigned int order, bool check_free, fpi_t fpi_flags)
 {
        int bad = 0;
-       bool init;
+       bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
 
        VM_BUG_ON_PAGE(PageTail(page), page);
 
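Note that the skip decision is now captured in a local at function entry. This matters because, later in free_pages_prepare(), the page's flags are reset via PAGE_FLAGS_CHECK_AT_PREP, which would (presumably, on configurations carrying the flag) wipe the PageSkipKASanPoison bit before the poisoning step runs. The pattern in isolation, with hypothetical helpers:

#include <assert.h>
#include <stdbool.h>

static unsigned long flags = 1;                    /* bit 0: a "skip poison" flag */
static bool flag_set(void)         { return flags & 1; }
static void clear_prep_flags(void) { flags = 0; }  /* models the flags reset */

int main(void)
{
        bool skip = flag_set();  /* capture before the flags are cleared */

        clear_prep_flags();
        assert(skip && !flag_set());  /* reading the flag now would lose the answer */
        return 0;
}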
@@ -1314,10 +1315,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
         * With hardware tag-based KASAN, memory tags must be set before the
         * page becomes unavailable via debug_pagealloc or arch_free_page.
         */
-       init = want_init_on_free();
-       if (init && !kasan_has_integrated_init())
-               kernel_init_free_pages(page, 1 << order);
-       kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+       if (kasan_has_integrated_init()) {
+               if (!skip_kasan_poison)
+                       kasan_free_pages(page, order);
+       } else {
+               bool init = want_init_on_free();
+
+               if (init)
+                       kernel_init_free_pages(page, 1 << order, false);
+               if (!skip_kasan_poison)
+                       kasan_poison_pages(page, order, init);
+       }
 
        /*
         * arch_free_page() can make the page's contents inaccessible.  s390
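
This is the core of the free-path change: with integrated init (HW_TAGS KASAN), kasan_free_pages() zero-initializes the memory as a side effect of setting the memory tags, so a separate memzero pass would be wasted work; all other configurations keep the explicit kernel_init_free_pages() call and tell kasan_poison_pages() whether that init already happened. A compilable model of the two-branch shape, with every kernel helper replaced by a printing stub:

#include <stdbool.h>
#include <stdio.h>

static bool integrated_init;      /* models kasan_has_integrated_init() */
static bool init_on_free = true;  /* models want_init_on_free() */

static void free_pages_hw_tags(int order) { printf("tag+init+poison, order %d\n", order); }
static void memzero_pages(int n)          { printf("memzero %d pages\n", n); }
static void poison_pages(int order, bool init)
{
        printf("poison order %d, init=%d\n", order, init);
}

static void free_path(int order, bool skip_kasan_poison)
{
        if (integrated_init) {
                /* One pass: tagging both poisons and initializes. */
                if (!skip_kasan_poison)
                        free_pages_hw_tags(order);
        } else {
                bool init = init_on_free;

                if (init)
                        memzero_pages(1 << order);
                if (!skip_kasan_poison)
                        poison_pages(order, init);
        }
}

int main(void)
{
        free_path(2, false);  /* generic/SW_TAGS shape */
        integrated_init = true;
        free_path(2, false);  /* HW_TAGS shape */
        return 0;
}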
@@ -2324,8 +2332,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
                                gfp_t gfp_flags)
 {
-       bool init;
-
        set_page_private(page, 0);
        set_page_refcounted(page);
 
@@ -2344,10 +2350,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
         * kasan_alloc_pages and kernel_init_free_pages must be
         * kept together to avoid discrepancies in behavior.
         */
-       init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-       kasan_alloc_pages(page, order, init);
-       if (init && !kasan_has_integrated_init())
-               kernel_init_free_pages(page, 1 << order);
+       if (kasan_has_integrated_init()) {
+               kasan_alloc_pages(page, order, gfp_flags);
+       } else {
+               bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+               kasan_unpoison_pages(page, order, init);
+               if (init)
+                       kernel_init_free_pages(page, 1 << order,
+                                              gfp_flags & __GFP_ZEROTAGS);
+       }
 
        set_page_owner(page, order, gfp_flags);
 }
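
The allocation side mirrors the free side. With integrated init, kasan_alloc_pages() now receives the whole gfp mask, so it can decide on initialization and honor __GFP_ZEROTAGS internally; otherwise the page is unpoisoned first and, if init is wanted, kernel_init_free_pages() is called with the __GFP_ZEROTAGS bit to pick the tag-clearing variant shown earlier. The !want_init_on_free() term avoids double-initializing pages that were already zeroed on free. A model of that shape, again with stand-in booleans and stubs rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

#define __GFP_ZEROTAGS (1u << 1)  /* stand-in bit, not the kernel's value */

static bool integrated_init;       /* models kasan_has_integrated_init() */
static bool init_on_free;          /* models want_init_on_free() */
static bool init_on_alloc = true;  /* models want_init_on_alloc() */

static void alloc_pages_hw_tags(int order, unsigned int gfp)
{
        printf("untag+init, order %d, zerotags=%d\n", order, !!(gfp & __GFP_ZEROTAGS));
}

static void unpoison_pages(int order, bool init)
{
        printf("unpoison order %d, init=%d\n", order, init);
}

static void memzero_pages(int n, bool zero_tags)
{
        printf("memzero %d pages, zero_tags=%d\n", n, zero_tags);
}

static void post_alloc(int order, unsigned int gfp_flags)
{
        if (integrated_init) {
                /* HW_TAGS: the gfp mask travels into KASAN itself. */
                alloc_pages_hw_tags(order, gfp_flags);
        } else {
                /* Don't re-zero pages that were already zeroed on free. */
                bool init = !init_on_free && init_on_alloc;

                unpoison_pages(order, init);
                if (init)
                        memzero_pages(1 << order, gfp_flags & __GFP_ZEROTAGS);
        }
}

int main(void)
{
        post_alloc(0, __GFP_ZEROTAGS);
        return 0;
}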