kasan, mm: optimize krealloc poisoning
author Andrey Konovalov <andreyknvl@google.com>
Fri, 26 Feb 2021 01:20:23 +0000 (17:20 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Feb 2021 17:41:03 +0000 (09:41 -0800)
Currently, krealloc() always calls ksize(), which unpoisons the whole
object including the redzone.  This is inefficient, as kasan_krealloc()
repoisons the redzone for objects that fit into the same buffer.
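
A rough sketch of that pre-patch flow, condensed from the lines removed
in the mm/slab_common.c hunk below (not verbatim source):

	ks = ksize(p);		/* instrumented: unpoisons object + redzone */
	if (ks >= new_size)
		/* has to poison the redzone all over again */
		return kasan_krealloc((void *)p, new_size, flags);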

This patch changes krealloc() instrumentation to use uninstrumented
__ksize() that doesn't unpoison the memory.  Instead, kasan_krealloc() is
changed to unpoison the memory excluding the redzone.

For objects that don't fit into the old allocation, this patch disables
KASAN accessibility checks when copying memory into a new object instead
of unpoisoning it.
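
Taken together, the new flow looks roughly like this (a condensed sketch
of the hunks below, not verbatim source):

	/* __ksize() is uninstrumented and leaves the poisoning alone. */
	ks = kfence_ksize(p) ?: __ksize(p);
	if (ks >= new_size)
		/* Unpoisons [p, p + new_size) and poisons the rest of the
		 * allocation as a redzone. */
		return kasan_krealloc((void *)p, new_size, flags);

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Copying ks bytes reads the old object's redzone, so
		 * accessibility checks are suppressed around the copy. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}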

Link: https://lkml.kernel.org/r/9bef90327c9cb109d736c40115684fd32f49e6b0.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/kasan/common.c
mm/slab_common.c

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 8a3d663..1e51064 100644
@@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, gfp_t flags)
 
        /*
         * The object has already been unpoisoned by kasan_slab_alloc() for
-        * kmalloc() or by ksize() for krealloc().
+        * kmalloc() or by kasan_krealloc() for krealloc().
         */
 
        /*
@@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 
        /*
         * The object has already been unpoisoned by kasan_alloc_pages() for
-        * alloc_pages() or by ksize() for krealloc().
+        * alloc_pages() or by kasan_krealloc() for krealloc().
         */
 
        /*
@@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;
 
+       /*
+        * Unpoison the object's data.
+        * Part of it might already have been unpoisoned, but it's unknown
+        * how big that part is.
+        */
+       kasan_unpoison(object, size);
+
        page = virt_to_head_page(object);
 
+       /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!PageSlab(page)))
                return __kasan_kmalloc_large(object, size, flags);
        else
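
What the precise repoisoning buys at runtime, as a hypothetical snippet
in the spirit of the krealloc KUnit tests in lib/test_kasan.c (the sizes
here are illustrative):

	char *p = kmalloc(64, GFP_KERNEL);
	char *q = krealloc(p, 16, GFP_KERNEL);	/* fits, same object reused */

	q[10] = 'x';	/* OK: within the 16 unpoisoned bytes */
	q[24] = 'y';	/* past new_size, now redzone: KASAN reports this */
	kfree(q);
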
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 4aedb84..88e8339 100644
@@ -1136,19 +1136,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size, gfp_t flags)
        void *ret;
        size_t ks;
 
-       if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
-               return NULL;
-
-       ks = ksize(p);
+       /* Don't use instrumented ksize to allow precise KASAN poisoning. */
+       if (likely(!ZERO_OR_NULL_PTR(p))) {
+               if (!kasan_check_byte(p))
+                       return NULL;
+               ks = kfence_ksize(p) ?: __ksize(p);
+       } else
+               ks = 0;
 
+       /* If the object still fits, repoison it precisely. */
        if (ks >= new_size) {
                p = kasan_krealloc((void *)p, new_size, flags);
                return (void *)p;
        }
 
        ret = kmalloc_track_caller(new_size, flags);
-       if (ret && p)
-               memcpy(ret, p, ks);
+       if (ret && p) {
+               /* Disable KASAN checks as the object's redzone is accessed. */
+               kasan_disable_current();
+               memcpy(ret, kasan_reset_tag(p), ks);
+               kasan_enable_current();
+       }
 
        return ret;
 }
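
A note on the "kfence_ksize(p) ?: __ksize(p)" expression above: it uses
the GNU C "?:" extension, which yields the first operand if it is
nonzero and the second otherwise.  Since kfence_ksize() returns 0 for
objects not managed by KFENCE, it is equivalent to:

	ks = kfence_ksize(p);	/* nonzero only for KFENCE objects */
	if (!ks)
		ks = __ksize(p);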