mm/usercopy: Check kmap addresses properly
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 10 Jan 2022 23:15:27 +0000 (23:15 +0000)
committer Kees Cook <keescook@chromium.org>
Wed, 13 Apr 2022 19:15:50 +0000 (12:15 -0700)
If you are copying to an address in the kmap region, you may not copy
across a page boundary, no matter what the size of the underlying
allocation.  You can't kmap() a slab page because slab pages always
come from low memory.
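
As a concrete illustration of the invariant, here is a minimal userspace sketch (not kernel code; the page size and the sample addresses are assumptions made up for the example) of the page-boundary arithmetic the patch adds to check_heap_object():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL        /* assumed page size for the example */

/*
 * A copy of n bytes starting at ptr stays within one page iff its last
 * byte, ptr + n - 1, does not pass the last byte of the page holding
 * ptr.  OR-ing (PAGE_SIZE - 1) into ptr yields that last byte's address.
 */
static int crosses_page(uintptr_t ptr, unsigned long n)
{
        uintptr_t page_end = ptr | (PAGE_SIZE - 1);

        return ptr + n - 1 > page_end;
}

int main(void)
{
        printf("%d\n", crosses_page(0x1000, 4096));     /* 0: fills its page exactly */
        printf("%d\n", crosses_page(0x1ffc, 8));        /* 1: spills into the next page */
        return 0;
}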

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-2-willy@infradead.org
arch/x86/include/asm/highmem.h
include/linux/highmem-internal.h
mm/usercopy.c

diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 032e020..731ee7c 100644
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable_areas.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index a77be56..337bd9f 100644
@@ -149,6 +149,12 @@ static inline void totalhigh_pages_add(long count)
        atomic_long_add(count, &_totalhigh_pages);
 }
 
+static inline bool is_kmap_addr(const void *x)
+{
+       unsigned long addr = (unsigned long)x;
+
+       return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
+}
 #else /* CONFIG_HIGHMEM */
 
 static inline struct page *kmap_to_page(void *addr)
@@ -234,6 +239,11 @@ static inline void __kunmap_atomic(void *addr)
 static inline unsigned int nr_free_highpages(void) { return 0; }
 static inline unsigned long totalhigh_pages(void) { return 0UL; }
 
+static inline bool is_kmap_addr(const void *x)
+{
+       return false;
+}
+
 #endif /* CONFIG_HIGHMEM */
 
 /*
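
For reference, PKMAP_ADDR(0) through PKMAP_ADDR(LAST_PKMAP) bounds the fixed virtual window that kmap() maps highmem pages into, and PKMAP_ADDR() is built on PKMAP_BASE, which is presumably why the x86 highmem.h hunk above grows the asm/pgtable_areas.h include. A standalone sketch of the same range check (the window base and slot count below are invented for illustration; the real values are per-architecture and per-configuration):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT      12                      /* assumed */
#define PKMAP_BASE      0xff800000UL            /* invented window base */
#define LAST_PKMAP      1024                    /* invented slot count */
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

/* Mirrors the new is_kmap_addr(): true iff x lies inside the window. */
static bool is_kmap_addr(const void *x)
{
        uintptr_t addr = (uintptr_t)x;

        return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}

int main(void)
{
        printf("%d\n", is_kmap_addr((void *)PKMAP_ADDR(3)));    /* 1: inside the window */
        printf("%d\n", is_kmap_addr((void *)0x1000UL));         /* 0: outside */
        return 0;
}

With CONFIG_HIGHMEM disabled there is no such window at all, hence the stub above that simply returns false, letting the compiler discard the new branch in check_heap_object().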
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2c235d5..ff13e77 100644
@@ -229,12 +229,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
        if (!virt_addr_valid(ptr))
                return;
 
-       /*
-        * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-        * highmem page or fallback to virt_to_page(). The following
-        * is effectively a highmem-aware virt_to_slab().
-        */
-       folio = page_folio(kmap_to_page((void *)ptr));
+       if (is_kmap_addr(ptr)) {
+               unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
+
+               if ((unsigned long)ptr + n - 1 > page_end)
+                       usercopy_abort("kmap", NULL, to_user,
+                                       offset_in_page(ptr), n);
+               return;
+       }
+
+       folio = virt_to_folio(ptr);
 
        if (folio_test_slab(folio)) {
                /* Check slab allocator for flags and size. */
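
Read together, the rewritten head of check_heap_object() works as follows (this is the hunk above with explanatory comments added; usercopy_abort(), offset_in_page() and virt_to_folio() are the kernel's existing helpers):

        if (is_kmap_addr(ptr)) {
                /* Address of the last byte of the page containing ptr. */
                unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

                /*
                 * A kmap slot maps exactly one page, so a copy whose last
                 * byte passes page_end would spill into an unrelated
                 * mapping, whatever the size of the underlying allocation.
                 */
                if ((unsigned long)ptr + n - 1 > page_end)
                        usercopy_abort("kmap", NULL, to_user,
                                        offset_in_page(ptr), n);
                /* kmap pages are never slab pages; nothing more to check. */
                return;
        }

        /*
         * With kmap addresses handled above, the remaining pointers are in
         * the linear map, so plain virt_to_folio() suffices and the
         * highmem-aware kmap_to_page() detour can go.
         */
        folio = virt_to_folio(ptr);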