void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags);
+#endif
void kasan_kfree_large(const void *ptr);
void kasan_poison_kfree(void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
const void *object) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+/*
+ * CONFIG_KASAN=n stub for kasan_kmalloc_save(); keeps callers
+ * (kmalloc_order()) buildable without #ifdef CONFIG_KASAN at the call site.
+ */
+static inline void kasan_kmalloc_save(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+#endif
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_poison_kfree(void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
KASAN_PAGE_REDZONE);
}
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+/*
+ * kasan_kmalloc_save - KASAN annotation for aml_slub_alloc_large() objects.
+ *
+ * Counterpart of kasan_kmalloc_large() for allocations that only back
+ * PAGE_ALIGN(size) bytes rather than a full (1 << order) compound page:
+ * unpoison the object itself and poison the tail of its last page as a
+ * redzone.  Note the redzone end is PAGE_ALIGN(size), not
+ * (PAGE_SIZE << compound_order(page)) as in kasan_kmalloc_large().
+ */
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags)
+{
+ unsigned long redzone_start;
+ unsigned long redzone_end;
+
+ /* A blocking allocation is a safe point to drain the quarantine. */
+ if (gfpflags_allow_blocking(flags))
+ quarantine_reduce();
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ /* Redzone runs from the shadow-granule-aligned end of the object
+ * to the end of the pages actually allocated.
+ */
+ redzone_start = round_up((unsigned long)(ptr + size),
+ KASAN_SHADOW_SCALE_SIZE);
+ redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+
+ kasan_unpoison_shadow(ptr, size);
+ kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ KASAN_PAGE_REDZONE);
+}
+
+#endif
+
void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
struct page *page;
{
void *ret;
struct page *page;
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+ /* set when the pages came from aml_slub_alloc_large(), which only
+ * allocates PAGE_ALIGN(size) bytes instead of the full order.
+ */
+ int saved = 0;
+#endif
	flags |= __GFP_COMP;
#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
-	if (size < (PAGE_SIZE * (1 << order)))
+	if (size < (PAGE_SIZE * (1 << order))) {
	page = aml_slub_alloc_large(size, flags, order);
-	else
+	saved = 1;
+	} else
	page = alloc_pages(flags, order);
#else
	page = alloc_pages(flags, order);
#endif
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	/* only need poison used pages */
+	if (saved && ret)
+		kasan_kmalloc_save(ret, size, flags);
+	else
+		kasan_kmalloc_large(ret, size, flags);
+#else
	kasan_kmalloc_large(ret, size, flags);
+#endif
	return ret;
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
__func__, page_address(page), nr_pages, obj);
for (i = 0; i < nr_pages; i++) {
+ /* Poison the shadow BEFORE returning the page to the buddy
+ * allocator; poisoning after __free_pages() races with the
+ * page being reallocated (and unpoisoned) on another CPU.
+ */
+ kasan_free_pages(page, 0);
__free_pages(page, 0);
page++;
}
}
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
-	kfree_hook(x);
#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	/* Open-coded kfree_hook(): kmemleak for both paths; KASAN
+	 * poisoning only for pages freed here (aml_slub_free_large()
+	 * handles its own per-page poisoning).
+	 */
+	kmemleak_free(x);
if (unlikely(PageOwnerPriv1(page)))
aml_slub_free_large(page, x);
-	else
+	else {
+		/* Poison before freeing, as kfree_hook() used to;
+		 * poisoning after __free_pages() races with reuse.
+		 */
+		kasan_kfree_large(x);
__free_pages(page, compound_order(page));
+	}
return;
#else
+	kfree_hook(x);
__free_pages(page, compound_order(page));
return;
#endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */