From: Jiang Liu
Date: Thu, 6 Jun 2013 16:07:30 +0000 (+0800)
Subject: zram: optimize memory operations with clear_page()/copy_page()
X-Git-Tag: v3.11-rc1~158^2~222
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=42e99bd975fdd24d2bf1a24ebb8b0b42bab8ba65;p=platform%2Fkernel%2Flinux-stable.git

zram: optimize memory operations with clear_page()/copy_page()

Some architectures provide architecture-specific, optimized versions of
clear_page()/copy_page(), which may perform better than memset()/memcpy().
So use clear_page()/copy_page() to optimize zram performance where possible.

Signed-off-by: Jiang Liu
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index c138a89..4b30fe5 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -128,23 +128,26 @@ static void zram_free_page(struct zram *zram, size_t index)
 	meta->table[index].size = 0;
 }
 
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+	return bvec->bv_len != PAGE_SIZE;
+}
+
 static void handle_zero_page(struct bio_vec *bvec)
 {
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
 	user_mem = kmap_atomic(page);
-	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	if (is_partial_io(bvec))
+		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	else
+		clear_page(user_mem);
 	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
-{
-	return bvec->bv_len != PAGE_SIZE;
-}
-
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
 	int ret = LZO_E_OK;
@@ -154,13 +157,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle = meta->table[index].handle;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		memset(mem, 0, PAGE_SIZE);
+		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (meta->table[index].size == PAGE_SIZE)
-		memcpy(mem, cmem, PAGE_SIZE);
+		copy_page(mem, cmem);
 	else
 		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
 						mem, &clen);
@@ -309,11 +312,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-	memcpy(cmem, src, clen);
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+		copy_page(cmem, src);
 		kunmap_atomic(src);
+	} else {
+		memcpy(cmem, src, clen);
+	}
 
 	zs_unmap_object(meta->mem_pool, handle);
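
The idea behind the change, sketched outside the patch context: clear_page() and
copy_page() operate on exactly one page-aligned page and may use architecture-specific
implementations (unrolled loops or special cache instructions), while memset()/memcpy()
remain the fallback for partial buffers. A minimal illustration of that pattern, using
the hypothetical helpers zero_range() and copy_range() to stand in for the zram code
paths touched above (not code from this commit):

	/*
	 * Illustrative sketch only, not part of this commit: prefer the
	 * arch-optimized whole-page helpers when a full page is touched,
	 * fall back to memset()/memcpy() for partial I/O.
	 */
	static void zero_range(void *page_addr, size_t offset, size_t len)
	{
		if (offset == 0 && len == PAGE_SIZE)
			clear_page(page_addr);			/* whole page: optimized clear */
		else
			memset(page_addr + offset, 0, len);	/* partial range: plain memset */
	}

	static void copy_range(void *dst, const void *src, size_t len)
	{
		if (len == PAGE_SIZE)
			copy_page(dst, src);			/* whole page: optimized copy */
		else
			memcpy(dst, src, len);			/* partial buffer: plain memcpy */
	}

This is exactly the shape of the three hunks: full-page clears and copies go through the
page helpers, while partial I/O keeps the byte-granular routines.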