block: zram: Add support for dynamic compressor switch
authorDongwoo Lee <dwoo08.lee@samsung.com>
Thu, 4 Jun 2020 04:54:35 +0000 (13:54 +0900)
committerHoegeun Kwon <hoegeun.kwon@samsung.com>
Mon, 6 Jul 2020 09:09:48 +0000 (18:09 +0900)
Originally, the compression algorithm could not be changed once zram
had been initialized by setting disksize; a device reset was required
first. With this change, however, zram can maintain multiple
compressors and switch between them dynamically.

With this, zram can initially use an algorithm that is fast but has a
low compression ratio, and can later switch to one that is slightly
slower but achieves a higher ratio when the target suffers from memory
pressure.

Change-Id: I3fd817e299a76284b8b28e318a4822107e6a5f6d
Signed-off-by: Dongwoo Lee <dwoo08.lee@samsung.com>
drivers/block/zram/zcomp.c
drivers/block/zram/zcomp.h
drivers/block/zram/zram_drv.c
drivers/block/zram/zram_drv.h

index 1a8564a..a02f08d 100644 (file)
@@ -63,24 +63,6 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
        return zstrm;
 }
 
-bool zcomp_available_algorithm(const char *comp)
-{
-       int i;
-
-       i = __sysfs_match_string(backends, -1, comp);
-       if (i >= 0)
-               return true;
-
-       /*
-        * Crypto does not ignore a trailing new line symbol,
-        * so make sure you don't supply a string containing
-        * one.
-        * This also means that we permit zcomp initialisation
-        * with any compressing algorithm known to crypto api.
-        */
-       return crypto_has_comp(comp, 0, 0) == 1;
-}
-
 /* show available compressors */
 ssize_t zcomp_available_show(const char *comp, char *buf)
 {
@@ -202,29 +184,43 @@ cleanup:
        return ret;
 }
 
-void zcomp_destroy(struct zcomp *comp)
+static void zcomp_destroy(struct zcomp *comp)
 {
        cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
        free_percpu(comp->stream);
        kfree(comp);
 }
 
+void zcomp_reset(struct list_head *blist)
+{
+       struct list_head *curr, *next;
+       struct zcomp *comp;
+       int i;
+
+       list_for_each_safe(curr, next, blist) {
+               comp = list_entry(curr, struct zcomp, list);
+               list_del(&comp->list);
+
+               i = __sysfs_match_string(backends, -1, comp->name);
+               if (i < 0)
+                       kfree(comp->name);
+
+               zcomp_destroy(comp);
+       }
+}
+
 /*
- * search available compressors for requested algorithm.
  * allocate new zcomp and initialize it. return compressing
  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
  * case of allocation error, or any other error potentially
  * returned by zcomp_init().
  */
-struct zcomp *zcomp_create(const char *compress)
+static struct zcomp *zcomp_create(const char *compress)
 {
        struct zcomp *comp;
        int error;
 
-       if (!zcomp_available_algorithm(compress))
-               return ERR_PTR(-EINVAL);
-
        comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
        if (!comp)
                return ERR_PTR(-ENOMEM);
@@ -237,3 +233,43 @@ struct zcomp *zcomp_create(const char *compress)
        }
        return comp;
 }
+
+struct zcomp *zcomp_get_instance(struct list_head *blist, const char *name)
+{
+       struct zcomp *comp;
+       const char *comp_name = NULL;
+       int i;
+
+       list_for_each_entry(comp, blist, list)
+               if (sysfs_streq(name, comp->name))
+                       return comp;
+
+       i = __sysfs_match_string(backends, -1, name);
+       if (i >= 0)
+               comp_name = backends[i];
+       else {
+               /*
+                * Crypto does not ignore a trailing new line symbol,
+                * so make sure you don't supply a string containing
+                * one.
+                * This also means that we permit zcomp initialisation
+                * with any compressing algorithm known to crypto api.
+                */
+               if (crypto_has_comp(name, 0, 0) == 1)
+                       comp_name = kstrdup(name, GFP_KERNEL);
+               else
+                       return ERR_PTR(-ENOENT);
+       }
+
+       comp = zcomp_create(comp_name);
+       if (IS_ERR(comp)) {
+               pr_err("Cannot initialise %s compressing backend\n", name);
+               if (i < 0)
+                       kfree(comp_name);
+               return comp;
+       }
+
+       list_add(&comp->list, blist);
+
+       return comp;
+}
index 1806475..aff688d 100644 (file)
@@ -17,15 +17,15 @@ struct zcomp {
        struct zcomp_strm * __percpu *stream;
        const char *name;
        struct hlist_node node;
+       struct list_head list;
 };
 
 int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
 int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
 ssize_t zcomp_available_show(const char *comp, char *buf);
-bool zcomp_available_algorithm(const char *comp);
 
-struct zcomp *zcomp_create(const char *comp);
-void zcomp_destroy(struct zcomp *comp);
+struct zcomp *zcomp_get_instance(struct list_head *blist, const char *name);
+void zcomp_reset(struct list_head *blist);
 
 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
 void zcomp_stream_put(struct zcomp *comp);
index 1bf4a90..6335e66 100644 (file)
@@ -121,6 +121,17 @@ static unsigned long zram_get_element(struct zram *zram, u32 index)
        return zram->table[index].element;
 }
 
+static inline void zram_set_compressor(struct zram *zram, u32 index,
+                       struct zcomp *comp)
+{
+       zram->table[index].compressor = comp;
+}
+
+static struct zcomp *zram_get_compressor(struct zram *zram, u32 index)
+{
+       return zram->table[index].compressor;
+}
+
 static size_t zram_get_obj_size(struct zram *zram, u32 index)
 {
        return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -985,6 +996,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
        struct zram *zram = dev_to_zram(dev);
+       struct zcomp *comp;
        char compressor[ARRAY_SIZE(zram->compressor)];
        size_t sz;
 
@@ -994,16 +1006,12 @@ static ssize_t comp_algorithm_store(struct device *dev,
        if (sz > 0 && compressor[sz - 1] == '\n')
                compressor[sz - 1] = 0x00;
 
-       if (!zcomp_available_algorithm(compressor))
+       comp = zcomp_get_instance(&zram->backend_list, compressor);
+       if (IS_ERR_OR_NULL(comp))
                return -EINVAL;
 
        down_write(&zram->init_lock);
-       if (init_done(zram)) {
-               up_write(&zram->init_lock);
-               pr_info("Can't change algorithm for initialized device\n");
-               return -EBUSY;
-       }
-
+       zram->comp = comp;
        strcpy(zram->compressor, compressor);
        up_write(&zram->init_lock);
        return len;
@@ -1198,6 +1206,7 @@ static void zram_free_page(struct zram *zram, size_t index)
                return;
 
        zs_free(zram->mem_pool, handle);
+       zram->table[index].compressor = NULL;
 
        atomic64_sub(zram_get_obj_size(zram, index),
                        &zram->stats.compr_data_size);
@@ -1253,12 +1262,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
                kunmap_atomic(dst);
                ret = 0;
        } else {
-               struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+               struct zcomp *comp = zram_get_compressor(zram, index);
+               struct zcomp_strm *zstrm = zcomp_stream_get(comp);
 
                dst = kmap_atomic(page);
                ret = zcomp_decompress(zstrm, src, size, dst);
                kunmap_atomic(dst);
-               zcomp_stream_put(zram->comp);
+               zcomp_stream_put(comp);
        }
        zs_unmap_object(zram->mem_pool, handle);
        zram_slot_unlock(zram, index);
@@ -1312,6 +1322,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
        unsigned int comp_len = 0;
        void *src, *dst, *mem;
        struct zcomp_strm *zstrm;
+       struct zcomp *comp = NULL;
        struct page *page = bvec->bv_page;
        unsigned long element = 0;
        enum zram_pageflags flags = 0;
@@ -1327,13 +1338,14 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
        kunmap_atomic(mem);
 
 compress_again:
-       zstrm = zcomp_stream_get(zram->comp);
+       comp = zram->comp;
+       zstrm = zcomp_stream_get(comp);
        src = kmap_atomic(page);
        ret = zcomp_compress(zstrm, src, &comp_len);
        kunmap_atomic(src);
 
        if (unlikely(ret)) {
-               zcomp_stream_put(zram->comp);
+               zcomp_stream_put(comp);
                pr_err("Compression failed! err=%d\n", ret);
                zs_free(zram->mem_pool, handle);
                return ret;
@@ -1361,7 +1373,7 @@ compress_again:
                                __GFP_HIGHMEM |
                                __GFP_MOVABLE);
        if (!handle) {
-               zcomp_stream_put(zram->comp);
+               zcomp_stream_put(comp);
                atomic64_inc(&zram->stats.writestall);
                handle = zs_malloc(zram->mem_pool, comp_len,
                                GFP_NOIO | __GFP_HIGHMEM |
@@ -1375,7 +1387,7 @@ compress_again:
        update_used_max(zram, alloced_pages);
 
        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
-               zcomp_stream_put(zram->comp);
+               zcomp_stream_put(comp);
                zs_free(zram->mem_pool, handle);
                return -ENOMEM;
        }
@@ -1389,7 +1401,7 @@ compress_again:
        if (comp_len == PAGE_SIZE)
                kunmap_atomic(src);
 
-       zcomp_stream_put(zram->comp);
+       zcomp_stream_put(comp);
        zs_unmap_object(zram->mem_pool, handle);
        atomic64_add(comp_len, &zram->stats.compr_data_size);
 out:
@@ -1411,6 +1423,8 @@ out:
        }  else {
                zram_set_handle(zram, index, handle);
                zram_set_obj_size(zram, index, comp_len);
+               if (comp_len < PAGE_SIZE)
+                       zram_set_compressor(zram, index, comp);
        }
        zram_slot_unlock(zram, index);
 
@@ -1675,7 +1689,6 @@ out:
 
 static void zram_reset_device(struct zram *zram)
 {
-       struct zcomp *comp;
        u64 disksize;
 
        down_write(&zram->init_lock);
@@ -1687,7 +1700,6 @@ static void zram_reset_device(struct zram *zram)
                return;
        }
 
-       comp = zram->comp;
        disksize = zram->disksize;
        zram->disksize = 0;
 
@@ -1698,7 +1710,8 @@ static void zram_reset_device(struct zram *zram)
        /* I/O operation under all of CPU are done so let's free */
        zram_meta_free(zram, disksize);
        memset(&zram->stats, 0, sizeof(zram->stats));
-       zcomp_destroy(comp);
+       zram->comp = NULL;
+       zcomp_reset(&zram->backend_list);
        reset_bdev(zram);
 }
 
@@ -1727,10 +1740,8 @@ static ssize_t disksize_store(struct device *dev,
                goto out_unlock;
        }
 
-       comp = zcomp_create(zram->compressor);
-       if (IS_ERR(comp)) {
-               pr_err("Cannot initialise %s compressing backend\n",
-                               zram->compressor);
+       comp = zcomp_get_instance(&zram->backend_list, zram->compressor);
+       if (IS_ERR_OR_NULL(comp)) {
                err = PTR_ERR(comp);
                goto out_free_meta;
        }
@@ -1953,6 +1964,7 @@ static int zram_add(void)
        device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
 
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+       INIT_LIST_HEAD(&zram->backend_list);
 
        zram_debugfs_register(zram);
        pr_info("Added device: %s\n", zram->disk->disk_name);
index f2fd46d..83b320f 100644 (file)
@@ -63,6 +63,7 @@ struct zram_table_entry {
                unsigned long element;
        };
        unsigned long flags;
+       struct zcomp *compressor;
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
        ktime_t ac_time;
 #endif
@@ -112,6 +113,7 @@ struct zram {
         * zram is claimed so open request will be failed
         */
        bool claim; /* Protected by bdev->bd_mutex */
+       struct list_head backend_list;
        struct file *backing_dev;
 #ifdef CONFIG_ZRAM_WRITEBACK
        spinlock_t wb_limit_lock;