zram: remove double compression logic
authorAlexey Romanov <avromanov@sberdevices.ru>
Fri, 13 May 2022 03:23:07 +0000 (20:23 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 13 May 2022 14:20:18 +0000 (07:20 -0700)
The 2nd trial allocation under per-cpu presumption has been used to
prevent regression of allocation failure.  However, it complicates
maintenance without providing a significant benefit.  The slowpath
branch is executed extremely rarely, which also makes it hard to
exercise and verify.  Therefore, we delete this branch.

Since b09ab054b69b ("zram: support BDI_CAP_STABLE_WRITES"), zram has used
QUEUE_FLAG_STABLE_WRITES to prevent buffer change between 1st and 2nd
memory allocations.  Since we removed the second trial memory
allocation logic, we can remove the STABLE_WRITES flag because there
is no longer a chance for the buffer to be modified under us.

Link: https://lkml.kernel.org/r/20220505094443.11728-1-avromanov@sberdevices.ru
Signed-off-by: Alexey Romanov <avromanov@sberdevices.ru>
Signed-off-by: Dmitry Rokosov <ddrokosov@sberdevices.ru>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/block/zram/zram_drv.c
drivers/block/zram/zram_drv.h

index 8562a7c..7e84965 100644 (file)
@@ -1144,15 +1144,14 @@ static ssize_t bd_stat_show(struct device *dev,
 static ssize_t debug_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       int version = 1;
+       int version = 2;
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;
 
        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
-                       "version: %d\n%8llu %8llu\n",
+                       "version: %d\n%8llu\n",
                        version,
-                       (u64)atomic64_read(&zram->stats.writestall),
                        (u64)atomic64_read(&zram->stats.miss_free));
        up_read(&zram->init_lock);
 
@@ -1368,7 +1367,6 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
        }
        kunmap_atomic(mem);
 
-compress_again:
        zstrm = zcomp_stream_get(zram->comp);
        src = kmap_atomic(page);
        ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,39 +1375,20 @@ compress_again:
        if (unlikely(ret)) {
                zcomp_stream_put(zram->comp);
                pr_err("Compression failed! err=%d\n", ret);
-               zs_free(zram->mem_pool, handle);
                return ret;
        }
 
        if (comp_len >= huge_class_size)
                comp_len = PAGE_SIZE;
-       /*
-        * handle allocation has 2 paths:
-        * a) fast path is executed with preemption disabled (for
-        *  per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
-        *  since we can't sleep;
-        * b) slow path enables preemption and attempts to allocate
-        *  the page with __GFP_DIRECT_RECLAIM bit set. we have to
-        *  put per-cpu compression stream and, thus, to re-do
-        *  the compression once handle is allocated.
-        *
-        * if we have a 'non-null' handle here then we are coming
-        * from the slow path and handle has already been allocated.
-        */
-       if (!handle)
-               handle = zs_malloc(zram->mem_pool, comp_len,
-                               __GFP_KSWAPD_RECLAIM |
-                               __GFP_NOWARN |
-                               __GFP_HIGHMEM |
-                               __GFP_MOVABLE);
-       if (!handle) {
+
+       handle = zs_malloc(zram->mem_pool, comp_len,
+                       __GFP_KSWAPD_RECLAIM |
+                       __GFP_NOWARN |
+                       __GFP_HIGHMEM |
+                       __GFP_MOVABLE);
+
+       if (unlikely(!handle)) {
                zcomp_stream_put(zram->comp);
-               atomic64_inc(&zram->stats.writestall);
-               handle = zs_malloc(zram->mem_pool, comp_len,
-                               GFP_NOIO | __GFP_HIGHMEM |
-                               __GFP_MOVABLE);
-               if (handle)
-                       goto compress_again;
                return -ENOMEM;
        }
 
@@ -1967,7 +1946,6 @@ static int zram_add(void)
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
-       blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
        ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
        if (ret)
                goto out_cleanup_disk;
index 80c3b43..158c91e 100644 (file)
@@ -81,7 +81,6 @@ struct zram_stats {
        atomic64_t huge_pages_since;    /* no. of huge pages since zram set up */
        atomic64_t pages_stored;        /* no. of pages currently stored */
        atomic_long_t max_used_pages;   /* no. of maximum pages stored */
-       atomic64_t writestall;          /* no. of write slow paths */
        atomic64_t miss_free;           /* no. of missed free */
 #ifdef CONFIG_ZRAM_WRITEBACK
        atomic64_t bd_count;            /* no. of pages in backing device */