brd: Support discard
author    Hoegeun Kwon <hoegeun.kwon@samsung.com>    Mon, 7 Nov 2022 04:11:09 +0000 (13:11 +0900)
committer Hoegeun Kwon <hoegeun.kwon@samsung.com>    Wed, 23 Nov 2022 02:31:04 +0000 (11:31 +0900)
Discard support is required in order to flush the ramdisk, so revert
the commit that removed it. The reverted code has also been adapted to
the block layer interface of kernel v5.15.

This reverts commit f09a06a193d942a12c1a33c153388b3962222006.

Change-Id: Ic66d8331b63e96ed35ae7ca48cb7c7349559ea58
Signed-off-by: Hoegeun Kwon <hoegeun.kwon@samsung.com>
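
For reference, the restored discard path can be exercised from
userspace with the BLKDISCARD ioctl. A minimal sketch follows; the
device path /dev/ram0 and the 1 MiB range are assumptions for
illustration, not part of this change:

/* Hypothetical test, not part of this patch: discard the first 1 MiB
 * of /dev/ram0 via the BLKDISCARD ioctl, which reaches brd_submit_bio()
 * as a REQ_OP_DISCARD bio. Offset and length must be page-aligned or
 * the driver fails the bio (see the check added to brd_submit_bio).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* offset, length in bytes */
	int fd = open("/dev/ram0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, range) < 0) {
		perror("BLKDISCARD");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}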
drivers/block/brd.c

index 530b312..f6585ba 100644
@@ -124,6 +124,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
        return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+       struct page *page;
+       pgoff_t idx;
+
+       spin_lock(&brd->brd_lock);
+       idx = sector >> PAGE_SECTORS_SHIFT;
+       page = radix_tree_delete(&brd->brd_pages, idx);
+       spin_unlock(&brd->brd_lock);
+       if (page)
+               __free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+       struct page *page;
+
+       page = brd_lookup_page(brd, sector);
+       if (page)
+               clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -186,6 +208,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
        return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+               sector_t sector, size_t n)
+{
+       while (n >= PAGE_SIZE) {
+               /*
+                * Don't want to actually discard pages here because
+                * re-allocating the pages can result in writeback
+                * deadlocks under heavy load.
+                */
+               if (0)
+                       brd_free_page(brd, sector);
+               else
+                       brd_zero_page(brd, sector);
+               sector += PAGE_SIZE >> SECTOR_SHIFT;
+               n -= PAGE_SIZE;
+       }
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -289,6 +329,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
        struct bio_vec bvec;
        struct bvec_iter iter;
 
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+               if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+                               bio->bi_iter.bi_size & ~PAGE_MASK)
+                       goto io_error;
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
+               goto out;
+       }
+
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;
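
The discard branch above only accepts page-aligned requests:
sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) is non-zero when the start
sector does not sit on a page boundary, and bi_size & ~PAGE_MASK is
non-zero when the byte count is not a whole number of pages. A
standalone sketch of the same math, assuming 4 KiB pages and 512-byte
sectors (8 sectors per page, as on x86-64):

/* Illustration only; PAGE_SIZE and SECTOR_SHIFT values are assumed. */
#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int discard_ok(unsigned long long sector, unsigned long size)
{
	/* start sector must sit on a page boundary ... */
	if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1))
		return 0;
	/* ... and the byte count must be a whole number of pages */
	if (size & ~PAGE_MASK)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", discard_ok(8, 4096));	/* 1: page-aligned */
	printf("%d\n", discard_ok(4, 4096));	/* 0: sector 4 is mid-page */
	printf("%d\n", discard_ok(8, 2048));	/* 0: half a page */
	return 0;
}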
@@ -304,6 +352,7 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
                sector += len >> SECTOR_SHIFT;
        }
 
+out:
        bio_endio(bio);
        return BLK_QC_T_NONE;
 io_error:
@@ -419,6 +468,10 @@ static int brd_alloc(int i)
         */
        blk_queue_physical_block_size(disk->queue, PAGE_SIZE);
 
+       disk->queue->limits.discard_granularity = PAGE_SIZE;
+       blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, disk->queue);
+
        /* Tell the block layer that this is not a rotational device */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
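
With these limits set, the block layer advertises discard support
through sysfs. A small sketch that checks this from userspace; the
sysfs paths assume the first ramdisk, ram0:

/* Illustration only: read the queue limits exported as a result of
 * this patch. Expect discard_granularity == PAGE_SIZE and a non-zero
 * discard_max_bytes once discard is enabled.
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/sys/block/ram0/queue/discard_granularity");
	show("/sys/block/ram0/queue/discard_max_bytes");
	return 0;
}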