Revert "brd: remove discard support" 82/239982/2
author    Seung-Woo Kim <sw0312.kim@samsung.com>
Fri, 31 Jul 2020 06:18:06 +0000 (15:18 +0900)
committer Seung-Woo Kim <sw0312.kim@samsung.com>
Mon, 3 Aug 2020 07:14:55 +0000 (07:14 +0000)
This reverts commit f09a06a193d942a12c1a33c153388b3962222006.

To support ramdisk flush, discard support is required, so revert
the commit.
Note: compared to the original code being restored, the
struct queue_limits::discard_zeroes_data setting is dropped and
queue_flag_set_unlocked() is replaced with blk_queue_flag_set()
(see the sketch below the sign-off).

Change-Id: Iead94d2263ce7ab688e57de587fa77f649285b10
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
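
A minimal sketch of the queue-setup change the note describes, for
reference only. The "old" lines reflect the pre-removal brd discard
setup as recalled (an assumption, not part of this patch); the "new"
lines match what this revert applies in brd_alloc().

	/* old: discard setup removed by f09a06a1 (older block API),
	 * recalled here for comparison only */
	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;	/* setting dropped now */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	/* new: what this revert applies instead (current block API) */
	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, brd->brd_queue);

With discard enabled, the restored REQ_OP_DISCARD handling in
brd_make_request() can be exercised from userspace with blkdiscard(8)
or the BLKDISCARD ioctl on /dev/ram0, for example.
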
drivers/block/brd.c

index 047ae4f..502f72d 100644
@@ -125,6 +125,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
        return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+       struct page *page;
+       pgoff_t idx;
+
+       spin_lock(&brd->brd_lock);
+       idx = sector >> PAGE_SECTORS_SHIFT;
+       page = radix_tree_delete(&brd->brd_pages, idx);
+       spin_unlock(&brd->brd_lock);
+       if (page)
+               __free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+       struct page *page;
+
+       page = brd_lookup_page(brd, sector);
+       if (page)
+               clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -187,6 +209,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
        return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+                       sector_t sector, size_t n)
+{
+       while (n >= PAGE_SIZE) {
+               /*
+                * Don't want to actually discard pages here because
+                * re-allocating the pages can result in writeback
+                * deadlocks under heavy load.
+                */
+               if (0)
+                       brd_free_page(brd, sector);
+               else
+                       brd_zero_page(brd, sector);
+               sector += PAGE_SIZE >> SECTOR_SHIFT;
+               n -= PAGE_SIZE;
+       }
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -294,6 +334,14 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
        if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
                goto io_error;
 
+       if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+               if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+                   bio->bi_iter.bi_size & ~PAGE_MASK)
+                       goto io_error;
+               discard_from_brd(brd, sector, bio->bi_iter.bi_size);
+               goto out;
+       }
+
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;
@@ -305,6 +353,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
                sector += len >> SECTOR_SHIFT;
        }
 
+out:
        bio_endio(bio);
        return BLK_QC_T_NONE;
 io_error:
@@ -392,6 +441,10 @@ static struct brd_device *brd_alloc(int i)
         *  is harmless)
         */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
+
+       brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+       blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
+       blk_queue_flag_set(QUEUE_FLAG_DISCARD, brd->brd_queue);
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;