return page;
}
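+/*
+ * Remove the page backing @sector from the radix tree and free it, if one
+ * was ever allocated.
+ */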
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+ struct page *page;
+ pgoff_t idx;
+
+ spin_lock(&brd->brd_lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ page = radix_tree_delete(&brd->brd_pages, idx);
+ spin_unlock(&brd->brd_lock);
+ if (page)
+ __free_page(page);
+}
+
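+/*
+ * Zero the page backing @sector, if one has been allocated. Sectors with no
+ * backing page already read back as zeroes, so there is nothing to clear.
+ */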
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+ struct page *page;
+
+ page = brd_lookup_page(brd, sector);
+ if (page)
+ clear_highpage(page);
+}
+
/*
* Free all backing store pages and radix tree. This must only be called when
* there are no other users of the device.
return 0;
}
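+/*
+ * Discard @n bytes starting at @sector, one page at a time. The caller
+ * guarantees that both @sector and @n are page aligned.
+ */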
+static void discard_from_brd(struct brd_device *brd,
+ sector_t sector, size_t n)
+{
+ while (n >= PAGE_SIZE) {
+ /*
+ * Don't actually free the backing pages here: having to
+ * re-allocate them later can lead to writeback deadlocks
+ * under heavy load.
+ */
+ if (0)
+ brd_free_page(brd, sector);
+ else
+ brd_zero_page(brd, sector);
+ sector += PAGE_SIZE >> SECTOR_SHIFT;
+ n -= PAGE_SIZE;
+ }
+}
+
/*
* Copy n bytes from src to the brd starting at sector. Does not sleep.
*/
if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
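+ /*
+ * brd only tracks whole pages, so reject discards that are not
+ * page aligned or not a multiple of the page size instead of
+ * discarding a partial range.
+ */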
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+ bio->bi_iter.bi_size & ~PAGE_MASK)
+ goto io_error;
+ discard_from_brd(brd, sector, bio->bi_iter.bi_size);
+ goto out;
+ }
+
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
sector += len >> SECTOR_SHIFT;
}
+out:
bio_endio(bio);
return BLK_QC_T_NONE;
io_error:
* is harmless)
*/
blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
+
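+ /*
+ * Advertise discard support: the granularity is one page, the unit in
+ * which brd allocates and frees its backing store, and there is no
+ * limit on the size of a single discard request.
+ */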
+ brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+ blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, brd->brd_queue);
disk = brd->brd_disk = alloc_disk(max_part);
if (!disk)
goto out_free_queue;