/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
struct bio_batch {
	atomic_t		done;	/* in-flight bios plus one submitter ref */
	unsigned long		flags;	/* carries the BIO_UPTODATE status bit */
	struct completion	*wait;	/* signalled when the last bio completes */
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	unsigned int granularity, alignment, mask;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	mask = granularity - 1;
	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors = round_down(max_discard_sectors, granularity);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

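	/*
	 * Worked example (illustrative values only, not from this file):
	 * a 4KiB discard granularity gives granularity = 8 sectors, so a
	 * device limit of 65535 sectors is rounded down to 65528, and
	 * every full-sized split below stays a multiple of the granularity.
	 */
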
	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);	/* one ref held by the submitter */
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
			end_sect =
				round_down(end_sect - alignment, granularity)
				+ alignment;
			req_sects = end_sect - sector;
		}

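		/*
		 * Worked example (illustrative values only, not from this
		 * file): with granularity = 8 and alignment = 1, a candidate
		 * end_sect of 100 becomes round_down(99, 8) + 1 = 97, so the
		 * next split request starts on an aligned sector.
		 */
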
		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
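
/*
 * Illustrative sketch, not part of this file: how a filesystem caller
 * might use blkdev_issue_discard(). The helper name below is
 * hypothetical; in-tree callers such as sb_issue_discard() look
 * similar, converting filesystem blocks to 512-byte sectors first.
 */
#if 0	/* example only */
static int example_discard_blocks(struct super_block *sb, sector_t block,
				  sector_t nr_blocks)
{
	/* Shift by (blocksize_bits - 9) to convert fs blocks to sectors. */
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    GFP_NOFS, 0);
}
#endif
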
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);	/* one ref held by the submitter */
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			/* bio_add_page() returns the number of bytes added. */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
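
/*
 * Illustrative sketch, not part of this file: zeroing a sector range
 * with blkdev_issue_zeroout(). The wrapper below is hypothetical; it
 * simply forwards the range and relies on the helper to batch the
 * zero-filled bios and wait for their completion.
 */
#if 0	/* example only */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	/* Returns 0 on success, or -EIO if any bio in the batch failed. */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL);
}
#endif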