/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Tracks a batch of in-flight bios that share one completion and one
 * error flag.
 */
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};
/* Per-bio completion: record errors and signal when the batch drains. */
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
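/*
 * Note on the batching pattern used throughout this file: the submitter
 * initializes bb.done to 1 and increments it once per submitted bio.
 * bio_batch_end_io() decrements it per completion, and the submitter's
 * final atomic_dec_and_test() pairs with the initial 1, so the
 * completion fires exactly once, after the last bio has ended.
 */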
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}
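	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): a queue reporting discard_granularity = 4096 bytes
	 * yields granularity = 8 sectors. UINT_MAX >> 9 is 8388607 and
	 * 8388607 % 8 == 7, so a cap of 8388607 sectors is trimmed to
	 * 8388600, keeping every full-sized chunk granularity-aligned.
	 */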
	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
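		/*
		 * Worked example (illustrative numbers, not from the
		 * original source): with granularity = 8, alignment = 0,
		 * sector = 0, nr_sects = 100 and req_sects capped at 70,
		 * end_sect starts at 70, which is not a multiple of 8.
		 * The rounding above pulls end_sect back to 64 so the
		 * next chunk begins on a granularity boundary.
		 */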
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;
		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);
	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
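/*
 * A minimal usage sketch, not part of the original file: a hypothetical
 * caller discarding the first mebibyte of a device it already holds a
 * reference to. Sector counts are in 512-byte units, so 1 MiB is 2048
 * sectors; BLKDEV_DISCARD_SECURE would replace the 0 if a secure
 * discard were required.
 */
static int __maybe_unused example_discard_first_mib(struct block_device *bdev)
{
	sector_t nr_sects = (1024 * 1024) >> 9;	/* 1 MiB in sectors */

	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}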
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}
	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
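/*
 * A minimal usage sketch, not part of the original file: a hypothetical
 * caller replicating one page of pattern data across a sector range.
 * Only the single payload page is transferred to the device, however
 * large nr_sects is; the example name and its bdev_write_same() guard
 * are illustrative, not required by the API.
 */
static int __maybe_unused example_write_same_range(struct block_device *bdev,
						   sector_t sector,
						   sector_t nr_sects,
						   struct page *pattern)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;	/* device lacks WRITE SAME support */

	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       pattern);
}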
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;
		/*
		 * Pack as many zero pages as fit; bio_add_page() takes a
		 * length in bytes and returns the bytes actually added.
		 */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;	/* bio is full, submit and retry */
		}
		ret = 0;

		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}
	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command,
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
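/*
 * A minimal usage sketch, not part of the original file: a hypothetical
 * caller zeroing a range while allowing the discard fast path. The
 * fallback chain (discard -> WRITE SAME -> plain zero-page writes) is
 * handled internally, so the caller only chooses whether discard may be
 * attempted.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
					     sector_t sector,
					     sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}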