// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	/* Chain the previous bio into the new one and send it on its way. */
	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* The range must be aligned to the logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		/*
		 * Use sector_t so a large nr_sects is not truncated before
		 * the cap below; capping at UINT_MAX >> 9 sectors keeps the
		 * 32-bit bi_size from overflowing.
		 */
		sector_t req_sects = nr_sects;
		sector_t end_sect;

		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * isn't enabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
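
/*
 * Illustrative sketch (not part of the original file): how a caller can
 * batch several discontiguous discard ranges through the *biop anchor and
 * pay for a single submit_bio_wait().  The helper name
 * example_discard_two_ranges() and its hard-coded ranges are hypothetical.
 */
static int __maybe_unused example_discard_two_ranges(struct block_device *bdev)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	/* Queue two ranges; their bios are chained off the same anchor. */
	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 8192, 2048, GFP_KERNEL, 0,
					     &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}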

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		/* Discard is advisory; don't report -EOPNOTSUPP as failure. */
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
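
/*
 * Illustrative sketch (not part of the original file): the simple
 * synchronous interface.  Discards the first 1 MiB of @bdev; the wrapper
 * name is hypothetical.
 */
static int __maybe_unused example_discard_first_mb(struct block_device *bdev)
{
	/* 1 MiB expressed in 512-byte sectors: (1 << 20) >> 9 == 2048. */
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
}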

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all
 *    replicate the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;
	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask,
		struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
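
/*
 * Illustrative sketch (not part of the original file): replicate one page
 * of pattern data across the first 16 MiB of the device, assuming the
 * target supports REQ_OP_WRITE_SAME (otherwise -EOPNOTSUPP is returned).
 * The helper name and the pattern_page argument are hypothetical.
 */
static int __maybe_unused example_write_same_16mb(struct block_device *bdev,
						  struct page *pattern_page)
{
	/* 16 MiB in 512-byte sectors: (16 << 20) >> 9 == 32768. */
	return blkdev_issue_write_same(bdev, 0, 32768, GFP_KERNEL,
				       pattern_page);
}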

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;
	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
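
/*
 * Worked example (editorial note, assuming 4 KiB pages): nr_sects = 7
 * covers less than one page, so DIV_ROUND_UP_SECTOR_T(7, 8) yields 1 page;
 * a 4 MiB range (8192 sectors) yields 1024 pages, which is clamped to
 * BIO_MAX_PAGES (256), so large ranges are spread over multiple bios.
 */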

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;
	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		/* Add zero pages until the bio is full, then chain a new one. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Try the hardware offload first, then fall back to zero pages. */
	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
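
/*
 * Illustrative sketch (not part of the original file): use
 * BLKDEV_ZERO_NOFALLBACK so no zero-page fallback bios are generated when
 * the device lacks a REQ_OP_WRITE_ZEROES offload.  Helper name is
 * hypothetical.
 */
static int __maybe_unused example_offload_only_zeroout(struct block_device *bdev,
						       sector_t sector,
						       sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOFALLBACK);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;	/* -EOPNOTSUPP if no zeroing offload is available */
}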

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
369 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
370 sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
375 struct blk_plug plug;
376 bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
378 bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
379 if ((sector | nr_sects) & bs_mask)
384 blk_start_plug(&plug);
385 if (try_write_zeroes) {
386 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
387 gfp_mask, &bio, flags);
388 } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
389 ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
392 /* No zeroing offload support */
395 if (ret == 0 && bio) {
396 ret = submit_bio_wait(bio);
399 blk_finish_plug(&plug);
400 if (ret && try_write_zeroes) {
401 if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
402 try_write_zeroes = false;
405 if (!bdev_write_zeroes_sectors(bdev)) {
407 * Zeroing offload support was indicated, but the
408 * device reported ILLEGAL REQUEST (for some devices
409 * there is no non-destructive way to verify whether
410 * WRITE ZEROES is actually supported).
418 EXPORT_SYMBOL(blkdev_issue_zeroout);