/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};
extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
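
/*
 * Illustrative sketch only (kept out of the build with #if 0): how a
 * submission-path caller is expected to pair a successful bio_queue_enter()
 * with blk_queue_exit() once the bio has been handed off.
 * example_submit_one() and hand_off_to_driver() are hypothetical names used
 * purely for illustration.
 */
#if 0
static void example_submit_one(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (bio_queue_enter(bio))
		return;			/* bio has already been failed */
	hand_off_to_driver(bio);	/* hypothetical driver hand-off */
	blk_queue_exit(q);		/* drop the q_usage_counter reference */
}
#endif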
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
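
/*
 * Worked example (illustrative numbers only): with a segment boundary mask of
 * 0xffff (64K segments), a vector covering physical bytes 0x10000-0x10fff
 * merges with one covering 0x11000-0x11fff: the ranges are contiguous and
 * 0x10000 | 0xffff == 0x11fff | 0xffff.  A vector ending at 0x1ffff does not
 * merge with one starting at 0x20000, because the combined range would cross
 * the 64K segment boundary.
 */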
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
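
/*
 * Worked example (illustrative numbers only): with a virt boundary mask of
 * 0xfff (4K), appending a vector that starts at offset 0x200 into its page
 * creates a gap (0x200 & 0xfff != 0), as does appending after a previous
 * vector that ends mid-page (bv_offset + bv_len not a multiple of 4K).  Only
 * a previous vector ending exactly on a 4K boundary followed by a new vector
 * starting at a 4K-aligned offset avoids the gap.
 */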
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}
/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the requests will be normal read/write requests. The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
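
/*
 * Example (illustrative): on a queue advertising, say, 64 discard segments,
 * two REQ_OP_DISCARD bios for non-adjacent ranges may still be merged into a
 * single multi-range request via the check above; with only one discard
 * segment they merge like ordinary reads/writes, i.e. only if contiguous.
 */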
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
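
/*
 * Worked example: for REQ_OP_DISCARD the result is clamped to
 * UINT_MAX >> SECTOR_SHIFT = 0xffffffff >> 9 = 8388607 sectors, i.e. just
 * under 4 GiB, so a single request never describes more than UINT_MAX bytes
 * (the bvec_iter.bi_size limit noted further below).
 */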
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);
static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data. The check might
	 * lead to occasional false positives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
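
/*
 * Example (illustrative): a READ bio with a single bio_vec of 512 bytes at
 * offset 0 of its page, on a queue without chunk_sectors, makes the final
 * expression false, so the caller can skip __blk_queue_split() entirely.
 * A cloned bio (bi_vcnt == 0) takes the split path even when it would not
 * need splitting - the "false positive" mentioned above.
 */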
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);
/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}
/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
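
/*
 * Worked example: with 4096-byte logical blocks,
 * round_down(UINT_MAX, 4096) = 4294963200, and 4294963200 >> 9 = 8388600
 * sectors, i.e. the largest 4K-aligned byte count that still fits in
 * bvec_iter.bi_size.
 */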
/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif
void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif
int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
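
/*
 * Illustrative sketch only (kept out of the build with #if 0): the timeout
 * path takes a reference on a request it does not own with
 * req_ref_inc_not_zero() and drops it with req_ref_put_and_test(); whoever
 * sees the count hit zero frees the request.  example_poke_timed_out_rq()
 * and __example_free_rq() are hypothetical names used purely for
 * illustration.
 */
#if 0
static void example_poke_timed_out_rq(struct request *rq)
{
	if (!req_ref_inc_not_zero(rq))
		return;			/* request already completed and freed */
	/* ... inspect or time out the request here ... */
	if (req_ref_put_and_test(rq))
		__example_free_rq(rq);	/* last reference: release the request */
}
#endif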
#endif /* BLK_INTERNAL_H */