Lockdep complains about lock inversion between ioc->lock and bfqd->lock:
bfqd -> ioc:
put_io_context+0x33/0x90 -> ioc->lock grabbed
blk_mq_free_request+0x51/0x140
blk_put_request+0xe/0x10
blk_attempt_req_merge+0x1d/0x30
elv_attempt_insert_merge+0x56/0xa0
blk_mq_sched_try_insert_merge+0x4b/0x60
bfq_insert_requests+0x9e/0x18c0 -> bfqd->lock grabbed
blk_mq_sched_insert_requests+0xd6/0x2b0
blk_mq_flush_plug_list+0x154/0x280
blk_finish_plug+0x40/0x60
ext4_writepages+0x696/0x1320
do_writepages+0x1c/0x80
__filemap_fdatawrite_range+0xd7/0x120
sync_file_range+0xac/0xf0
ioc -> bfqd:
bfq_exit_icq+0xa3/0xe0 -> bfqd->lock grabbed
put_io_context_active+0x78/0xb0 -> ioc->lock grabbed
exit_io_context+0x48/0x50
do_exit+0x7e9/0xdd0
do_group_exit+0x54/0xc0
To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that up to the caller, similarly
to blk_mq_sched_try_merge(). And in bfq_insert_requests() we make sure
to free all the merged requests after dropping bfqd->lock.
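For illustration only (not part of the patch): the fix follows the usual
pattern for breaking a lock-order cycle: while holding the first lock,
defer any work that needs the second lock, and perform that work only
after the first lock has been dropped. Below is a minimal user-space
sketch of that pattern with hypothetical names (queue_lock standing in
for bfqd->lock, ctx_lock for ioc->lock), assuming POSIX threads; it is
not kernel code.

#include <pthread.h>
#include <stdlib.h>

struct req {
	struct req *next;	/* chains requests absorbed by a merge */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Freeing a request takes ctx_lock, just as blk_mq_free_request() can
 * end up in put_io_context() and grab ioc->lock. */
static void req_free(struct req *rq)
{
	pthread_mutex_lock(&ctx_lock);
	/* drop per-context references here */
	pthread_mutex_unlock(&ctx_lock);
	free(rq);
}

/* Hypothetical merge step: under queue_lock it may decide that queued
 * requests were absorbed into 'rq'. Instead of freeing them here (which
 * would nest ctx_lock inside queue_lock), it hands them back to the
 * caller. This toy version never merges anything. */
static struct req *queue_try_merge(struct req *rq)
{
	(void)rq;
	return NULL;
}

static void queue_insert(struct req *rq)
{
	struct req *victims;

	pthread_mutex_lock(&queue_lock);
	victims = queue_try_merge(rq);
	/* ... queue 'rq' ... */
	pthread_mutex_unlock(&queue_lock);

	/* Only now, with queue_lock dropped, free merged-away requests,
	 * so ctx_lock is never taken while queue_lock is held. */
	while (victims) {
		struct req *next = victims->next;

		req_free(victims);
		victims = next;
	}
}

int main(void)
{
	struct req *rq = calloc(1, sizeof(*rq));

	queue_insert(rq);
	free(rq);
	return 0;
}

This is exactly the shape of the change below: blk_mq_sched_try_insert_merge()
collects merged requests on a list, and the schedulers free that list via
blk_mq_free_requests() after unlocking.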
Fixes: aee69d78dec0 ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210623093634.27879-3-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+ spin_unlock_irq(&bfqd->lock);
if (free)
blk_mq_free_request(free);
- spin_unlock_irq(&bfqd->lock);
return ret;
}
struct bfq_queue *bfqq;
bool idle_timer_disabled = false;
unsigned int cmd_flags;
+ LIST_HEAD(free);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
bfqg_stats_update_legacy_io(q, rq);
#endif
spin_lock_irq(&bfqd->lock);
- if (blk_mq_sched_try_insert_merge(q, rq)) {
+ if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
spin_unlock_irq(&bfqd->lock);
+ blk_mq_free_requests(&free);
return;
}
return NULL;
}
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
- struct request *next)
+/*
+ * Try to merge 'next' into 'rq'. Return true if the merge happened, false
+ * otherwise. The caller is responsible for freeing 'next' if the merge
+ * happened.
+ */
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+ struct request *next)
{
- struct request *free;
-
- free = attempt_merge(q, rq, next);
- if (free) {
- blk_put_request(free);
- return 1;
- }
-
- return 0;
+ return attempt_merge(q, rq, next);
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return ret;
}
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free)
{
- return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
+ return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs);
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
return NULL;
}
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct request *rq = list_entry_rq(list->next);
+
+ list_del_init(&rq->queuelist);
+ blk_mq_free_request(rq);
+ }
+}
+
/*
* For shared tag users, we track the number of currently active users
* and attempt to provide a fair share of the tag depth for each of them.
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
* we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards.
*
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
*/
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free)
{
struct request *__rq;
bool ret;
/*
* First try one-hit cache.
*/
- if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+ list_add(&rq->queuelist, free);
return true;
+ }
if (blk_queue_noxmerges(q))
return false;
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break;
+ list_add(&rq->queuelist, free);
/* The merged request could be merged with others, try again */
ret = true;
rq = __rq;
struct dd_per_prio *per_prio;
enum dd_prio prio;
struct dd_blkcg *blkcg;
+ LIST_HEAD(free);
lockdep_assert_held(&dd->lock);
WARN_ON_ONCE(rq->elv.priv[0]);
rq->elv.priv[0] = blkcg;
- if (blk_mq_sched_try_insert_merge(q, rq))
+ if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+ blk_mq_free_requests(&free);
return;
+ }
trace_block_rq_insert(rq);
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
+extern bool elv_attempt_insert_merge(struct request_queue *, struct request *,
+ struct list_head *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
void elevator_init_mq(struct request_queue *q);