Merge tag 'nfs-for-5.19-2' of git://git.linux-nfs.org/projects/anna/linux-nfs
[platform/kernel/linux-starfive.git] / block / blk-mq.c
index e9bf950..33145ba 100644
@@ -579,6 +579,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        if (!blk_mq_hw_queue_mapped(data.hctx))
                goto out_queue_exit;
        cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids)
+               goto out_queue_exit;
        data.ctx = __blk_mq_get_ctx(q, cpu);
 
        if (!q->elevator)
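
The two lines added above guard against the case where none of the CPUs mapped to this hardware context is online: cpumask_first_and() returns nr_cpu_ids when the intersection is empty, and that sentinel must not be passed on to __blk_mq_get_ctx() as an index. Below is a minimal userspace sketch of the same sentinel convention; the names (first_set_and, NBITS) are illustrative, not the kernel API.

    /*
     * Minimal sketch, not kernel code: first_set_and() mirrors the
     * cpumask_first_and() convention of returning `nbits` when the two
     * masks share no set bit, and the caller checks that sentinel before
     * using the result as an index.
     */
    #include <stdio.h>

    #define NBITS 8                         /* stand-in for nr_cpu_ids */

    static unsigned int first_set_and(unsigned long a, unsigned long b,
                                      unsigned int nbits)
    {
            unsigned int i;

            for (i = 0; i < nbits; i++)
                    if ((a & b) & (1UL << i))
                            return i;
            return nbits;                   /* sentinel: no common bit */
    }

    int main(void)
    {
            unsigned long hctx_mask = 0x0c; /* CPUs 2-3 map to this hctx */
            unsigned long online    = 0x03; /* only CPUs 0-1 are online  */
            unsigned int cpu = first_set_and(hctx_mask, online, NBITS);

            if (cpu >= NBITS) {             /* mirror of the new guard   */
                    fprintf(stderr, "no online CPU for this hctx\n");
                    return 1;
            }
            printf("dispatch context for cpu %u\n", cpu);
            return 0;
    }

Because the "not found" value is one past the last valid index, a single >= comparison rejects it before it can be used to look up a per-CPU software context.
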
@@ -2141,20 +2143,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
 /*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e && e->type->ops.dispatch_request &&
-           !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-               return true;
-       return false;
-}
-
-/*
  * Return preferred queue to dispatch from (if any) for non-mq aware IO
  * scheduler.
  */
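
The helper removed above recomputed the "single-queue scheduler" property from the elevator's dispatch_request op and its ELEVATOR_F_MQ_AWARE feature bit on every call, dereferencing q->elevator each time. Its replacement, blk_queue_sq_sched(q), tests a per-queue flag set when such a scheduler is attached. A hedged sketch of that flag-caching pattern follows; the names (demo_queue, DEMO_FLAG_SQ_SCHED) are illustrative and not the blk-mq API.

    /*
     * Minimal sketch: derive the property once when the scheduler is
     * attached, then test a cached flag on the hot path.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_FLAG_SQ_SCHED (1u << 0)    /* illustrative flag bit */

    struct demo_queue {
            unsigned int flags;
    };

    /* set once at scheduler-attach time */
    static void demo_attach_sq_scheduler(struct demo_queue *q)
    {
            q->flags |= DEMO_FLAG_SQ_SCHED;
    }

    /* cheap test on the dispatch path, analogous to blk_queue_sq_sched() */
    static bool demo_queue_sq_sched(const struct demo_queue *q)
    {
            return q->flags & DEMO_FLAG_SQ_SCHED;
    }

    int main(void)
    {
            struct demo_queue q = { .flags = 0 };

            printf("before attach: %d\n", demo_queue_sq_sched(&q));
            demo_attach_sq_scheduler(&q);
            printf("after attach:  %d\n", demo_queue_sq_sched(&q));
            return 0;
    }

Besides saving the per-call feature check, reading a flag avoids dereferencing q->elevator on run paths where the elevator may be switched concurrently.
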
@@ -2186,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
@@ -2214,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
        unsigned long i;
 
        sq_hctx = NULL;
-       if (blk_mq_has_sqsched(q))
+       if (blk_queue_sq_sched(q))
                sq_hctx = blk_mq_get_sq_hctx(q);
        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_hctx_stopped(hctx))
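
Both blk_mq_run_hw_queues() and blk_mq_delay_run_hw_queues() now consult that flag to decide whether a single preferred hardware context (sq_hctx) should be kicked instead of every mapped one. Below is a simplified userspace sketch of that dispatch-preference loop; the names are illustrative, and the real loop applies further conditions not shown here.

    /*
     * Simplified sketch: when a non-mq-aware scheduler is attached, one
     * preferred hardware context is chosen up front and the others are
     * skipped; otherwise every unstopped context is run.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_HCTX 4

    struct demo_hctx {
            int  id;
            bool stopped;
    };

    static void demo_run_hctx(struct demo_hctx *hctx)
    {
            printf("running hctx %d\n", hctx->id);
    }

    static void demo_run_hw_queues(struct demo_hctx *hctxs, bool sq_sched)
    {
            struct demo_hctx *sq_hctx = sq_sched ? &hctxs[0] : NULL;
            int i;

            for (i = 0; i < NR_HCTX; i++) {
                    struct demo_hctx *hctx = &hctxs[i];

                    if (hctx->stopped)
                            continue;
                    /* single-queue scheduler: only the preferred hctx runs */
                    if (sq_hctx && sq_hctx != hctx)
                            continue;
                    demo_run_hctx(hctx);
            }
    }

    int main(void)
    {
            struct demo_hctx hctxs[NR_HCTX] = {
                    { 0, false }, { 1, false }, { 2, true }, { 3, false },
            };

            demo_run_hw_queues(hctxs, true);   /* non-mq-aware scheduler */
            demo_run_hw_queues(hctxs, false);  /* mq-aware scheduler     */
            return 0;
    }
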
@@ -3443,8 +3431,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (blk_mq_hw_queue_mapped(hctx))
                blk_mq_tag_idle(hctx);
 
-       blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
-                       set->queue_depth, flush_rq);
+       if (blk_queue_init_done(q))
+               blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+                               set->queue_depth, flush_rq);
        if (set->ops->exit_request)
                set->ops->exit_request(set, flush_rq, hctx_idx);
 
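
The exit path above now clears the flush request's tag mapping only when the queue actually finished initialization, so teardown does not touch state that early-failure paths never set up. A hedged sketch of that guarded-teardown pattern follows, with illustrative names (demo_ctx, demo_init, demo_exit) rather than the blk-mq functions.

    /*
     * Minimal sketch: teardown undoes only what initialization completed,
     * tracked by an "init done" flag, so a context torn down after an
     * early failure is not asked to clear mappings it never created.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_ctx {
            bool  init_done;
            void *mapping;          /* published only after full init */
    };

    static void demo_init(struct demo_ctx *ctx, bool fail_early)
    {
            ctx->mapping = NULL;
            ctx->init_done = false;
            if (fail_early)
                    return;         /* bail out before publishing anything */
            ctx->mapping = malloc(64);
            ctx->init_done = true;
    }

    static void demo_exit(struct demo_ctx *ctx)
    {
            /* analogous to: if (blk_queue_init_done(q)) clear the mapping */
            if (ctx->init_done) {
                    free(ctx->mapping);
                    ctx->mapping = NULL;
            }
            /* teardown that is always safe would follow here */
    }

    int main(void)
    {
            struct demo_ctx ok, failed;

            demo_init(&ok, false);
            demo_init(&failed, true);
            demo_exit(&ok);
            demo_exit(&failed);     /* uninitialized state is left alone */
            puts("teardown done");
            return 0;
    }
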
@@ -4438,12 +4427,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        if (!qe)
                return false;
 
+       /* q->elevator needs to be protected by ->sysfs_lock */
+       mutex_lock(&q->sysfs_lock);
+
        INIT_LIST_HEAD(&qe->node);
        qe->q = q;
        qe->type = q->elevator->type;
        list_add(&qe->node, head);
 
-       mutex_lock(&q->sysfs_lock);
        /*
         * After elevator_switch_mq, the previous elevator_queue will be
         * released by elevator_release. The reference of the io scheduler