diff --git a/block/blk-mq.c b/block/blk-mq.c
index 953f083..257b0ad 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,6 +43,7 @@
 #include "blk-ioprio.h"
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -1174,15 +1175,11 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 
 static void blk_mq_complete_send_ipi(struct request *rq)
 {
-       struct llist_head *list;
        unsigned int cpu;
 
        cpu = rq->mq_ctx->cpu;
-       list = &per_cpu(blk_cpu_done, cpu);
-       if (llist_add(&rq->ipi_list, list)) {
-               INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-               smp_call_function_single_async(cpu, &rq->csd);
-       }
+       if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
+               smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
 }
 
 static void blk_mq_raise_softirq(struct request *rq)
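Note: llist_add() returns true only when the list was empty beforehand, so at most one IPI per target CPU is ever outstanding and a single per-CPU call_single_data_t can replace the per-request csd. The sketch below illustrates the remote side of that pattern, assuming kernel context; the handler name and drain loop are placeholders, not the kernel's exact __blk_mq_complete_request_remote().

/* Illustrative sketch: on the target CPU, drain everything that was queued
 * on the per-CPU llist since the single IPI was raised, then complete each
 * request. */
static void example_complete_remote(void *unused)
{
        struct llist_head *list = this_cpu_ptr(&blk_cpu_done);
        struct llist_node *node = llist_del_all(list);
        struct request *rq, *next;

        llist_for_each_entry_safe(rq, next, node, ipi_list)
                rq->q->mq_ops->complete(rq);
}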
@@ -1343,7 +1340,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
        }
 
        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
-       blk_mq_run_hw_queue(hctx, false);
+       blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1514,14 +1511,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
+static bool blk_is_flush_data_rq(struct request *rq)
+{
+       return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
+}
+
 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
 {
        /*
         * If we find a request that isn't idle we know the queue is busy
         * as it's checked in the iter.
         * Return false to stop the iteration.
+        *
+        * In case of queue quiesce, if a flush data request has completed,
+        * don't count it as inflight: the flush sequence is suspended, and
+        * the original flush data request is invisible to the driver, just
+        * like other pending requests, because of the quiesce.
         */
-       if (blk_mq_request_started(rq)) {
+       if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
+                               blk_is_flush_data_rq(rq) &&
+                               blk_mq_request_completed(rq))) {
                bool *busy = priv;
 
                *busy = true;
@@ -1862,6 +1871,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        __add_wait_queue(wq, wait);
 
        /*
+        * Add one explicit barrier since blk_mq_get_driver_tag() may
+        * not imply a barrier in case of failure.
+        *
+        * Order adding us to the wait queue and allocating the driver tag.
+        *
+        * The pairing barrier is the one implied in sbitmap_queue_wake_up(),
+        * which orders clearing the sbitmap tag bits against waitqueue_active()
+        * in __sbitmap_queue_wake_up(), since waitqueue_active() is lockless.
+        *
+        * Otherwise, reordering the wait-queue add and the driver-tag allocation
+        * may cause __sbitmap_queue_wake_up() to wake nobody, because
+        * waitqueue_active() may not observe us on the wait queue.
+        */
+       smp_mb();
+
+       /*
         * It's possible that a tag was freed in the window between the
         * allocation failure and adding the hardware queue to the wait
         * queue.
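Note: the pairing above is the classic store-buffering pattern: the waiter adds itself to the wait queue and then retries the tag allocation, while the waker frees a tag and then tests waitqueue_active(). With a full barrier on both sides, at least one side must observe the other's store, so the waiter either gets the freed tag itself or is seen and woken. A minimal user-space C11 analogue of the two sides follows; the flag names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tag_free;        /* "a tag was released" */
static atomic_bool waiter_queued;   /* "someone is on the wait queue" */

/* Waiter: enqueue, full fence (the added smp_mb()), then re-check the tag. */
static bool waiter_sees_tag(void)
{
        atomic_store_explicit(&waiter_queued, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&tag_free, memory_order_relaxed);
}

/* Waker: release the tag, full fence (implied by sbitmap_queue_wake_up()),
 * then check for waiters, as the lockless waitqueue_active() does. */
static bool waker_sees_waiter(void)
{
        atomic_store_explicit(&tag_free, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&waiter_queued, memory_order_relaxed);
}

With both fences in place the two functions cannot both return false; without the waiter-side fence they can, which is exactly the lost wakeup the comment describes.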
@@ -2242,6 +2267,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
         */
        WARN_ON_ONCE(!async && in_interrupt());
 
+       might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+
        /*
         * When queue is quiesced, we may be switching io scheduler, or
         * updating nr_hw_queues, or other things, and we can't run queue
@@ -2257,8 +2284,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
        if (!need_run)
                return;
 
-       if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
-           !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+       if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
                blk_mq_delay_run_hw_queue(hctx, 0);
                return;
        }
@@ -2393,7 +2419,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-       blk_mq_run_hw_queue(hctx, false);
+       blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -2423,7 +2449,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
        unsigned long i;
 
        queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_start_stopped_hw_queue(hctx, async);
+               blk_mq_start_stopped_hw_queue(hctx, async ||
+                                       (hctx->flags & BLK_MQ_F_BLOCKING));
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
@@ -2481,6 +2508,8 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
        list_for_each_entry(rq, list, queuelist) {
                BUG_ON(rq->mq_ctx != ctx);
                trace_block_rq_insert(rq);
+               if (rq->cmd_flags & REQ_NOWAIT)
+                       run_queue_async = true;
        }
 
        spin_lock(&ctx->lock);
@@ -2641,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
                blk_mq_insert_request(rq, 0);
-               blk_mq_run_hw_queue(hctx, false);
+               blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
                return;
        }
 
@@ -2874,11 +2903,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        };
        struct request *rq;
 
-       if (unlikely(bio_queue_enter(bio)))
-               return NULL;
-
        if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-               goto queue_exit;
+               return NULL;
 
        rq_qos_throttle(q, bio);
 
@@ -2894,35 +2920,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
-queue_exit:
-       blk_queue_exit(q);
        return NULL;
 }
 
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-               struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* Return true if @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+               struct bio *bio)
 {
-       struct request *rq;
-       enum hctx_type type, hctx_type;
+       enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+       enum hctx_type hctx_type = rq->mq_hctx->type;
 
-       if (!plug)
-               return NULL;
-       rq = rq_list_peek(&plug->cached_rq);
-       if (!rq || rq->q != q)
-               return NULL;
-
-       if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-               *bio = NULL;
-               return NULL;
-       }
+       WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
-       type = blk_mq_get_hctx_type((*bio)->bi_opf);
-       hctx_type = rq->mq_hctx->type;
        if (type != hctx_type &&
            !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-               return NULL;
-       if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-               return NULL;
+               return false;
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+               return false;
 
        /*
         * If any qos ->throttle() end up blocking, we will have flushed the
@@ -2930,12 +2944,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
         * before we throttle.
         */
        plug->cached_rq = rq_list_next(rq);
-       rq_qos_throttle(q, *bio);
+       rq_qos_throttle(rq->q, bio);
 
        blk_mq_rq_time_init(rq, 0);
-       rq->cmd_flags = (*bio)->bi_opf;
+       rq->cmd_flags = bio->bi_opf;
        INIT_LIST_HEAD(&rq->queuelist);
-       return rq;
+       return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
@@ -2965,31 +2979,51 @@ void blk_mq_submit_bio(struct bio *bio)
        struct blk_plug *plug = blk_mq_plug(bio);
        const int is_sync = op_is_sync(bio->bi_opf);
        struct blk_mq_hw_ctx *hctx;
-       struct request *rq;
+       struct request *rq = NULL;
        unsigned int nr_segs = 1;
        blk_status_t ret;
 
        bio = blk_queue_bounce(bio, q);
-       if (bio_may_exceed_limits(bio, &q->limits)) {
-               bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-               if (!bio)
-                       return;
-       }
-
-       if (!bio_integrity_prep(bio))
-               return;
-
        bio_set_ioprio(bio);
 
-       rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-       if (!rq) {
-               if (!bio)
+       if (plug) {
+               rq = rq_list_peek(&plug->cached_rq);
+               if (rq && rq->q != q)
+                       rq = NULL;
+       }
+       if (rq) {
+               if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+                       bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+                       if (!bio)
+                               return;
+               }
+               if (!bio_integrity_prep(bio))
                        return;
-               rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-               if (unlikely(!rq))
+               if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
+                       return;
+               if (blk_mq_can_use_cached_rq(rq, plug, bio))
+                       goto done;
+               percpu_ref_get(&q->q_usage_counter);
+       } else {
+               if (unlikely(bio_queue_enter(bio)))
                        return;
+               if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+                       bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+                       if (!bio)
+                               goto fail;
+               }
+               if (!bio_integrity_prep(bio))
+                       goto fail;
        }
 
+       rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+       if (unlikely(!rq)) {
+fail:
+               blk_queue_exit(q);
+               return;
+       }
+
+done:
        trace_block_getrq(bio);
 
        rq_qos_track(q, rq, bio);
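Note: after this restructuring blk_mq_submit_bio() owns the q_usage_counter reference on every path: a reusable cached request already carries one, a cached request that fails the checks keeps its own reference and an extra one is taken with percpu_ref_get() before a new request is allocated, and the no-plug path takes its reference through bio_queue_enter(); allocation failures drop it again via blk_queue_exit(). A condensed, pseudocode-level sketch of that flow (cached_rq_usable and cached_rq_present are illustrative placeholders, not kernel symbols):

/* Sketch only: every branch that reaches blk_mq_get_new_requests() holds
 * exactly one q_usage_counter reference; the failure path drops it. */
if (cached_rq_usable) {
        goto done;                              /* reuse the cached rq's ref */
} else if (cached_rq_present) {
        percpu_ref_get(&q->q_usage_counter);    /* ref for the new request */
} else {
        if (bio_queue_enter(bio))               /* takes the reference */
                return;
}
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (!rq) {
        blk_queue_exit(q);                      /* balance the reference */
        return;
}
done: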
@@ -4402,6 +4436,7 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
                                       int new_nr_hw_queues)
 {
        struct blk_mq_tags **new_tags;
+       int i;
 
        if (set->nr_hw_queues >= new_nr_hw_queues)
                goto done;
@@ -4416,6 +4451,16 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
                       sizeof(*set->tags));
        kfree(set->tags);
        set->tags = new_tags;
+
+       for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
+               if (!__blk_mq_alloc_map_and_rqs(set, i)) {
+                       while (--i >= set->nr_hw_queues)
+                               __blk_mq_free_map_and_rqs(set, i);
+                       return -ENOMEM;
+               }
+               cond_resched();
+       }
+
 done:
        set->nr_hw_queues = new_nr_hw_queues;
        return 0;
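Note: the added loop pre-allocates a map and requests for every new hardware queue and, on failure, frees only the queues it just allocated before returning -ENOMEM, so the tag set is never left half-populated. The same allocate-or-unwind idiom as a small self-contained user-space C sketch (function and buffer names are illustrative):

#include <stdlib.h>

/* Grow an array of per-queue buffers from old_n to new_n entries. On
 * failure, free only what this call allocated and leave the first old_n
 * entries untouched, mirroring the rollback above. */
static int grow_queue_buffers(void **bufs, int old_n, int new_n, size_t size)
{
        int i;

        for (i = old_n; i < new_n; i++) {
                bufs[i] = malloc(size);
                if (!bufs[i]) {
                        while (--i >= old_n)
                                free(bufs[i]);
                        return -1;
                }
        }
        return 0;
}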
@@ -4704,7 +4749,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
        struct request_queue *q;
        LIST_HEAD(head);
-       int prev_nr_hw_queues;
+       int prev_nr_hw_queues = set->nr_hw_queues;
+       int i;
 
        lockdep_assert_held(&set->tag_list_lock);
 
@@ -4731,7 +4777,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                blk_mq_sysfs_unregister_hctxs(q);
        }
 
-       prev_nr_hw_queues = set->nr_hw_queues;
        if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
                goto reregister;
 
@@ -4749,7 +4794,6 @@ fallback:
                                __blk_mq_free_map_and_rqs(set, i);
 
                        set->nr_hw_queues = prev_nr_hw_queues;
-                       blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
                        goto fallback;
                }
                blk_mq_map_swqueue(q);
@@ -4767,6 +4811,10 @@ switch_back:
 
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
+
+       /* Free the excess tags when nr_hw_queues shrinks. */
+       for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+               __blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
@@ -4853,6 +4901,9 @@ static int __init blk_mq_init(void)
 
        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(blk_cpu_done, i));
+       for_each_possible_cpu(i)
+               INIT_CSD(&per_cpu(blk_cpu_csd, i),
+                        __blk_mq_complete_request_remote, NULL);
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,