diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9437a5e..3ff3d7b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
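
The updated comment spells out the three sources of pending work a hardware queue can have: the per-CPU ctx software queues, the hctx dispatch list, and the I/O scheduler. Below is a minimal userspace sketch of that three-way check; fake_hctx and its fields are invented for illustration and are not the kernel's blk_mq_hctx_has_pending().

/* Illustrative model only: three possible sources of pending work. */
#include <stdbool.h>
#include <stdio.h>

struct fake_hctx {
	bool has_sw_queue_work;		/* any ctx software queue non-empty */
	bool has_dispatch_work;		/* hctx dispatch list non-empty     */
	bool has_elevator_work;		/* I/O scheduler has queued work    */
};

static bool fake_hctx_has_pending(const struct fake_hctx *hctx)
{
	/* work from any one of the three sources keeps the queue busy */
	return hctx->has_sw_queue_work ||
	       hctx->has_dispatch_work ||
	       hctx->has_elevator_work;
}

int main(void)
{
	struct fake_hctx h = { .has_elevator_work = true };

	printf("pending: %d\n", fake_hctx_has_pending(&h));
	return 0;
}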
@@ -331,7 +332,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
        rq->nr_integrity_segments = 0;
 #endif
-       rq->special = NULL;
        /* tag was already set */
        rq->extra_len = 0;
        WRITE_ONCE(rq->deadline, 0);
@@ -340,7 +340,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
        rq->end_io = NULL;
        rq->end_io_data = NULL;
-       rq->next_rq = NULL;
 
        data->ctx->rq_dispatched[op_is_sync(op)]++;
        refcount_set(&rq->ref, 1);
@@ -364,7 +363,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        }
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-                                               data->ctx->cpu);
+                                               data->ctx);
        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -550,8 +549,6 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
                rq_qos_done(rq->q, rq);
                rq->end_io(rq, error);
        } else {
-               if (unlikely(blk_bidi_rq(rq)))
-                       blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
 }
@@ -786,7 +783,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
 }
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
@@ -1076,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
        spin_lock(&hctx->dispatch_wait_lock);
-       list_del_init(&wait->entry);
+       if (!list_empty(&wait->entry)) {
+               struct sbitmap_queue *sbq;
+
+               list_del_init(&wait->entry);
+               sbq = &hctx->tags->bitmap_tags;
+               atomic_dec(&sbq->ws_active);
+       }
        spin_unlock(&hctx->dispatch_wait_lock);
 
        blk_mq_run_hw_queue(hctx, true);
@@ -1092,13 +1094,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
 {
+       struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;
 
        if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
-               if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-                       set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+               blk_mq_sched_mark_restart_hctx(hctx);
 
                /*
                 * It's possible that a tag was freed in the window between the
@@ -1115,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        if (!list_empty_careful(&wait->entry))
                return false;
 
-       wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+       wq = &bt_wait_ptr(sbq, hctx)->wait;
 
        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
@@ -1125,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                return false;
        }
 
+       atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
@@ -1145,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
+       atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);
 
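
These hunks, together with the wakeup path above, keep sbq->ws_active in step with the hctx's dispatch_wait entry: the counter is bumped only once the entry is actually queued and dropped only when a queued entry is removed, always under dispatch_wait_lock, so the tag-release side can cheaply tell whether any hardware queue is sleeping on a tag. The userspace model below sketches that pattern; struct wait_state, add_waiter() and remove_waiter() are invented names, not kernel API.

/* Illustrative model: waiter count adjusted only under the list lock. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	bool queued;
};

struct wait_state {
	pthread_mutex_t lock;
	struct waiter *head;
	atomic_int ws_active;		/* plays the role of sbq->ws_active */
};

static void add_waiter(struct wait_state *ws, struct waiter *w)
{
	pthread_mutex_lock(&ws->lock);
	if (!w->queued) {
		w->next = ws->head;
		ws->head = w;
		w->queued = true;
		/* count the sleeper under the same lock that queued it */
		atomic_fetch_add(&ws->ws_active, 1);
	}
	pthread_mutex_unlock(&ws->lock);
}

static void remove_waiter(struct wait_state *ws, struct waiter *w)
{
	struct waiter **pp;

	pthread_mutex_lock(&ws->lock);
	if (w->queued) {
		for (pp = &ws->head; *pp; pp = &(*pp)->next) {
			if (*pp == w) {
				*pp = w->next;
				break;
			}
		}
		w->queued = false;
		/* drop the count only if we really were queued */
		atomic_fetch_sub(&ws->ws_active, 1);
	}
	pthread_mutex_unlock(&ws->lock);
}

int main(void)
{
	struct wait_state ws = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct waiter w = { 0 };

	add_waiter(&ws, &w);
	remove_waiter(&ws, &w);
	remove_waiter(&ws, &w);		/* second removal is a no-op */
	printf("ws_active: %d\n", atomic_load(&ws.ws_active));
	return 0;
}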
@@ -2069,7 +2073,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
        struct blk_mq_tags *tags;
        int node;
 
-       node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+       node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
 
@@ -2125,7 +2129,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
        size_t rq_size, left;
        int node;
 
-       node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+       node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
 
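
The repeated set->map[0] to set->map[HCTX_TYPE_DEFAULT] changes swap a magic index for the named default entry of the per-type queue maps (enum hctx_type also defines HCTX_TYPE_READ and HCTX_TYPE_POLL). A toy model of that enum-indexed map is below; apart from the enum names, everything is made up for illustration.

/* Illustrative model: index the per-type maps by enum, not by 0. */
#include <stdio.h>

enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,
	HCTX_MAX_TYPES,
};

struct queue_map {
	unsigned int nr_queues;
};

struct tag_set {
	struct queue_map map[HCTX_MAX_TYPES];
};

int main(void)
{
	struct tag_set set = {
		.map[HCTX_TYPE_DEFAULT] = { .nr_queues = 4 },
		.map[HCTX_TYPE_POLL]    = { .nr_queues = 2 },
	};

	/* reads the same slot as set.map[0], but states the intent */
	printf("default queues: %u\n", set.map[HCTX_TYPE_DEFAULT].nr_queues);
	return 0;
}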
@@ -2424,7 +2428,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         * If the cpu isn't present, the cpu is mapped to first hctx.
         */
        for_each_possible_cpu(i) {
-               hctx_idx = set->map[0].mq_map[i];
+               hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
                /* unmapped hw queue can be remapped after CPU topo changed */
                if (!set->tags[hctx_idx] &&
                    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2434,16 +2438,19 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                         * case, remap the current ctx to hctx[0] which
                         * is guaranteed to always have tags allocated
                         */
-                       set->map[0].mq_map[i] = 0;
+                       set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
                }
 
                ctx = per_cpu_ptr(q->queue_ctx, i);
                for (j = 0; j < set->nr_maps; j++) {
-                       if (!set->map[j].nr_queues)
+                       if (!set->map[j].nr_queues) {
+                               ctx->hctxs[j] = blk_mq_map_queue_type(q,
+                                               HCTX_TYPE_DEFAULT, i);
                                continue;
+                       }
 
                        hctx = blk_mq_map_queue_type(q, j, i);
-
+                       ctx->hctxs[j] = hctx;
                        /*
                         * If the CPU is already set in the mask, then we've
                         * mapped this one already. This can happen if
@@ -2463,6 +2470,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                         */
                        BUG_ON(!hctx->nr_ctx);
                }
+
+               for (; j < HCTX_MAX_TYPES; j++)
+                       ctx->hctxs[j] = blk_mq_map_queue_type(q,
+                                       HCTX_TYPE_DEFAULT, i);
        }
 
        mutex_unlock(&q->sysfs_lock);
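
This hunk fills a per-software-queue cache, ctx->hctxs[], with one hardware-context pointer per hctx type, and points any type whose map has no queues (plus the types beyond set->nr_maps) at the DEFAULT mapping, so later per-request lookups can be a plain array read. A hedged sketch of that caching shape follows; fill_ctx_cache(), map_queue_type() and the toy structs are illustrative stand-ins, not the kernel functions.

/* Illustrative model: cache one hctx pointer per type on each sw ctx. */
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct hw_ctx { int id; };

struct sw_ctx {
	struct hw_ctx *hctxs[HCTX_MAX_TYPES];	/* plays the role of ctx->hctxs[] */
};

/* stand-in for blk_mq_map_queue_type(): one hw_ctx per type in this toy */
static struct hw_ctx *map_queue_type(struct hw_ctx table[HCTX_MAX_TYPES],
				     enum hctx_type type, int cpu)
{
	(void)cpu;			/* a real map would also key on the CPU */
	return &table[type];
}

static void fill_ctx_cache(struct sw_ctx *ctx, struct hw_ctx table[HCTX_MAX_TYPES],
			   const unsigned int nr_queues[HCTX_MAX_TYPES], int cpu)
{
	for (int t = 0; t < HCTX_MAX_TYPES; t++) {
		/* types with no queues of their own reuse the default mapping */
		enum hctx_type use = nr_queues[t] ? (enum hctx_type)t : HCTX_TYPE_DEFAULT;

		ctx->hctxs[t] = map_queue_type(table, use, cpu);
	}
}

int main(void)
{
	struct hw_ctx table[HCTX_MAX_TYPES] = { { 0 }, { 1 }, { 2 } };
	unsigned int nr_queues[HCTX_MAX_TYPES] = { 4, 0, 2 };	/* no dedicated read queues */
	struct sw_ctx ctx;

	fill_ctx_cache(&ctx, table, nr_queues, 0);
	/* the hot path can now just read the cached pointer by type */
	printf("read I/O goes to hctx %d\n", ctx.hctxs[HCTX_TYPE_READ]->id);
	return 0;
}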
@@ -2734,7 +2745,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                int node;
                struct blk_mq_hw_ctx *hctx;
 
-               node = blk_mq_hw_queue_to_node(&set->map[0], i);
+               node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
                /*
                 * If the hw queue has been mapped to another numa node,
                 * we need to realloc the hctx. If allocation fails, fallback
@@ -2838,9 +2849,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
            set->map[HCTX_TYPE_POLL].nr_queues)
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
-       if (!(set->flags & BLK_MQ_F_SG_MERGE))
-               blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
        q->sg_reserved_size = INT_MAX;
 
        INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
@@ -2857,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        /*
         * Default to classic polling
         */
-       q->poll_nsec = -1;
+       q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
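
poll_nsec previously used a bare -1 to mean "classic polling"; the named constant BLK_MQ_POLL_CLASSIC now carries that meaning here and in blk_mq_poll_hybrid() further down. The decode below is a userspace illustration only; the 0 / greater-than-zero hybrid-polling semantics are taken from the io_poll_delay sysfs description and should be read as an assumption, not as code from this patch.

/* Illustrative model of the poll_nsec sentinel named by this hunk. */
#include <stdio.h>

#define BLK_MQ_POLL_CLASSIC (-1)	/* busy-poll, no hybrid sleep */

static const char *poll_mode(int poll_nsec)
{
	if (poll_nsec == BLK_MQ_POLL_CLASSIC)
		return "classic busy polling";
	if (poll_nsec == 0)
		return "hybrid polling, sleep time estimated per I/O";
	return "hybrid polling, fixed sleep time";
}

int main(void)
{
	int settings[] = { BLK_MQ_POLL_CLASSIC, 0, 4000 };

	for (unsigned int i = 0; i < sizeof(settings) / sizeof(settings[0]); i++)
		printf("%d -> %s\n", settings[i], poll_mode(settings[i]));
	return 0;
}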
@@ -2968,7 +2976,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
                return set->ops->map_queues(set);
        } else {
                BUG_ON(set->nr_maps > 1);
-               return blk_mq_map_queues(&set->map[0]);
+               return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        }
 }
 
@@ -3090,6 +3098,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        if (!set)
                return -EINVAL;
 
+       if (q->nr_requests == nr)
+               return 0;
+
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);
 
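
The added comparison makes blk_mq_update_nr_requests() a no-op when the requested queue depth already matches, so the freeze/quiesce cycle that follows is skipped entirely. A minimal model of that early-return guard is below; fake_queue and update_nr_requests() are invented for illustration.

/* Illustrative model: skip the expensive path when nothing changes. */
#include <stdio.h>

struct fake_queue {
	unsigned int nr_requests;
	unsigned int freezes;		/* counts how often we paid the full cost */
};

static int update_nr_requests(struct fake_queue *q, unsigned int nr)
{
	if (q->nr_requests == nr)
		return 0;		/* nothing to do, avoid freeze/quiesce */

	q->freezes++;			/* stands in for blk_mq_freeze_queue() etc. */
	q->nr_requests = nr;
	return 0;
}

int main(void)
{
	struct fake_queue q = { .nr_requests = 256 };

	update_nr_requests(&q, 256);	/* no-op, no freeze */
	update_nr_requests(&q, 128);	/* real change      */
	printf("freezes: %u, depth: %u\n", q.freezes, q.nr_requests);
	return 0;
}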
@@ -3235,7 +3246,7 @@ fallback:
                        pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
                                        nr_hw_queues, prev_nr_hw_queues);
                        set->nr_hw_queues = prev_nr_hw_queues;
-                       blk_mq_map_queues(&set->map[0]);
+                       blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
                        goto fallback;
                }
                blk_mq_map_swqueue(q);
@@ -3389,7 +3400,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 {
        struct request *rq;
 
-       if (q->poll_nsec == -1)
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                return false;
 
        if (!blk_qc_t_is_internal(cookie))