blk-mq: pass blk_mq_alloc_data to __blk_mq_get_tag(), allow the tag depth to grow, and drop the tag sysfs helpers
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5504eb7..54c8436 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -90,9 +90,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
        return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+                           struct sbitmap_queue *bt)
 {
-       if (!hctx_may_queue(hctx, bt))
+       if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+           !hctx_may_queue(data->hctx, bt))
                return -1;
        return __sbitmap_queue_get(bt);
 }
@@ -104,6 +106,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
+       bool drop_ctx;
        int tag;
 
        if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -118,7 +121,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                tag_offset = tags->nr_reserved_tags;
        }
 
-       tag = __blk_mq_get_tag(data->hctx, bt);
+       tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;
 
@@ -126,10 +129,11 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                return BLK_MQ_TAG_FAIL;
 
        ws = bt_wait_ptr(bt, data->hctx);
+       drop_ctx = data->ctx == NULL;
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-               tag = __blk_mq_get_tag(data->hctx, bt);
+               tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;
 
@@ -144,11 +148,12 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
-               tag = __blk_mq_get_tag(data->hctx, bt);
+               tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;
 
-               blk_mq_put_ctx(data->ctx);
+               if (data->ctx)
+                       blk_mq_put_ctx(data->ctx);
 
                io_schedule();
 
@@ -164,6 +169,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);
 
+       if (drop_ctx && data->ctx)
+               blk_mq_put_ctx(data->ctx);
+
        finish_wait(&ws->wait, &wait);
 
 found_tag:
@@ -327,11 +335,6 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
 }
 
-static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
-{
-       return bt->sb.depth - sbitmap_weight(&bt->sb);
-}
-
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
 {
@@ -387,19 +390,56 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
        kfree(tags);
 }
 
-int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
+int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
+                           struct blk_mq_tags **tagsptr, unsigned int tdepth,
+                           bool can_grow)
 {
-       tdepth -= tags->nr_reserved_tags;
-       if (tdepth > tags->nr_tags)
+       struct blk_mq_tags *tags = *tagsptr;
+
+       if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;
 
+       tdepth -= tags->nr_reserved_tags;
+
        /*
-        * Don't need (or can't) update reserved tags here, they remain
-        * static and should never need resizing.
+        * If we are allowed to grow beyond the original size, allocate
+        * a new set of tags before freeing the old one.
         */
-       sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+       if (tdepth > tags->nr_tags) {
+               struct blk_mq_tag_set *set = hctx->queue->tag_set;
+               struct blk_mq_tags *new;
+               bool ret;
+
+               if (!can_grow)
+                       return -EINVAL;
+
+               /*
+                * We need some sort of upper limit, set it high enough that
+                * no valid use cases should require more.
+                */
+               if (tdepth > 16 * BLKDEV_MAX_RQ)
+                       return -EINVAL;
+
+               new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+               if (!new)
+                       return -ENOMEM;
+               ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
+               if (ret) {
+                       blk_mq_free_rq_map(new);
+                       return -ENOMEM;
+               }
+
+               blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
+               blk_mq_free_rq_map(*tagsptr);
+               *tagsptr = new;
+       } else {
+               /*
+                * Don't need (or can't) update reserved tags here, they
+                * remain static and should never need resizing.
+                */
+               sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+       }
 
-       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
@@ -430,25 +470,3 @@ u32 blk_mq_unique_tag(struct request *rq)
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
 }
 EXPORT_SYMBOL(blk_mq_unique_tag);
-
-ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
-{
-       char *orig_page = page;
-       unsigned int free, res;
-
-       if (!tags)
-               return 0;
-
-       page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
-                       "bits_per_word=%u\n",
-                       tags->nr_tags, tags->nr_reserved_tags,
-                       1U << tags->bitmap_tags.sb.shift);
-
-       free = bt_unused_tags(&tags->bitmap_tags);
-       res = bt_unused_tags(&tags->breserved_tags);
-
-       page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
-       page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
-
-       return page - orig_page;
-}
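
Note on the allocation-path hunks: __blk_mq_get_tag() now receives the whole
blk_mq_alloc_data so it can inspect data->flags. Allocations marked
BLK_MQ_REQ_INTERNAL (scheduler-owned tags) bypass the hctx_may_queue()
fairness throttle, and blk_mq_get_tag() tolerates being entered without a
software-queue context: the drop_ctx bookkeeping records that the caller
passed ctx == NULL, and any ctx picked up while waiting is put again on exit.
A minimal caller sketch follows; the field names (q, flags, ctx, hctx) are
assumed from this era's struct blk_mq_alloc_data, so treat it as
illustrative rather than verbatim kernel code.

	/*
	 * Sketch only: allocate a scheduler-internal tag without holding
	 * a software-queue context.
	 */
	struct blk_mq_alloc_data data = {
		.q	= q,
		.flags	= BLK_MQ_REQ_INTERNAL,	/* skip the hctx_may_queue() throttle */
		.ctx	= NULL,			/* blk_mq_get_tag() takes/puts a ctx while waiting */
		.hctx	= hctx,
	};
	unsigned int tag = blk_mq_get_tag(&data);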
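
Note on the blk_mq_tag_update_depth() rework: a caller may now grow a tag map
past its original size, but only with can_grow set; the new request map is
allocated in full before the old one is freed, so on -ENOMEM the old map is
untouched. Shrinking, or staying within the original size, still just resizes
the bitmap. A sketch of a depth-update loop driving the new signature,
loosely modeled on blk_mq_update_nr_requests(); treating hctx->sched_tags as
the scheduler-owned map is an assumption here, not shown in this diff.

	/*
	 * Sketch: update the depth per hardware queue. Only scheduler
	 * tags may grow, since driver tags stay bounded by what the
	 * hardware advertised.
	 */
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		if (!hctx->sched_tags)
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
						      nr, false);
		else
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		if (ret)
			break;
	}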